Node.js dashboard works

2025-11-30 22:17:14 -08:00
parent c6d51f2a49
commit ebbc5ac5cf
22 changed files with 829 additions and 196 deletions

View File

@@ -1,13 +1,18 @@
This Ansible Role will set up a Matt-Cloud host as an SSD health monitoring platform.
When the GUI is not installed and autologin is requested, the terminal will auto login and show the information for the currently attached drives.
The historical data is stored locally in a SQLite database which is managed by a Python Flask API. It can be viewed on a web dashboard at port 80.
There is a live dashboard of currently attached drives on port 3000. It shows all attached drives along with their health data, using a websocket to live-update. The catch is I haven't learned how to mix PHP and Node.js sites yet, hence the multiple ports. It sounds to me like the next part of this project is another web server container that proxies both pages. That does not exist yet.
The database is handled by the Python service now. There may be issues with some of the status commands due to hardware differences. I have only found an issue when using the NanoPi devices and have included the corrected string. journalctl is your friend, get to know it.
When deployed with Matt-Cloud Jenkins there is a Chromium kiosk option that will allow the live dashboard to be deployed as a local kiosk.
Instead of using an API key with the python service, I am binding everything to the docker interface to keep it unreachable externally. It's easier and good enough, it's not like this is the nuclear codes or anything.
https://jenkins.matt-cloud.com/job/Projects/job/SSD%20Health%20Checker/
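
As a quick sanity check after a deploy, something like this (an illustrative Python sketch, run on the host itself; the ports come from the notes above, the Flask bind address from app.py, and requests is already in the service's pip list) confirms all three pieces are answering:

import requests

endpoints = {
    "history dashboard (PHP)": "http://localhost:80/",
    "live dashboard (Node.js)": "http://localhost:3000/",
    "drive API (Flask, docker interface only)": "http://172.17.0.1:5000/drives",
}
for name, url in endpoints.items():
    try:
        print(name, requests.get(url, timeout=3).status_code)
    except requests.RequestException as e:
        print(name, "unreachable:", e)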

View File

@@ -7,21 +7,27 @@ ssd_health_packages:
  - python3-packaging
  - python3-venv
  - sqlite3
  - jq
# autologin vars
autologin_password: "kingduy"
autologin: true
autologin_user: "ssd_health"
# the docker-compose file is about to get too complicated to be a template
# # php container vars
# container_name: "ssd_dashboard"
# container_http_port: "80"
# extra_volumes: ""
# api service vars
api_service_name: "drive_index"
api_service_folder: "{{ service_folder }}"
api_service_exe: "{{ service_folder }}/venv/bin/python -u {{ service_folder }}/app.py"
extra_options: |
  After=docker.service
  BindsTo=docker.service
  ReloadPropagatedFrom=docker.service
# kiosk service vars
kiosk_service_name: "drive_check"
@@ -43,5 +49,7 @@ sleep_time: "5"
install_kiosk: false
quick_refresh: false
service_only: false
armcpu_check: false
service_mode: false
...

View File

@@ -18,6 +18,7 @@ function fetchSSDData() {
    return json_decode($result, true); // Decode JSON as an associative array
}
?>
<!DOCTYPE html>
<html lang="en">
@@ -28,60 +29,51 @@ function fetchSSDData() {
</head>
<body>
    <div class="container">
        <button onclick="window.location.reload();" class="title-button"><h1>SSD Health Dashboard</h1></button><p>
        This lists every disk ever scanned by this device.<br>
        <?php
        echo "For a live dashboard, please visit <a href=http://{$_SERVER['HTTP_HOST']}:3000/>this link</a>.<p>";
        $ssdData = fetchSSDData(); // Fetch data from the API
        // Start the table
        echo '<table class="ssd-list" style="border-collapse:collapse;width:100%;">';
        // Table header (optional but handy)
        echo '<thead>
            <tr>
                <th>Disk ID</th>
                <th>Model String</th>
                <th>Serial Number</th>
                <th>TB Written</th>
                <th>Disk Capacity</th>
                <th>Disk Flavor</th>
                <th>SMART Result</th>
            </tr>
        </thead>';
        // Table body - one row per SSD
        echo '<tbody>';
        foreach ($ssdData as $ssd) {
            // Escape the values so the page stays safe
            $id     = htmlspecialchars($ssd['id']);
            $model  = htmlspecialchars($ssd['model']);
            $serial = htmlspecialchars($ssd['serial']);
            $tbw    = htmlspecialchars($ssd['TBW']);
            $cap    = htmlspecialchars($ssd['capacity']);
            $flavor = htmlspecialchars($ssd['flavor']);
            $smart  = htmlspecialchars($ssd['smart']);
            echo "<tr>
                <td>{$id}</td>
                <td>{$model}</td>
                <td>{$serial}</td>
                <td>{$tbw}</td>
                <td>{$cap}</td>
                <td>{$flavor}</td>
                <td>{$smart}</td>
            </tr>";
        }
        echo '</tbody></table>';
        ?>
    </div>
</body>
</html>
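
For reference, the table above assumes /drives returns a JSON array of records with exactly these keys (the field names come from the PHP; the values here are made up):

[
  {
    "id": 1,
    "model": "Samsung SSD 870 EVO 1TB",
    "serial": "S5Y1NX0T123456",
    "TBW": "12.3",
    "capacity": "1.00 TB",
    "flavor": "SATA SSD",
    "smart": "PASSED"
  }
]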

View File

@@ -24,8 +24,27 @@ body {
margin: 4px 2px;
cursor: pointer;
}
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
padding: 10px;
}
.container {
max-width: 950px;
margin: 0 auto;
padding: 20px;
background-color: #34495e; /* Darker background for container */
border-radius: 8px;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.3); /* Slightly darker shadow */
margin-top: 20px;
}
.container-small {
max-width: 550px;
margin: 0 auto;
padding: 20px;
background-color: #34495e; /* Darker background for container */

View File

@@ -1,14 +1,95 @@
from flask import Flask, jsonify, request
import sqlite3
import redis, json, time
import os
import subprocess
import re

app = Flask(__name__)
db_path = '/opt/ssd_health/drive_records.db'
debug_output = False
secure_api = True

####################################################
### Redis Functions
####################################################
r = redis.Redis(host='172.17.0.1', port=6379)

def update_disk_redis():
    active = get_active_drive_records(as_json=False)
    all_rec = get_all_drive_records(as_json=False)
    enriched = merge_active_with_details(active, all_rec)
    r.publish('attached_disks', json.dumps(enriched))
    if debug_output:
        print("=== Active drives sent to Redis ===")
        print(json.dumps(enriched, indent=2))

def update_stats_redis():
    # store the data in vm_list
    data = get_host_stats(as_json=False)
    # push data to redis
    # Publish to the Redis channel that the WS server is listening on
    r.publish('host_stats', json.dumps(data))
    if debug_output:
        print("=== Stats Redis Update ===")
        print(json.dumps(data, indent=2))
    return True

def merge_active_with_details(active, all_records):
    # Build a quick lookup dictionary keyed by serial
    record_by_serial = {rec['serial']: rec for rec in all_records}
    # Add the extra fields to each active drive
    for drive in active:
        rec = record_by_serial.get(drive['serial'])
        if rec:
            extra = {k: v for k, v in rec.items() if k not in ('id', 'serial')}
            drive.update(extra)
    return active
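
# Example (illustrative, values made up): given
#   active  = [{'id': 3, 'name': 'sda', 'serial': 'ABC', 'size': '1.8T'}]
#   records = [{'id': 1, 'serial': 'ABC', 'model': 'Foo', 'TBW': '12', 'smart': 'PASSED'}]
# the active entry gains 'model', 'TBW' and 'smart', while its own 'id' and
# 'serial' are left untouched.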
####################################################
### Host Stats Function
####################################################
def get_host_stats(as_json=False):
    total_memory_command = "free -h | grep 'Mem:' | awk '{print $2}'"
    total_memory = run_command(total_memory_command, zero_only=True)
    used_memory_command = "free -h | grep 'Mem:' | awk '{print $3}'"
    used_memory = run_command(used_memory_command, zero_only=True)
    free_memory_command = "free -h | grep 'Mem:' | awk '{print $4}'"
    free_memory = run_command(free_memory_command, zero_only=True)
    cpu_load_command = "uptime | grep -oP '(?<=age: ).*'"
    cpu_load = run_command(cpu_load_command, zero_only=True)
    # nano pi command
    #cpu_temp_command = "sensors | grep 'temp1:' | cut -d+ -f 2 | awk '{print $1}'"
    cpu_temp_command = "sensors | grep Package | cut -d+ -f 2 | awk '{print $1}'"
    cpu_temp = run_command(cpu_temp_command, zero_only=True)
    cpu_temp_stripped = re.sub(r'\u00b0C', '', cpu_temp)
    cpu_temp_fixed = f"{cpu_temp_stripped} C"
    ip_address_command = "ip -o -4 ad | grep -e eth -e tun | awk '{print $2\": \" $4}'"
    ip_addresses = run_command(ip_address_command, zero_only=True)
    time_now_command = "date +%r"
    time_now = run_command(time_now_command, zero_only=True)
    # Redis stores in this order, or at least the html renders it in this order
    stats = [{
        "memory_total": total_memory,
        "memory_used": used_memory,
        "memory_free": free_memory,
        "cpu_load": cpu_load,
        "cpu_temp": cpu_temp_fixed,
        "ip_addresses": ip_addresses,
        "time": time_now
    }]
    if debug_output:
        print("=== Current Host Stats ===")
        print(json.dumps(stats, indent=2))
    return jsonify(stats) if as_json else stats

####################################################
### db functions
####################################################
# init db function
def init_db():
    print("Checking Database...")
    db_check = "SELECT name FROM sqlite_master WHERE type='table' AND name='drive_records';"
    create_table_command = """
    CREATE TABLE drive_records (
@@ -21,25 +102,36 @@ def init_db():
        smart TEXT NOT NULL
    );
    """
    active_disks_command = """
    CREATE TABLE active_disks (
        id INTEGER PRIMARY KEY,
        name TEXT,
        serial TEXT,
        size TEXT
    );
    """
    # this code deletes the db file if 0 bytes
    if os.path.exists(db_path) and os.path.getsize(db_path) == 0:
        try:
            print("Database file exists and is 0 bytes, deleting.")
            os.remove(db_path)
        except Exception as e:
            print(f"error during file deletion - 405: {e}")
            return jsonify({'error during file deletion': e}), 405
    try:
        result = bool(query_db(db_check))
        # Check if any tables were found
        if result:
            print(result)
            print("drive_records exists - 205")
        else:
            print("drive_records does not exist, creating")
            try:
                result_init = query_db(create_table_command)
                result_active = query_db(active_disks_command)
                print(result_init)
                print(result_active)
                print("Database created - 201")
            except sqlite3.Error as e:
                print(f"error during table initialization: {e}")
@@ -54,16 +146,56 @@ def query_db(sql_query):
    try:
        with sqlite3.connect(db_path) as conn:
            cursor = conn.cursor()
            if debug_output:
                print("Executing SQL query:", sql_query)
            cursor.execute(sql_query)
            rows = cursor.fetchall()
            if debug_output:
                print("Query Result:", rows)
            return rows
    except sqlite3.Error as e:
        print("An error occurred:", e)
        return []

# is this redundant? oh my, yes
# does it save me time? also, big yes
# note how the one above doesn't have the query params
# i don't want to re-write the subroutine i took from the VM party
def query_database(query_string, query_params=None):
    if debug_output:
        print(query_string, query_params)
    # Connect to the SQLite database (or create it if it doesn't exist)
    conn = sqlite3.connect(db_path)
    cursor = conn.cursor()
    if query_params is not None:
        cursor.execute(query_string, query_params)
    else:
        cursor.execute(query_string)
    result = cursor.fetchall()
    if debug_output:
        print(result)
    # Commit the transaction and close the connection
    conn.commit()
    conn.close()
    return result

####################################################
### Other Helper Functions
####################################################
# subroutine to run a command, return stdout as array unless zero_only then return [0]
def run_command(cmd, zero_only=False):
    # Run the command and capture the output
    result = subprocess.run(cmd, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # Decode the byte output to a string
    output = result.stdout.decode('utf-8')
    # Split the output into lines and store it in an array
    output_lines = [line for line in output.split('\n') if line]
    # Return result
    return output_lines[0] if zero_only else output_lines
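
# Example (illustrative): run_command("echo hi") returns ['hi'], and with
# zero_only=True it returns the bare string 'hi'; empty lines are dropped.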
# Function to return all drive records in database
def get_all_drive_records(as_json=True):
    get_all_drives = "SELECT * FROM drive_records"
    rows = query_db(get_all_drives)
    drives = []
@@ -78,24 +210,45 @@ def get_all_drive_records():
            'smart': row[6]
        }
        drives.append(drive)
    return jsonify(drives) if as_json else drives

# Function to return all active drives in database
def get_active_drive_records(as_json=True):
    get_active_drives = "SELECT * FROM active_disks"
    rows = query_db(get_active_drives)
    drives = []
    for row in rows:
        drive = {
            'id': row[0],
            'name': row[1],
            'serial': row[2],
            'size': row[3]
        }
        drives.append(drive)
    return jsonify(drives) if as_json else drives

# Function to check if a serial number exists in the database
def check_serial_exists(serial):
    serial_check = f"SELECT * FROM drive_records WHERE serial='{serial}'"
    if debug_output:
        print(serial_check)
    return bool(query_db(serial_check))
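
# Note: the serial above is interpolated straight into the SQL string; the
# parameterized query_database() helper defined earlier would also take it as
# a bound parameter, e.g. (illustrative):
#   query_database("SELECT * FROM drive_records WHERE serial = ?", (serial,))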
####################################################
### Flask Routes
####################################################
# Route to check if a serial number exists in the database
@app.route('/check', methods=['GET'])
def check():
    serial_lookup = request.args.get('serial_lookup')
    if debug_output:
        print(f"Serial to check: {serial_lookup}")
    if not serial_lookup:
        return jsonify({'error': 'No serial number provided'}), 400
    exists = check_serial_exists(serial_lookup)
    return jsonify({'serial_number_exists': exists, 'serial_lookup': serial_lookup})

# Route to get all drive records in JSON format
@app.route('/drives', methods=['GET'])
@@ -115,6 +268,7 @@ def add_drive():
    if None in [serial, model, flavor, capacity, TBW, smart]:
        return jsonify({'error': 'Missing required query parameter(s)'}), 400
    add_drive_query = f"INSERT INTO drive_records (serial, model, flavor, capacity, TBW, smart) VALUES ('{serial}', '{model}', '{flavor}', '{capacity}', '{TBW}', '{smart}'); "
    if debug_output:
        print(add_drive_query)
    return jsonify(query_db(add_drive_query))
@@ -128,18 +282,95 @@ def update_drive():
    if None in [serial, TBW, smart]:
        return jsonify({'error': 'Missing required query parameter(s)'}), 400
    update_drive_query = f"UPDATE drive_records SET TBW = '{TBW}', smart = '{smart}' WHERE serial = '{serial}';"
    if debug_output:
        print(update_drive_query)
    return jsonify(query_db(update_drive_query))

# Route to return active drives
@app.route('/list_active_drives', methods=['GET'])
def list_active_drives():
    return get_active_drive_records()

# list disks as sda,serial
def list_disk_and_serial():
    # Init blank devices array
    devices = []
    # get the devices
    cmd = "lsblk -o NAME,SERIAL,SIZE,TYPE | grep sd | grep disk | awk '{print $1 \",\" $2 \",\" $3}'"
    # try to run the command, should not fail
    try:
        devices = run_command(cmd)
    except subprocess.CalledProcessError as e:
        print(f"An error occurred: {e.stderr.decode('utf-8')}")
    # return the devices as an array
    return sorted([item for item in devices if item])

# Route to refresh active drives
@app.route('/refresh_active_drives', methods=['GET'])
def refresh_active_drives():
    # List of items to be inserted; each item is a tuple (name, serial, size)
    current_items = list_disk_and_serial()
    # Loop through the list and insert items, checking for duplicates based on 'serial'
    for item in current_items:
        item = item.split(',')
        # Check if the serial already exists in the database
        existing_item = query_database('SELECT * FROM active_disks WHERE name = ?', (item[0],))
        if not existing_item:
            # If no duplicate is found, insert the new item
            if debug_output:
                print(f"Disk /dev/{item[0]} inserted, updating database")
            verified_serial = run_command(f"hdparm -I /dev/{item[0]} | grep 'Serial\ Number' | cut -d: -f2 | awk '{{print $1}}' ", zero_only=True)
            if debug_output:
                print(f"Verified serial number through hdparm: {verified_serial}")
            item[1] = verified_serial
            query_database('INSERT INTO active_disks (name, serial, size) VALUES (?, ?, ?)', item)
            update_disk_redis()
    # Remove items from the database that are not in the current list of items
    # first grab all the disks in the database
    for row in query_database('SELECT name, serial FROM active_disks'):
        drive_object = ""
        drive_serial = ""
        # the drive is missing until proven present, let's see if it exists
        not_found = True
        # load the currently attached drives in another array
        for item in current_items:
            item = item.split(',')
            # this is where the drive is found, set this to false
            if row[0] == item[0]:
                drive_object = item[0]
                drive_serial = item[1]
                not_found = False
        # if the drive was not found in the above loop, it's missing, remove it and loop to the next record
        if not_found:
            target_name = row[0].split(',')
            if debug_output:
                print(f"Deleting disk /dev/{drive_object} - serial {drive_serial}")
            query_database('DELETE FROM active_disks WHERE name = ?', target_name)
            update_disk_redis()
    update_disk_redis()
    update_stats_redis()
    return jsonify({"function": "update_disk_database"})

# host stats
@app.route('/host_stats', methods=['GET'])
def host_stats():
    update_stats_redis()
    return jsonify(get_host_stats())

# test route
@app.route('/test', methods=['GET'])
def test():
    db_check = "SELECT name FROM sqlite_master WHERE type='table';"
    return query_db(db_check)

if __name__ == '__main__':
    result = init_db()
    print(result)
    if secure_api:
        app.run(debug=True, host='172.17.0.1', port=5000)
    else:
        app.run(debug=True, host='0.0.0.0', port=5000)
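
A minimal client sketch for the routes above (it has to run on the host, since secure_api binds Flask to the docker interface at 172.17.0.1; the serial value is made up):

import requests

BASE = "http://172.17.0.1:5000"

# does this serial already have a history record?
print(requests.get(f"{BASE}/check", params={"serial_lookup": "ABC123"}).json())

# full history and currently attached drives
print(requests.get(f"{BASE}/drives").json())
print(requests.get(f"{BASE}/list_active_drives").json())

# re-scan attached disks; this also publishes to Redis for the live dashboard
print(requests.get(f"{BASE}/refresh_active_drives").json())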

files/ws_node/Dockerfile Normal file
View File

@@ -0,0 +1,18 @@
# Use an official Node runtime
FROM node:20-alpine
# Create app directory
WORKDIR /usr/src/app
# Install dependencies
COPY package.json .
RUN npm install --only=production
# Copy app source
COPY . .
# Expose the port that the app listens on
EXPOSE 3000
# Start the server
CMD ["node", "server.js"]

View File

@@ -0,0 +1,13 @@
{
  "name": "redis-table-demo",
  "version": "1.0.0",
  "main": "server.js",
  "scripts": {
    "start": "node server.js"
  },
  "dependencies": {
    "express": "^4.18.2",
    "socket.io": "^4.7.2",
    "redis": "^4.6.7"
  }
}

View File

@@ -0,0 +1,90 @@
<!doctype html>
<html lang="en">
<head>
    <meta charset="utf-8">
    <title>Attached Disks - Live Table</title>
    <link rel="stylesheet" href="styles.css">
</head>
<body>
    <div class="container">
        <h2>Attached Disks</h2>
        <div id="disk_table" class="column">Connecting…</div>
    </div>
    <div class="container">
        <h2>System Stats</h2>
        <div id="host_stats" class="column">Connecting…</div>
    </div>

    <!-- Socket.IO client library -->
    <script src="socket.io/socket.io.js"></script>
    <script>
        const socket = io();

        // listen for redis updates, render and error handle
        socket.on('attached_disks', renderDiskTable);
        socket.on('host_stats', renderStatsTable);
        socket.on('connect_error', err => {
            safeSetText('disk_table', `Could not connect to server - ${err.message}`);
            safeSetText('host_stats', `Could not connect to server - ${err.message}`);
        });
        socket.on('reconnect', attempt => {
            safeSetText('disk_table', `Re-connected (attempt ${attempt})`);
            safeSetText('host_stats', `Re-connected (attempt ${attempt})`);
        });

        function safeSetText(id, txt) {
            const el = document.getElementById(id);
            if (el) el.textContent = txt;
        }

        // table rendering functions
        function renderDiskTable(data) { renderGenericTable('disk_table', data, 'No Disks found'); }
        function renderStatsTable(data) { renderGenericTable('host_stats', data, 'No Stats available'); }

        function renderGenericTable(containerId, data, emptyMsg) {
            const container = document.getElementById(containerId);
            if (!Array.isArray(data) || !data.length) {
                container.textContent = emptyMsg;
                return;
            }
            const table = renderTable(data);
            container.innerHTML = '';
            container.appendChild(table);
        }

        function renderTable(data) {
            // Columns are inferred from the first object (order matters)
            const cols = Object.keys(data[0]);
            // Create table
            const table = document.createElement('table');
            // Header
            const thead = table.createTHead();
            const headerRow = thead.insertRow();
            cols.forEach(col => {
                const th = document.createElement('th');
                th.textContent = col.charAt(0).toUpperCase() + col.slice(1);
                headerRow.appendChild(th);
            });
            // Body
            const tbody = table.createTBody();
            data.forEach(item => {
                const tr = tbody.insertRow();
                cols.forEach(col => {
                    const td = tr.insertCell();
                    td.textContent = item[col];
                });
            });
            return table;
        }
    </script>
</body>
</html>

View File

@@ -0,0 +1,111 @@
/* styles.css */
body {
font-family: Arial, sans-serif;
margin: 0;
padding: 0;
background-color: #2c3e50; /* Dark background color */
color: #bdc3c7; /* Dimmer text color */
}
.hidden-info {
display: none;
}
.title-button {
background-color: #34495e;
border: none;
color: white;
padding: 15px 32px;
text-align: center;
text-decoration: none;
display: inline-block;
font-size: 16px;
margin: 4px 2px;
cursor: pointer;
}
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
padding: 10px;
}
.container {
max-width: 950px;
margin: 0 auto;
padding: 20px;
background-color: #34495e; /* Darker background for container */
border-radius: 8px;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.3); /* Slightly darker shadow */
margin-top: 20px;
}
.container-small {
max-width: 550px;
margin: 0 auto;
padding: 20px;
background-color: #34495e; /* Darker background for container */
border-radius: 8px;
box-shadow: 0 2px 4px rgba(0, 0, 0, 0.3); /* Slightly darker shadow */
margin-top: 20px;
}
h1, h2, h3, h4 {
color: #bdc3c7; /* Dimmer text color */
}
ul {
list-style-type: none;
padding: 0;
}
li {
margin-bottom: 10px;
color: #bdc3c7; /* Dimmer text color */
}
.group-columns {
display: flex;
}
.group-rows {
display: flex;
flex-wrap: wrap;
justify-content: flex-start; /* Left justification */
margin-top: 10px;
}
.group-column {
flex: 0 0 calc(33% - 10px); /* Adjust width of each column */
}
.column {
flex: 1;
padding: 0 10px; /* Adjust spacing between columns */
}
.subcolumn {
margin-left: 10px;
}
.grid {
display: flex;
flex-wrap: wrap;
justify-content: space-between;
margin-top: 5px;
}
.meter {
width: calc(90% - 5px);
max-width: calc(45% - 5px);
margin-bottom: 5px;
border: 1px solid #7f8c8d; /* Light border color */
border-radius: 5px;
padding: 5px;
text-align: center;
background-color: #2c3e50; /* Dark background for meter */
}

files/ws_node/server.js Normal file
View File

@@ -0,0 +1,54 @@
const http = require('http');
const express = require('express');
const { createClient } = require('redis');
const { Server } = require('socket.io');

const app = express();
const server = http.createServer(app);
const io = new Server(server);

// Serve static files (index.html)
app.use(express.static('public'));

// ---------- Redis subscriber ----------
const redisClient = createClient({
    url: 'redis://172.17.0.1:6379'
});
redisClient.on('error', err => console.error('Redis error', err));

(async () => {
    await redisClient.connect();
    // Subscribe to the channel that sends disk lists
    const sub = redisClient.duplicate(); // duplicate to keep separate pub/sub
    await sub.connect();
    await sub.subscribe(
        ['attached_disks', 'host_stats'],
        (message, channel) => { // <-- single handler
            let payload;
            try {
                payload = JSON.parse(message); // message is a JSON string
            } catch (e) {
                console.error(`Failed to parse ${channel}`, e);
                return;
            }
            io.emit(channel, payload);
        }
    );
    sub.on('error', err => console.error('Subscriber error', err));
})();

// ---------- Socket.io ----------
io.on('connection', socket => {
    console.log('client connected:', socket.id);
    // Optional: send the current state on connect if you keep it cached
});

// ---------- Start ----------
const PORT = process.env.PORT || 3000;
server.listen(PORT, () => {
    console.log(`Server listening on http://localhost:${PORT}`);
});
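
To exercise the whole pipeline without touching a real disk, a fake record can be published to the same channel the subscriber above listens on (a sketch; the serial is invented, and the redis package is already in the service's pip list):

import json
import redis

r = redis.Redis(host="172.17.0.1", port=6379)
# every browser connected to port 3000 should redraw its table with this row
r.publish("attached_disks", json.dumps([
    {"id": 1, "name": "sda", "serial": "FAKE123", "size": "1.8T"},
]))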

View File

@@ -34,15 +34,15 @@
ExecStart=
ExecStart=-/sbin/agetty --autologin {{ autologin_user }} --noclear %I 38400 linux

- name: autologin - quick refresh skippable tasks
  when: not quick_refresh | bool
  block:
    # create hello_there ASCII art
    - name: autologin - generate hello_there.txt for the lulz
      include_tasks: hello_there.yaml

    - name: User setup - update permissions on smartctl 1
      shell: "chmod 755 /usr/sbin/smartctl"

View File

@@ -24,17 +24,36 @@
    owner: root
    group: root

- name: websocket tasks
  when: not quick_refresh | bool
  block:
    - name: websocket - copy websocket server files
      copy:
        src: "ws_node"
        dest: "{{ service_folder }}"
        mode: 0755
        owner: root
        group: root

    - name: websocket - build docker container
      community.docker.docker_image_build:
        name: ws_node
        rebuild: always
        path: "{{ service_folder }}/ws_node"
        dockerfile: Dockerfile

- name: docker containers
  when: not quick_refresh | bool
  block:
    - name: service_control_website - template docker-compose.yaml
      template:
        src: docker-compose.yaml
        dest: "{{ service_control_web_folder }}/docker-compose.yaml"
        mode: 0644

    - name: "service_control_website - Start containers"
      shell: "docker-compose -f {{ service_control_web_folder }}/docker-compose.yaml up -d"
      register: docker_output

    - debug: |

View File

@@ -11,6 +11,12 @@
    group: "{{ autologin_user }}"
    mode: '0755'

- name: "Drive Index - {{ api_service_name }} - stop service if running"
  ignore_errors: yes
  systemd:
    name: "{{ api_service_name }}.service"
    state: stopped

- name: Drive Index - copy script files
  copy:
    src: scripts/
@@ -40,6 +46,9 @@
      pytz
      requests
      opencv-python
      flask-socketio
      eventlet
      redis
    owner: "{{ autologin_user }}"
    group: "{{ autologin_user }}"
    mode: 0644
@@ -54,12 +63,6 @@
- name: Drive Index - api service handler
  block:
    - name: "Drive Index - template {{ api_service_name }}.service"
      vars:
        service_name: "{{ api_service_name }}"
@@ -70,12 +73,9 @@
        dest: "/etc/systemd/system/{{ api_service_name }}.service"
        mode: 0644

    - name: "Drive Index - {{ api_service_name }} - enable and start service, daemon reload"
      systemd:
        daemon_reload: yes
        name: "{{ api_service_name }}.service"
        state: started
        enabled: yes

View File

@@ -1,30 +0,0 @@
---
- name: Initialiaze - Install Packages
  when: not quick_refresh | bool
  apt:
    name:
      - "{{ ssd_health_packages_item }}"
    state: present
  loop: "{{ ssd_health_packages }}"
  loop_control:
    loop_var: ssd_health_packages_item

- name: "Initialiaze - create {{ autologin_user }} user"
  when: not quick_refresh | bool
  user:
    name: "{{ autologin_user }}"
    groups: disk
    password: "{{ autologin_password | password_hash('sha512') }}"
    shell: /bin/bash

- name: "Initialiaze - ensure {{ autologin_user }} home folder exists"
  when: not quick_refresh | bool
  file:
    path: "/home/{{ autologin_user }}"
    state: directory
    owner: "{{ autologin_user }}"
    group: "{{ autologin_user }}"
    mode: '0700'
...

tasks/initialize.yaml Normal file
View File

@@ -0,0 +1,65 @@
---
- name: Initialize - set service_only when run through cosmos-server
  when: refresh_special | bool
  set_fact:
    service_only: true

- name: Initialize - Install Packages
  when: not quick_refresh | bool
  apt:
    name:
      - "{{ ssd_health_packages_item }}"
    state: present
  loop: "{{ ssd_health_packages }}"
  loop_control:
    loop_var: ssd_health_packages_item

- name: "Initialize - create {{ autologin_user }} user"
  when: not quick_refresh | bool
  user:
    name: "{{ autologin_user }}"
    groups: disk
    password: "{{ autologin_password | password_hash('sha512') }}"
    shell: /bin/bash

- name: "Initialize - ensure {{ autologin_user }} home folder exists"
  when: not quick_refresh | bool
  file:
    path: "/home/{{ autologin_user }}"
    state: directory
    owner: "{{ autologin_user }}"
    group: "{{ autologin_user }}"
    mode: '0700'

- name: Initialize - set vars when needed
  when: quick_refresh | bool or refresh_special | bool
  block:
    - name: Check CPU Arch
      shell: "dpkg --print-architecture"
      register: cpu_architecture_output

    - name: Set bool armcpu_check
      when: '"arm" in cpu_architecture_output.stdout'
      set_fact:
        armcpu_check: true

    - debug: |
        msg="CPU architecture: {{ cpu_architecture_output.stdout_lines[0] }}"
        msg="armcpu_check: {{ armcpu_check }}"

- name: Initialize - docker-buildx handler
  when: armcpu_check | bool
  block:
    - name: docker-buildx handler - download deb file
      get_url:
        url: "http://ftp.us.debian.org/debian/pool/main/d/docker-buildx/docker-buildx_0.13.1+ds1-3_arm64.deb"
        dest: "/tmp/docker-buildx_0.13.1+ds1-3_arm64.deb"
        mode: '0644'

    - name: docker-buildx handler - install package
      apt:
        deb: "/tmp/docker-buildx_0.13.1+ds1-3_arm64.deb"
...

View File

@@ -2,7 +2,7 @@
# create and configure user account
- name: Drive health - initialize early steps
  include_tasks: initialize.yaml

# create drive index service
- name: Drive health - drive_index service
@@ -14,11 +14,11 @@
# set up autologin
- name: Drive health - configure autologin
  when: not install_kiosk | bool and not service_only | bool
  include_tasks: autologin.yaml

# configure service-mode
- name: Drive health - configure service-mode, disable autologin
  when: install_kiosk | bool or service_only | bool
  include_tasks: service_mode.yaml

View File

@@ -1,28 +1,28 @@
---
# reverse autologin things
- name: no autologin - edit logind to set vterms to six
  lineinfile:
    dest: /etc/systemd/logind.conf
    regexp: '^#NAutoVTs='
    line: 'NAutoVTs=6'
    backrefs: yes

- name: no autologin - "delete autologin files"
  ignore_errors: yes
  shell: |
    rm /home/{{ autologin_user }}/.bash_aliases
    rm /etc/systemd/system/getty@tty1.service.d/override.conf
    rm /etc/sudoers.d/smartctl

#- name: no autologin - remove autologin override to getty tty1 service
#  shell: "rm /etc/systemd/system/getty@tty1.service.d/override.conf"
#
#- name: no autologin - "User setup - allow {{ autologin_user }} to smartctl"
#  shell: "rm /etc/sudoers.d/smartctl"

- name: no autologin - Restart getty@tty1 service
  systemd:
    name: getty@tty1.service
    state: restarted

View File

@@ -4,7 +4,15 @@
  set_fact:
    sleep_time: "1"

- name: "Service Mode - {{ kiosk_service_name }}.service - stop service if running"
  ignore_errors: yes
  systemd:
    name: "{{ kiosk_service_name }}.service"
    state: stopped

- name: "Service Mode - template drive_check.sh again"
  vars:
    service_mode: true
  template:
    src: drive_check.sh
    dest: "{{ service_folder }}/drive_check.sh"
@@ -12,12 +20,6 @@
    owner: "{{ autologin_user }}"
    group: "{{ autologin_user }}"

- name: "Service Mode - template {{ kiosk_service_name }}.service"
  vars:
    service_name: "{{ kiosk_service_name }}"
@@ -28,12 +30,9 @@
    dest: "/etc/systemd/system/{{ kiosk_service_name }}.service"
    mode: 0644

- name: "Service Mode - {{ kiosk_service_name }} - enable and start service api and daemon reload"
  systemd:
    daemon_reload: yes
    name: "{{ kiosk_service_name }}.service"
    state: started
    enabled: yes

View File

@@ -1,12 +0,0 @@
services:
  {{ container_name }}:
    container_name: {{ container_name }}
    image: php:8.0-apache
    ports:
      - {{ container_http_port }}:80
    volumes:
      - ./html:/var/www/html/
      {{ extra_volumes }}
    network_mode: bridge
    restart: always

View File

@@ -0,0 +1,28 @@
services:
  web_dash:
    container_name: web_dash
    image: php:8.0-apache
    ports:
      - 80:80
    volumes:
      - ./html:/var/www/html/
    network_mode: bridge
    restart: always

  ws_node:
    container_name: ws_node
    image: ws_node
    ports:
      - 3000:3000
    network_mode: bridge
    restart: always
    depends_on:
      - redis

  redis:
    image: redis:7-alpine
    network_mode: bridge
    restart: always
    ports:
      - 172.17.0.1:6379:6379
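
Since Redis is published only on the docker interface, a one-liner from the host confirms the port mapping works (a sketch; expect True on the host and a connection error from any external address):

import redis

print(redis.Redis(host="172.17.0.1", port=6379).ping())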

View File

@@ -1,14 +1,28 @@
#!/bin/bash
# this is a big loop
# it shows SSD health data
SERVICE_MODE={{ service_mode }}
exec 2> /dev/null
while true; do
    clear
    # Show IP Info
    if ! $SERVICE_MODE; then
        echo ===Visit the IP in your browser to view history======
    fi
    ip -o -4 ad | grep -v -e docker -e 127.0.0.1 -e br- | awk '{print $2 "," $4}' | column -s , -t
    # get all disks
    DISK_LIST=$(ls -lo /dev/sd? | awk '{print $9}')
    # process each disk
    IFS=$'\n' read -rd '' -a DISK_ARRAY <<< "$DISK_LIST"
    for DISK in "${DISK_ARRAY[@]}"; do
        # update active drives more frequently
        if ! $SERVICE_MODE; then
            echo "Issuing request to update drive database"
        fi
        curl -s "http://172.17.0.1:5000/refresh_active_drives"
        # store smartctl data once
        SMART_DATA=$(smartctl -x $DISK)
        NVME_CHECK=$(echo "$SMART_DATA" | grep "NVMe Version")
@@ -28,6 +42,7 @@ while true; do
        SMART=$(echo "$SMART_DATA" | grep "self-assessment test result" | cut -d ":" -f 2 | xargs)
        FLAVOR="SATA SSD"
        DRIVE_EXISTS=$(curl -s "http://172.17.0.1:5000/check?serial_lookup=$SERIAL" | jq .serial_number_exists)
        if ! $SERVICE_MODE; then
            # Display drive data
            echo "============ $DISK Disk Info - SSD: ============"
            #echo "DRIVE_EXISTS: $DRIVE_EXISTS"
@@ -42,6 +57,7 @@ while true; do
                echo "$DISK has $PLR% lifetime remaining"
            fi
            echo
        fi
        if [ -z "$TBW" ] ; then
            TBW="unknown"
        fi
@@ -66,6 +82,7 @@ while true; do
        SMART=$(echo "$SMART_DATA" | grep "self-assessment test result" | cut -d ":" -f 2 | xargs)
        FLAVOR="NVMe"
        DRIVE_EXISTS=$(curl -s "http://172.17.0.1:5000/check?serial_lookup=$SERIAL" | jq .serial_number_exists)
        if ! $SERVICE_MODE; then
            # Display Disk Info
            echo "============ $DISK Disk Info - NVMe: ============"
            #echo "DRIVE_EXISTS: $DRIVE_EXISTS"
@@ -79,6 +96,7 @@ while true; do
            if [ -z "$TBW" ] ; then
                TBW="unknown"
            fi
        fi
        # database handler
        if [ "$DRIVE_EXISTS" == "false" ] ; then
            H_MODEL=$(echo $MODEL | sed 's/ /%20/g')
@@ -90,10 +108,14 @@ while true; do
                fi
            fi
        else
            if ! $SERVICE_MODE; then
                echo "Skipping $DISK, not SATA SSD or NVMe"
            fi
        fi
    done
    # wait {{ sleep_time }} seconds, loop again
    if ! $SERVICE_MODE; then
        echo "Sleeping {{ sleep_time }} seconds"
    fi
    sleep {{ sleep_time }}
done

View File

@@ -2,6 +2,7 @@
[Unit]
Description={{ service_name }}
After=network.target
{{ extra_options }}

[Service]
User=root