From ebbc5ac5cf49f5c76e936c2ed1118a8b1e5fb023 Mon Sep 17 00:00:00 2001 From: Matt Date: Sun, 30 Nov 2025 22:17:14 -0800 Subject: [PATCH] node.js dashboard works --- README.md | 15 +- defaults/main.yaml | 16 +- files/dashboard/index.php | 94 +++++------ files/dashboard/styles.css | 21 ++- files/scripts/app.py | 263 +++++++++++++++++++++++++++-- files/ws_node/Dockerfile | 18 ++ files/ws_node/package.json | 13 ++ files/ws_node/public/index.html | 90 ++++++++++ files/ws_node/public/styles.css | 111 ++++++++++++ files/ws_node/server.js | 54 ++++++ tasks/autologin.yaml | 12 +- tasks/dashboard.yaml | 25 ++- tasks/drive_index.yaml | 20 +-- tasks/initialiaze.yaml | 30 ---- tasks/initialize.yaml | 65 +++++++ tasks/main.yaml | 8 +- tasks/no_autologin.yaml | 30 ++-- tasks/service_mode.yaml | 23 ++- templates/docker-compose-php.yaml | 12 -- templates/docker-compose.yaml | 28 +++ templates/drive_check.sh | 76 ++++++--- templates/service_template.service | 1 + 22 files changed, 829 insertions(+), 196 deletions(-) create mode 100644 files/ws_node/Dockerfile create mode 100644 files/ws_node/package.json create mode 100644 files/ws_node/public/index.html create mode 100644 files/ws_node/public/styles.css create mode 100644 files/ws_node/server.js delete mode 100644 tasks/initialiaze.yaml create mode 100644 tasks/initialize.yaml delete mode 100644 templates/docker-compose-php.yaml create mode 100644 templates/docker-compose.yaml diff --git a/README.md b/README.md index 5ba4f8b..67ba783 100644 --- a/README.md +++ b/README.md @@ -1,13 +1,18 @@ This Ansible Role will set up a Matt-Cloud host as a SSD health monitoring platform. -When the GUI is not installed, the terminal will auto login and show the information for the currently attached drives +When the GUI is not installed and it is requested, the terminal will auto login and show the information for the currently attached drives. -The data is also stored locally and can be viewed on a web dashboard at port 8088. 
+The historical data is stored locally in a SQLite database which is managed by a Python Flask API. It can be viewed on a web dashboard at port 80. -When deployed with Matt-Cloud Jenkins there is a Chromium kiosk option that will allow this web dashboard to be deployed as a local kiosk. +There is a live dashboard of currently attached drives on port 3000. This will show all attached drives along with the health data for these drives. This uses a websocket to live-update the data. The catch is I haven't learned how to mix PHP and Node.js sites yet, thus the multiple ports. It sounds to me like the next part of this project is another web server container that is a proxy with both pages. That does not exist yet. -The daabase is handled by the python service now. +There may be issues with some of the status commands due to hardware differences. I have only found an issue when using the NanoPi devices and have included the corrected string. journalctl is your friend, get to know it. + +When deployed with Matt-Cloud Jenkins there is a Chromium kiosk option that will allow the live dashboard to be deployed as a local kiosk. Instead of using an API key with the python service, I am binding everything to the docker interface to keep it unreachable externally. It's easier and good enough, it's not like this is the nuclear codes or anything. 
-https://jenkins.matt-cloud.com/job/Projects/job/SSD%20Health%20Checker/ \ No newline at end of file +https://jenkins.matt-cloud.com/job/Projects/job/SSD%20Health%20Checker/ + + + diff --git a/defaults/main.yaml b/defaults/main.yaml index 561bb75..3294ad9 100644 --- a/defaults/main.yaml +++ b/defaults/main.yaml @@ -7,21 +7,27 @@ ssd_health_packages: - python3-packaging - python3-venv - sqlite3 + - jq # autologin vars autologin_password: "kingduy" autologin: true autologin_user: "ssd_health" -# php container vars -container_name: "ssd_dashboard" -container_http_port: "80" -extra_volumes: "" +# the docker-compose file is about to get too complicated to be a template +# # php container vars +# container_name: "ssd_dashboard" +# container_http_port: "80" +# extra_volumes: "" # api service vars api_service_name: "drive_index" api_service_folder: "{{ service_folder }}" api_service_exe: "{{ service_folder }}/venv/bin/python -u {{ service_folder }}/app.py" +extra_options: | + After=docker.service + BindsTo=docker.service + ReloadPropagatedFrom=docker.service # kiosk service vars kiosk_service_name: "drive_check" @@ -43,5 +49,7 @@ sleep_time: "5" install_kiosk: false quick_refresh: false service_only: false +armcpu_check: false +service_mode: false ... \ No newline at end of file diff --git a/files/dashboard/index.php b/files/dashboard/index.php index 6744a76..1bafe84 100644 --- a/files/dashboard/index.php +++ b/files/dashboard/index.php @@ -18,6 +18,7 @@ function fetchSSDData() { return json_decode($result, true); // Decode JSON as an associative array } + ?> @@ -28,60 +29,51 @@ function fetchSSDData() {
- +

+ This lists every disk ever scanned by this device.
+ this link.

"; $ssdData = fetchSSDData(); // Fetch data from the API - echo '

'; - foreach ($ssdData as $ssd): - if ($i % 2 == 0) { - echo '
'; - } - echo << - - - - - - - - -
- Disk ID: - - {$ssd['id']} -
- Model String: - - {$ssd['model']} -
- Serial Number: - - {$ssd['serial']} -
- TB Written: - - {$ssd['TBW']} -
- Disk Capacity: - - {$ssd['capacity']} -
- Disk Flavor: - - {$ssd['flavor']} -
- SMART Result: - - {$ssd['smart']} -
-
- EOL; - $i++; - endforeach; - echo '
'; + // Start the table + echo ''; + // Table header (optional but handy) + echo ' + + + + + + + + + + '; + // Table body - one row per SSD + echo ''; + foreach ($ssdData as $ssd) { + // Escape the values so the page stays safe + $id = htmlspecialchars($ssd['id']); + $model = htmlspecialchars($ssd['model']); + $serial = htmlspecialchars($ssd['serial']); + $tbw = htmlspecialchars($ssd['TBW']); + $cap = htmlspecialchars($ssd['capacity']); + $flavor = htmlspecialchars($ssd['flavor']); + $smart = htmlspecialchars($ssd['smart']); + + echo " + + + + + + + + "; + } + echo '
Disk IDModel StringSerial NumberTB WrittenDisk CapacityDisk FlavorSMART Result
{$id}{$model}{$serial}{$tbw}{$cap}{$flavor}{$smart}
'; ?> + \ No newline at end of file diff --git a/files/dashboard/styles.css b/files/dashboard/styles.css index 190d5f3..f362362 100644 --- a/files/dashboard/styles.css +++ b/files/dashboard/styles.css @@ -24,8 +24,27 @@ body { margin: 4px 2px; cursor: pointer; } + + +table, th, td { + border: 1px solid black; + border-collapse: collapse; +} +th, td { + padding: 10px; +} + .container { - max-width: 800px; + max-width: 950px; + margin: 0 auto; + padding: 20px; + background-color: #34495e; /* Darker background for container */ + border-radius: 8px; + box-shadow: 0 2px 4px rgba(0, 0, 0, 0.3); /* Slightly darker shadow */ + margin-top: 20px; +} +.container-small { + max-width: 550px; margin: 0 auto; padding: 20px; background-color: #34495e; /* Darker background for container */ diff --git a/files/scripts/app.py b/files/scripts/app.py index 05d23f4..7994b89 100644 --- a/files/scripts/app.py +++ b/files/scripts/app.py @@ -1,14 +1,95 @@ from flask import Flask, jsonify, request import sqlite3 -import json +import redis, json, time import os +import subprocess +import re app = Flask(__name__) db_path = '/opt/ssd_health/drive_records.db' +debug_output = False +secure_api = True + +#################################################### +### Redis Functions +#################################################### + +r = redis.Redis(host='172.17.0.1', port=6379) + +def update_disk_redis(): + active = get_active_drive_records(as_json=False) + all_rec = get_all_drive_records(as_json=False) + enriched = merge_active_with_details(active, all_rec) + r.publish('attached_disks', json.dumps(enriched)) + if debug_output: + print("=== Active drives sent to Redis ===") + print(json.dumps(enriched, indent=2)) + +def update_stats_redis(): + # store the data in vm_list + data = get_host_stats(as_json=False) + # push data to redis + # Publish to the Redis channel that the WS server is listening on + r.publish('host_stats', json.dumps(data)) + if debug_output: + print("=== Stats Redis Update ===") 
+ print(json.dumps(data, indent=2)) + return True + +def merge_active_with_details(active, all_records): + # Build a quick lookup dictionary keyed by serial + record_by_serial = {rec['serial']: rec for rec in all_records} + # Add the extra fields to each active drive + for drive in active: + rec = record_by_serial.get(drive['serial']) + if rec: + extra = {k: v for k, v in rec.items() if k not in ('id', 'serial')} + drive.update(extra) + return active + +#################################################### +### Host Stats Function +#################################################### +def get_host_stats(as_json=False): + total_memory_command = "free -h | grep 'Mem:' | awk '{print $2}'" + total_memory = run_command(total_memory_command, zero_only=True) + used_memory_command = "free -h | grep 'Mem:' | awk '{print $3}'" + used_memory = run_command(used_memory_command, zero_only=True) + free_memory_command = "free -h | grep 'Mem:' | awk '{print $4}'" + free_memory = run_command(free_memory_command, zero_only=True) + cpu_load_command = "uptime | grep -oP '(?<=age: ).*'" + cpu_load = run_command(cpu_load_command, zero_only=True) + # nano pi command + #cpu_temp_command = "sensors | grep 'temp1:' | cut -d+ -f 2 | awk '{print $1}'" + cpu_temp_command = "sensors | grep Package | cut -d+ -f 2 | awk '{print $1}'" + cpu_temp = run_command(cpu_temp_command, zero_only=True) + cpu_temp_stripped = re.sub(r'\u00b0C', '', cpu_temp) + cpu_temp_fixed = f"{cpu_temp_stripped} C" + ip_address_command = "ip -o -4 ad | grep -e eth -e tun | awk '{print $2\": \" $4}'" + ip_addresses = run_command(ip_address_command, zero_only=True) + time_now_command = "date +%r" + time_now = run_command(time_now_command, zero_only=True) + # Redis stores in this order, or at least the html renders it in this order + stats = [{ + "memory_total": total_memory, + "memory_used": used_memory, + "memory_free": free_memory, + "cpu_load": cpu_load, + "cpu_temp": cpu_temp_fixed, + "ip_addresses": ip_addresses, + 
"time": time_now + }] + if debug_output: + print("=== Current Host Stats ===") + print(json.dumps(stats, indent=2)) + return jsonify(stats) if as_json else stats + +#################################################### +### db functions +#################################################### -# init db function def init_db(): - print("Initializing DB") + print("Checking Database...") db_check = "SELECT name FROM sqlite_master WHERE type='table' AND name='drive_records';" create_table_command = """ CREATE TABLE drive_records ( @@ -21,26 +102,37 @@ def init_db(): smart TEXT NOT NULL ); """ + active_disks_command = """ + CREATE TABLE active_disks ( + id INTEGER PRIMARY KEY, + name TEXT, + serial TEXT, + size TEXT + ); + """ + # this code deletes the db file if 0 bytes if os.path.exists(db_path) and os.path.getsize(db_path) == 0: try: - print("Database is 0 bytes, deleting.") + print("Database file exists and is 0 bytes, deleting.") os.remove(db_path) except Exception as e: print(f"error during file deletion - 405: {e}") return jsonify({'error during file deletion': e}), 405 try: result = bool(query_db(db_check)) + print(result) # Check if any tables were found if result: - print(result) print("drive_records exists - 205") else: print("drive_records does not exist, creating") try: result_init = query_db(create_table_command) + result_active = query_db(active_disks_command) print(result_init) - print("Database created - 201") + print(result_active) + print("Database created - 201") except sqlite3.Error as e: print(f"error during table initialization: {e}") return jsonify({'error during table initialization - 401': e}), 401 @@ -54,16 +146,56 @@ def query_db(sql_query): try: with sqlite3.connect(db_path) as conn: cursor = conn.cursor() - print("Executing SQL query:", sql_query) + if debug_output: + print("Executing SQL query:", sql_query) cursor.execute(sql_query) rows = cursor.fetchall() + if debug_output: + print("Query Result:", rows) return rows except sqlite3.Error as 
e: print("An error occurred:", e) return [] +# is this redundant? oh my, yes +# does it save me time? also, big yes +# note how the one above doesn't have the query params +# i don't want to re-write the subroutine i took from the VM party +def query_database(query_string, query_params=None): + if debug_output: + print(query_string, query_params) + # Connect to the SQLite database (or create it if it doesn't exist) + conn = sqlite3.connect(db_path) + cursor = conn.cursor() + if query_params is not None: + cursor.execute(query_string, query_params) + else: + cursor.execute(query_string) + result = cursor.fetchall() + if debug_output: + print(result) + # Commit the transaction and close the connection + conn.commit() + conn.close() + return result + +#################################################### +### Other Helper Functions +#################################################### + +# subroutine to run a command, return stdout as array unless zero_only then return [0] +def run_command(cmd, zero_only=False): + # Run the command and capture the output + result = subprocess.run(cmd, shell=True, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + # Decode the byte output to a string + output = result.stdout.decode('utf-8') + # Split the output into lines and store it in an array + output_lines = [line for line in output.split('\n') if line] + # Return result + return output_lines[0] if zero_only else output_lines + # Function to return all drive records in database -def get_all_drive_records(): +def get_all_drive_records(as_json=True): get_all_drives = "SELECT * FROM drive_records" rows = query_db(get_all_drives) drives = [] @@ -78,24 +210,45 @@ def get_all_drive_records(): 'smart': row[6] } drives.append(drive) - return jsonify(drives) + return jsonify(drives) if as_json else drives + +# Function to return all active drives in database +def get_active_drive_records(as_json=True): + get_active_drives = "SELECT * FROM active_disks" + rows = 
query_db(get_active_drives) + drives = [] + for row in rows: + drive = { + 'id': row[0], + 'name': row[1], + 'serial': row[2], + 'size': row[3] + } + drives.append(drive) + return jsonify(drives) if as_json else drives # Function to check if a serial number exists in the database def check_serial_exists(serial): serial_check = f"SELECT * FROM drive_records WHERE serial='{serial}'" - print(serial_check) + if debug_output: + print(serial_check) return bool(query_db(serial_check)) +#################################################### +### Flask Routes +#################################################### + # Route to check if a serial number exists in the database @app.route('/check', methods=['GET']) def check(): serial_lookup = request.args.get('serial_lookup') - print(f"Serial to check: {serial_lookup}") + if debug_output: + print(f"Serial to check: {serial_lookup}") if not serial_lookup: return jsonify({'error': 'No serial number provided'}), 400 exists = check_serial_exists(serial_lookup) - return jsonify({'serial_number_exists': exists}) + return jsonify({'serial_number_exists': exists, 'serial_lookup': serial_lookup}) # Route to get all drive records in JSON format @app.route('/drives', methods=['GET']) @@ -115,7 +268,8 @@ def add_drive(): if None in [serial, model, flavor, capacity, TBW, smart]: return jsonify({'error': 'Missing required query parameter(s)'}), 400 add_drive_query = f"INSERT INTO drive_records (serial, model, flavor, capacity, TBW, smart) VALUES ('{serial}', '{model}', '{flavor}', '{capacity}', '{TBW}', '{smart}'); " - print(add_drive_query) + if debug_output: + print(add_drive_query) return jsonify(query_db(add_drive_query)) # Route to update drive in database @@ -128,18 +282,95 @@ def update_drive(): if None in [serial, TBW, smart]: return jsonify({'error': 'Missing required query parameter(s)'}), 400 update_drive_query = f"UPDATE drive_records SET TBW = '{TBW}', smart = '{smart}' WHERE serial = '{serial}';" - print(update_drive_query) + if 
debug_output: + print(update_drive_query) return jsonify(query_db(update_drive_query)) +# Route to return active drives +@app.route('/list_active_drives', methods=['GET']) +def list_active_drives(): + return get_active_drive_records() + +# list disks as sda,serial +def list_disk_and_serial(): + # Init blank devices array + devices = [] + # get the devices + cmd = "lsblk -o NAME,SERIAL,SIZE,TYPE | grep sd | grep disk | awk '{print $1 \",\" $2. \",\" $3}'" + # try to run the command, should not fail + try: + devices = run_command(cmd) + except subprocess.CalledProcessError as e: + print(f"An error occurred: {e.stderr.decode('utf-8')}") + # return the devices as an array + return sorted([item for item in devices if item]) + +# Route to refresh active drives +@app.route('/refresh_active_drives', methods=['GET']) +def refresh_active_drives(): # List of items to be inserted; each item is a tuple (name, serial, size) + current_items = list_disk_and_serial() + # Loop through the list and insert items, checking for duplicates based on 'serial' + for item in current_items: + item = item.split(',') + # Check if the serial already exists in the database + existing_item = query_database('SELECT * FROM active_disks WHERE name = ?', (item[0],)) + if not existing_item: + # If no duplicate is found, insert the new item + if debug_output: + print(f"Disk /dev/{item[0]} inserted, updating database") + verified_serial = run_command(f"hdparm -I /dev/{item[0]} | grep 'Serial\ Number' | cut -d: -f2 | awk '{{print $1}}' ", zero_only=True) + if debug_output: + print(f"Verified serial number through smartctl: {verified_serial}") + item[1] = verified_serial + query_database('INSERT INTO active_disks (name, serial, size) VALUES (?, ?, ?)', item) + update_disk_redis() + # Remove items from the database that are not in the current list of items + # first grab all the disks in the database + for row in query_database('SELECT name, serial FROM active_disks'): + drive_object = "" + drive_serial = 
"" + # the drive is missing until proven present, let's see if it exists + not_found = True + # load the currently attached drives in another array + for item in current_items: + item = item.split(',') + # this is where the drive is found, set this to false + if row[0] == item[0]: + drive_object = item[0] + drive_serial = item[1] + not_found = False + # if the drive was not found in the above loop, it's missing, remove it and loop to the next record + if not_found: + target_name = row[0].split(',') + if debug_output: + print(f"Deleting disk /dev/{drive_object} - serial {drive_serial}") + query_database('DELETE FROM active_disks WHERE name = ?', target_name) + update_disk_redis() + update_disk_redis() + update_stats_redis() + return jsonify({"function": "update_disk_database"}) + +# host stats +@app.route('/host_stats', methods=['GET']) +def host_stats(): + update_stats_redis() + return jsonify(get_host_stats()) + # test route @app.route('/test', methods=['GET']) def test(): - db_check = "SELECT name FROM sqlite_master WHERE type='table' AND name='drive_records';" + db_check = "SELECT name FROM sqlite_master WHERE type='table';" return query_db(db_check) if __name__ == '__main__': result=init_db() print(result) - app.run(debug=True, host='172.17.0.1', port=5000) + + if secure_api: + app.run(debug=True, host='172.17.0.1', port=5000) + else: + app.run(debug=True, host='0.0.0.0', port=5000) + + \ No newline at end of file diff --git a/files/ws_node/Dockerfile b/files/ws_node/Dockerfile new file mode 100644 index 0000000..f3aed61 --- /dev/null +++ b/files/ws_node/Dockerfile @@ -0,0 +1,18 @@ +# Use an official Node runtime +FROM node:20-alpine + +# Create app directory +WORKDIR /usr/src/app + +# Install dependencies +COPY package.json . +RUN npm install --only=production + +# Copy app source +COPY . . 
+ +# Expose the port that the app listens on +EXPOSE 3000 + +# Start the server +CMD ["node", "server.js"] \ No newline at end of file diff --git a/files/ws_node/package.json b/files/ws_node/package.json new file mode 100644 index 0000000..3b2246c --- /dev/null +++ b/files/ws_node/package.json @@ -0,0 +1,13 @@ +{ + "name": "redis-table-demo", + "version": "1.0.0", + "main": "server.js", + "scripts": { + "start": "node server.js" + }, + "dependencies": { + "express": "^4.18.2", + "socket.io": "^4.7.2", + "redis": "^4.6.7" + } +} \ No newline at end of file diff --git a/files/ws_node/public/index.html b/files/ws_node/public/index.html new file mode 100644 index 0000000..8671407 --- /dev/null +++ b/files/ws_node/public/index.html @@ -0,0 +1,90 @@ + + + + + Attached Disks - Live Table + + + + + +
+

Attached Disks

+
Connecting…
+
+
+

System Stats

+
Connecting…
+
+ + + + + + + \ No newline at end of file diff --git a/files/ws_node/public/styles.css b/files/ws_node/public/styles.css new file mode 100644 index 0000000..f362362 --- /dev/null +++ b/files/ws_node/public/styles.css @@ -0,0 +1,111 @@ +/* styles.css */ + +body { + font-family: Arial, sans-serif; + margin: 0; + padding: 0; + background-color: #2c3e50; /* Dark background color */ + color: #bdc3c7; /* Dimmer text color */ +} + +.hidden-info { + display: none; +} + +.title-button { + background-color: #34495e; + border: none; + color: white; + padding: 15px 32px; + text-align: center; + text-decoration: none; + display: inline-block; + font-size: 16px; + margin: 4px 2px; + cursor: pointer; +} + + +table, th, td { + border: 1px solid black; + border-collapse: collapse; +} +th, td { + padding: 10px; +} + +.container { + max-width: 950px; + margin: 0 auto; + padding: 20px; + background-color: #34495e; /* Darker background for container */ + border-radius: 8px; + box-shadow: 0 2px 4px rgba(0, 0, 0, 0.3); /* Slightly darker shadow */ + margin-top: 20px; +} +.container-small { + max-width: 550px; + margin: 0 auto; + padding: 20px; + background-color: #34495e; /* Darker background for container */ + border-radius: 8px; + box-shadow: 0 2px 4px rgba(0, 0, 0, 0.3); /* Slightly darker shadow */ + margin-top: 20px; +} + +h1, h2, h3, h4 { + color: #bdc3c7; /* Dimmer text color */ +} + +ul { + list-style-type: none; + padding: 0; +} + +li { + margin-bottom: 10px; + color: #bdc3c7; /* Dimmer text color */ +} + +.group-columns { + display: flex; +} + +.group-rows { + display: flex; + flex-wrap: wrap; + justify-content: flex-start; /* Left justification */ + margin-top: 10px; +} + +.group-column { + flex: 0 0 calc(33% - 10px); /* Adjust width of each column */ +} + +.column { + flex: 1; + padding: 0 10px; /* Adjust spacing between columns */ +} + +.subcolumn { + margin-left: 10px; +} + +.grid { + display: flex; + flex-wrap: wrap; + justify-content: space-between; + margin-top: 5px; +} 
+ +.meter { + width: calc(90% - 5px); + max-width: calc(45% - 5px); + margin-bottom: 5px; + border: 1px solid #7f8c8d; /* Light border color */ + border-radius: 5px; + padding: 5px; + text-align: center; + background-color: #2c3e50; /* Dark background for meter */ +} + diff --git a/files/ws_node/server.js b/files/ws_node/server.js new file mode 100644 index 0000000..f2fb9a2 --- /dev/null +++ b/files/ws_node/server.js @@ -0,0 +1,54 @@ +const http = require('http'); +const express = require('express'); +const { createClient } = require('redis'); +const { Server } = require('socket.io'); + +const app = express(); +const server = http.createServer(app); +const io = new Server(server); + +// Serve static files (index.html) +app.use(express.static('public')); + +// ---------- Redis subscriber ---------- +const redisClient = createClient({ + url: 'redis://172.17.0.1:6379' +}); +redisClient.on('error', err => console.error('Redis error', err)); + +(async () => { + await redisClient.connect(); + + // Subscribe to the channel that sends disk lists + const sub = redisClient.duplicate(); // duplicate to keep separate pub/sub + await sub.connect(); + + await sub.subscribe( + ['attached_disks', 'host_stats'], + (message, channel) => { // <-- single handler + let payload; + try { + payload = JSON.parse(message); // message is a JSON string + } catch (e) { + console.error(`Failed to parse ${channel}`, e); + return; + } + + io.emit(channel, payload); + } + ); + + sub.on('error', err => console.error('Subscriber error', err)); +})(); + +// ---------- Socket.io ---------- +io.on('connection', socket => { + console.log('client connected:', socket.id); + // Optional: send the current state on connect if you keep it cached +}); + +// ---------- Start ---------- +const PORT = process.env.PORT || 3000; +server.listen(PORT, () => { + console.log(`Server listening on http://localhost:${PORT}`); +}); \ No newline at end of file diff --git a/tasks/autologin.yaml b/tasks/autologin.yaml index 
84c41ed..1a25d1b 100644 --- a/tasks/autologin.yaml +++ b/tasks/autologin.yaml @@ -34,15 +34,15 @@ ExecStart= ExecStart=-/sbin/agetty --autologin {{ autologin_user }} --noclear %I 38400 linux -# create hello_there ASCII art -- name: autologin - generate hello_there.txt for the lulz - when: not quick_refresh | bool - include_tasks: hello_there.yaml -- name: autologin - script permission settings - when: not service_only | bool +- name: autologin - quick refresh skippable tasks + when: not quick_refresh | bool block: + # create hello_there ASCII art + - name: autologin - generate hello_there.txt for the lulz + include_tasks: hello_there.yaml + - name: User setup - update permissions on smartctl 1 shell: "chmod 755 /usr/sbin/smartctl" diff --git a/tasks/dashboard.yaml b/tasks/dashboard.yaml index 4ff047a..dcf41fd 100644 --- a/tasks/dashboard.yaml +++ b/tasks/dashboard.yaml @@ -24,17 +24,36 @@ owner: root group: root -- name: docker container +- name: websocket tasks + when: not quick_refresh | bool + block: + + - name: websocket - copy websocket server files + copy: + src: "ws_node" + dest: "{{ service_folder }}" + mode: 0755 + owner: root + group: root + + - name: websocket - build docker container + community.docker.docker_image_build: + name: ws_node + rebuild: always + path: "{{ service_folder }}/ws_node" + dockerfile: Dockerfile + +- name: docker containers when: not quick_refresh | bool block: - name: service_control_website - template docker-compose.yaml template: - src: docker-compose-php.yaml + src: docker-compose.yaml dest: "{{ service_control_web_folder }}/docker-compose.yaml" mode: 0644 - - name: "service_control_website - Start container at {{ container_http_port }}" + - name: "service_control_website - Start containers" shell: "docker-compose -f {{ service_control_web_folder }}/docker-compose.yaml up -d" register: docker_output - debug: | diff --git a/tasks/drive_index.yaml b/tasks/drive_index.yaml index 2362caf..f0e036e 100644 --- 
a/tasks/drive_index.yaml +++ b/tasks/drive_index.yaml @@ -11,6 +11,12 @@ group: "{{ autologin_user }}" mode: '0755' + - name: "Drive Index - {{ api_service_name }} - stop service if running" + ignore_errors: yes + systemd: + name: "{{ api_service_name }}.service" + state: stopped + - name: Drive Index - copy script files copy: src: scripts/ @@ -40,6 +46,9 @@ pytz requests opencv-python + flask-socketio + eventlet + redis owner: "{{ autologin_user }}" group: "{{ autologin_user }}" mode: 0644 @@ -54,12 +63,6 @@ - name: Drive Index - api service handler block: - - name: "Drive Index - {{ api_service_name }} - stop service if running" - ignore_errors: yes - systemd: - name: "{{ api_service_name }}.service" - state: stopped - - name: "Drive Index - template {{ api_service_name }}.service" vars: service_name: "{{ api_service_name }}" @@ -70,12 +73,9 @@ dest: "/etc/systemd/system/{{ api_service_name }}.service" mode: 0644 - - name: "Drive Index - {{ api_service_name }} - daemon reload" + - name: "Drive Index - {{ api_service_name }} - enable and start service, daemon reload" systemd: daemon_reload: yes - - - name: "Drive Index - {{ api_service_name }} - enable and start service" - systemd: name: "{{ api_service_name }}.service" state: started enabled: yes diff --git a/tasks/initialiaze.yaml b/tasks/initialiaze.yaml deleted file mode 100644 index eaf68e8..0000000 --- a/tasks/initialiaze.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- - -- name: Initialiaze - Install Packages - when: not quick_refresh | bool - apt: - name: - - "{{ ssd_health_packages_item }}" - state: present - loop: "{{ ssd_health_packages }}" - loop_control: - loop_var: ssd_health_packages_item - -- name: "Initialiaze - create {{ autologin_user }} user" - when: not quick_refresh | bool - user: - name: "{{ autologin_user }}" - groups: disk - password: "{{ autologin_password | password_hash('sha512') }}" - shell: /bin/bash - -- name: "Initialiaze - ensure {{ autologin_user }} home folder exists" - when: not 
quick_refresh | bool - file: - path: "/home/{{ autologin_user }}" - state: directory - owner: "{{ autologin_user }}" - group: "{{ autologin_user }}" - mode: '0700' - -... \ No newline at end of file diff --git a/tasks/initialize.yaml b/tasks/initialize.yaml new file mode 100644 index 0000000..9e996d2 --- /dev/null +++ b/tasks/initialize.yaml @@ -0,0 +1,65 @@ +--- + +- name: Initialize - set service_only when ran through cosmos-server + when: refresh_special | bool + set_fact: + service_only: true + +- name: Initialize - Install Packages + when: not quick_refresh | bool + apt: + name: + - "{{ ssd_health_packages_item }}" + state: present + loop: "{{ ssd_health_packages }}" + loop_control: + loop_var: ssd_health_packages_item + +- name: "Initialize - create {{ autologin_user }} user" + when: not quick_refresh | bool + user: + name: "{{ autologin_user }}" + groups: disk + password: "{{ autologin_password | password_hash('sha512') }}" + shell: /bin/bash + +- name: "Initialize - ensure {{ autologin_user }} home folder exists" + when: not quick_refresh | bool + file: + path: "/home/{{ autologin_user }}" + state: directory + owner: "{{ autologin_user }}" + group: "{{ autologin_user }}" + mode: '0700' + +- name: Initialize - set vars when needed + when: quick_refresh | bool or refresh_special | bool + block: + + - name: Check CPU Arch + shell: "dpkg --print-architecture" + register: cpu_architecture_output + + - name: Set bool armcpu_check + when: '"arm" in cpu_architecture_output.stdout' + set_fact: + armcpu_check: true + - debug: | + msg="CPU architecture: {{ cpu_architecture_output.stdout_lines[0] }}" + msg="armcpu_check: {{ armcpu_check }}" + +- name: Initialize - docker-buildx handler + when: armcpu_check | bool + block: + + - name: docker-buildx handler - download deb file + get_url: + url: "http://ftp.us.debian.org/debian/pool/main/d/docker-buildx/docker-buildx_0.13.1+ds1-3_arm64.deb" + dest: "/tmp/docker-buildx_0.13.1+ds1-3_arm64.deb" + mode: '0644' + + - name: 
docker-buildx handler - install package + apt: + deb: "/tmp/docker-buildx_0.13.1+ds1-3_arm64.deb" + +... \ No newline at end of file diff --git a/tasks/main.yaml b/tasks/main.yaml index ccfc5f0..8bfc8e7 100644 --- a/tasks/main.yaml +++ b/tasks/main.yaml @@ -2,7 +2,7 @@ # create and configure user account - name: Drive health - initialize early steps - include_tasks: initialiaze.yaml + include_tasks: initialize.yaml # create drive index service - name: Drive health - drive_index service @@ -14,14 +14,14 @@ # set up autologin - name: Drive health - configure autologin - when: not install_kiosk | bool or not service_only | bool + when: not install_kiosk | bool and not service_only | bool include_tasks: autologin.yaml # configure service-mode -- name: Drive health - configure autologin +- name: Drive health - configure service-mode, disable autologin when: install_kiosk | bool or service_only | bool include_tasks: service_mode.yaml - + # Install chrome kiosk - name: install chromium kiosk when: install_kiosk | bool and not service_only | bool diff --git a/tasks/no_autologin.yaml b/tasks/no_autologin.yaml index df032f4..5cf90c4 100644 --- a/tasks/no_autologin.yaml +++ b/tasks/no_autologin.yaml @@ -1,28 +1,28 @@ --- -- name: autologin - edit logind to reduce vterms to one +# reverse autologin things + +- name: no autologin - edit logind to set vterms to six lineinfile: dest: /etc/systemd/logind.conf regexp: '^#NAutoVTs=' line: 'NAutoVTs=6' backrefs: yes -- name: autologin - create getty tty1 service folder - file: - path: /etc/systemd/system/getty@tty1.service.d/ - state: directory - mode: '0700' +- name: no autologin - "delete autologin files" + ignore_errors: yes + shell: | + rm /home/{{ autologin_user }}/.bash_aliases + rm /etc/systemd/system/getty@tty1.service.d/override.conf + rm /etc/sudoers.d/smartctl -- name: autologin - "delete {{ autologin_user }} .bash_aliases" - shell: "rm /home/{{ autologin_user }}/.bash_aliases" - -- name: autologin - remove autologin 
override to getty tty1 service - shell: "rm /etc/systemd/system/getty@tty1.service.d/override.conf" +#- name: no autologin - remove autologin override to getty tty1 service +# shell: "rm /etc/systemd/system/getty@tty1.service.d/override.conf" +# +#- name: no autologin - "User setup - allow {{ autologin_user }} to smartctl" +# shell: "rm /etc/sudoers.d/smartctl" -- name: autologin - "User setup - allow {{ autologin_user }} to smartctl" - shell: "rm /etc/sudoers.d/smartctl" - -- name: autologin - Restart getty@tty1 service +- name: no autologin - Restart getty@tty1 service systemd: name: getty@tty1.service state: restarted diff --git a/tasks/service_mode.yaml b/tasks/service_mode.yaml index 19e8797..cbbca30 100644 --- a/tasks/service_mode.yaml +++ b/tasks/service_mode.yaml @@ -3,14 +3,6 @@ - name: Service Mode - set sleep_time to 1 set_fact: sleep_time: "1" - -- name: "Service Mode - template drive_check.sh again" - template: - src: drive_check.sh - dest: "{{ service_folder }}/drive_check.sh" - mode: 0755 - owner: "{{ autologin_user }}" - group: "{{ autologin_user }}" - name: "Service Mode - {{ kiosk_service_name }}.service - stop service if running" ignore_errors: yes @@ -18,6 +10,16 @@ name: "{{ kiosk_service_name }}.service" state: stopped +- name: "Service Mode - template drive_check.sh again" + vars: + service_mode: true + template: + src: drive_check.sh + dest: "{{ service_folder }}/drive_check.sh" + mode: 0755 + owner: "{{ autologin_user }}" + group: "{{ autologin_user }}" + - name: "Service Mode - template {{ kiosk_service_name }}.service" vars: service_name: "{{ kiosk_service_name }}" @@ -28,12 +30,9 @@ dest: "/etc/systemd/system/{{ kiosk_service_name }}.service" mode: 0644 -- name: "Service Mode - {{ kiosk_service_name }} - daemon reload" +- name: "Service Mode - {{ kiosk_service_name }} - enable and start service api and daemon reload" systemd: daemon_reload: yes - -- name: "Service Mode - {{ kiosk_service_name }} - enable and start service api" - systemd: 
name: "{{ kiosk_service_name }}.service"
     state: started
     enabled: yes
diff --git a/templates/docker-compose-php.yaml b/templates/docker-compose-php.yaml
deleted file mode 100644
index 3833e80..0000000
--- a/templates/docker-compose-php.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
-services:
-
-  {{ container_name }}:
-    container_name: {{ container_name }}
-    image: php:8.0-apache
-    ports:
-      - {{ container_http_port }}:80
-    volumes:
-      - ./html:/var/www/html/
-      {{ extra_volumes }}
-    network_mode: bridge
-    restart: always
diff --git a/templates/docker-compose.yaml b/templates/docker-compose.yaml
new file mode 100644
index 0000000..ef90209
--- /dev/null
+++ b/templates/docker-compose.yaml
@@ -0,0 +1,28 @@
+services:
+
+  web_dash:
+    container_name: web_dash
+    image: php:8.0-apache
+    ports:
+      - 80:80
+    volumes:
+      - ./html:/var/www/html/
+    network_mode: bridge
+    restart: always
+
+  ws_node:
+    container_name: ws_node
+    image: ws_node
+    ports:
+      - 3000:3000
+    network_mode: bridge
+    restart: always
+    depends_on:
+      - redis
+
+  redis:
+    image: redis:7-alpine
+    network_mode: bridge
+    restart: always
+    ports:
+      - 172.17.0.1:6379:6379
\ No newline at end of file
diff --git a/templates/drive_check.sh b/templates/drive_check.sh
index 92b54ac..9d4750a 100644
--- a/templates/drive_check.sh
+++ b/templates/drive_check.sh
@@ -1,14 +1,28 @@
 #!/bin/bash
 # this is a big loop
 # it shows SSD health data
+
+SERVICE_MODE={{ service_mode | default(false) | lower }}
+
 exec 2> /dev/null
 while true; do
     clear
+    # Show IP Info
+
+    if ! $SERVICE_MODE; then
+        echo ===Visit the IP in your browser to view history======
+    fi
+
     ip -o -4 ad | grep -v -e docker -e 127.0.0.1 -e br- | awk '{print $2 "," $4}' | column -s , -t
     # get all disks
     DISK_LIST=$(ls -lo /dev/sd? | awk '{print $9}')
     # process each disk
     IFS=$'\n' read -rd '' -a DISK_ARRAY <<< "$DISK_LIST"
     for DISK in "${DISK_ARRAY[@]}"; do
+        # update active drives more frequently
+        if ! 
$SERVICE_MODE; then + echo "Issuing request to update drive database" + fi + curl -s "http://172.17.0.1:5000/refresh_active_drives" # store smartctl data once SMART_DATA=$(smartctl -x $DISK) NVME_CHECK=$(echo "$SMART_DATA" | grep "NVMe Version") @@ -28,20 +42,22 @@ while true; do SMART=$(echo "$SMART_DATA" | grep "self-assessment test result" | cut -d ":" -f 2 | xargs) FLAVOR="SATA SSD" DRIVE_EXISTS=$(curl -s "http://172.17.0.1:5000/check?serial_lookup=$SERIAL" | jq .serial_number_exists) - # Display drive data - echo "============ $DISK Disk Info - SSD: ============" - #echo "DRIVE_EXISTS: $DRIVE_EXISTS" - echo "Serial Number: $SERIAL" - echo "Model String: $MODEL" - echo "SMART Check: $SMART" - echo "Disk capacity: $CAPACITY" - echo "TB Written: $TBW TB" - if [ -z "$PLR" ] ; then - echo "Percent Lifetime Remaining data not available" - else - echo "$DISK has $PLR% lifetime remaining" + if ! $SERVICE_MODE; then + # Display drive data + echo "============ $DISK Disk Info - SSD: ============" + #echo "DRIVE_EXISTS: $DRIVE_EXISTS" + echo "Serial Number: $SERIAL" + echo "Model String: $MODEL" + echo "SMART Check: $SMART" + echo "Disk capacity: $CAPACITY" + echo "TB Written: $TBW TB" + if [ -z "$PLR" ] ; then + echo "Percent Lifetime Remaining data not available" + else + echo "$DISK has $PLR% lifetime remaining" + fi + echo fi - echo if [ -x "$TBW"] ; then TBW="unknown" fi @@ -66,18 +82,20 @@ while true; do SMART=$(echo "$SMART_DATA" | grep "self-assessment test result" | cut -d ":" -f 2 | xargs) FLAVOR="NVMe" DRIVE_EXISTS=$(curl -s "http://172.17.0.1:5000/check?serial_lookup=$SERIAL" | jq .serial_number_exists) - # Display Disk Info - echo "============ $DISK Disk Info - NVMe: ============" - #echo "DRIVE_EXISTS: $DRIVE_EXISTS" - echo "Serial Number: $SERIAL" - echo "Model String: $MODEL" - echo "SMART Check: $SMART" - echo "Disk capacity: $CAPACITY" - echo "TB Written: $TBW TB" - echo "NAND spare blocks: $AVAIL_SPARE" - echo - if [ -x "$TBW"] ; then - TBW="unknown" 
+            if ! $SERVICE_MODE; then
+                # Display Disk Info
+                echo "============ $DISK Disk Info - NVMe: ============"
+                #echo "DRIVE_EXISTS: $DRIVE_EXISTS"
+                echo "Serial Number: $SERIAL"
+                echo "Model String: $MODEL"
+                echo "SMART Check: $SMART"
+                echo "Disk capacity: $CAPACITY"
+                echo "TB Written: $TBW TB"
+                echo "NAND spare blocks: $AVAIL_SPARE"
+                echo
+            fi
+            if [ -z "$TBW" ] ; then
+                TBW="unknown"
             fi
             # database handler
             if [ "$DRIVE_EXISTS" == "false" ] ; then
@@ -90,10 +108,14 @@
             fi
         fi
     else
-        echo "Skipping $DISK, not SATA SSD or NVMe"
+        if ! $SERVICE_MODE; then
+            echo "Skipping $DISK, not SATA SSD or NVMe"
+        fi
     fi
     done
     # wait {{ sleep_time }} seconds, loop again
-    echo "Sleeping {{ sleep_time }} seconds"
+    if ! $SERVICE_MODE; then
+        echo "Sleeping {{ sleep_time }} seconds"
+    fi
     sleep {{ sleep_time }}
 done
diff --git a/templates/service_template.service b/templates/service_template.service
index 6f5ef22..92b3dcb 100644
--- a/templates/service_template.service
+++ b/templates/service_template.service
@@ -2,6 +2,7 @@
 [Unit]
 Description={{ service_name }}
 After=network.target
+{{ extra_options | default('') }}
 
 [Service]
 User=root