feb git sync
This commit is contained in:
@ -10,6 +10,7 @@ ssd_health_packages:
|
|||||||
- jq
|
- jq
|
||||||
- hdparm
|
- hdparm
|
||||||
- redis-tools
|
- redis-tools
|
||||||
|
- lm-sensors
|
||||||
|
|
||||||
# autologin vars
|
# autologin vars
|
||||||
autologin_password: "kingduy"
|
autologin_password: "kingduy"
|
||||||
@ -25,7 +26,7 @@ autologin_user: "ssd_health"
|
|||||||
# api service vars
|
# api service vars
|
||||||
api_service_name: "drive_index"
|
api_service_name: "drive_index"
|
||||||
api_service_folder: "{{ service_folder }}"
|
api_service_folder: "{{ service_folder }}"
|
||||||
api_service_exe: "{{ service_folder }}/venv/bin/python -u {{ service_folder }}/app.py"
|
api_service_exe: "{{ service_folder }}/venv/bin/python -u {{ service_folder }}/ssd_api.py"
|
||||||
extra_options: |
|
extra_options: |
|
||||||
After=docker.service
|
After=docker.service
|
||||||
BindsTo=docker.service
|
BindsTo=docker.service
|
||||||
@ -53,5 +54,6 @@ quick_refresh: false
|
|||||||
service_only: false
|
service_only: false
|
||||||
armcpu_check: false
|
armcpu_check: false
|
||||||
service_mode: false
|
service_mode: false
|
||||||
|
install_docker: true
|
||||||
|
|
||||||
...
|
...
|
||||||
@ -34,6 +34,8 @@ function fetchSSDData() {
|
|||||||
For a live dashboard, please visit <a href=/>home</a>.
|
For a live dashboard, please visit <a href=/>home</a>.
|
||||||
</div>
|
</div>
|
||||||
<div class="container">
|
<div class="container">
|
||||||
|
Search for disk:<br>
|
||||||
|
<input id="search" type="text" placeholder="Search by ID, model, serial…" /><p>
|
||||||
<?php
|
<?php
|
||||||
$ssdData = fetchSSDData(); // Fetch data from the API
|
$ssdData = fetchSSDData(); // Fetch data from the API
|
||||||
// Start the table
|
// Start the table
|
||||||
@ -44,20 +46,20 @@ function fetchSSDData() {
|
|||||||
<th>Disk ID</th>
|
<th>Disk ID</th>
|
||||||
<th>Model String</th>
|
<th>Model String</th>
|
||||||
<th>Serial Number</th>
|
<th>Serial Number</th>
|
||||||
<th>TB Written</th>
|
<th>GB Written</th>
|
||||||
<th>Disk Capacity</th>
|
<th>Disk Capacity</th>
|
||||||
<th>Disk Flavor</th>
|
<th>Disk Flavor</th>
|
||||||
<th>SMART Result</th>
|
<th>SMART Result</th>
|
||||||
</tr>
|
</tr>
|
||||||
</thead>';
|
</thead>';
|
||||||
// Table body - one row per SSD
|
// Table body - one row per SSD
|
||||||
echo '<tbody>';
|
echo '<tbody id="ssd-body">';
|
||||||
foreach ($ssdData as $ssd) {
|
foreach ($ssdData as $ssd) {
|
||||||
// Escape the values so the page stays safe
|
// Escape the values so the page stays safe
|
||||||
$id = htmlspecialchars($ssd['id']);
|
$id = htmlspecialchars($ssd['id']);
|
||||||
$model = htmlspecialchars($ssd['model']);
|
$model = htmlspecialchars($ssd['model']);
|
||||||
$serial = htmlspecialchars($ssd['serial']);
|
$serial = htmlspecialchars($ssd['serial']);
|
||||||
$tbw = htmlspecialchars($ssd['TBW']);
|
$tbw = htmlspecialchars($ssd['gb_written']);
|
||||||
$cap = htmlspecialchars($ssd['capacity']);
|
$cap = htmlspecialchars($ssd['capacity']);
|
||||||
$flavor = htmlspecialchars($ssd['flavor']);
|
$flavor = htmlspecialchars($ssd['flavor']);
|
||||||
$smart = htmlspecialchars($ssd['smart']);
|
$smart = htmlspecialchars($ssd['smart']);
|
||||||
@ -77,4 +79,37 @@ function fetchSSDData() {
|
|||||||
</div>
|
</div>
|
||||||
|
|
||||||
</body>
|
</body>
|
||||||
|
|
||||||
|
<script>
|
||||||
|
document.addEventListener('DOMContentLoaded', () => {
|
||||||
|
const searchInput = document.getElementById('search');
|
||||||
|
const tbody = document.getElementById('ssd-body');
|
||||||
|
const rows = Array.from(tbody.rows); // snapshot of rows
|
||||||
|
|
||||||
|
// Optional: a simple debounce so we don't react on every keystroke
|
||||||
|
const debounce = (fn, delay) => {
|
||||||
|
let timer;
|
||||||
|
return (...args) => {
|
||||||
|
clearTimeout(timer);
|
||||||
|
timer = setTimeout(() => fn.apply(this, args), delay);
|
||||||
|
};
|
||||||
|
};
|
||||||
|
|
||||||
|
const filterRows = debounce(() => {
|
||||||
|
const query = searchInput.value.trim().toLowerCase();
|
||||||
|
rows.forEach(row => {
|
||||||
|
// Grab all cells in this row as a single string
|
||||||
|
const rowText = Array.from(row.cells)
|
||||||
|
.map(cell => cell.textContent)
|
||||||
|
.join(' ')
|
||||||
|
.toLowerCase();
|
||||||
|
|
||||||
|
row.style.display = rowText.includes(query) ? '' : 'none';
|
||||||
|
});
|
||||||
|
}, 200); // 200 ms debounce
|
||||||
|
|
||||||
|
searchInput.addEventListener('input', filterRows);
|
||||||
|
});
|
||||||
|
</script>
|
||||||
|
|
||||||
</html>
|
</html>
|
||||||
94
files/oop_code/HostRedis.py
Normal file
94
files/oop_code/HostRedis.py
Normal file
@ -0,0 +1,94 @@
|
|||||||
|
|
||||||
|
import redis
|
||||||
|
import subprocess
|
||||||
|
import re
|
||||||
|
import json
|
||||||
|
|
||||||
|
####################################################
|
||||||
|
### Redis Functions
|
||||||
|
####################################################
|
||||||
|
debug_output = False
|
||||||
|
r = redis.Redis(host='172.17.0.1', port=6379)
|
||||||
|
|
||||||
|
def update_stats_redis():
    """Collect current host stats and publish them on the 'host_stats' Redis channel."""
    # Gather the stats as a plain Python structure so we serialize exactly once here.
    stats = get_host_stats(as_json=False)
    # Publish to the Redis channel that the WS server is listening on.
    r.publish('host_stats', json.dumps(stats))
    if debug_output:
        print("=== Stats Redis Update ===")
        print(json.dumps(stats, indent=2))
    return True
|
||||||
|
|
||||||
|
####################################################
|
||||||
|
### Host Stats Function
|
||||||
|
####################################################
|
||||||
|
def get_host_stats(as_json=False):
    """Collect host metrics (memory, CPU load/temp, IPs, time, battery) via shell tools.

    Returns a single-element list containing one dict. Key insertion order
    matters: the dashboard HTML renders the fields in this order. When
    *as_json* is True the list is returned as a JSON string.
    """
    total_memory = run_shell("free -h | grep 'Mem:' | awk '{print $2}'", zero_only=True)
    used_memory = run_shell("free -h | grep 'Mem:' | awk '{print $3}'", zero_only=True)
    free_memory = run_shell("free -h | grep 'Mem:' | awk '{print $4}'", zero_only=True)
    # Matches the tail of uptime's "load average: ..." text.
    cpu_load = run_shell("uptime | grep -oP '(?<=age: ).*'", zero_only=True)
    # nano pi variant kept for reference:
    #cpu_temp_command = "sensors | grep 'temp1:' | cut -d+ -f 2 | awk '{print $1}'"
    cpu_temp = run_shell(
        "sensors | grep -e Sensor -e Package | cut -d+ -f 2 | awk '{print $1}'",
        zero_only=True)
    # Replace the degree sign with a plain " C" suffix.
    cpu_temp_stripped = re.sub(r'\u00b0C', '', cpu_temp)
    cpu_temp_fixed = f"{cpu_temp_stripped} C"
    ip_addresses = run_shell(
        "ip -o -4 ad | grep -v -e docker -e 127.0.0.1 | awk '{print $2\": \" $4}'",
        zero_only=True)
    time_now = run_shell("date +%r", zero_only=True)

    # Build the entry ONCE; the previous code duplicated the whole dict for
    # the battery case. "time" is appended last so battery_level, when
    # present, sits before it — preserving the original rendering order.
    entry = {
        "memory_total": total_memory,
        "memory_used": used_memory,
        "memory_free": free_memory,
        "cpu_load": cpu_load,
        "cpu_temp": cpu_temp_fixed,
        "ip_addresses": ip_addresses,
    }
    if check_for_battery():
        entry["battery_level"] = run_shell(
            "acpi | grep Battery | awk {print'$3 \" \" $4'}", zero_only=True)
    entry["time"] = time_now
    stats = [entry]

    if debug_output:
        print("=== Current Host Stats ===")
        print(json.dumps(stats, indent=2))
    # Fixed: the old code returned flask.jsonify(stats) here, but flask is
    # never imported in this module, so as_json=True raised NameError.
    # Return a JSON string instead.
    return json.dumps(stats) if as_json else stats
|
||||||
|
|
||||||
|
def check_for_battery():
    """Return True when `acpi` reports an attached battery."""
    # `acpi` prints lines like "Battery 0: Full, 100%"; the first awk field
    # is the literal word "Battery" when one is present.
    probe = "acpi | grep Battery | awk {print'$1'}"
    return run_shell(probe, zero_only=True) == 'Battery'
|
||||||
|
|
||||||
|
# subroutine to run a command, return stdout as array unless zero_only then return [0]
|
||||||
|
def run_shell(cmd, zero_only=False):
    """Run *cmd* through the shell and return its stdout.

    Returns the non-empty output lines as a list, or just the first line
    when *zero_only* is True (an empty list when there is no output at all).
    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    result = subprocess.run(cmd, shell=True, check=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = result.stdout.decode('utf-8')
    output_lines = [line for line in output.split('\n') if line]
    # Fixed: guard the empty-output case explicitly instead of relying on a
    # bare `except:` around an IndexError.
    if zero_only and output_lines:
        return output_lines[0]
    return output_lines
|
||||||
306
files/oop_code/SSDObject.py
Normal file
306
files/oop_code/SSDObject.py
Normal file
@ -0,0 +1,306 @@
|
|||||||
|
|
||||||
|
###############################################################
|
||||||
|
# SSDObject.py
|
||||||
|
# ssd object class function created for ssd monitor
|
||||||
|
# functions for handling database and shell commands
|
||||||
|
# class definitions and other class helper functions
|
||||||
|
###############################################################
|
||||||
|
|
||||||
|
import sqlite3
|
||||||
|
import json, redis
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
db_path = '/opt/ssd_health/drive_records.db'
|
||||||
|
debug_output = False
|
||||||
|
show_records = False
|
||||||
|
suppress_errors = False
|
||||||
|
push_redis = False
|
||||||
|
|
||||||
|
###############################################################
|
||||||
|
# Class Definition
|
||||||
|
#
|
||||||
|
# Most of the heavy lifting is done on object instantiation
|
||||||
|
# Object is instantiated with just the dev_id and flavor
|
||||||
|
# Drive data is collected on instantiation and stored to database
|
||||||
|
###############################################################
|
||||||
|
|
||||||
|
class SSDObject:
    """One attached SSD/NVMe drive; gathers SMART data and records it on creation.

    Instantiation runs smartctl against /dev/<dev_id>, derives the drive's
    identity and wear figures, and inserts/updates its database row.
    Raises TypeError for absent devices, HDDs, and unrecognized devices.
    """

    def __str__(self) -> str:
        return f"""Drive at /dev/{self.dev_id} is a '{self.model}' {self.capacity} {self.flavor}, SN: {self.serial}"""

    def __init__(self, dev_id: str):
        self.dev_id = dev_id
        if not check_serial_attached(self.dev_id):
            raise TypeError(f"No device at /dev/{self.dev_id}")
        self._smart_data = return_smartctl(dev_id)
        self.flavor = self._get_flavor()
        if self.flavor == "HDD":
            raise TypeError("Unable to instantiate HDD")
        if self.flavor == "Error":
            raise TypeError("Unable to instantiate storage device")
        self.serial = self._smart_data['serial_number']
        self.model = self._smart_data['model_name']
        self.capacity_bytes = self._smart_data['user_capacity']['bytes']
        self.smart_status = self._smart_data['smart_status']['passed']
        self.capacity = self._get_human_capacity()
        self.sector_size = return_sector_size(self.dev_id)
        self.gb_written = self._get_gbw()
        self._update_db()

    def _get_flavor(self) -> str:
        """Classify the drive as SSD / NVMe / HDD / Error from its smartctl JSON."""
        blob = json.dumps(self._smart_data)
        if "rotation_rate" in blob:
            # Fixed: the old code computed int(rate == 0) — the int() wrapped
            # the comparison rather than the value. Same outcome, accidental.
            if int(self._smart_data['rotation_rate']) == 0:
                if not suppress_errors:
                    print(f"Warning - /dev/{self.dev_id} is a weird SSD with a rotation rate of 0")
                return "SSD"
            return "HDD"
        if "NVMe" in blob:
            return "NVMe"
        if "Solid State" in blob:
            return "SSD"
        if "Unknown USB bridge" in blob:
            return "Error"
        # Fixed: previously fell through returning None, which slipped past
        # __init__'s checks; treat anything unrecognized as an error.
        return "Error"

    def _get_human_capacity(self) -> str:
        """Render capacity_bytes as a human-readable string, e.g. '500 GiB'."""
        size = self.capacity_bytes
        factor = 1024
        for exponent, suffix in ((4, "TiB"), (3, "GiB"), (2, "MiB"), (1, "KiB")):
            threshold = factor ** exponent
            # Fixed: >= so exact powers (e.g. 1 TiB) use the larger unit, and
            # a final byte fallback so sub-KiB sizes no longer return None.
            if size >= threshold:
                return f"{size / threshold:.0f} {suffix}"
        return f"{size} B"

    def _get_gbw(self) -> str:
        """Return GiB written so far (2 dp); '' for unknown flavors."""
        gib_factor = 2 ** 30
        if self.flavor == "SSD":
            # SATA: logical sectors written times sector size.
            units = return_ls_written(self._smart_data)
            return round(units * self.sector_size / gib_factor, 2)
        if self.flavor == "NVMe":
            units = float(self._smart_data['nvme_smart_health_information_log']['data_units_written'])
            # NOTE(review): the NVMe spec defines data units as 512,000-byte
            # blocks; this multiplies by sector_size like SATA — confirm.
            return round(units * self.sector_size / gib_factor, 2)
        return ''

    def _update_db(self):
        """Insert or update this drive's row; optionally push a Redis refresh."""
        if push_redis:
            update_disk_redis()

        # query_db only accepts a raw SQL string, so double up embedded
        # single quotes as a minimal guard against breakage/injection.
        def esc(value):
            return str(value).replace("'", "''")

        if self.exists():
            drive_query = (
                f"UPDATE drive_records SET gb_written = '{esc(self.gb_written)}', "
                f"smart = '{esc(self.smart_status)}' WHERE serial = '{esc(self.serial)}';"
            )
        else:
            drive_query = (
                "INSERT INTO drive_records (serial, model, flavor, capacity, gb_written, smart) "
                f"VALUES ('{esc(self.serial)}', '{esc(self.model)}', '{esc(self.flavor)}', "
                f"'{esc(self.capacity)}', '{esc(self.gb_written)}', '{esc(self.smart_status)}');"
            )
        query_db(drive_query)

    def exists(self) -> bool:
        """True when this serial already has a database row."""
        return check_serial_exists(self.serial)

    def attached(self) -> bool:
        """True when lsblk output still shows this serial."""
        return check_serial_attached(self.serial)
|
||||||
|
|
||||||
|
|
||||||
|
########################################
|
||||||
|
# Other Helper Functions
|
||||||
|
########################################
|
||||||
|
|
||||||
|
# subroutine to run a command, return stdout as array unless zero_only then return [0]
|
||||||
|
def run_command(cmd, zero_only=False):
    """Run *cmd* through the shell and return its stdout.

    Returns the non-empty output lines as a list, or just the first line
    when *zero_only* is True (an empty list when there is no output at all).
    Raises subprocess.CalledProcessError on a non-zero exit status.
    """
    result = subprocess.run(cmd, shell=True, check=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = result.stdout.decode('utf-8')
    output_lines = [line for line in output.split('\n') if line]
    # Fixed: guard the empty-output case explicitly instead of relying on a
    # bare `except:` around an IndexError.
    if zero_only and output_lines:
        return output_lines[0]
    return output_lines
|
||||||
|
|
||||||
|
def return_smartctl(drive_id):
    """Return smartctl's full JSON report for /dev/<drive_id> as a dict."""
    # `|| true` keeps the exit status zero even when smartctl flags drive
    # issues, so check=True below never raises on a degraded drive.
    cmd = f"/usr/sbin/smartctl --json -x /dev/{drive_id} || true"
    completed = subprocess.run(cmd, shell=True, check=True,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return json.loads(completed.stdout.decode("utf-8"))
|
||||||
|
|
||||||
|
def return_sector_size(drive_id):
    """Return /dev/<drive_id>'s logical sector size in bytes, per fdisk."""
    cmd = f"fdisk -l /dev/{drive_id} | grep 'Sector size' | awk '{{print $4}}'"
    completed = subprocess.run(cmd, shell=True, check=True,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    # int() tolerates the trailing newline in the decoded output.
    return int(completed.stdout.decode("utf-8"))
|
||||||
|
|
||||||
|
def return_ls_written(data):
    """Return the 'Logical Sectors Written' counter from smartctl ATA statistics.

    *data* is the parsed smartctl JSON. Returns None when the statistic is
    not present.
    """
    pages = data.get("ata_device_statistics", {}).get("pages", [])
    for page in pages:
        for row in page.get("table", []):
            if row.get("name") == "Logical Sectors Written":
                return row.get("value")
    return None
|
||||||
|
|
||||||
|
# Function to return all drive records in database
|
||||||
|
def get_all_drive_records():
    """Return every drive_records row as a JSON array string."""
    rows = query_db("SELECT * FROM drive_records")
    drives = [
        {
            'id': row[0],
            'serial': row[1],
            'model': row[2],
            'flavor': row[3],
            'capacity': row[4],
            'gb_written': round(float(row[5]), 2),
            'smart': row[6],
        }
        for row in rows
    ]
    return json.dumps(drives)
|
||||||
|
|
||||||
|
|
||||||
|
# return attached disks
|
||||||
|
def list_disk_and_serial():
    """Return attached SATA disks as [{'dev_id', 'serial', 'capacity'}, ...]."""
    # Fixed: `time` was used below but never imported in this module, so the
    # wait loop would have raised NameError had it ever run.
    import time

    devices = []
    # NOTE(review): the awk program contains `$2.` — presumably meant as a
    # plain `$2`; confirm against real lsblk output before touching the string.
    cmd = "lsblk -o NAME,SERIAL,SIZE,TYPE | grep sd | grep disk | awk '{print $1 \",\" $2. \",\" $3}'"
    try:
        # Wait out transient states where the listing still shows a bare
        # 'disk' line (exact-match test against the output lines).
        while 'disk' in run_command(cmd, zero_only=False):
            time.sleep(0.5)
        devices = run_command(cmd, zero_only=False)
    except subprocess.CalledProcessError as e:
        print(f"An error occurred: {e.stderr.decode('utf-8')}")

    drives = []
    for device in devices:
        if debug_output:
            print(device)
        fields = device.split(',')
        drives.append({
            "dev_id": fields[0],
            "serial": fields[1],
            "capacity": fields[2],
        })
    if debug_output:
        print(drives)
    return drives
|
||||||
|
|
||||||
|
# Function to check if a serial number exists in the database
|
||||||
|
def check_serial_exists(serial):
    """Return True when *serial* already has a row in drive_records."""
    # query_db only accepts a raw SQL string, so double up embedded single
    # quotes as a minimal guard against malformed/hostile serial strings.
    safe_serial = str(serial).replace("'", "''")
    serial_check = f"SELECT * FROM drive_records WHERE serial='{safe_serial}'"
    if debug_output:
        print(serial_check)
    return bool(query_db(serial_check))
|
||||||
|
|
||||||
|
def check_serial_attached(serial):
    """Return True when lsblk output contains *serial* (device name or serial)."""
    # `|| true` keeps grep's miss (exit status 1) from raising in run_command.
    probe = f"lsblk -o NAME,SERIAL,SIZE,TYPE | grep {serial} || true"
    return bool(run_command(probe, zero_only=False))
|
||||||
|
|
||||||
|
# Function to run SQL Query
|
||||||
|
def query_db(sql_query):
    """Execute *sql_query* against the drive database and return fetched rows.

    Returns [] on any sqlite error.
    """
    try:
        with sqlite3.connect(db_path) as conn:
            cursor = conn.cursor()
            if debug_output:
                print("Executing SQL query:", sql_query)
            cursor.execute(sql_query)
            rows = cursor.fetchall()
            if debug_output:
                print("Query Result:", rows)
            # Fixed: the old code called conn.close() HERE, inside the with
            # block. The connection context manager commits on exit, and
            # committing a closed connection raises sqlite3.ProgrammingError,
            # which the except below swallowed — so every call returned [].
        conn.close()
        return rows
    except sqlite3.Error as e:
        if not suppress_errors:
            print("An error occurred:", e)
        return []
|
||||||
|
|
||||||
|
def init_db():
    """Ensure the drive_records table exists, creating it when missing.

    Called at module import time; the return value is ignored, so error
    paths just report and return None.
    """
    print("Checking Database...")
    db_check = "SELECT name FROM sqlite_master WHERE type='table' AND name='drive_records';"
    create_table_command = """
    CREATE TABLE drive_records (
        id INTEGER PRIMARY KEY,
        serial TEXT NOT NULL,
        model TEXT NOT NULL,
        flavor TEXT NOT NULL,
        capacity TEXT NOT NULL,
        gb_written TEXT NOT NULL,
        smart TEXT NOT NULL
    );
    """
    try:
        table_exists = bool(query_db(db_check))
        if debug_output:
            print(f"Database exists: {table_exists}")
        if table_exists:
            print("drive_records exists, skipping db init")
            if debug_output or show_records:
                all_drives = json.loads(get_all_drive_records())
                print("--- Drive Records ---")
                for drive in all_drives:
                    print(f"{drive['model']} - SN: {drive['serial']}")
                print("--- End Records ---")
                print()
        else:
            print("drive_records does not exist, creating")
            try:
                result_init = query_db(create_table_command)
                if debug_output:
                    print(result_init)
                print("Database created - 201")
            except sqlite3.Error as e:
                if not suppress_errors:
                    print(f"error during table initialization: {e}")
                # Fixed: previously returned flask.jsonify(...) but flask is
                # not imported in this module, so this path raised NameError.
                return None
    except sqlite3.Error as e:
        if not suppress_errors:
            print(f"error during table check: {e}")
        # Fixed: same flask.jsonify NameError as above.
        return None
|
||||||
|
|
||||||
|
|
||||||
|
####################################################
|
||||||
|
### Redis Functions
|
||||||
|
####################################################
|
||||||
|
|
||||||
|
r = redis.Redis(host='172.17.0.1', port=6379)
|
||||||
|
|
||||||
|
def update_disk_redis():
    """Publish the currently-attached drives, enriched with DB details, to Redis."""
    active = list_disk_and_serial()
    records = json.loads(get_all_drive_records())
    enriched = merge_active_with_details(active, records)
    # The WS server subscribes to this channel.
    r.publish('attached_disks', json.dumps(enriched))
    if debug_output:
        print("=== Active drives sent to Redis ===")
        print(json.dumps(enriched, indent=2))
|
||||||
|
|
||||||
|
def merge_active_with_details(active, all_records):
    """Merge DB details into each attached drive (matched by serial), in place.

    Every field except 'id' and 'serial' from the matching record in
    *all_records* is copied onto the drive dict. Drives with no matching
    record are left untouched. Returns the (mutated) *active* list.
    """
    by_serial = {record['serial']: record for record in all_records}
    for drive in active:
        match = by_serial.get(drive['serial'])
        if match is None:
            continue
        drive.update({key: value for key, value in match.items()
                      if key not in ('id', 'serial')})
    return active
|
||||||
|
|
||||||
|
########################################
|
||||||
|
# Run init_db when Class file is imported
|
||||||
|
########################################
|
||||||
|
|
||||||
|
init_db()
|
||||||
99
files/oop_code/ssd_api.py
Normal file
99
files/oop_code/ssd_api.py
Normal file
@ -0,0 +1,99 @@
|
|||||||
|
from flask import Flask, jsonify, request
|
||||||
|
import sqlite3
|
||||||
|
import redis, json, time
|
||||||
|
from SSDObject import *
|
||||||
|
from flask_apscheduler import APScheduler
|
||||||
|
from HostRedis import *
|
||||||
|
|
||||||
|
app = Flask(__name__)
|
||||||
|
debug_output = False
|
||||||
|
secure_api = False
|
||||||
|
push_redis = True
|
||||||
|
|
||||||
|
####################################################
|
||||||
|
### Flask Routes
|
||||||
|
####################################################
|
||||||
|
|
||||||
|
# Route to check if a serial number exists in the database
|
||||||
|
@app.route('/check', methods=['GET'])
def check():
    """GET /check?serial_lookup=<sn> -> JSON flag for whether the serial is recorded."""
    serial_lookup = request.args.get('serial_lookup')
    if debug_output:
        print(f"Serial to check: {serial_lookup}")
    if not serial_lookup:
        return jsonify({'error': 'No serial number provided'}), 400
    return jsonify({
        'serial_number_exists': check_serial_exists(serial_lookup),
        'serial_lookup': serial_lookup,
    })
|
||||||
|
|
||||||
|
# Route to get all drive records in JSON format
|
||||||
|
@app.route('/drives', methods=['GET'])
def index():
    """GET /drives -> JSON array of every drive record in the database."""
    return get_all_drive_records()
|
||||||
|
|
||||||
|
# Route to return active drives
|
||||||
|
@app.route('/list_active_drives', methods=['GET'])
def list_active_drives():
    """GET /list_active_drives -> JSON list of drives currently attached per lsblk."""
    return jsonify(list_disk_and_serial())
|
||||||
|
|
||||||
|
# host stats
|
||||||
|
@app.route('/host_stats', methods=['GET'])
def host_stats():
    """GET /host_stats -> JSON host metrics; also republishes to Redis when enabled."""
    if push_redis:
        update_stats_redis()
    return jsonify(get_host_stats())
|
||||||
|
|
||||||
|
# test route
|
||||||
|
@app.route('/test', methods=['GET'])
def test():
    """GET /test -> raw sqlite_master table listing (debug aid)."""
    # NOTE(review): query_db returns a list of tuples; only newer Flask
    # versions serialize a bare list — confirm against the deployed Flask.
    db_check = "SELECT name FROM sqlite_master WHERE type='table';"
    return query_db(db_check)
|
||||||
|
|
||||||
|
####################################################
|
||||||
|
### Flask Scheduler Handlers
|
||||||
|
####################################################
|
||||||
|
def ssd_scan():
    """Scheduler job: rescan /dev/sd? drives, refresh the DB, push updates to Redis."""
    disk_list_command = "ls -lo /dev/sd? | awk '{print $9}' | cut -d/ -f3"
    try:
        disk_list = run_command(disk_list_command, zero_only=False)
    except subprocess.CalledProcessError:
        # Fixed: with no /dev/sd? nodes the unexpanded glob makes ls exit
        # non-zero, run_command raises, and this once-a-second job died
        # uncaught. Treat it as "no disks".
        disk_list = []
    disk_objects = []
    for disk in disk_list:
        if debug_output:
            print(disk)
        try:
            disk_objects.append(SSDObject(dev_id=disk))
        except (TypeError, ValueError, KeyError, OSError) as e:
            print(f"Error - {e}")
    if debug_output:
        for disk in disk_objects:
            print(disk)
        print(list_disk_and_serial())
    if push_redis:
        update_stats_redis()
        update_disk_redis()
        time.sleep(0.2)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
    # Push an immediate stats update so the dashboard has data before the
    # first scheduled scan fires.
    if push_redis:
        update_stats_redis()

    # Background scanner: rescan drives every second via Flask-APScheduler.
    scheduler = APScheduler()
    scheduler.add_job(id='ssd_check', func=ssd_scan, trigger='interval', seconds=1)
    scheduler.init_app(app)
    scheduler.start()

    # NOTE(review): app.run(debug=True) enables the Werkzeug debugger; the
    # 0.0.0.0 bind exposes it network-wide — confirm this is dev-only.
    if secure_api:
        # Bind only to the docker bridge address.
        app.run(debug=True, host='172.17.0.1', port=5000)
    else:
        app.run(debug=True, host='0.0.0.0', port=5000)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
@ -73,7 +73,7 @@
|
|||||||
const headerRow = thead.insertRow();
|
const headerRow = thead.insertRow();
|
||||||
cols.forEach(col => {
|
cols.forEach(col => {
|
||||||
const th = document.createElement('th');
|
const th = document.createElement('th');
|
||||||
th.textContent = col.charAt(0).toUpperCase() + col.slice(1);
|
th.textContent = col;
|
||||||
headerRow.appendChild(th);
|
headerRow.appendChild(th);
|
||||||
});
|
});
|
||||||
// Body
|
// Body
|
||||||
|
|||||||
@ -33,7 +33,6 @@
|
|||||||
group: root
|
group: root
|
||||||
|
|
||||||
- name: websocket tasks
|
- name: websocket tasks
|
||||||
when: not quick_refresh | bool
|
|
||||||
block:
|
block:
|
||||||
|
|
||||||
- name: websocket - copy websocket server files
|
- name: websocket - copy websocket server files
|
||||||
@ -47,9 +46,20 @@
|
|||||||
- name: websocket - build docker container
|
- name: websocket - build docker container
|
||||||
community.docker.docker_image_build:
|
community.docker.docker_image_build:
|
||||||
name: ws_node
|
name: ws_node
|
||||||
|
tag: latest
|
||||||
rebuild: always
|
rebuild: always
|
||||||
path: "{{ service_folder }}/ws_node"
|
path: "{{ service_folder }}/ws_node"
|
||||||
dockerfile: Dockerfile
|
dockerfile: Dockerfile
|
||||||
|
labels:
|
||||||
|
ssd_health: "true"
|
||||||
|
|
||||||
|
- name: Prune containers with labels
|
||||||
|
community.docker.docker_prune:
|
||||||
|
containers: true
|
||||||
|
containers_filters:
|
||||||
|
label:
|
||||||
|
ssd_health: "true"
|
||||||
|
|
||||||
|
|
||||||
- name: docker containers
|
- name: docker containers
|
||||||
when: not quick_refresh | bool
|
when: not quick_refresh | bool
|
||||||
|
|||||||
@ -19,7 +19,7 @@
|
|||||||
|
|
||||||
- name: Drive Index - copy script files
|
- name: Drive Index - copy script files
|
||||||
copy:
|
copy:
|
||||||
src: scripts/
|
src: oop_code/
|
||||||
dest: "{{ service_folder }}/"
|
dest: "{{ service_folder }}/"
|
||||||
owner: "{{ autologin_user }}"
|
owner: "{{ autologin_user }}"
|
||||||
group: "{{ autologin_user }}"
|
group: "{{ autologin_user }}"
|
||||||
@ -46,9 +46,9 @@
|
|||||||
pytz
|
pytz
|
||||||
requests
|
requests
|
||||||
opencv-python
|
opencv-python
|
||||||
flask-socketio
|
|
||||||
eventlet
|
|
||||||
redis
|
redis
|
||||||
|
flask_apscheduler
|
||||||
|
|
||||||
owner: "{{ autologin_user }}"
|
owner: "{{ autologin_user }}"
|
||||||
group: "{{ autologin_user }}"
|
group: "{{ autologin_user }}"
|
||||||
mode: 0644
|
mode: 0644
|
||||||
|
|||||||
@ -5,6 +5,14 @@
|
|||||||
set_fact:
|
set_fact:
|
||||||
service_only: true
|
service_only: true
|
||||||
|
|
||||||
|
- name: Initialize - install docker when required
|
||||||
|
when: service_only | bool or not install_docker | bool
|
||||||
|
include_role:
|
||||||
|
name: "docker_workstation"
|
||||||
|
vars:
|
||||||
|
docker_full: false
|
||||||
|
|
||||||
|
|
||||||
- name: Initialize - Install Packages
|
- name: Initialize - Install Packages
|
||||||
when: not quick_refresh | bool
|
when: not quick_refresh | bool
|
||||||
apt:
|
apt:
|
||||||
@ -33,7 +41,7 @@
|
|||||||
mode: '0700'
|
mode: '0700'
|
||||||
|
|
||||||
- name: Initialize - set vars when needed
|
- name: Initialize - set vars when needed
|
||||||
when: quick_refresh | bool or refresh_special | bool
|
#when: quick_refresh | bool or refresh_special | bool
|
||||||
block:
|
block:
|
||||||
|
|
||||||
- name: Check CPU Arch
|
- name: Check CPU Arch
|
||||||
|
|||||||
@ -14,12 +14,13 @@
|
|||||||
|
|
||||||
# set up autologin
|
# set up autologin
|
||||||
- name: Drive health - configure autologin
|
- name: Drive health - configure autologin
|
||||||
when: not install_kiosk | bool and not service_only | bool
|
when: not install_kiosk | bool and not service_only | bool and not armcpu_check | bool
|
||||||
include_tasks: autologin.yaml
|
include_tasks: autologin.yaml
|
||||||
|
|
||||||
# configure service-mode
|
# disable autologin
|
||||||
- name: Drive health - configure service-mode
|
- name: Drive health - disable autologin
|
||||||
include_tasks: service_mode.yaml
|
when: install_kiosk | bool or service_only | bool
|
||||||
|
include_tasks: autologin.yaml
|
||||||
|
|
||||||
# Install chrome kiosk
|
# Install chrome kiosk
|
||||||
- name: install chromium kiosk
|
- name: install chromium kiosk
|
||||||
|
|||||||
@ -5,7 +5,7 @@
|
|||||||
- name: no autologin - edit logind to set vterms to six
|
- name: no autologin - edit logind to set vterms to six
|
||||||
lineinfile:
|
lineinfile:
|
||||||
dest: /etc/systemd/logind.conf
|
dest: /etc/systemd/logind.conf
|
||||||
regexp: '^#NAutoVTs='
|
regexp: '^#NAutoVTs=1'
|
||||||
line: 'NAutoVTs=6'
|
line: 'NAutoVTs=6'
|
||||||
backrefs: yes
|
backrefs: yes
|
||||||
|
|
||||||
@ -16,12 +16,6 @@
|
|||||||
rm /etc/systemd/system/getty@tty1.service.d/override.conf
|
rm /etc/systemd/system/getty@tty1.service.d/override.conf
|
||||||
rm /etc/sudoers.d/smartctl
|
rm /etc/sudoers.d/smartctl
|
||||||
|
|
||||||
#- name: no autologin - remove autologin override to getty tty1 service
|
|
||||||
# shell: "rm /etc/systemd/system/getty@tty1.service.d/override.conf"
|
|
||||||
#
|
|
||||||
#- name: no autologin - "User setup - allow {{ autologin_user }} to smartctl"
|
|
||||||
# shell: "rm /etc/sudoers.d/smartctl"
|
|
||||||
|
|
||||||
- name: no autologin - Restart getty@tty1 service
|
- name: no autologin - Restart getty@tty1 service
|
||||||
systemd:
|
systemd:
|
||||||
name: getty@tty1.service
|
name: getty@tty1.service
|
||||||
|
|||||||
@ -1,43 +0,0 @@
|
|||||||
---
|
|
||||||
# This will run the drive_check_service.sh script as a service
|
|
||||||
- name: Service Mode - set service mode vars
|
|
||||||
set_fact:
|
|
||||||
sleep_time: ".5"
|
|
||||||
service_mode: true
|
|
||||||
|
|
||||||
- name: "Service Mode - drive_check.service - stop service if running"
|
|
||||||
ignore_errors: yes
|
|
||||||
systemd:
|
|
||||||
name: "drive_check.service"
|
|
||||||
state: stopped
|
|
||||||
|
|
||||||
- name: "Service Mode - template drive_check_service.sh "
|
|
||||||
template:
|
|
||||||
src: drive_check_service.sh
|
|
||||||
dest: "{{ service_folder }}/drive_check_service.sh"
|
|
||||||
mode: 0755
|
|
||||||
owner: "{{ autologin_user }}"
|
|
||||||
group: "{{ autologin_user }}"
|
|
||||||
|
|
||||||
- name: "Service Mode - template drive_check_service.service"
|
|
||||||
vars:
|
|
||||||
service_name: "drive_check"
|
|
||||||
service_working_folder: "{{ service_folder }}"
|
|
||||||
service_exe: "{{ service_folder }}/drive_check_service.sh"
|
|
||||||
template:
|
|
||||||
src: "service_template.service"
|
|
||||||
dest: "/etc/systemd/system/drive_check.service"
|
|
||||||
mode: 0644
|
|
||||||
|
|
||||||
- name: "Service Mode - drive_check_service - enable and start service api and daemon reload"
|
|
||||||
systemd:
|
|
||||||
daemon_reload: yes
|
|
||||||
name: "drive_check.service"
|
|
||||||
state: started
|
|
||||||
enabled: yes
|
|
||||||
|
|
||||||
- name: Service Mode - remove autologin
|
|
||||||
when: install_kiosk | bool or service_only | bool
|
|
||||||
include_tasks: no_autologin.yaml
|
|
||||||
|
|
||||||
...
|
|
||||||
@ -1,74 +0,0 @@
|
|||||||
#!/bin/bash
|
|
||||||
# this is a big loop to handle the database
|
|
||||||
|
|
||||||
exec 2> /dev/null
|
|
||||||
while true; do
|
|
||||||
# update active drives more frequently
|
|
||||||
curl -s "http://172.17.0.1:5000/refresh_active_drives"
|
|
||||||
clear
|
|
||||||
# get all disks
|
|
||||||
DISK_LIST=$(ls -lo /dev/sd? | awk '{print $9}')
|
|
||||||
# process each disk
|
|
||||||
IFS=$'\n' read -rd '' -a DISK_ARRAY <<< "$DISK_LIST"
|
|
||||||
for DISK in "${DISK_ARRAY[@]}"; do
|
|
||||||
# store smartctl data once
|
|
||||||
SMART_DATA=$(smartctl -x $DISK)
|
|
||||||
NVME_CHECK=$(echo "$SMART_DATA" | grep "NVMe Version")
|
|
||||||
SSD_CHECK=$(echo "$SMART_DATA" | grep "Rotation Rate" | grep "Solid State")
|
|
||||||
# if either SATA SSD or NVMe
|
|
||||||
if [ -n "$NVME_CHECK" ] || [ -n "$SSD_CHECK" ]; then
|
|
||||||
BLOCK_SIZE=$(fdisk -l $DISK | grep 'Sector size' | awk '{print $4}' )
|
|
||||||
# SATA Logic
|
|
||||||
if [ -n "$SSD_CHECK" ] ; then
|
|
||||||
# Set Variables
|
|
||||||
TBW=$(echo "$SMART_DATA" | grep "Logical Sectors Written" | \
|
|
||||||
awk -v BLOCK_SIZE="$BLOCK_SIZE" '{print $4 * BLOCK_SIZE / (2 ^ 40)}')
|
|
||||||
PLR=$(echo "$SMART_DATA" | grep Percent_Lifetime_Remain | awk '{print $4}')
|
|
||||||
CAPACITY=$(echo "$SMART_DATA" | grep "User Capacity" | cut -d '[' -f 2 | sed 's/]//g')
|
|
||||||
SERIAL=$(echo "$SMART_DATA" | grep "Serial Number" | cut -d ":" -f 2 | xargs)
|
|
||||||
MODEL=$(echo "$SMART_DATA" | grep "Device Model" | cut -d ":" -f 2 | xargs)
|
|
||||||
SMART=$(echo "$SMART_DATA" | grep "self-assessment test result" | cut -d ":" -f 2 | xargs)
|
|
||||||
FLAVOR="SATA SSD"
|
|
||||||
DRIVE_EXISTS=$(curl -s "http://172.17.0.1:5000/check?serial_lookup=$SERIAL" | jq .serial_number_exists)
|
|
||||||
if [ -x "$TBW"] ; then
|
|
||||||
TBW="unknown"
|
|
||||||
fi
|
|
||||||
# database handler
|
|
||||||
if [ "$DRIVE_EXISTS" == "false" ] ; then
|
|
||||||
H_MODEL=$(echo $MODEL | sed 's/ /%20/g')
|
|
||||||
H_FLAVOR=$(echo $FLAVOR | sed 's/ /%20/g')
|
|
||||||
H_CAPACITY=$(echo $CAPACITY | sed 's/ /%20/g')
|
|
||||||
curl -s "http://172.17.0.1:5000/add_drive?serial=$SERIAL&model=$H_MODEL&flavor=$H_FLAVOR&capacity=$H_CAPACITY&TBW=$TBW&smart=$SMART"
|
|
||||||
else
|
|
||||||
curl -s "http://172.17.0.1:5000/update_drive?serial=$SERIAL&TBW=$TBW&smart=$SMART"
|
|
||||||
fi
|
|
||||||
# NVMe Logic
|
|
||||||
elif [ -n "$NVME_CHECK" ] ; then
|
|
||||||
# Set Variables
|
|
||||||
MODEL=$(echo "$SMART_DATA" | grep "Model Number" | cut -d ":" -f 2 | xargs)
|
|
||||||
SERIAL=$(echo "$SMART_DATA" | grep "Serial Number" | cut -d ":" -f 2 | xargs)
|
|
||||||
TBW=$(echo "$SMART_DATA" | grep "Data Units Written" | sed 's/,//g' | \
|
|
||||||
awk -v BLOCK_SIZE="$BLOCK_SIZE" '{print $4 * BLOCK_SIZE / (2 ^ 30)}')
|
|
||||||
AVAIL_SPARE=$(echo "$SMART_DATA" | grep "Available Spare:" | cut -d ':' -f 2 | xargs)
|
|
||||||
CAPACITY=$(echo "$SMART_DATA" | grep "amespace 1 Size" | cut -d '[' -f 2 | sed 's/]//g')
|
|
||||||
SMART=$(echo "$SMART_DATA" | grep "self-assessment test result" | cut -d ":" -f 2 | xargs)
|
|
||||||
FLAVOR="NVMe"
|
|
||||||
DRIVE_EXISTS=$(curl -s "http://172.17.0.1:5000/check?serial_lookup=$SERIAL" | jq .serial_number_exists)
|
|
||||||
if [ -x "$TBW"] ; then
|
|
||||||
TBW="unknown"
|
|
||||||
fi
|
|
||||||
# database handler
|
|
||||||
if [ "$DRIVE_EXISTS" == "false" ] ; then
|
|
||||||
H_MODEL=$(echo $MODEL | sed 's/ /%20/g')
|
|
||||||
H_FLAVOR=$(echo $FLAVOR | sed 's/ /%20/g')
|
|
||||||
H_CAPACITY=$(echo $CAPACITY | sed 's/ /%20/g')
|
|
||||||
curl -s "http://172.17.0.1:5000/add_drive?serial=$SERIAL&model=$H_MODEL&flavor=$H_FLAVOR&capacity=$H_CAPACITY&TBW=$TBW&smart=$SMART"
|
|
||||||
else
|
|
||||||
curl -s "http://172.17.0.1:5000/update_drive?serial=$SERIAL&TBW=$TBW&smart=$SMART"
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
# sleep and loop again
|
|
||||||
sleep 0.5
|
|
||||||
done
|
|
||||||
Reference in New Issue
Block a user