add purge_venv to jenkinsfile for disk api

This commit is contained in:
2026-04-25 21:39:45 -07:00
parent 67f3f8440b
commit b4839fde66
7 changed files with 81 additions and 25 deletions

View File

@ -2,7 +2,9 @@ from flask import Flask, jsonify
from flask_apscheduler import APScheduler
import psutil
import os
import requests, json
import requests
from requests import RequestException, Response
import json
from subprocess import check_output
app = Flask(__name__)
@ -28,7 +30,9 @@ def get_crystal_disk_info():
# Split the file content into sections for each drive
drive_sections = output.split('----------------------------------------------------------------------------')
disk_id = 0
for section in drive_sections:
lines = section.strip().splitlines()
data = {
"Hostname": None,
@ -43,7 +47,8 @@ def get_crystal_disk_info():
"Host Writes": None,
"Wear Level Count": None,
"Drive Letter": None,
"Interface": None
"Interface": None,
"Transfer Mode": None
}
for line in lines:
if "Model" in line:
@ -78,14 +83,20 @@ def get_crystal_disk_info():
data["Drive Letter"] = line.split(":", 1)[1].strip()
elif "Disk Size" in line:
if ":" in line:
data["Disk Size"] = line.split(":", 1)[1].strip()
raw = line.split(":", 1)[1].strip()
data["Disk Size"] = raw.split('GB')[0].strip() + ' GB'
elif "Interface" in line:
if ":" in line:
data["Disk Size"] = line.split(":", 1)[1].strip()
data["Interface"] = line.split(":", 1)[1].strip()
elif "Transfer Mode" in line:
if ":" in line:
data["Transfer Mode"] = line.split(":", 1)[1].strip()
                    # Only record this drive if at least one field was actually parsed.
if any(value is not None for value in data.values()):
data["Disk ID"] = disk_id
drives.append(data)
data["Hostname"] = "{{ hostname_output.stdout_lines[0] }}"
disk_id = disk_id + 1
#data["Hostname"] = "{{ hostname_output.stdout_lines[0] }}"
if not drives:
raise ValueError("No drive data found")
@ -103,7 +114,7 @@ def get_disk_info():
disk_info.append({
'device': partition.device.replace('\\\\', '\\').rstrip('\\'),
#'mountpoint': partition.mountpoint,
#'fstype': partition.fstype,
'fstype': partition.fstype,
'total': bytes_to_human_readable(usage.total),
'used': bytes_to_human_readable(usage.used),
'free': bytes_to_human_readable(usage.free),
@ -122,35 +133,56 @@ def drive_health():
return jsonify(get_crystal_disk_info())
def server_reporter():
base_url="http://172.25.1.18:5001/client_update"
url = f"{base_url}/process"
data_dict = get_crystal_disk_info()
response = requests.post(url, json=data_dict)
#base_url="https://cosmostat.matt-cloud.com"
base_url="http://10.200.27.20:5001"
url = f"{base_url}/storage_client_update"
drives_dict = get_crystal_disk_info()
data_dict = {
"hostname": "{{ hostname_output.stdout_lines[0] }}",
"drives": drives_dict["drives"],
"API_KEY": "deadbeef",
"storage_summary": get_disk_info()
        # Raise an exception for non-2xx status codes
response.raise_for_status()
}
result = []
try:
response = requests.post(url, json=data_dict)
        # Raise an exception for non-2xx status codes
response.raise_for_status()
result = response.json()
except:
result = {
"message": "error"
}
# Return the JSON payload
return response.json()
return result
if __name__ == '__main__':
    # Background loop function — this acts as the service's main periodic loop.
def background_loop():
    # Periodic disk-info refresh function.
def update_disk_info():
diskinfo_command = f"{{ storage_api_root }}\\dist\\DiskInfo64.exe /CopyExit"
result = check_output(diskinfo_command, shell=True)
print(result)
server_reporter()
return result
scheduler.add_job(id='background_loop',
func=background_loop,
    # Schedule the DiskInfo refresh and the server reporting as separate jobs.
scheduler.add_job(id='update_disk_info',
func=update_disk_info,
trigger='interval',
seconds=60)
seconds=10000)
scheduler.add_job(id='server_reporter',
func=server_reporter,
trigger='interval',
seconds=15)
scheduler.init_app(app)
scheduler.start()
background_loop()
update_disk_info()
app.run(host='0.0.0.0', port={{ api_service_port }})