add purge_venv to jenkinsfile for disk api

This commit is contained in:
2026-04-25 21:39:45 -07:00
parent 67f3f8440b
commit b4839fde66
7 changed files with 81 additions and 25 deletions

View File

@ -2,4 +2,9 @@
roles_path = /var/jenkins_home/ansible-windows/roles
ansible_root = /var/jenkins_home/ansible-windows
ansible_root = /var/jenkins_home/ansible-windows
# show verbose time details in log
stdout_callback = default
callback_whitelist = profile_roles, profile_tasks
callbacks_enabled = profile_roles, profile_tasks

View File

@ -6,6 +6,8 @@ pipeline {
string(name: 'host_ip', description: 'Target System Address')
string(name: 'api_service_port', defaultValue: "5000", description: 'API Service Port, probably don\'t change this')
booleanParam(name: 'refresh_api', defaultValue: false, description: 'When checked this will just update the API')
booleanParam(name: 'purge_venv', defaultValue: false, description: 'When checked this will remove the venv folder')
// reference for later
// choice(name: 'DEPLOY_ENV', choices: ['dev', 'staging', 'prod'], description: 'Environment to deploy to')
// booleanParam(name: 'rename_host', defaultValue: true, description: 'When checked hostname will be renamed')
@ -55,7 +57,7 @@ pipeline {
ansible-playbook -i \$inventory_file \$playbook_file \
--ssh-common-args='-o StrictHostKeyChecking=no' \
--extra-vars "api_service_port=${params.api_service_port} refresh_api=${params.refresh_api}"
--extra-vars "api_service_port=${params.api_service_port} refresh_api=${params.refresh_api} purge_venv=${params.purge_venv}"
"""
}
}

View File

@ -6,9 +6,9 @@
roles:
- show_user_vars
#- show_user_vars
- cosmos_init
#- cosmos_init
- storage_api

View File

@ -16,6 +16,8 @@ nssm_folder: "{{ cosmos_root_folder }}\\nssm"
disk_service_name: "disk_api"
purge_venv: false
api_service_port: "5000"
...

View File

@ -1,3 +1,4 @@
Flask
flask_apscheduler
psutil
requests

View File

@ -5,6 +5,20 @@
name: python
state: present
- name: Purge venv if asked
when: purge_venv | bool
block:
- name: Remove service
ansible.windows.win_service:
name: "{{ disk_service_name }}"
state: absent
- name: purge folder
ansible.windows.win_file:
path: "{{ python_venv }}"
state: absent
- name: Create venv folder
ansible.windows.win_file:
path: "{{ python_venv }}"

View File

@ -2,7 +2,9 @@ from flask import Flask, jsonify
from flask_apscheduler import APScheduler
import psutil
import os
import requests, json
import requests
from requests import RequestException, Response
import json
from subprocess import check_output
app = Flask(__name__)
@ -28,7 +30,9 @@ def get_crystal_disk_info():
# Split the file content into sections for each drive
drive_sections = output.split('----------------------------------------------------------------------------')
disk_id = 0
for section in drive_sections:
lines = section.strip().splitlines()
data = {
"Hostname": None,
@ -43,7 +47,8 @@ def get_crystal_disk_info():
"Host Writes": None,
"Wear Level Count": None,
"Drive Letter": None,
"Interface": None
"Interface": None,
"Transfer Mode": None
}
for line in lines:
if "Model" in line:
@ -78,14 +83,20 @@ def get_crystal_disk_info():
data["Drive Letter"] = line.split(":", 1)[1].strip()
elif "Disk Size" in line:
if ":" in line:
data["Disk Size"] = line.split(":", 1)[1].strip()
raw = line.split(":", 1)[1].strip()
data["Disk Size"] = raw.split('GB')[0].strip() + ' GB'
elif "Interface" in line:
if ":" in line:
data["Disk Size"] = line.split(":", 1)[1].strip()
data["Interface"] = line.split(":", 1)[1].strip()
elif "Transfer Mode" in line:
if ":" in line:
data["Transfer Mode"] = line.split(":", 1)[1].strip()
            # Only record this drive if at least one field was actually parsed
if any(value is not None for value in data.values()):
data["Disk ID"] = disk_id
drives.append(data)
data["Hostname"] = "{{ hostname_output.stdout_lines[0] }}"
disk_id = disk_id + 1
#data["Hostname"] = "{{ hostname_output.stdout_lines[0] }}"
if not drives:
raise ValueError("No drive data found")
@ -103,7 +114,7 @@ def get_disk_info():
disk_info.append({
'device': partition.device.replace('\\\\', '\\').rstrip('\\'),
#'mountpoint': partition.mountpoint,
#'fstype': partition.fstype,
'fstype': partition.fstype,
'total': bytes_to_human_readable(usage.total),
'used': bytes_to_human_readable(usage.used),
'free': bytes_to_human_readable(usage.free),
@ -122,35 +133,56 @@ def drive_health():
return jsonify(get_crystal_disk_info())
def server_reporter():
base_url="http://172.25.1.18:5001/client_update"
url = f"{base_url}/process"
data_dict = get_crystal_disk_info()
response = requests.post(url, json=data_dict)
#base_url="https://cosmostat.matt-cloud.com"
base_url="http://10.200.27.20:5001"
url = f"{base_url}/storage_client_update"
drives_dict = get_crystal_disk_info()
data_dict = {
"hostname": "{{ hostname_output.stdout_lines[0] }}",
"drives": drives_dict["drives"],
"API_KEY": "deadbeef",
"storage_summary": get_disk_info()
    # Raise an exception for non-2xx status codes
response.raise_for_status()
}
result = []
try:
response = requests.post(url, json=data_dict)
        # Raise an exception for non-2xx status codes
response.raise_for_status()
result = response.json()
except:
result = {
"message": "error"
}
# Return the JSON payload
return response.json()
return result
if __name__ == '__main__':
# Background Loop Function
# That makes this the service loop
def background_loop():
# disk info Loop Function
def update_disk_info():
diskinfo_command = f"{{ storage_api_root }}\\dist\\DiskInfo64.exe /CopyExit"
result = check_output(diskinfo_command, shell=True)
print(result)
server_reporter()
return result
scheduler.add_job(id='background_loop',
func=background_loop,
# gonna try something wild
scheduler.add_job(id='update_disk_info',
func=update_disk_info,
trigger='interval',
seconds=60)
seconds=10000)
scheduler.add_job(id='server_reporter',
func=server_reporter,
trigger='interval',
seconds=15)
scheduler.init_app(app)
scheduler.start()
background_loop()
update_disk_info()
app.run(host='0.0.0.0', port={{ api_service_port }})