diff --git a/.bin/Scripts/activate.py b/.bin/Scripts/activate.py deleted file mode 100644 index fa54fa5d..00000000 --- a/.bin/Scripts/activate.py +++ /dev/null @@ -1,63 +0,0 @@ -# Wizard Kit: Activate Windows using various methods - -import os -import sys - -# Init -sys.path.append(os.path.dirname(os.path.realpath(__file__))) -from functions.activation import * -init_global_vars() -os.system('title {}: Windows Activation Tool'.format(KIT_NAME_FULL)) - -if __name__ == '__main__': - try: - stay_awake() - clear_screen() - print_info('{}: Windows Activation Tool\n'.format(KIT_NAME_FULL)) - # Bail early if already activated - if windows_is_activated(): - print_info('This system is already activated') - sleep(5) - exit_script() - other_results = { - 'Error': { - 'BIOSKeyNotFoundError': 'BIOS key not found.', - }} - - # Determine activation method - activation_methods = [ - {'Name': 'Activate with BIOS key', 'Function': activate_with_bios}, - ] - if global_vars['OS']['Version'] not in ('8', '8.1', '10'): - activation_methods[0]['Disabled'] = True - actions = [ - {'Name': 'Quit', 'Letter': 'Q'}, - ] - - while True: - selection = menu_select( - '{}: Windows Activation Menu'.format(KIT_NAME_FULL), - main_entries=activation_methods, action_entries=actions) - - if (selection.isnumeric()): - result = try_and_print( - message = activation_methods[int(selection)-1]['Name'], - function = activation_methods[int(selection)-1]['Function'], - other_results=other_results) - if result['CS']: - break - else: - sleep(2) - elif selection == 'Q': - exit_script() - - # Done - print_success('\nDone.') - pause("Press Enter to exit...") - exit_script() - except SystemExit as sys_exit: - exit_script(sys_exit.code) - except: - major_exception() - -# vim: sts=2 sw=2 ts=2 diff --git a/.bin/Scripts/build-ufd b/.bin/Scripts/build-ufd deleted file mode 100755 index 45c3ff35..00000000 --- a/.bin/Scripts/build-ufd +++ /dev/null @@ -1,149 +0,0 @@ -#!/bin/env python3 -# -# pylint: disable=no-name-in-module,wildcard-import,wrong-import-position -# vim: sts=2 sw=2 ts=2 -"""Wizard Kit: UFD build tool""" - -import os -import sys - -# Init -sys.path.append(os.path.dirname(os.path.realpath(__file__))) -from docopt import docopt -from functions.common import * -from functions.ufd import * -from settings.ufd import * -init_global_vars(silent=True) - -# Main section -if __name__ == '__main__': - # pylint: disable=invalid-name - # Set log - try: - global_vars['LogDir'] = '{}/Logs'.format( - get_user_home(get_user_name())) - set_log_file('Build UFD ({Date-Time}).log'.format(**global_vars)) - except: # pylint: disable=bare-except - major_exception() - - # Header - print_success(KIT_NAME_FULL) - print_standard('UFD Build Tool') - print_standard(' ') - - # Check if running as root - if not running_as_root(): - print_error('ERROR: This script is meant to be run as root.') - abort(False) - - # Docopt - try: - args = docopt(DOCSTRING) - except SystemExit as sys_exit: - # Catch docopt exits - exit_script(sys_exit.code) - except: # pylint: disable=bare-except - major_exception() - - try: - # Verify selections - ufd_dev = verify_ufd(args['--ufd-device']) - sources = verify_sources(args, UFD_SOURCES) - show_selections(args, sources, ufd_dev, UFD_SOURCES) - if not args['--force']: - confirm_selections(args) - - # Prep UFD - if not args['--update']: - print_info('Prep UFD') - prep_device(ufd_dev, UFD_LABEL, use_mbr=args['--use-mbr']) - - # Mount UFD - try_and_print( - indent=2, - message='Mounting UFD...', - function=mount, - 
mount_source=find_first_partition(ufd_dev), - mount_point='/mnt/UFD', - read_write=True, - ) - - # Remove Arch folder - if args['--update']: - try_and_print( - indent=2, - message='Removing Linux...', - function=remove_arch, - ) - - # Copy sources - print_standard(' ') - print_info('Copy Sources') - for s_label, s_path in sources.items(): - try_and_print( - indent=2, - message='Copying {}...'.format(s_label), - function=copy_source, - source=s_path, - items=ITEMS[s_label], - overwrite=True, - ) - - # Update boot entries - print_standard(' ') - print_info('Boot Setup') - try_and_print( - indent=2, - message='Updating boot entries...', - function=update_boot_entries, - boot_entries=BOOT_ENTRIES, - boot_files=BOOT_FILES, - iso_label=ISO_LABEL, - ufd_label=UFD_LABEL, - ) - - # Install syslinux (to partition) - try_and_print( - indent=2, - message='Syslinux (partition)...', - function=install_syslinux_to_partition, - partition=find_first_partition(ufd_dev), - ) - - # Unmount UFD - try_and_print( - indent=2, - message='Unmounting UFD...', - function=unmount, - mount_point='/mnt/UFD', - ) - - # Install syslinux (to device) - try_and_print( - indent=2, - message='Syslinux (device)...', - function=install_syslinux_to_dev, - ufd_dev=ufd_dev, - use_mbr=args['--use-mbr'], - ) - - # Hide items - print_standard(' ') - print_info('Final Touches') - try_and_print( - indent=2, - message='Hiding items...', - function=hide_items, - ufd_dev=ufd_dev, - items=ITEMS_HIDDEN, - ) - - # Done - if not args['--force']: - print_standard('\nDone.') - pause('Press Enter to exit...') - exit_script() - except SystemExit as sys_exit: - exit_script(sys_exit.code) - except: # pylint: disable=bare-except - major_exception() diff --git a/.bin/Scripts/cbs_fix.py b/.bin/Scripts/cbs_fix.py deleted file mode 100644 index 167f95aa..00000000 --- a/.bin/Scripts/cbs_fix.py +++ /dev/null @@ -1,43 +0,0 @@ -# Wizard Kit: Backup CBS Logs and prep CBS temp data for deletion - -import os -import sys - -# Init -sys.path.append(os.path.dirname(os.path.realpath(__file__))) -from functions.cleanup import * -from functions.data import * -init_global_vars() -os.system('title {}: CBS Cleanup'.format(KIT_NAME_FULL)) -set_log_file('CBS Cleanup.log') - -if __name__ == '__main__': - try: - # Prep - stay_awake() - clear_screen() - folder_path = r'{}\Backups'.format(KIT_NAME_SHORT) - dest = select_destination(folder_path=folder_path, - prompt='Which disk are we using for temp data and backup?') - - # Show details - print_info('{}: CBS Cleanup Tool\n'.format(KIT_NAME_FULL)) - show_data('Backup / Temp path:', dest) - print_standard('\n') - if (not ask('Proceed with CBS cleanup?')): - abort() - - # Run Cleanup - try_and_print(message='Running cleanup...', function=cleanup_cbs, - cs='Done', dest_folder=dest) - - # Done - print_standard('\nDone.') - pause("Press Enter to exit...") - exit_script() - except SystemExit as sys_exit: - exit_script(sys_exit.code) - except: - major_exception() - -# vim: sts=2 sw=2 ts=2 diff --git a/.bin/Scripts/ddrescue-tui b/.bin/Scripts/ddrescue-tui deleted file mode 100755 index 6ee8ad57..00000000 --- a/.bin/Scripts/ddrescue-tui +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash -# -## Wizard Kit: ddrescue TUI Launcher - -source launch-in-tmux - -SESSION_NAME="ddrescue-tui" -WINDOW_NAME="ddrescue TUI" -TMUX_CMD="ddrescue-tui-menu" - -launch_in_tmux "$@" diff --git a/.bin/Scripts/ddrescue-tui-menu b/.bin/Scripts/ddrescue-tui-menu deleted file mode 100755 index eab8cd3f..00000000 --- a/.bin/Scripts/ddrescue-tui-menu +++ /dev/null @@ 
-1,64 +0,0 @@ -#!/bin/python3 -# -## Wizard Kit: TUI for ddrescue cloning and imaging - -import os -import sys - -# Init -sys.path.append(os.path.dirname(os.path.realpath(__file__))) -from functions.ddrescue import * -from functions.hw_diags import * -init_global_vars() - -if __name__ == '__main__': - try: - # Prep - clear_screen() - args = list(sys.argv) - run_mode = '' - source_path = None - dest_path = None - - # Parse args - try: - script_name = os.path.basename(args.pop(0)) - run_mode = str(args.pop(0)).lower() - source_path = args.pop(0) - dest_path = args.pop(0) - except IndexError: - # We'll set the missing paths later - pass - - # Show usage - if re.search(r'-+(h|help)', str(sys.argv), re.IGNORECASE): - show_usage(script_name) - exit_script() - - # Start cloning/imaging - if run_mode in ('clone', 'image'): - menu_ddrescue(source_path, dest_path, run_mode) - else: - if not re.search(r'^-*(h|help\?)', run_mode, re.IGNORECASE): - print_error('Invalid mode.') - - # Done - print_standard('\nDone.') - pause("Press Enter to exit...") - tmux_switch_client() - exit_script() - except GenericAbort: - abort() - except GenericError as ge: - msg = 'Generic Error' - if str(ge): - msg = str(ge) - print_error(msg) - abort() - except SystemExit as sys_exit: - tmux_switch_client() - exit_script(sys_exit.code) - except: - major_exception() - -# vim: sts=2 sw=2 ts=2 diff --git a/.bin/Scripts/functions/common.py b/.bin/Scripts/functions/common.py deleted file mode 100644 index 689cc85f..00000000 --- a/.bin/Scripts/functions/common.py +++ /dev/null @@ -1,954 +0,0 @@ -# Wizard Kit: Functions - Common - -import os -import psutil -import re -import shutil -import subprocess -import sys -import time -import traceback -try: - import winreg -except ModuleNotFoundError: - if psutil.WINDOWS: - raise - -from settings.main import * -from settings.tools import * -from settings.windows_builds import * -from subprocess import CalledProcessError - - -# Global variables -global_vars = {} - - -# STATIC VARIABLES -COLORS = { - 'CLEAR': '\033[0m', - 'RED': '\033[31m', - 'ORANGE': '\033[31;1m', - 'GREEN': '\033[32m', - 'YELLOW': '\033[33m', - 'BLUE': '\033[34m', - 'PURPLE': '\033[35m', - 'CYAN': '\033[36m', - } -try: - HKU = winreg.HKEY_USERS - HKCR = winreg.HKEY_CLASSES_ROOT - HKCU = winreg.HKEY_CURRENT_USER - HKLM = winreg.HKEY_LOCAL_MACHINE -except NameError: - if psutil.WINDOWS: - raise - - -# Error Classes -class BIOSKeyNotFoundError(Exception): - pass - -class BinNotFoundError(Exception): - pass - -class GenericAbort(Exception): - pass - -class GenericError(Exception): - pass - -class GenericRepair(Exception): - pass - -class MultipleInstallationsError(Exception): - pass - -class NoProfilesError(Exception): - pass - -class Not4KAlignedError(Exception): - pass - -class NotInstalledError(Exception): - pass - -class OSInstalledLegacyError(Exception): - pass - -class PathNotFoundError(Exception): - pass - -class UnsupportedOSError(Exception): - pass - -class SecureBootDisabledError(Exception): - pass - -class SecureBootNotAvailError(Exception): - pass - -class SecureBootUnknownError(Exception): - pass - -class WindowsOutdatedError(Exception): - pass - -class WindowsUnsupportedError(Exception): - pass - - -# General functions -def abort(show_prompt=True): - """Abort script.""" - print_warning('Aborted.') - if show_prompt: - sleep(1) - pause(prompt='Press Enter to exit... 
') - exit_script(1) - - -def ask(prompt='Kotaero!'): - """Prompt the user with a Y/N question, returns bool.""" - answer = None - prompt = '{} [Y/N]: '.format(prompt) - while answer is None: - tmp = input(prompt) - if re.search(r'^y(es|)$', tmp, re.IGNORECASE): - answer = True - elif re.search(r'^n(o|ope|)$', tmp, re.IGNORECASE): - answer = False - message = '{prompt}{answer_text}'.format( - prompt = prompt, - answer_text = 'Yes' if answer else 'No') - print_log(message=message) - return answer - - -def beep(repeat=1): - """Play system bell with optional repeat.""" - for i in range(repeat): - # Print bell char - print('\a') - sleep(0.5) - - -def choice(choices, prompt='Kotaero!'): - """Prompt the user with a choice question, returns str.""" - answer = None - choices = [str(c) for c in choices] - choices_short = {c[:1].upper(): c for c in choices} - prompt = '{} [{}]: '.format(prompt, '/'.join(choices)) - regex = '^({}|{})$'.format( - '|'.join([c[:1] for c in choices]), - '|'.join(choices)) - - # Get user's choice - while answer is None: - tmp = input(prompt) - if re.search(regex, tmp, re.IGNORECASE): - answer = tmp - - # Log result - message = '{prompt}{answer_text}'.format( - prompt = prompt, - answer_text = 'Yes' if answer else 'No') - print_log(message=message) - - # Fix answer formatting to match provided values - answer = choices_short[answer[:1].upper()] - - # Done - return answer - - -def clear_screen(): - """Simple wrapper for cls/clear.""" - if psutil.WINDOWS: - os.system('cls') - else: - os.system('clear') - - -def convert_to_bytes(size): - """Convert human-readable size str to bytes and return an int.""" - size = str(size) - tmp = re.search(r'(\d+\.?\d*)\s+([PTGMKB])B?', size.upper()) - if tmp: - size = float(tmp.group(1)) - units = tmp.group(2) - if units == 'P': - size *= 1024 ** 5 - if units == 'T': - size *= 1024 ** 4 - elif units == 'G': - size *= 1024 ** 3 - elif units == 'M': - size *= 1024 ** 2 - elif units == 'K': - size *= 1024 ** 1 - elif units == 'B': - size *= 1024 ** 0 - size = int(size) - else: - return -1 - - return size - - -def exit_script(return_value=0): - """Exits the script after some cleanup and opens the log (if set).""" - # Remove dirs (if empty) - for dir in ['BackupDir', 'LogDir', 'TmpDir']: - try: - os.rmdir(global_vars[dir]) - except Exception: - pass - - # Open Log (if it exists) - log = global_vars.get('LogFile', '') - if log and os.path.exists(log) and psutil.WINDOWS and ENABLED_OPEN_LOGS: - try: - extract_item('NotepadPlusPlus', silent=True) - popen_program( - [global_vars['Tools']['NotepadPlusPlus'], - global_vars['LogFile']]) - except Exception: - print_error('ERROR: Failed to extract Notepad++ and open log.') - pause('Press Enter to exit...') - - # Kill Caffeine if still running - kill_process('caffeine.exe') - - # Exit - sys.exit(return_value) - - -def extract_item(item, filter='', silent=False): - """Extract item from .cbin into .bin.""" - cmd = [ - global_vars['Tools']['SevenZip'], 'x', '-aos', '-bso0', '-bse0', - '-p{ArchivePassword}'.format(**global_vars), - r'-o{BinDir}\{item}'.format(item=item, **global_vars), - r'{CBinDir}\{item}.7z'.format(item=item, **global_vars), - filter] - if not silent: - print_standard('Extracting "{item}"...'.format(item=item)) - try: - run_program(cmd) - except FileNotFoundError: - if not silent: - print_warning('WARNING: Archive not found') - except subprocess.CalledProcessError: - if not silent: - print_warning('WARNING: Errors encountered while exctracting data') - - -def get_process(name=None): - """Get 
process by name, returns psutil.Process obj.""" - proc = None - if not name: - raise GenericError - - for p in psutil.process_iter(): - try: - if p.name() == name: - proc = p - except psutil._exceptions.NoSuchProcess: - # Process finished during iteration? Going to ignore - pass - return proc - - -def get_simple_string(prompt='Enter string'): - """Get string from user (restricted character set), returns str.""" - simple_string = None - while simple_string is None: - _input = input('{}: '.format(prompt)) - if re.match(r"^(\w|-| |\.|')+$", _input, re.ASCII): - simple_string = _input.strip() - return simple_string - - -def get_ticket_number(): - """Get TicketNumber from user, save in LogDir, and return as str.""" - if not ENABLED_TICKET_NUMBERS: - return None - ticket_number = None - while ticket_number is None: - _input = input('Enter ticket number: ') - if re.match(r'^([0-9]+([-_]?\w+|))$', _input): - ticket_number = _input - out_file = r'{}\TicketNumber'.format(global_vars['LogDir']) - if not psutil.WINDOWS: - out_file = out_file.replace('\\', '/') - with open(out_file, 'w', encoding='utf-8') as f: - f.write(ticket_number) - return ticket_number - - -def human_readable_size(size, decimals=0): - """Convert size from bytes to a human-readable format, returns str.""" - # Prep string formatting - width = 3+decimals - if decimals > 0: - width += 1 - - # Convert size to int - try: - size = int(size) - except ValueError: - size = convert_to_bytes(size) - except TypeError: - size = -1 - - # Verify we have a valid size - if size < 0: - return '{size:>{width}} b'.format(size='???', width=width) - - # Convert to sensible units - if size >= 1024 ** 5: - size /= 1024 ** 5 - units = 'PB' - elif size >= 1024 ** 4: - size /= 1024 ** 4 - units = 'TB' - elif size >= 1024 ** 3: - size /= 1024 ** 3 - units = 'GB' - elif size >= 1024 ** 2: - size /= 1024 ** 2 - units = 'MB' - elif size >= 1024 ** 1: - size /= 1024 ** 1 - units = 'KB' - else: - size /= 1024 ** 0 - units = ' B' - - # Return - return '{size:>{width}.{decimals}f} {units}'.format( - size=size, width=width, decimals=decimals, units=units) - - -def kill_process(name): - """Kill any running caffeine.exe processes.""" - for proc in psutil.process_iter(): - if proc.name() == name: - proc.kill() - - -def major_exception(): - """Display traceback and exit""" - print_error('Major exception') - print_warning(SUPPORT_MESSAGE) - print(traceback.format_exc()) - print_log(traceback.format_exc()) - try: - upload_crash_details() - except GenericAbort: - # User declined upload - print_warning('Upload: Aborted') - sleep(10) - except GenericError: - # No log file or uploading disabled - sleep(10) - except: - print_error('Upload: NS') - sleep(10) - else: - print_success('Upload: CS') - pause('Press Enter to exit...') - exit_script(1) - - -def menu_select( - title='[Untitled Menu]', - prompt='Please make a selection', secret_actions=[], secret_exit=False, - main_entries=[], action_entries=[], disabled_label='DISABLED', - spacer=''): - """Display options in a menu and return selected option as a str.""" - # Bail early - if not main_entries and not action_entries: - raise Exception("MenuError: No items given") - - # Set title - if 'Title' in global_vars: - title = '{}\n\n{}'.format(global_vars['Title'], title) - - # Build menu - menu_splash = '{}\n{}\n'.format(title, spacer) - width = len(str(len(main_entries))) - valid_answers = [] - if secret_exit: - valid_answers.append('Q') - if secret_actions: - valid_answers.extend(secret_actions) - - # Add main entries - for i in 
range(len(main_entries)): - entry = main_entries[i] - # Add Spacer - if ('CRLF' in entry): - menu_splash += '{}\n'.format(spacer) - entry_str = '{number:>{width}}: {name}'.format( - number = i+1, - width = width, - name = entry.get('Display Name', entry['Name'])) - if entry.get('Disabled', False): - entry_str = '{YELLOW}{entry_str} ({disabled}){CLEAR}'.format( - entry_str = entry_str, - disabled = disabled_label, - **COLORS) - else: - valid_answers.append(str(i+1)) - menu_splash += '{}\n'.format(entry_str) - menu_splash += '{}\n'.format(spacer) - - # Add action entries - for entry in action_entries: - # Add Spacer - if ('CRLF' in entry): - menu_splash += '{}\n'.format(spacer) - valid_answers.append(entry['Letter']) - menu_splash += '{letter:>{width}}: {name}\n'.format( - letter = entry['Letter'].upper(), - width = len(str(len(action_entries))), - name = entry['Name']) - - answer = '' - - while (answer.upper() not in valid_answers): - clear_screen() - print(menu_splash) - answer = input('{}: '.format(prompt)) - - return answer.upper() - - -def non_clobber_rename(full_path): - """Append suffix to path, if necessary, to avoid clobbering path""" - new_path = full_path - _i = 1; - while os.path.exists(new_path): - new_path = '{path}_{i}'.format(i=_i, path=full_path) - _i += 1 - - return new_path - - -def pause(prompt='Press Enter to continue... '): - """Simple pause implementation.""" - if prompt[-1] != ' ': - prompt += ' ' - input(prompt) - - -def ping(addr='google.com'): - """Attempt to ping addr.""" - cmd = [ - 'ping', - '-n' if psutil.WINDOWS else '-c', - '2', - addr] - run_program(cmd) - - -def popen_program(cmd, pipe=False, minimized=False, shell=False, **kwargs): - """Run program and return a subprocess.Popen object.""" - cmd_kwargs = {'args': cmd, 'shell': shell} - for kw in ('encoding', 'errors'): - if kw in kwargs: - cmd_kwargs[kw] = kwargs[kw] - - if minimized: - startupinfo = subprocess.STARTUPINFO() - startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW - startupinfo.wShowWindow = 6 - cmd_kwargs['startupinfo'] = startupinfo - - if pipe: - cmd_kwargs.update({ - 'stdout': subprocess.PIPE, - 'stderr': subprocess.PIPE, - }) - - if 'cwd' in kwargs: - cmd_kwargs['cwd'] = kwargs['cwd'] - - return subprocess.Popen(**cmd_kwargs) - - -def print_error(*args, **kwargs): - """Prints message to screen in RED.""" - print_standard(*args, color=COLORS['RED'], **kwargs) - - -def print_info(*args, **kwargs): - """Prints message to screen in BLUE.""" - print_standard(*args, color=COLORS['BLUE'], **kwargs) - - -def print_standard(message='Generic info', - color=None, end='\n', timestamp=True, **kwargs): - """Prints message to screen and log (if set).""" - display_message = message - if color: - display_message = color + message + COLORS['CLEAR'] - # **COLORS is used below to support non-"standard" color printing - print(display_message.format(**COLORS), end=end, **kwargs) - print_log(message, end, timestamp) - - -def print_success(*args, **kwargs): - """Prints message to screen in GREEN.""" - print_standard(*args, color=COLORS['GREEN'], **kwargs) - - -def print_warning(*args, **kwargs): - """Prints message to screen in YELLOW.""" - print_standard(*args, color=COLORS['YELLOW'], **kwargs) - - -def print_log(message='', end='\n', timestamp=True): - """Writes message to a log if LogFile is set.""" - time_str = time.strftime("%Y-%m-%d %H%M%z: ") if timestamp else '' - if 'LogFile' in global_vars and global_vars['LogFile']: - with open(global_vars['LogFile'], 'a', encoding='utf-8') as f: - for line in 
message.splitlines(): - f.write('{timestamp}{line}{end}'.format( - timestamp = time_str, - line = line, - end = end)) - - -def run_program(cmd, check=True, pipe=True, shell=False, **kwargs): - """Run program and return a subprocess.CompletedProcess object.""" - cmd = [c for c in cmd if c] - if shell: - cmd = ' '.join(cmd) - - cmd_kwargs = {'args': cmd, 'check': check, 'shell': shell} - for kw in ('encoding', 'errors'): - if kw in kwargs: - cmd_kwargs[kw] = kwargs[kw] - - if pipe: - cmd_kwargs.update({ - 'stdout': subprocess.PIPE, - 'stderr': subprocess.PIPE, - }) - - if 'cwd' in kwargs: - cmd_kwargs['cwd'] = kwargs['cwd'] - - return subprocess.run(**cmd_kwargs) - - -def set_title(title='[Some Title]'): - """Set title. - - Used for window title and menu titles.""" - global_vars['Title'] = title - os.system('title {}'.format(title)) - - -def show_data( - message='[Some message]', data='[Some data]', - indent=8, width=32, - info=False, warning=False, error=False): - """Display info with formatting.""" - message = '{indent}{message:<{width}}{data}'.format( - indent=' '*indent, width=width, message=message, data=data) - if error: - print_error(message) - elif warning: - print_warning(message) - elif info: - print_info(message) - else: - print_standard(message) - - -def sleep(seconds=2): - """Wait for a while.""" - time.sleep(seconds) - - -def stay_awake(): - """Prevent the system from sleeping or hibernating.""" - # DISABLED due to VCR2008 dependency - return - # Bail if caffeine is already running - for proc in psutil.process_iter(): - if proc.name() == 'caffeine.exe': - return - # Extract and run - extract_item('Caffeine', silent=True) - try: - popen_program([global_vars['Tools']['Caffeine']]) - except Exception: - print_error('ERROR: No caffeine available.') - print_warning('Please set the power setting to High Performance.') - - -def strip_colors(s): - """Remove all ASCII color escapes from string, returns str.""" - for c in COLORS.values(): - s = s.replace(c, '') - return s - - -def get_exception(s): - """Get exception by name, returns Exception object.""" - try: - obj = getattr(sys.modules[__name__], s) - except AttributeError: - # Try builtin classes - obj = getattr(sys.modules['builtins'], s) - return obj - - -def try_and_print(message='Trying...', - function=None, cs='CS', ns='NS', other_results={}, - catch_all=True, print_return=False, silent_function=True, - indent=8, width=32, *args, **kwargs): - """Run function, print if successful or not, and return dict. - - other_results is in the form of - { - 'Warning': {'ExceptionClassName': 'Result Message'}, - 'Error': {'ExceptionClassName': 'Result Message'} - } - The the ExceptionClassNames will be excepted conditions - and the result string will be printed in the correct color. 
- catch_all=False will re-raise unspecified exceptions.""" - err = None - out = None - w_exceptions = other_results.get('Warning', {}).keys() - w_exceptions = tuple(get_exception(e) for e in w_exceptions) - e_exceptions = other_results.get('Error', {}).keys() - e_exceptions = tuple(get_exception(e) for e in e_exceptions) - w_results = other_results.get('Warning', {}) - e_results = other_results.get('Error', {}) - - # Run function and catch errors - print_standard('{indent}{message:<{width}}'.format( - indent=' '*indent, message=message, width=width), end='', flush=True) - try: - out = function(*args, **kwargs) - if print_return: - str_list = out - if isinstance(out, subprocess.CompletedProcess): - str_list = out.stdout.decode().strip().splitlines() - print_standard(str_list[0].strip(), timestamp=False) - for item in str_list[1:]: - print_standard('{indent}{item}'.format( - indent=' '*(indent+width), item=item.strip())) - elif silent_function: - print_success(cs, timestamp=False) - except w_exceptions as e: - _result = w_results.get(e.__class__.__name__, 'Warning') - print_warning(_result, timestamp=False) - err = e - except e_exceptions as e: - _result = e_results.get(e.__class__.__name__, 'Error') - print_error(_result, timestamp=False) - err = e - except Exception: - print_error(ns, timestamp=False) - err = traceback.format_exc() - - # Return or raise? - if err and not catch_all: - raise - else: - return {'CS': not bool(err), 'Error': err, 'Out': out} - - -def upload_crash_details(): - """Upload log and runtime data to the CRASH_SERVER. - - Intended for uploading to a public Nextcloud share.""" - if not ENABLED_UPLOAD_DATA: - raise GenericError - - import requests - if 'LogFile' in global_vars and global_vars['LogFile']: - if ask('Upload crash details to {}?'.format(CRASH_SERVER['Name'])): - with open(global_vars['LogFile']) as f: - data = '{}\n'.format(f.read()) - data += '#############################\n' - data += 'Runtime Details:\n\n' - data += 'sys.argv: {}\n\n'.format(sys.argv) - try: - data += generate_global_vars_report() - except Exception: - data += 'global_vars: {}\n'.format(global_vars) - filename = global_vars.get('LogFile', 'Unknown') - filename = re.sub(r'.*(\\|/)', '', filename) - filename += '.txt' - url = '{}/Crash_{}__{}'.format( - CRASH_SERVER['Url'], - global_vars.get('Date-Time', 'Unknown Date-Time'), - filename) - r = requests.put( - url, data=data, - headers={'X-Requested-With': 'XMLHttpRequest'}, - auth=(CRASH_SERVER['User'], CRASH_SERVER['Pass'])) - # Raise exception if upload NS - if not r.ok: - raise Exception - else: - # User said no - raise GenericAbort - else: - # No LogFile defined (or invalid LogFile) - raise GenericError - - -def wait_for_process(name, poll_rate=3): - """Wait for process by name.""" - running = True - while running: - sleep(poll_rate) - running = False - for proc in psutil.process_iter(): - try: - if re.search(r'^{}'.format(name), proc.name(), re.IGNORECASE): - running = True - except psutil._exceptions.NoSuchProcess: - # Assuming process closed during iteration - pass - sleep(1) - - -# global_vars functions -def init_global_vars(silent=False): - """Sets global variables based on system info.""" - if not silent: - print_info('Initializing') - if psutil.WINDOWS: - os.system('title Wizard Kit') - if psutil.LINUX: - init_functions = [ - ['Checking environment...', set_linux_vars], - ['Clearing collisions...', clean_env_vars], - ] - else: - init_functions = [ - ['Checking .bin...', find_bin], - ['Checking environment...', set_common_vars], - 
['Checking OS...', check_os], - ['Checking tools...', check_tools], - ['Creating folders...', make_tmp_dirs], - ['Clearing collisions...', clean_env_vars], - ] - try: - if silent: - for f in init_functions: - f[1]() - else: - for f in init_functions: - try_and_print( - message=f[0], function=f[1], - cs='Done', ns='Error', catch_all=False) - except: - major_exception() - - -def check_os(): - """Set OS specific variables.""" - tmp = {} - - # Query registry - path = r'SOFTWARE\Microsoft\Windows NT\CurrentVersion' - with winreg.OpenKey(HKLM, path) as key: - for name in ['CurrentBuild', 'CurrentVersion', 'ProductName']: - try: - tmp[name] = winreg.QueryValueEx(key, name)[0] - except FileNotFoundError: - tmp[name] = 'Unknown' - - # Handle CurrentBuild collision - if tmp['CurrentBuild'] == '9200': - if tmp['CurrentVersion'] == '6.2': - # Windown 8, set to fake build number - tmp['CurrentBuild'] = '9199' - else: - # Windows 8.1, leave alone - pass - - # Check bit depth - tmp['Arch'] = 32 - if 'PROGRAMFILES(X86)' in global_vars['Env']: - tmp['Arch'] = 64 - - # Get Windows build info - build_info = WINDOWS_BUILDS.get(tmp['CurrentBuild'], None) - if build_info is None: - # Not in windows_builds.py - build_info = [ - 'Unknown', - 'Build {}'.format(tmp['CurrentBuild']), - None, - None, - 'unrecognized'] - else: - build_info = list(build_info) - tmp['Version'] = build_info.pop(0) - tmp['Release'] = build_info.pop(0) - tmp['Codename'] = build_info.pop(0) - tmp['Marketing Name'] = build_info.pop(0) - tmp['Notes'] = build_info.pop(0) - - # Set name - tmp['Name'] = tmp['ProductName'] - if tmp['Release']: - tmp['Name'] += ' {}'.format(tmp['Release']) - if tmp['Codename']: - tmp['Name'] += ' "{}"'.format(tmp['Codename']) - if tmp['Marketing Name']: - tmp['Name'] += ' / "{}"'.format(tmp['Marketing Name']) - tmp['Name'] = re.sub(r'\s+', ' ', tmp['Name']) - - # Set display name - tmp['DisplayName'] = '{} x{}'.format(tmp['Name'], tmp['Arch']) - if tmp['Notes']: - tmp['DisplayName'] += ' ({})'.format(tmp['Notes']) - - global_vars['OS'] = tmp - - -def check_tools(): - """Set tool variables based on OS bit-depth and tool availability.""" - if global_vars['OS'].get('Arch', 32) == 64: - global_vars['Tools'] = { - k: v.get('64', v.get('32')) for (k, v) in TOOLS.items()} - else: - global_vars['Tools'] = {k: v.get('32') for (k, v) in TOOLS.items()} - - # Fix paths - global_vars['Tools'] = {k: os.path.join(global_vars['BinDir'], v) - for (k, v) in global_vars['Tools'].items()} - - -def clean_env_vars(): - """Remove conflicting global_vars and env variables. 
- - This fixes an issue where both global_vars and - global_vars['Env'] are expanded at the same time.""" - for key in global_vars.keys(): - global_vars['Env'].pop(key, None) - - -def find_bin(): - """Find .bin folder in the cwd or it's parents.""" - wd = os.getcwd() - base = None - while base is None: - if os.path.exists('.bin'): - base = os.getcwd() - break - if re.fullmatch(r'\w:\\', os.getcwd()): - break - os.chdir('..') - os.chdir(wd) - if base is None: - raise BinNotFoundError - global_vars['BaseDir'] = base - - -def generate_global_vars_report(): - """Build readable string from global_vars, returns str.""" - report = ['global_vars: {'] - for k, v in sorted(global_vars.items()): - if k == 'Env': - continue - if isinstance(v, list): - report.append(' {}: ['.format(str(k))) - for item in v: - report.append(' {}'.format(str(v))) - report.append(' ]') - elif isinstance(v, dict): - report.append(' {}: {{'.format(str(k))) - for item_k, item_v in sorted(v.items()): - report.append(' {:<15} {}'.format( - str(item_k)+':', str(item_v))) - report.append(' }') - else: - report.append(' {:<18}{}'.format(str(k)+':', str(v))) - report.append(' Env:') - for k, v in sorted(global_vars.get('Env', {}).items()): - report.append(' {:<15} {}'.format( - str(k)+':', str(v))) - report.append('}') - - return '\n'.join(report) - - -def make_tmp_dirs(): - """Make temp directories.""" - os.makedirs(global_vars['BackupDir'], exist_ok=True) - os.makedirs(global_vars['LogDir'], exist_ok=True) - os.makedirs(r'{}\{}'.format( - global_vars['LogDir'], KIT_NAME_FULL), exist_ok=True) - os.makedirs(r'{}\Tools'.format(global_vars['LogDir']), exist_ok=True) - os.makedirs(global_vars['TmpDir'], exist_ok=True) - - -def set_common_vars(): - """Set common variables.""" - global_vars['Date'] = time.strftime("%Y-%m-%d") - global_vars['Date-Time'] = time.strftime("%Y-%m-%d_%H%M_%z") - global_vars['Env'] = os.environ.copy() - - global_vars['ArchivePassword'] = ARCHIVE_PASSWORD - global_vars['BinDir'] = r'{BaseDir}\.bin'.format(**global_vars) - global_vars['CBinDir'] = r'{BaseDir}\.cbin'.format(**global_vars) - global_vars['ClientDir'] = r'{SYSTEMDRIVE}\{prefix}'.format( - prefix=KIT_NAME_SHORT, **global_vars['Env']) - global_vars['BackupDir'] = r'{ClientDir}\Backups'.format(**global_vars) - global_vars['LogDir'] = r'{ClientDir}\Logs\{Date}'.format(**global_vars) - global_vars['QuarantineDir'] = r'{ClientDir}\Quarantine'.format(**global_vars) - global_vars['TmpDir'] = r'{BinDir}\tmp'.format(**global_vars) - - -def set_linux_vars(): - """Set common variables in a Linux environment. 
- - These assume we're running under a WK-Linux build.""" - result = run_program(['mktemp', '-d']) - global_vars['TmpDir'] = result.stdout.decode().strip() - global_vars['Date'] = time.strftime("%Y-%m-%d") - global_vars['Date-Time'] = time.strftime("%Y-%m-%d_%H%M_%z") - global_vars['Env'] = os.environ.copy() - global_vars['BinDir'] = '/usr/local/bin' - global_vars['LogDir'] = '{}/Logs'.format(global_vars['Env']['HOME']) - global_vars['Tools'] = { - 'wimlib-imagex': 'wimlib-imagex', - 'SevenZip': '7z', - } - - -def set_log_file(log_name): - """Sets global var LogFile and creates path as needed.""" - if psutil.LINUX: - folder_path = global_vars['LogDir'] - else: - folder_path = '{}{}{}'.format( - global_vars['LogDir'], - os.sep, - KIT_NAME_FULL) - log_file = '{}{}{}'.format( - folder_path, - os.sep, - log_name) - os.makedirs(folder_path, exist_ok=True) - global_vars['LogFile'] = log_file - - -if __name__ == '__main__': - print("This file is not meant to be called directly.") - -# vim: sts=2 sw=2 ts=2 diff --git a/.bin/Scripts/functions/network.py b/.bin/Scripts/functions/network.py deleted file mode 100644 index 5b5d4f52..00000000 --- a/.bin/Scripts/functions/network.py +++ /dev/null @@ -1,53 +0,0 @@ -# Wizard Kit: Functions - Network - -import os -import shutil -import sys - -from functions.common import * - - -# REGEX -REGEX_VALID_IP = re.compile( - r'(10.\d+.\d+.\d+' - r'|172.(1[6-9]|2\d|3[0-1])' - r'|192.168.\d+.\d+)', - re.IGNORECASE) - - -def is_connected(): - """Check for a valid private IP.""" - devs = psutil.net_if_addrs() - for dev in devs.values(): - for family in dev: - if REGEX_VALID_IP.search(family.address): - # Valid IP found - return True - # Else - return False - - -def show_valid_addresses(): - """Show all valid private IP addresses assigned to the system.""" - devs = psutil.net_if_addrs() - for dev, families in sorted(devs.items()): - for family in families: - if REGEX_VALID_IP.search(family.address): - # Valid IP found - show_data(message=dev, data=family.address) - - -def speedtest(): - """Run a network speedtest using speedtest-cli.""" - result = run_program(['speedtest-cli', '--simple']) - output = [line.strip() for line in result.stdout.decode().splitlines() - if line.strip()] - output = [line.split() for line in output] - output = [(a, float(b), c) for a, b, c in output] - return ['{:10}{:6.2f} {}'.format(*line) for line in output] - - -if __name__ == '__main__': - print("This file is not meant to be called directly.") - -# vim: sts=2 sw=2 ts=2 diff --git a/.bin/Scripts/functions/ufd.py b/.bin/Scripts/functions/ufd.py deleted file mode 100644 index 32f08201..00000000 --- a/.bin/Scripts/functions/ufd.py +++ /dev/null @@ -1,471 +0,0 @@ -"""Wizard Kit: Functions - UFD""" -# pylint: disable=broad-except,wildcard-import -# vim: sts=2 sw=2 ts=2 - -import os -import re -import shutil -import pathlib -from collections import OrderedDict -from functions.common import * - - -def case_insensitive_search(path, item): - """Search path for item case insensitively, returns str.""" - regex_match = '^{}$'.format(item) - real_path = '' - - # Quick check first - if os.path.exists('{}/{}'.format(path, item)): - real_path = '{}{}{}'.format( - path, - '' if path == '/' else '/', - item, - ) - - # Check all items in dir - for entry in os.scandir(path): - if re.match(regex_match, entry.name, re.IGNORECASE): - real_path = '{}{}{}'.format( - path, - '' if path == '/' else '/', - entry.name, - ) - - # Done - if not real_path: - raise FileNotFoundError('{}/{}'.format(path, item)) - - return 
real_path - - -def confirm_selections(args): - """Ask tech to confirm selections, twice if necessary.""" - if not ask('Is the above information correct?'): - abort(False) - ## Safety check - if not args['--update']: - print_standard(' ') - print_warning('SAFETY CHECK') - print_standard( - 'All data will be DELETED from the disk and partition(s) listed above.') - print_standard( - 'This is irreversible and will lead to {RED}DATA LOSS.{CLEAR}'.format( - **COLORS)) - if not ask('Asking again to confirm, is this correct?'): - abort(False) - - print_standard(' ') - - -def copy_source(source, items, overwrite=False): - """Copy source items to /mnt/UFD.""" - is_image = source.is_file() - - # Mount source if necessary - if is_image: - mount(source, '/mnt/Source') - - # Copy items - for i_source, i_dest in items: - i_source = '{}{}'.format( - '/mnt/Source' if is_image else source, - i_source, - ) - i_dest = '/mnt/UFD{}'.format(i_dest) - try: - recursive_copy(i_source, i_dest, overwrite=overwrite) - except FileNotFoundError: - # Going to assume (hope) that this is fine - pass - - # Unmount source if necessary - if is_image: - unmount('/mnt/Source') - - -def find_first_partition(dev_path): - """Find path to first partition of dev, returns str.""" - cmd = [ - 'lsblk', - '--list', - '--noheadings', - '--output', 'name', - '--paths', - dev_path, - ] - result = run_program(cmd, encoding='utf-8', errors='ignore') - part_path = result.stdout.splitlines()[-1].strip() - - return part_path - - -def find_path(path): - """Find path case-insensitively, returns pathlib.Path obj.""" - path_obj = pathlib.Path(path).resolve() - - # Quick check first - if path_obj.exists(): - return path_obj - - # Fix case - parts = path_obj.relative_to('/').parts - real_path = '/' - for part in parts: - try: - real_path = case_insensitive_search(real_path, part) - except NotADirectoryError: - # Reclassify error - raise FileNotFoundError(path) - - # Raise error if path doesn't exist - path_obj = pathlib.Path(real_path) - if not path_obj.exists(): - raise FileNotFoundError(path_obj) - - # Done - return path_obj - - -def get_user_home(user): - """Get path to user's home dir, returns str.""" - home_dir = None - cmd = ['getent', 'passwd', user] - result = run_program(cmd, encoding='utf-8', errors='ignore', check=False) - try: - home_dir = result.stdout.split(':')[5] - except Exception: - # Just use HOME from ENV (or '/root' if that fails) - home_dir = os.environ.get('HOME', '/root') - - return home_dir - - -def get_user_name(): - """Get real user name, returns str.""" - user = None - if 'SUDO_USER' in os.environ: - user = os.environ.get('SUDO_USER', 'Unknown') - else: - user = os.environ.get('USER', 'Unknown') - - return user - - -def hide_items(ufd_dev, items): - """Set FAT32 hidden flag for items.""" - # pylint: disable=invalid-name - with open('/root/.mtoolsrc', 'w') as f: - f.write('drive U: file="{}"\n'.format( - find_first_partition(ufd_dev))) - f.write('mtools_skip_check=1\n') - - # Hide items - for item in items: - cmd = ['yes | mattrib +h "U:/{}"'.format(item)] - run_program(cmd, check=False, shell=True) - - -def install_syslinux_to_dev(ufd_dev, use_mbr): - """Install Syslinux to UFD (dev).""" - cmd = [ - 'dd', - 'bs=440', - 'count=1', - 'if=/usr/lib/syslinux/bios/{}.bin'.format( - 'mbr' if use_mbr else 'gptmbr', - ), - 'of={}'.format(ufd_dev), - ] - run_program(cmd) - - -def install_syslinux_to_partition(partition): - """Install Syslinux to UFD (partition).""" - cmd = [ - 'syslinux', - '--install', - '--directory', - 
'/arch/boot/syslinux/', - partition, - ] - run_program(cmd) - - -def is_valid_path(path_obj, path_type): - """Verify path_obj is valid by type, returns bool.""" - valid_path = False - if path_type == 'DIR': - valid_path = path_obj.is_dir() - elif path_type == 'KIT': - valid_path = path_obj.is_dir() and path_obj.joinpath('.bin').exists() - elif path_type == 'IMG': - valid_path = path_obj.is_file() and path_obj.suffix.lower() == '.img' - elif path_type == 'ISO': - valid_path = path_obj.is_file() and path_obj.suffix.lower() == '.iso' - elif path_type == 'UFD': - valid_path = path_obj.is_block_device() - - return valid_path - - -def mount(mount_source, mount_point, read_write=False): - """Mount mount_source on mount_point.""" - os.makedirs(mount_point, exist_ok=True) - cmd = [ - 'mount', - mount_source, - mount_point, - '-o', - 'rw' if read_write else 'ro', - ] - run_program(cmd) - - -def prep_device(dev_path, label, use_mbr=False, indent=2): - """Format device in preparation for applying the WizardKit components - - This is done is four steps: - 1. Zero-out first 64MB (this deletes the partition table and/or bootloader) - 2. Create a new partition table (GPT by default, optionally MBR) - 3. Set boot flag - 4. Format partition (FAT32, 4K aligned) - """ - # Zero-out first 64MB - cmd = 'dd bs=4M count=16 if=/dev/zero of={}'.format(dev_path).split() - try_and_print( - indent=indent, - message='Zeroing first 64MB...', - function=run_program, - cmd=cmd, - ) - - # Create partition table - cmd = 'parted {} --script -- mklabel {} mkpart primary fat32 4MiB {}'.format( - dev_path, - 'msdos' if use_mbr else 'gpt', - '-1s' if use_mbr else '-4MiB', - ).split() - try_and_print( - indent=indent, - message='Creating partition table...', - function=run_program, - cmd=cmd, - ) - - # Set boot flag - cmd = 'parted {} set 1 {} on'.format( - dev_path, - 'boot' if use_mbr else 'legacy_boot', - ).split() - try_and_print( - indent=indent, - message='Setting boot flag...', - function=run_program, - cmd=cmd, - ) - - # Format partition - cmd = [ - 'mkfs.vfat', '-F', '32', - '-n', label, - find_first_partition(dev_path), - ] - try_and_print( - indent=indent, - message='Formatting partition...', - function=run_program, - cmd=cmd, - ) - - -def recursive_copy(source, dest, overwrite=False): - """Copy source to dest recursively. - - NOTE: This uses rsync style source/dest syntax. - If the source has a trailing slash then it's contents are copied, - otherwise the source itself is copied. - - Examples assuming "ExDir/ExFile.txt" exists: - recursive_copy("ExDir", "Dest/") results in "Dest/ExDir/ExFile.txt" - recursive_copy("ExDir/", "Dest/") results in "Dest/ExFile.txt" - - NOTE 2: dest does not use find_path because it might not exist. 
- """ - copy_contents = source.endswith('/') - source = find_path(source) - dest = pathlib.Path(dest).resolve().joinpath(source.name) - os.makedirs(dest.parent, exist_ok=True) - - if source.is_dir(): - if copy_contents: - # Trailing slash syntax - for item in os.scandir(source): - recursive_copy(item.path, dest.parent, overwrite=overwrite) - elif not dest.exists(): - # No conflict, copying whole tree (no merging needed) - shutil.copytree(source, dest) - elif not dest.is_dir(): - # Refusing to replace file with dir - raise FileExistsError('Refusing to replace file: {}'.format(dest)) - else: - # Dest exists and is a dir, merge dirs - for item in os.scandir(source): - recursive_copy(item.path, dest, overwrite=overwrite) - elif source.is_file(): - if not dest.exists(): - # No conflict, copying file - shutil.copy2(source, dest) - elif not dest.is_file(): - # Refusing to replace dir with file - raise FileExistsError('Refusing to replace dir: {}'.format(dest)) - elif overwrite: - # Dest file exists, deleting and replacing file - os.remove(dest) - shutil.copy2(source, dest) - else: - # Refusing to delete file when overwrite=False - raise FileExistsError('Refusing to delete file: {}'.format(dest)) - - -def remove_arch(): - """Remove arch dir from UFD. - - This ensures a clean installation to the UFD and resets the boot files - """ - shutil.rmtree(find_path('/mnt/UFD/arch')) - - -def running_as_root(): - """Check if running with effective UID of 0, returns bool.""" - return os.geteuid() == 0 - - -def show_selections(args, sources, ufd_dev, ufd_sources): - """Show selections including non-specified options.""" - - # Sources - print_info('Sources') - for label in ufd_sources.keys(): - if label in sources: - print_standard(' {label:<18} {path}'.format( - label=label+':', - path=sources[label], - )) - else: - print_standard(' {label:<18} {YELLOW}Not Specified{CLEAR}'.format( - label=label+':', - **COLORS, - )) - print_standard(' ') - - # Destination - print_info('Destination') - cmd = [ - 'lsblk', '--nodeps', '--noheadings', '--paths', - '--output', 'NAME,FSTYPE,TRAN,SIZE,VENDOR,MODEL,SERIAL', - ufd_dev, - ] - result = run_program(cmd, check=False, encoding='utf-8', errors='ignore') - print_standard(result.stdout.strip()) - cmd = [ - 'lsblk', '--noheadings', '--paths', - '--output', 'NAME,SIZE,FSTYPE,LABEL,MOUNTPOINT', - ufd_dev, - ] - result = run_program(cmd, check=False, encoding='utf-8', errors='ignore') - for line in result.stdout.splitlines()[1:]: - print_standard(line) - - # Notes - if args['--update']: - print_warning('Updating kit in-place') - elif args['--use-mbr']: - print_warning('Formatting using legacy MBR') - print_standard(' ') - - -def unmount(mount_point): - """Unmount mount_point.""" - cmd = ['umount', mount_point] - run_program(cmd) - - -def update_boot_entries(boot_entries, boot_files, iso_label, ufd_label): - """Update boot files for UFD usage""" - configs = [] - - # Find config files - for c_path, c_ext in boot_files.items(): - c_path = find_path('/mnt/UFD{}'.format(c_path)) - for item in os.scandir(c_path): - if item.name.lower().endswith(c_ext.lower()): - configs.append(item.path) - - # Update Linux labels - cmd = [ - 'sed', - '--in-place', - '--regexp-extended', - 's/{}/{}/'.format(iso_label, ufd_label), - *configs, - ] - run_program(cmd) - - # Uncomment extra entries if present - for b_path, b_comment in boot_entries.items(): - try: - find_path('/mnt/UFD{}'.format(b_path)) - except (FileNotFoundError, NotADirectoryError): - # Entry not found, continue to next entry - continue 
- - # Entry found, update config files - cmd = [ - 'sed', - '--in-place', - 's/#{}#//'.format(b_comment), - *configs, - ] - run_program(cmd, check=False) - - -def verify_sources(args, ufd_sources): - """Check all sources and abort if necessary, returns dict.""" - sources = OrderedDict() - - for label, data in ufd_sources.items(): - s_path = args[data['Arg']] - if s_path: - try: - s_path_obj = find_path(s_path) - except FileNotFoundError: - print_error('ERROR: {} not found: {}'.format(label, s_path)) - abort(False) - if not is_valid_path(s_path_obj, data['Type']): - print_error('ERROR: Invalid {} source: {}'.format(label, s_path)) - abort(False) - sources[label] = s_path_obj - - return sources - - -def verify_ufd(dev_path): - """Check that dev_path is a valid UFD, returns pathlib.Path obj.""" - ufd_dev = None - - try: - ufd_dev = find_path(dev_path) - except FileNotFoundError: - print_error('ERROR: UFD device not found: {}'.format(dev_path)) - abort(False) - - if not is_valid_path(ufd_dev, 'UFD'): - print_error('ERROR: Invalid UFD device: {}'.format(ufd_dev)) - abort(False) - - return ufd_dev - - -if __name__ == '__main__': - print("This file is not meant to be called directly.") diff --git a/.bin/Scripts/hw-diags-audio b/.bin/Scripts/hw-diags-audio deleted file mode 100755 index e581330f..00000000 --- a/.bin/Scripts/hw-diags-audio +++ /dev/null @@ -1,42 +0,0 @@ -#!/bin/python3 -# -## Wizard Kit: HW Diagnostics - Audio - -import os -import sys - -# Init -sys.path.append(os.path.dirname(os.path.realpath(__file__))) -from functions.common import * -init_global_vars() - -if __name__ == '__main__': - try: - # Prep - clear_screen() - print_standard('Hardware Diagnostics: Audio\n') - - # Set volume - try: - run_program('amixer -q set "Master" 80% unmute'.split()) - run_program('amixer -q set "PCM" 90% unmute'.split()) - except subprocess.CalledProcessError: - print_error('Failed to set volume') - - # Run tests - for mode in ['pink', 'wav']: - run_program( - cmd = 'speaker-test -c 2 -l 1 -t {}'.format(mode).split(), - check = False, - pipe = False) - - # Done - #print_standard('\nDone.') - #pause("Press Enter to exit...") - exit_script() - except SystemExit as sys_exit: - exit_script(sys_exit.code) - except: - major_exception() - -# vim: sts=2 sw=2 ts=2 diff --git a/.bin/Scripts/hw-diags-iobenchmark b/.bin/Scripts/hw-diags-iobenchmark deleted file mode 100755 index 6821b1a4..00000000 --- a/.bin/Scripts/hw-diags-iobenchmark +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -# -## Wizard Kit: HW Diagnostics - Benchmarks - -function usage { - echo "Usage: ${0} device log-file" - echo " e.g. ${0} /dev/sda /tmp/tmp.XXXXXXX/benchmarks.log" -} - -# Bail early -if [ ! 
-b "${1}" ]; then - usage - exit 1 -fi - -# Run Benchmarks -echo 3 | sudo tee -a /proc/sys/vm/drop_caches >/dev/null 2>&1 -sudo dd bs=4M if="${1}" of=/dev/null status=progress 2>&1 | tee -a "${2}" diff --git a/.bin/Scripts/hw-diags-menu b/.bin/Scripts/hw-diags-menu deleted file mode 100755 index 7a122ae7..00000000 --- a/.bin/Scripts/hw-diags-menu +++ /dev/null @@ -1,66 +0,0 @@ -#!/bin/python3 -# -## Wizard Kit: HW Diagnostics - Menu - -import os -import sys - -# Init -sys.path.append(os.path.dirname(os.path.realpath(__file__))) -from functions.hw_diags import * -from functions.tmux import * -init_global_vars() - -if __name__ == '__main__': - # Show menu - try: - state = State() - menu_diags(state, sys.argv) - except KeyboardInterrupt: - print_standard(' ') - print_warning('Aborted') - print_standard(' ') - sleep(1) - pause('Press Enter to exit...') - except SystemExit as sys_exit: - tmux_switch_client() - exit_script(sys_exit.code) - except: - # Cleanup - tmux_kill_all_panes() - - if DEBUG_MODE: - # Custom major exception - print_standard(' ') - print_error('Major exception') - print_warning(SUPPORT_MESSAGE) - print(traceback.format_exc()) - print_log(traceback.format_exc()) - - # Save debug reports and upload data - try_and_print( - message='Saving debug reports...', - function=save_debug_reports, - state=state, global_vars=global_vars) - question = 'Upload crash details to {}?'.format(CRASH_SERVER['Name']) - if ENABLED_UPLOAD_DATA and ask(question): - try_and_print( - message='Uploading Data...', - function=upload_logdir, - global_vars=global_vars) - - # Done - sleep(1) - pause('Press Enter to exit...') - exit_script(1) - - else: - # "Normal" major exception - major_exception() - - # Done - tmux_kill_all_panes() - tmux_switch_client() - exit_script() - -# vim: sts=2 sw=2 ts=2 diff --git a/.bin/Scripts/hw-diags-network b/.bin/Scripts/hw-diags-network deleted file mode 100755 index 138ea67e..00000000 --- a/.bin/Scripts/hw-diags-network +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/python3 -# -## Wizard Kit: HW Diagnostics - Network - -import os -import sys - -# Init -sys.path.append(os.path.dirname(os.path.realpath(__file__))) -from functions.network import * - - -def check_connection(): - if not is_connected(): - # Raise to cause NS in try_and_print() - raise Exception - - -if __name__ == '__main__': - try: - # Prep - clear_screen() - print_standard('Hardware Diagnostics: Network\n') - - # Connect - print_standard('Initializing...') - connect_to_network() - - # Tests - try_and_print( - message='Network connection:', function=check_connection, cs='OK') - show_valid_addresses() - try_and_print(message='Internet connection:', function=ping, - addr='8.8.8.8', cs='OK') - try_and_print(message='DNS Resolution:', function=ping, cs='OK') - try_and_print(message='Speedtest:', function=speedtest, - print_return=True) - - # Done - print_standard('\nDone.') - #pause("Press Enter to exit...") - exit_script() - except SystemExit as sys_exit: - exit_script(sys_exit.code) - except: - major_exception() - -# vim: sts=2 sw=2 ts=2 diff --git a/.bin/Scripts/hw-diags-prime95 b/.bin/Scripts/hw-diags-prime95 deleted file mode 100755 index 4927da76..00000000 --- a/.bin/Scripts/hw-diags-prime95 +++ /dev/null @@ -1,19 +0,0 @@ -#!/bin/bash -# -## Wizard Kit: HW Diagnostics - Prime95 - -function usage { - echo "Usage: $0 log-dir" - echo " e.g. $0 /tmp/tmp.7Mh5f1RhSL9001" -} - -# Bail early -if [ ! 
-d "$1" ]; then - usage - exit 1 -fi - -# Run Prime95 -cd "$1" -mprime -t | grep -iv --line-buffered 'stress.txt' | tee -a "prime.log" - diff --git a/.bin/Scripts/hw-drive-info b/.bin/Scripts/hw-drive-info deleted file mode 100755 index df1e1748..00000000 --- a/.bin/Scripts/hw-drive-info +++ /dev/null @@ -1,39 +0,0 @@ -#!/bin/bash -# - -BLUE='\033[34m' -CLEAR='\033[0m' -IFS=$'\n' - -# List devices -for line in $(lsblk -do NAME,TRAN,SIZE,VENDOR,MODEL,SERIAL); do - if [[ "${line:0:4}" == "NAME" ]]; then - echo -e "${BLUE}${line}${CLEAR}" - else - echo "${line}" - fi -done -echo "" - -# List loopback devices -if [[ "$(losetup -l | wc -l)" > 0 ]]; then - for line in $(losetup -lO NAME,PARTSCAN,RO,BACK-FILE); do - if [[ "${line:0:4}" == "NAME" ]]; then - echo -e "${BLUE}${line}${CLEAR}" - else - echo "${line}" | sed -r 's#/dev/(loop[0-9]+)#\1 #' - fi - done - echo "" -fi - -# List partitions -for line in $(lsblk -o NAME,SIZE,FSTYPE,LABEL,MOUNTPOINT); do - if [[ "${line:0:4}" == "NAME" ]]; then - echo -e "${BLUE}${line}${CLEAR}" - else - echo "${line}" - fi -done -echo "" - diff --git a/.bin/Scripts/hw-sensors b/.bin/Scripts/hw-sensors deleted file mode 100755 index 39ca7147..00000000 --- a/.bin/Scripts/hw-sensors +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -# -## Wizard Kit: Sensor monitoring tool - -WINDOW_NAME="Hardware Sensors" -MONITOR="hw-sensors-monitor" - -# Start session -tmux new-session -n "$WINDOW_NAME" "$MONITOR" - diff --git a/.bin/Scripts/hw-sensors-monitor b/.bin/Scripts/hw-sensors-monitor deleted file mode 100755 index ffdbbad3..00000000 --- a/.bin/Scripts/hw-sensors-monitor +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/python3 -# -## Wizard Kit: Sensor monitoring tool - -import os -import sys - -# Init -sys.path.append(os.path.dirname(os.path.realpath(__file__))) -from functions.sensors import * -from functions.tmux import * -init_global_vars(silent=True) - -if __name__ == '__main__': - background = False - try: - if len(sys.argv) > 1 and os.path.exists(sys.argv[1]): - background = True - monitor_file = sys.argv[1] - monitor_pane = None - else: - result = run_program(['mktemp']) - monitor_file = result.stdout.decode().strip() - if not background: - monitor_pane = tmux_split_window( - percent=1, vertical=True, watch=monitor_file) - cmd = ['tmux', 'resize-pane', '-Z', '-t', monitor_pane] - run_program(cmd, check=False) - monitor_sensors(monitor_pane, monitor_file) - exit_script() - except SystemExit as sys_exit: - exit_script(sys_exit.code) - except: - major_exception() - -# vim: sts=2 sw=2 ts=2 diff --git a/.bin/Scripts/mount-all-volumes b/.bin/Scripts/mount-all-volumes deleted file mode 100755 index 5b34c579..00000000 --- a/.bin/Scripts/mount-all-volumes +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/python3 -# -## Wizard Kit: Volume mount tool - -import os -import sys - -# Init -sys.path.append(os.path.dirname(os.path.realpath(__file__))) -from functions.data import * -init_global_vars() - -if __name__ == '__main__': - try: - # Prep - clear_screen() - print_standard('{}: Volume mount tool'.format(KIT_NAME_FULL)) - - # Mount volumes - report = mount_volumes(all_devices=True) - - # Print report - print_info('\nResults') - for vol_name, vol_data in sorted(report.items()): - show_data(indent=4, width=20, **vol_data['show_data']) - - # Done - print_standard('\nDone.') - if 'gui' in sys.argv: - pause("Press Enter to exit...") - popen_program(['nohup', 'thunar', '/media'], pipe=True) - exit_script() - except SystemExit as sys_exit: - exit_script(sys_exit.code) - except: - major_exception() - -# 
vim: sts=2 sw=2 ts=2 diff --git a/.bin/Scripts/mount-backup-shares b/.bin/Scripts/mount-backup-shares deleted file mode 100755 index 0d8b7fd3..00000000 --- a/.bin/Scripts/mount-backup-shares +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/python3 -# -## Wizard Kit: Backup share mount tool - -import os -import sys - -# Init -sys.path.append(os.path.dirname(os.path.realpath(__file__))) -from functions.data import * -from functions.network import * -init_global_vars() - -if __name__ == '__main__': - try: - # Prep - clear_screen() - - # Mount - if is_connected(): - mount_backup_shares(read_write=True) - else: - # Couldn't connect - print_error('ERROR: No network connectivity.') - - # Done - print_standard('\nDone.') - #pause("Press Enter to exit...") - exit_script() - except SystemExit as sys_exit: - exit_script(sys_exit.code) - except: - major_exception() - -# vim: sts=2 sw=2 ts=2 diff --git a/.bin/Scripts/safemode_enter.py b/.bin/Scripts/safemode_enter.py deleted file mode 100644 index de9ad119..00000000 --- a/.bin/Scripts/safemode_enter.py +++ /dev/null @@ -1,39 +0,0 @@ -# Wizard Kit: Enter SafeMode by editing the BCD - -import os -import sys - -# Init -sys.path.append(os.path.dirname(os.path.realpath(__file__))) -from functions.safemode import * -init_global_vars() -os.system('title {}: SafeMode Tool'.format(KIT_NAME_FULL)) - -if __name__ == '__main__': - try: - clear_screen() - print_info('{}: SafeMode Tool\n'.format(KIT_NAME_FULL)) - other_results = { - 'Error': {'CalledProcessError': 'Unknown Error'}, - 'Warning': {}} - - if not ask('Enable booting to SafeMode (with Networking)?'): - abort() - - # Configure SafeMode - try_and_print(message='Set BCD option...', - function=enable_safemode, other_results=other_results) - try_and_print(message='Enable MSI in SafeMode...', - function=enable_safemode_msi, other_results=other_results) - - # Done - print_standard('\nDone.') - pause('Press Enter to reboot...') - reboot() - exit_script() - except SystemExit as sys_exit: - exit_script(sys_exit.code) - except: - major_exception() - -# vim: sts=2 sw=2 ts=2 diff --git a/.bin/Scripts/safemode_exit.py b/.bin/Scripts/safemode_exit.py deleted file mode 100644 index 6c47b02d..00000000 --- a/.bin/Scripts/safemode_exit.py +++ /dev/null @@ -1,39 +0,0 @@ -# Wizard Kit: Exit SafeMode by editing the BCD - -import os -import sys - -# Init -sys.path.append(os.path.dirname(os.path.realpath(__file__))) -from functions.safemode import * -init_global_vars() -os.system('title {}: SafeMode Tool'.format(KIT_NAME_FULL)) - -if __name__ == '__main__': - try: - clear_screen() - print_info('{}: SafeMode Tool\n'.format(KIT_NAME_FULL)) - other_results = { - 'Error': {'CalledProcessError': 'Unknown Error'}, - 'Warning': {}} - - if not ask('Disable booting to SafeMode?'): - abort() - - # Configure SafeMode - try_and_print(message='Remove BCD option...', - function=disable_safemode, other_results=other_results) - try_and_print(message='Disable MSI in SafeMode...', - function=disable_safemode_msi, other_results=other_results) - - # Done - print_standard('\nDone.') - pause('Press Enter to reboot...') - reboot() - exit_script() - except SystemExit as sys_exit: - exit_script(sys_exit.code) - except: - major_exception() - -# vim: sts=2 sw=2 ts=2 diff --git a/.bin/Scripts/sfc_scan.py b/.bin/Scripts/sfc_scan.py deleted file mode 100644 index ec85836a..00000000 --- a/.bin/Scripts/sfc_scan.py +++ /dev/null @@ -1,40 +0,0 @@ -# Wizard Kit: Check, and possibly repair, system file health via SFC - -import os -import sys - -# Init 
-sys.path.append(os.path.dirname(os.path.realpath(__file__))) -from functions.repairs import * -init_global_vars() -os.system('title {}: SFC Tool'.format(KIT_NAME_FULL)) -set_log_file('SFC Tool.log') - -if __name__ == '__main__': - try: - stay_awake() - clear_screen() - print_info('{}: SFC Tool\n'.format(KIT_NAME_FULL)) - other_results = { - 'Error': { - 'CalledProcessError': 'Unknown Error', - }, - 'Warning': { - 'GenericRepair': 'Repaired', - }} - if ask('Run a SFC scan now?'): - try_and_print(message='SFC scan...', - function=run_sfc_scan, other_results=other_results) - else: - abort() - - # Done - print_standard('\nDone.') - pause('Press Enter to exit...') - exit_script() - except SystemExit as sys_exit: - exit_script(sys_exit.code) - except: - major_exception() - -# vim: sts=2 sw=2 ts=2 diff --git a/.gitignore b/.gitignore index bcaa6c80..420330cc 100644 --- a/.gitignore +++ b/.gitignore @@ -1,41 +1,7 @@ -**/__pycache__/* -*.bak -*.exe -*.swp -.bin/7-Zip/ -.bin/AIDA64/ -.bin/BleachBit/ -.bin/ClassicStartSkin/ -.bin/ConEmu/ -.bin/Erunt/ -.bin/Everything/ -.bin/FastCopy/ -.bin/HWiNFO/HWiNFO*.ini -.bin/NotepadPlusPlus/ -.bin/ProcessKiller/ -.bin/ProduKey/ -.bin/Python/ -.bin/Tmp/ -.bin/XMPlay/ -.bin/_Drivers/SDIO/ -.cbin/*.7z -.cbin/AIDA64/ -.cbin/Autoruns/ -.cbin/BleachBit-Portable/ -.cbin/BlueScreenView/ -.cbin/Caffeine/ -.cbin/Du/ -.cbin/Everything/ -.cbin/FirefoxExtensions/ -.cbin/IObitUninstallerPortable/ -.cbin/ProduKey/ -.cbin/TestDisk/ -.cbin/TreeSizeFree-Portable/ -.cbin/XMPlay/ -.cbin/XYplorerFree/ -.cbin/_Drivers/ -.cbin/_Office/ -.cbin/_vcredists/ -.cbin/wimlib/ -BUILD*/ -OUT*/ +**/__pycache__ +**/*.7z +**/*.bak +**/*.exe +**/*.swp +setup/BUILD* +setup/OUT* diff --git a/.linux_items/include/syslinux/splash.png b/docs/TODO similarity index 100% rename from .linux_items/include/syslinux/splash.png rename to docs/TODO diff --git a/Images/ConEmu.png b/images/ConEmu.png similarity index 100% rename from Images/ConEmu.png rename to images/ConEmu.png diff --git a/Images/Linux.png b/images/Linux.png similarity index 100% rename from Images/Linux.png rename to images/Linux.png diff --git a/Images/Pxelinux.png b/images/Pxelinux.png similarity index 100% rename from Images/Pxelinux.png rename to images/Pxelinux.png diff --git a/Images/Syslinux.png b/images/Syslinux.png similarity index 100% rename from Images/Syslinux.png rename to images/Syslinux.png diff --git a/Images/WinPE.jpg b/images/WinPE.jpg similarity index 100% rename from Images/WinPE.jpg rename to images/WinPE.jpg diff --git a/Images/WizardHat.xcf b/images/WizardHat.xcf similarity index 100% rename from Images/WizardHat.xcf rename to images/WizardHat.xcf diff --git a/Images/logo.svg b/images/logo.svg similarity index 100% rename from Images/logo.svg rename to images/logo.svg diff --git a/Images/rEFInd.png b/images/rEFInd.png similarity index 100% rename from Images/rEFInd.png rename to images/rEFInd.png diff --git a/.bin/Scripts/Copy WizardKit.cmd b/scripts/Copy WizardKit.cmd similarity index 100% rename from .bin/Scripts/Copy WizardKit.cmd rename to scripts/Copy WizardKit.cmd diff --git a/.bin/Scripts/Launch.cmd b/scripts/Launch.cmd similarity index 100% rename from .bin/Scripts/Launch.cmd rename to scripts/Launch.cmd diff --git a/.bin/Scripts/Launcher_Template.cmd b/scripts/Launcher_Template.cmd similarity index 100% rename from .bin/Scripts/Launcher_Template.cmd rename to scripts/Launcher_Template.cmd diff --git a/scripts/README.md b/scripts/README.md new file mode 100644 index 00000000..e6854ca9 --- /dev/null +++ 
b/scripts/README.md @@ -0,0 +1,6 @@ +## pylint ## + +These scripts use two spaces per indent instead of the default four. As such you will need to update your pylintrc file or run like this: + +`pylint --indent-after-paren=2 --indent-string=' ' wk` + diff --git a/scripts/activate.py b/scripts/activate.py new file mode 100644 index 00000000..0affc19a --- /dev/null +++ b/scripts/activate.py @@ -0,0 +1,31 @@ +"""Wizard Kit: Activate Windows using a BIOS key""" +# vim: sts=2 sw=2 ts=2 + +import wk + + +def main(): + """Attempt to activate Windows and show result.""" + title = f'{wk.cfg.main.KIT_NAME_FULL}: Activation Tool' + try_print = wk.std.TryAndPrint() + wk.std.clear_screen() + wk.std.set_title(title) + wk.std.print_info(title) + print('') + + # Attempt activation + try_print.run('Attempting activation...', wk.os.win.activate_with_bios) + + # Done + print('') + print('Done.') + wk.std.pause('Press Enter to exit...') + + +if __name__ == '__main__': + try: + main() + except SystemExit: + raise + except: #pylint: disable=bare-except + wk.std.major_exception() diff --git a/.bin/Scripts/add-known-networks b/scripts/add-known-networks similarity index 100% rename from .bin/Scripts/add-known-networks rename to scripts/add-known-networks diff --git a/.bin/Scripts/apple-fans b/scripts/apple-fans similarity index 100% rename from .bin/Scripts/apple-fans rename to scripts/apple-fans diff --git a/.bin/Scripts/borrowed/set-eol.ps1 b/scripts/borrowed/set-eol.ps1 similarity index 100% rename from .bin/Scripts/borrowed/set-eol.ps1 rename to scripts/borrowed/set-eol.ps1 diff --git a/scripts/build-ufd b/scripts/build-ufd new file mode 100755 index 00000000..237c6691 --- /dev/null +++ b/scripts/build-ufd @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +"""Wizard Kit: Build UFD Tool""" +# vim: sts=2 sw=2 ts=2 + +import wk + + +if __name__ == '__main__': + try: + wk.kit.ufd.build_ufd() + except SystemExit: + raise + except: #pylint: disable=bare-except + wk.std.major_exception() diff --git a/.bin/Scripts/build_kit.ps1 b/scripts/build_kit.ps1 similarity index 100% rename from .bin/Scripts/build_kit.ps1 rename to scripts/build_kit.ps1 diff --git a/.bin/Scripts/build_pe.ps1 b/scripts/build_pe.ps1 similarity index 100% rename from .bin/Scripts/build_pe.ps1 rename to scripts/build_pe.ps1 diff --git a/scripts/check_disk.py b/scripts/check_disk.py new file mode 100644 index 00000000..7e6810c9 --- /dev/null +++ b/scripts/check_disk.py @@ -0,0 +1,49 @@ +"""Wizard Kit: Check or repair the %SYSTEMDRIVE% filesystem via CHKDSK""" +# vim: sts=2 sw=2 ts=2 + +import os +import wk + + +def main(): + """Run or schedule CHKDSK and show result.""" + title = f'{wk.cfg.main.KIT_NAME_FULL}: Check Disk Tool' + menu = wk.std.Menu(title=title) + try_print = wk.std.TryAndPrint() + wk.std.clear_screen() + wk.std.set_title(title) + print('') + + # Add menu entries + menu.add_option('Offline scan') + menu.add_option('Online scan') + + # Show menu and make selection + selection = menu.simple_select() + + # Run or schedule scan + if 'Offline' in selection[0]: + function = wk.os.win.run_chkdsk_offline + msg_good = 'Scheduled' + else: + function = wk.os.win.run_chkdsk_online + msg_good = 'No issues detected' + try_print.run( + message=f'CHKDSK ({os.environ.get("SYSTEMDRIVE")})...', + function=function, + msg_good=msg_good, + ) + + # Done + print('') + print('Done.') + wk.std.pause('Press Enter to exit...') + + +if __name__ == '__main__': + try: + main() + except SystemExit: + raise + except: #pylint: disable=bare-except + wk.std.major_exception() 
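The rewritten Windows-side scripts in this diff (activate.py and check_disk.py above, plus the safemode and sfc scripts further down) share one entry-point pattern from the new wk package: build a title from wk.cfg.main.KIT_NAME_FULL, run each step through a wk.std.TryAndPrint instance so it prints a one-line status, and route anything unexpected to wk.std.major_exception(). A rough sketch of that pattern, using only calls that appear in this diff (the tool name and example_step below are placeholders, not part of the kit):

#!/usr/bin/env python3
"""Hypothetical example tool showing the shared wk.std entry-point pattern."""
# vim: sts=2 sw=2 ts=2

import wk


def example_step():
  """Placeholder step; the real scripts pass functions such as wk.os.win.run_sfc_scan."""


def main():
  """Set up the console, run each step via TryAndPrint, then wait for the user."""
  title = f'{wk.cfg.main.KIT_NAME_FULL}: Example Tool'
  try_print = wk.std.TryAndPrint()
  wk.std.clear_screen()
  wk.std.set_title(title)
  wk.std.print_info(title)
  print('')

  # Each step gets a single pass/fail status line in the output
  try_print.run('Example step...', example_step)

  # Done
  print('')
  print('Done.')
  wk.std.pause('Press Enter to exit...')


if __name__ == '__main__':
  try:
    main()
  except SystemExit:
    raise
  except: #pylint: disable=bare-except
    wk.std.major_exception()

Scripts that need a user choice, such as check_disk.py above, additionally build a wk.std.Menu, register entries with add_option(), and branch on the selection returned by simple_select().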
diff --git a/scripts/ddrescue-tui b/scripts/ddrescue-tui new file mode 100755 index 00000000..03e8eee2 --- /dev/null +++ b/scripts/ddrescue-tui @@ -0,0 +1,21 @@ +#!/bin/bash +# +## Wizard Kit: ddrescue TUI Launcher + +# Check if running under Linux +os_name="$(uname -s)" +if [[ "$os_name" == "Darwin" ]]; then + os_name="macOS" +fi +if [[ "$os_name" != "Linux" ]]; then + echo "This script is not supported under $os_name." 1>&2 + exit 1 +fi + +source ./launch-in-tmux + +SESSION_NAME="ddrescue-tui" +WINDOW_NAME="ddrescue TUI" +TMUX_CMD="./ddrescue-tui.py" + +launch_in_tmux "$@" diff --git a/scripts/ddrescue-tui.py b/scripts/ddrescue-tui.py new file mode 100755 index 00000000..89584890 --- /dev/null +++ b/scripts/ddrescue-tui.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +"""Wizard Kit: ddrescue TUI""" +# vim: sts=2 sw=2 ts=2 + +import wk + + +if __name__ == '__main__': + try: + wk.hw.ddrescue.main() + except SystemExit: + raise + except: #pylint: disable=bare-except + wk.std.major_exception() diff --git a/.bin/Scripts/echo-and-hold b/scripts/echo-and-hold similarity index 100% rename from .bin/Scripts/echo-and-hold rename to scripts/echo-and-hold diff --git a/.bin/Scripts/hw-diags b/scripts/hw-diags similarity index 73% rename from .bin/Scripts/hw-diags rename to scripts/hw-diags index 70f84db4..feb87fc8 100755 --- a/.bin/Scripts/hw-diags +++ b/scripts/hw-diags @@ -2,10 +2,10 @@ # ## Wizard Kit: HW Diagnostics Launcher -source launch-in-tmux +source ./launch-in-tmux SESSION_NAME="hw-diags" WINDOW_NAME="Hardware Diagnostics" -TMUX_CMD="hw-diags-menu" +TMUX_CMD="./hw-diags.py" launch_in_tmux "$@" diff --git a/scripts/hw-diags.py b/scripts/hw-diags.py new file mode 100755 index 00000000..e3719875 --- /dev/null +++ b/scripts/hw-diags.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +"""Wizard Kit: Hardware Diagnostics""" +# vim: sts=2 sw=2 ts=2 + +import wk + + +if __name__ == '__main__': + try: + wk.hw.diags.main() + except SystemExit: + raise + except: #pylint: disable=bare-except + wk.std.major_exception() diff --git a/scripts/hw-drive-info b/scripts/hw-drive-info new file mode 100755 index 00000000..76a0aa27 --- /dev/null +++ b/scripts/hw-drive-info @@ -0,0 +1,49 @@ +#!/bin/bash +# + +BLUE='\033[34m' +CLEAR='\033[0m' +IFS=$'\n' + +# Check if running under Linux +os_name="$(uname -s)" +if [[ "$os_name" == "Darwin" ]]; then + os_name="macOS" +fi +if [[ "$os_name" != "Linux" ]]; then + echo "This script is not supported under $os_name." 
1>&2 + exit 1 +fi + +# List devices +for line in $(lsblk -do NAME,TRAN,SIZE,VENDOR,MODEL,SERIAL); do + if [[ "${line:0:4}" == "NAME" ]]; then + echo -e "${BLUE}${line}${CLEAR}" + else + echo "${line}" + fi +done +echo "" + +# List loopback devices +if [[ "$(losetup -l | wc -l)" > 0 ]]; then + for line in $(losetup -lO NAME,PARTSCAN,RO,BACK-FILE); do + if [[ "${line:0:4}" == "NAME" ]]; then + echo -e "${BLUE}${line}${CLEAR}" + else + echo "${line}" | sed -r 's#/dev/(loop[0-9]+)#\1 #' + fi + done + echo "" +fi + +# List partitions +for line in $(lsblk -o NAME,SIZE,FSTYPE,LABEL,MOUNTPOINT); do + if [[ "${line:0:4}" == "NAME" ]]; then + echo -e "${BLUE}${line}${CLEAR}" + else + echo "${line}" + fi +done +echo "" + diff --git a/.bin/Scripts/hw-info b/scripts/hw-info similarity index 51% rename from .bin/Scripts/hw-info rename to scripts/hw-info index 8321e7aa..2cd5f848 100755 --- a/.bin/Scripts/hw-info +++ b/scripts/hw-info @@ -9,22 +9,32 @@ YELLOW="\e[33m" BLUE="\e[34m" function print_in_columns() { - string="$1" - label="$(echo "$string" | sed -r 's/^\s*(.*:).*/\1/')" - value="$(echo "$string" | sed -r 's/^\s*.*:\s*(.*)/\1/')" - printf ' %-18s%s\n' "$label" "$value" + string="$1" + label="$(echo "$string" | sed -r 's/^\s*(.*:).*/\1/')" + value="$(echo "$string" | sed -r 's/^\s*.*:\s*(.*)/\1/')" + printf ' %-18s%s\n' "$label" "$value" } function print_dmi_value() { - name="$1" - file="/sys/devices/virtual/dmi/id/$2" - value="UNKNOWN" - if [[ -e "$file" ]]; then - value="$(cat "$file")" - fi - print_in_columns "$name: $value" + name="$1" + file="/sys/devices/virtual/dmi/id/$2" + value="UNKNOWN" + if [[ -e "$file" ]]; then + value="$(cat "$file")" + fi + print_in_columns "$name: $value" } +# Check if running under Linux +os_name="$(uname -s)" +if [[ "$os_name" == "Darwin" ]]; then + os_name="macOS" +fi +if [[ "$os_name" != "Linux" ]]; then + echo "This script is not supported under $os_name." 
1>&2 + exit 1 +fi + # System echo -e "${BLUE}System Information${CLEAR}" print_dmi_value "Vendor" "sys_vendor" @@ -50,58 +60,58 @@ echo "" # Processor echo -e "${BLUE}Processor${CLEAR}" lscpu | grep -E '^(Arch|CPU.s.|Core|Thread|Model name|Virt)' \ - | sed -r 's/\(s\)(.*:)/s\1 /' \ - | sed -r 's/CPUs: /Threads:/' \ - | sed -r 's/^(.*:) / \1/' + | sed -r 's/\(s\)(.*:)/s\1 /' \ + | sed -r 's/CPUs: /Threads:/' \ + | sed -r 's/^(.*:) / \1/' echo "" # Memory echo -e "${BLUE}Memory${CLEAR}" first_device="True" while read -r line; do - if [[ "$line" == "Memory Device" ]]; then - if [[ "$first_device" == "True" ]]; then - first_device="False" - else - # Add space between devices - echo "" - fi + if [[ "$line" == "Memory Device" ]]; then + if [[ "$first_device" == "True" ]]; then + first_device="False" else - print_in_columns "$line" + # Add space between devices + echo "" fi + else + print_in_columns "$line" + fi done <<< $(sudo dmidecode -t memory \ - | grep -E '^(Memory Device|\s+(Type|Size|Speed|Manuf.*|Locator|Part Number):)') + | grep -E '^(Memory Device|\s+(Type|Size|Speed|Manuf.*|Locator|Part Number):)') echo "" # Graphics echo -e "${BLUE}Graphics${CLEAR}" -lspci | grep 'VGA' | sed -r 's/^.*:/ Device: /' \ - | sed 's/Intel Corporation/Intel/' \ - | sed 's/Generation Core Processor Family/Gen/' \ - | sed 's/Integrated Graphics Controller.*/iGPU/' -glxinfo 2>/dev/null | grep 'OpenGL renderer' | sed -r 's/^.*:/ OpenGL Renderer: /' \ - | sed 's/Mesa DRI //' +lspci | grep 'VGA' | sed -r 's/^.*:/ Device: /' \ + | sed 's/Intel Corporation/Intel/' \ + | sed 's/Generation Core Processor Family/Gen/' \ + | sed 's/Integrated Graphics Controller.*/iGPU/' +glxinfo 2>/dev/null | grep 'OpenGL renderer' | sed -r 's/^.*:/ OpenGL Renderer: /' \ + | sed 's/Mesa DRI //' echo "" # Audio echo -e "${BLUE}Audio${CLEAR}" while read -r line; do - if [[ "$line" =~ .*no.soundcards.found.* ]]; then - echo " No soundcards found" - else - print_in_columns "$line" - fi + if [[ "$line" = .*no.soundcards.found.* ]]; then + echo " No soundcards found" + else + print_in_columns "$line" + fi done <<< $(aplay -l 2>&1 | grep -Ei '(^card|no soundcards found)' | sed -r 's/.*\[(.*)\].*\[(.*)\].*/\1: \2/') echo "" # Network echo -e "${BLUE}Network${CLEAR}" lspci | grep -Ei '(ethernet|network|wireless|wifi)' \ - | sed -r 's/.*: (.*)$/ \1/' + | sed -r 's/.*: (.*)$/ \1/' echo "" # Drives echo -e "${BLUE}Drives${CLEAR}" -hw-drive-info | sed 's/^/ /' +hw-drive-info | sed 's/^/ /' echo "" diff --git a/scripts/hw-sensors b/scripts/hw-sensors new file mode 100755 index 00000000..d4665466 --- /dev/null +++ b/scripts/hw-sensors @@ -0,0 +1,46 @@ +#!/usr/bin/env python3 +"""Wizard Kit: Hardware Sensors""" +# vim: sts=2 sw=2 ts=2 + +import platform + +import wk + + +def main(): + """Show sensor data on screen.""" + sensors = wk.hw.sensors.Sensors() + if platform.system() == 'Darwin': + wk.std.clear_screen() + while True: + print('\033[100A', end='') + sensors.update_sensor_data() + wk.std.print_report(sensors.generate_report('Current', 'Max')) + wk.std.sleep(1) + elif platform.system() == 'Linux': + proc = wk.exe.run_program(cmd=['mktemp']) + sensors.start_background_monitor( + out_path=proc.stdout.strip(), + exit_on_thermal_limit=False, + temp_labels=('Current', 'Max'), + ) + watch_cmd = [ + 'watch', + '--color', + '--exec', + '--no-title', + '--interval', '1', + 'cat', + proc.stdout.strip(), + ] + wk.exe.run_program(watch_cmd, check=False, pipe=False) + +if __name__ == '__main__': + try: + main() + except KeyboardInterrupt: + pass + except 
SystemExit: + raise + except: #pylint: disable=bare-except + wk.std.major_exception() diff --git a/.bin/Scripts/init_client_dir.cmd b/scripts/init_client_dir.cmd similarity index 100% rename from .bin/Scripts/init_client_dir.cmd rename to scripts/init_client_dir.cmd diff --git a/.bin/Scripts/launch-in-tmux b/scripts/launch-in-tmux similarity index 62% rename from .bin/Scripts/launch-in-tmux rename to scripts/launch-in-tmux index e737b574..2be76959 100755 --- a/.bin/Scripts/launch-in-tmux +++ b/scripts/launch-in-tmux @@ -13,16 +13,16 @@ function ask() { done } -die () { +function err () { echo "$0:" "$@" >&2 - exit 1 + return 1 } function launch_in_tmux() { # Check for required vars - [[ -n "${SESSION_NAME:-}" ]] || die "Required variable missing (SESSION_NAME)" - [[ -n "${WINDOW_NAME:-}" ]] || die "Required variable missing (WINDOW_NAME)" - [[ -n "${TMUX_CMD:-}" ]] || die "Required variable missing (TMUX_CMD)" + [[ -n "${SESSION_NAME:-}" ]] || return $(err "Required variable missing (SESSION_NAME)") + [[ -n "${WINDOW_NAME:-}" ]] || return $(err "Required variable missing (WINDOW_NAME)") + [[ -n "${TMUX_CMD:-}" ]] || return $(err "Required variable missing (TMUX_CMD)") # Check for running session if tmux list-session | grep -q "$SESSION_NAME"; then @@ -32,31 +32,35 @@ function launch_in_tmux() { if [[ -n "${TMUX:-}" ]]; then # Running inside TMUX, switch to session tmux switch-client -t "$SESSION_NAME" + if ! jobs %% >/dev/null 2>&1; then + # No running jobs, try exiting abandoned tmux session + exit 0 + fi else # Running outside TMUX, attach to session tmux attach-session -t "$SESSION_NAME" fi - exit 0 + return 0 elif ask "Kill current session and start new session?"; then tmux kill-session -t "$SESSION_NAME" || \ die "Failed to kill session: $SESSION_NAME" else echo "Aborted." - echo "" - echo -n "Press Enter to exit... 
" - read -r - exit 0 + return 1 fi fi - # Start/Rename session + # Start session if [[ -n "${TMUX:-}" ]]; then - # Running inside TMUX, rename session/window and open the menu + # Running inside TMUX, save current session/window names + ORIGINAL_SESSION_NAME="$(tmux display-message -p '#S')" + ORIGINAL_WINDOW_NAME="$(tmux display-message -p '#W')" tmux rename-session "$SESSION_NAME" tmux rename-window "$WINDOW_NAME" "$TMUX_CMD" "$@" - tmux rename-session "${SESSION_NAME}_DONE" - tmux rename-window "${WINDOW_NAME}_DONE" + # Restore previous session/window names + tmux rename-session "${ORIGINAL_SESSION_NAME}" + tmux rename-window "${ORIGINAL_WINDOW_NAME}" else # Running outside TMUX, start/attach to session tmux new-session -s "$SESSION_NAME" -n "$WINDOW_NAME" "$TMUX_CMD" "$@" diff --git a/scripts/mount-all-volumes b/scripts/mount-all-volumes new file mode 100755 index 00000000..fb703011 --- /dev/null +++ b/scripts/mount-all-volumes @@ -0,0 +1,33 @@ +#!/usr/bin/env python3 +"""Wizard Kit: Mount all volumes""" +# vim: sts=2 sw=2 ts=2 + +import wk + + +# Functions +def main(): + """Mount all volumes and show results.""" + wk.std.print_standard(f'{wk.cfg.main.KIT_NAME_FULL}: Volume mount tool') + wk.std.print_standard(' ') + + # Mount volumes and get report + wk.std.print_standard('Mounting volumes...') + report = wk.os.linux.mount_volumes() + + # Show results + wk.std.print_info('Results') + wk.std.print_report(report, indent=2) + + +if __name__ == '__main__': + if wk.std.PLATFORM != 'Linux': + os_name = wk.std.PLATFORM.replace('Darwin', 'macOS') + wk.std.print_error(f'This script is not supported under {os_name}.') + wk.std.abort() + try: + main() + except SystemExit: + raise + except: #pylint: disable=bare-except + wk.std.major_exception() diff --git a/scripts/mount-backup-shares b/scripts/mount-backup-shares new file mode 100755 index 00000000..69ae4a58 --- /dev/null +++ b/scripts/mount-backup-shares @@ -0,0 +1,30 @@ +#!/usr/bin/env python3 +"""Wizard Kit: Mount Backup Shares""" +# pylint: disable=invalid-name +# vim: sts=2 sw=2 ts=2 + +import wk + + +# Functions +def main(): + """Attempt to mount backup shares and print report.""" + wk.std.print_info('Mounting Backup Shares') + report = wk.net.mount_backup_shares() + for line in report: + color = 'GREEN' + line = f' {line}' + if 'Failed' in line: + color = 'RED' + elif 'Already' in line: + color = 'YELLOW' + print(wk.std.color_string(line, color)) + + +if __name__ == '__main__': + try: + main() + except SystemExit: + raise + except: #pylint: disable=bare-except + wk.std.major_exception() diff --git a/.bin/Scripts/mount-raw-image b/scripts/mount-raw-image similarity index 100% rename from .bin/Scripts/mount-raw-image rename to scripts/mount-raw-image diff --git a/.bin/Scripts/msword-search b/scripts/msword-search similarity index 100% rename from .bin/Scripts/msword-search rename to scripts/msword-search diff --git a/.bin/Scripts/check_disk.py b/scripts/outer_scripts_to_review/check_disk.py similarity index 100% rename from .bin/Scripts/check_disk.py rename to scripts/outer_scripts_to_review/check_disk.py diff --git a/.bin/Scripts/dism.py b/scripts/outer_scripts_to_review/dism.py similarity index 100% rename from .bin/Scripts/dism.py rename to scripts/outer_scripts_to_review/dism.py diff --git a/.bin/Scripts/install_sw_bundle.py b/scripts/outer_scripts_to_review/install_sw_bundle.py similarity index 100% rename from .bin/Scripts/install_sw_bundle.py rename to scripts/outer_scripts_to_review/install_sw_bundle.py diff --git 
a/.bin/Scripts/install_vcredists.py b/scripts/outer_scripts_to_review/install_vcredists.py similarity index 100% rename from .bin/Scripts/install_vcredists.py rename to scripts/outer_scripts_to_review/install_vcredists.py diff --git a/.bin/Scripts/system_diagnostics.py b/scripts/outer_scripts_to_review/system_diagnostics.py similarity index 100% rename from .bin/Scripts/system_diagnostics.py rename to scripts/outer_scripts_to_review/system_diagnostics.py diff --git a/.bin/Scripts/system_setup.py b/scripts/outer_scripts_to_review/system_setup.py similarity index 100% rename from .bin/Scripts/system_setup.py rename to scripts/outer_scripts_to_review/system_setup.py diff --git a/.bin/Scripts/transferred_keys.py b/scripts/outer_scripts_to_review/transferred_keys.py similarity index 100% rename from .bin/Scripts/transferred_keys.py rename to scripts/outer_scripts_to_review/transferred_keys.py diff --git a/.bin/Scripts/update_kit.py b/scripts/outer_scripts_to_review/update_kit.py similarity index 100% rename from .bin/Scripts/update_kit.py rename to scripts/outer_scripts_to_review/update_kit.py diff --git a/.bin/Scripts/user_data_transfer.py b/scripts/outer_scripts_to_review/user_data_transfer.py similarity index 100% rename from .bin/Scripts/user_data_transfer.py rename to scripts/outer_scripts_to_review/user_data_transfer.py diff --git a/.bin/Scripts/windows_updates.py b/scripts/outer_scripts_to_review/windows_updates.py similarity index 100% rename from .bin/Scripts/windows_updates.py rename to scripts/outer_scripts_to_review/windows_updates.py diff --git a/.bin/Scripts/winpe_root_menu.py b/scripts/outer_scripts_to_review/winpe_root_menu.py similarity index 100% rename from .bin/Scripts/winpe_root_menu.py rename to scripts/outer_scripts_to_review/winpe_root_menu.py diff --git a/.bin/Scripts/pacinit b/scripts/pacinit similarity index 100% rename from .bin/Scripts/pacinit rename to scripts/pacinit diff --git a/.bin/Scripts/photorec-sort b/scripts/photorec-sort similarity index 100% rename from .bin/Scripts/photorec-sort rename to scripts/photorec-sort diff --git a/.bin/Scripts/remount-rw b/scripts/remount-rw similarity index 100% rename from .bin/Scripts/remount-rw rename to scripts/remount-rw diff --git a/scripts/safemode_enter.py b/scripts/safemode_enter.py new file mode 100644 index 00000000..fffa585e --- /dev/null +++ b/scripts/safemode_enter.py @@ -0,0 +1,37 @@ +"""Wizard Kit: Enter SafeMode by editing the BCD""" +# vim: sts=2 sw=2 ts=2 + +import wk + + +def main(): + """Prompt user to enter safe mode.""" + title = f'{wk.cfg.main.KIT_NAME_FULL}: SafeMode Tool' + try_print = wk.std.TryAndPrint() + wk.std.clear_screen() + wk.std.set_title(title) + wk.std.print_info(title) + print('') + + # Ask + if not wk.std.ask('Enable booting to SafeMode (with Networking)?'): + wk.std.abort() + print('') + + # Configure SafeMode + try_print.run('Set BCD option...', wk.os.win.enable_safemode) + try_print.run('Enable MSI in SafeMode...', wk.os.win.enable_safemode_msi) + + # Done + print('Done.') + wk.std.pause('Press Enter to reboot...') + wk.exe.run_program('shutdown -r -t 3'.split(), check=False) + + +if __name__ == '__main__': + try: + main() + except SystemExit: + raise + except: #pylint: disable=bare-except + wk.std.major_exception() diff --git a/scripts/safemode_exit.py b/scripts/safemode_exit.py new file mode 100644 index 00000000..e46c9ade --- /dev/null +++ b/scripts/safemode_exit.py @@ -0,0 +1,37 @@ +"""Wizard Kit: Exit SafeMode by editing the BCD""" +# vim: sts=2 sw=2 ts=2 + +import wk + + +def 
main(): + """Prompt user to exit safe mode.""" + title = f'{wk.cfg.main.KIT_NAME_FULL}: SafeMode Tool' + try_print = wk.std.TryAndPrint() + wk.std.clear_screen() + wk.std.set_title(title) + wk.std.print_info(title) + print('') + + # Ask + if not wk.std.ask('Disable booting to SafeMode?'): + wk.std.abort() + print('') + + # Configure SafeMode + try_print.run('Remove BCD option...', wk.os.win.disable_safemode) + try_print.run('Disable MSI in SafeMode...', wk.os.win.disable_safemode_msi) + + # Done + print('Done.') + wk.std.pause('Press Enter to reboot...') + wk.exe.run_program('shutdown -r -t 3'.split(), check=False) + + +if __name__ == '__main__': + try: + main() + except SystemExit: + raise + except: #pylint: disable=bare-except + wk.std.major_exception() diff --git a/scripts/sfc_scan.py b/scripts/sfc_scan.py new file mode 100644 index 00000000..02d84f77 --- /dev/null +++ b/scripts/sfc_scan.py @@ -0,0 +1,35 @@ +"""Wizard Kit: Check, and possibly repair, system file health via SFC""" +# vim: sts=2 sw=2 ts=2 + +import wk + + +def main(): + """Run SFC and report result.""" + title = f'{wk.cfg.main.KIT_NAME_FULL}: SFC Tool' + try_print = wk.std.TryAndPrint() + wk.std.clear_screen() + wk.std.set_title(title) + wk.std.print_info(title) + print('') + + # Ask + if not wk.std.ask('Run a SFC scan now?'): + wk.std.abort() + print('') + + # Run + try_print.run('SFC scan...', wk.os.win.run_sfc_scan) + + # Done + print('Done') + wk.std.pause('Press Enter to exit...') + + +if __name__ == '__main__': + try: + main() + except SystemExit: + raise + except: #pylint: disable=bare-except + wk.std.major_exception() diff --git a/scripts/unmount-backup-shares b/scripts/unmount-backup-shares new file mode 100755 index 00000000..a71a5c7b --- /dev/null +++ b/scripts/unmount-backup-shares @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +"""Wizard Kit: Unmount Backup Shares""" +# pylint: disable=invalid-name +# vim: sts=2 sw=2 ts=2 + +import wk + + +# Functions +def main(): + """Attempt to mount backup shares and print report.""" + wk.std.print_info('Unmounting Backup Shares') + report = wk.net.unmount_backup_shares() + for line in report: + color = 'GREEN' + line = f' {line}' + if 'Not mounted' in line: + color = 'YELLOW' + print(wk.std.color_string(line, color)) + + +if __name__ == '__main__': + try: + main() + except SystemExit: + raise + except: #pylint: disable=bare-except + wk.std.major_exception() diff --git a/scripts/watch-mac b/scripts/watch-mac new file mode 100755 index 00000000..81029734 --- /dev/null +++ b/scripts/watch-mac @@ -0,0 +1,11 @@ +#!/bin/zsh +# +## watch-like utility + +WATCH_FILE="${1}" + +while :; do + echo -n "\e[100A" + cat "${WATCH_FILE}" + sleep 1s +done diff --git a/.bin/Scripts/wk-power-command b/scripts/wk-power-command similarity index 100% rename from .bin/Scripts/wk-power-command rename to scripts/wk-power-command diff --git a/.bin/Scripts/debug/hw_diags.py b/scripts/wk.prev/debug/hw_diags.py similarity index 100% rename from .bin/Scripts/debug/hw_diags.py rename to scripts/wk.prev/debug/hw_diags.py diff --git a/.bin/Scripts/functions/activation.py b/scripts/wk.prev/functions/activation.py similarity index 100% rename from .bin/Scripts/functions/activation.py rename to scripts/wk.prev/functions/activation.py diff --git a/.bin/Scripts/functions/backup.py b/scripts/wk.prev/functions/backup.py similarity index 100% rename from .bin/Scripts/functions/backup.py rename to scripts/wk.prev/functions/backup.py diff --git a/.bin/Scripts/functions/browsers.py b/scripts/wk.prev/functions/browsers.py 
similarity index 100% rename from .bin/Scripts/functions/browsers.py rename to scripts/wk.prev/functions/browsers.py diff --git a/.bin/Scripts/functions/cleanup.py b/scripts/wk.prev/functions/cleanup.py similarity index 100% rename from .bin/Scripts/functions/cleanup.py rename to scripts/wk.prev/functions/cleanup.py diff --git a/scripts/wk.prev/functions/common.py b/scripts/wk.prev/functions/common.py new file mode 100644 index 00000000..f2e019d3 --- /dev/null +++ b/scripts/wk.prev/functions/common.py @@ -0,0 +1,434 @@ +# Wizard Kit: Functions - Common + +import os +import psutil +import re +import shutil +import subprocess +import sys +import time +import traceback +try: + import winreg +except ModuleNotFoundError: + if psutil.WINDOWS: + raise + +from settings.main import * +from settings.tools import * +from settings.windows_builds import * +from subprocess import CalledProcessError + + +# Global variables +global_vars = {} + + +# STATIC VARIABLES +COLORS = { + 'CLEAR': '\033[0m', + 'RED': '\033[31m', + 'ORANGE': '\033[31;1m', + 'GREEN': '\033[32m', + 'YELLOW': '\033[33m', + 'BLUE': '\033[34m', + 'PURPLE': '\033[35m', + 'CYAN': '\033[36m', + } +try: + HKU = winreg.HKEY_USERS + HKCR = winreg.HKEY_CLASSES_ROOT + HKCU = winreg.HKEY_CURRENT_USER + HKLM = winreg.HKEY_LOCAL_MACHINE +except NameError: + if psutil.WINDOWS: + raise + + +# Error Classes +class BIOSKeyNotFoundError(Exception): + pass + +class BinNotFoundError(Exception): + pass + +class GenericAbort(Exception): + pass + +class GenericError(Exception): + pass + +class GenericRepair(Exception): + pass + +class MultipleInstallationsError(Exception): + pass + +class NoProfilesError(Exception): + pass + +class Not4KAlignedError(Exception): + pass + +class NotInstalledError(Exception): + pass + +class OSInstalledLegacyError(Exception): + pass + +class PathNotFoundError(Exception): + pass + +class UnsupportedOSError(Exception): + pass + +class SecureBootDisabledError(Exception): + pass + +class SecureBootNotAvailError(Exception): + pass + +class SecureBootUnknownError(Exception): + pass + +class WindowsOutdatedError(Exception): + pass + +class WindowsUnsupportedError(Exception): + pass + + +# General functions +def exit_script(return_value=0): + """Exits the script after some cleanup and opens the log (if set).""" + # Remove dirs (if empty) + for dir in ['BackupDir', 'LogDir', 'TmpDir']: + try: + os.rmdir(global_vars[dir]) + except Exception: + pass + + # Open Log (if it exists) + log = global_vars.get('LogFile', '') + if log and os.path.exists(log) and psutil.WINDOWS and ENABLED_OPEN_LOGS: + try: + extract_item('NotepadPlusPlus', silent=True) + popen_program( + [global_vars['Tools']['NotepadPlusPlus'], + global_vars['LogFile']]) + except Exception: + print_error('ERROR: Failed to extract Notepad++ and open log.') + pause('Press Enter to exit...') + + # Kill Caffeine if still running + kill_process('caffeine.exe') + + # Exit + sys.exit(return_value) + + +def extract_item(item, filter='', silent=False): + """Extract item from .cbin into .bin.""" + cmd = [ + global_vars['Tools']['SevenZip'], 'x', '-aos', '-bso0', '-bse0', + '-p{ArchivePassword}'.format(**global_vars), + r'-o{BinDir}\{item}'.format(item=item, **global_vars), + r'{CBinDir}\{item}.7z'.format(item=item, **global_vars), + filter] + if not silent: + print_standard('Extracting "{item}"...'.format(item=item)) + try: + run_program(cmd) + except FileNotFoundError: + if not silent: + print_warning('WARNING: Archive not found') + except subprocess.CalledProcessError: + if not silent: + 
print_warning('WARNING: Errors encountered while exctracting data') + + +def get_process(name=None): + """Get process by name, returns psutil.Process obj.""" + proc = None + if not name: + raise GenericError + + for p in psutil.process_iter(): + try: + if p.name() == name: + proc = p + except psutil._exceptions.NoSuchProcess: + # Process finished during iteration? Going to ignore + pass + return proc + + +def get_ticket_number(): + """Get TicketNumber from user, save in LogDir, and return as str.""" + if not ENABLED_TICKET_NUMBERS: + return None + ticket_number = None + while ticket_number is None: + _input = input('Enter ticket number: ') + if re.match(r'^([0-9]+([-_]?\w+|))$', _input): + ticket_number = _input + out_file = r'{}\TicketNumber'.format(global_vars['LogDir']) + if not psutil.WINDOWS: + out_file = out_file.replace('\\', '/') + with open(out_file, 'w', encoding='utf-8') as f: + f.write(ticket_number) + return ticket_number + + +def kill_process(name): + """Kill any running caffeine.exe processes.""" + for proc in psutil.process_iter(): + if proc.name() == name: + proc.kill() + + +def stay_awake(): + """Prevent the system from sleeping or hibernating.""" + # DISABLED due to VCR2008 dependency + return + # Bail if caffeine is already running + for proc in psutil.process_iter(): + if proc.name() == 'caffeine.exe': + return + # Extract and run + extract_item('Caffeine', silent=True) + try: + popen_program([global_vars['Tools']['Caffeine']]) + except Exception: + print_error('ERROR: No caffeine available.') + print_warning('Please set the power setting to High Performance.') + + +def wait_for_process(name, poll_rate=3): + """Wait for process by name.""" + running = True + while running: + sleep(poll_rate) + running = False + for proc in psutil.process_iter(): + try: + if re.search(r'^{}'.format(name), proc.name(), re.IGNORECASE): + running = True + except psutil._exceptions.NoSuchProcess: + # Assuming process closed during iteration + pass + sleep(1) + + +# global_vars functions +def init_global_vars(silent=False): + """Sets global variables based on system info.""" + if not silent: + print_info('Initializing') + if psutil.WINDOWS: + os.system('title Wizard Kit') + if psutil.LINUX: + init_functions = [ + ['Checking environment...', set_linux_vars], + ['Clearing collisions...', clean_env_vars], + ] + else: + init_functions = [ + ['Checking .bin...', find_bin], + ['Checking environment...', set_common_vars], + ['Checking OS...', check_os], + ['Checking tools...', check_tools], + ['Creating folders...', make_tmp_dirs], + ['Clearing collisions...', clean_env_vars], + ] + try: + if silent: + for f in init_functions: + f[1]() + else: + for f in init_functions: + try_and_print( + message=f[0], function=f[1], + cs='Done', ns='Error', catch_all=False) + except: + major_exception() + + +def check_os(): + """Set OS specific variables.""" + tmp = {} + + # Query registry + path = r'SOFTWARE\Microsoft\Windows NT\CurrentVersion' + with winreg.OpenKey(HKLM, path) as key: + for name in ['CurrentBuild', 'CurrentVersion', 'ProductName']: + try: + tmp[name] = winreg.QueryValueEx(key, name)[0] + except FileNotFoundError: + tmp[name] = 'Unknown' + + # Handle CurrentBuild collision + if tmp['CurrentBuild'] == '9200': + if tmp['CurrentVersion'] == '6.2': + # Windown 8, set to fake build number + tmp['CurrentBuild'] = '9199' + else: + # Windows 8.1, leave alone + pass + + # Check bit depth + tmp['Arch'] = 32 + if 'PROGRAMFILES(X86)' in global_vars['Env']: + tmp['Arch'] = 64 + + # Get Windows build info + 
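  # (WINDOWS_BUILDS maps a build-number string to a 5-item sequence:
  #  Version, Release, Codename, Marketing Name, Notes; unknown builds
  #  fall back to the placeholder entry built below.)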
build_info = WINDOWS_BUILDS.get(tmp['CurrentBuild'], None) + if build_info is None: + # Not in windows_builds.py + build_info = [ + 'Unknown', + 'Build {}'.format(tmp['CurrentBuild']), + None, + None, + 'unrecognized'] + else: + build_info = list(build_info) + tmp['Version'] = build_info.pop(0) + tmp['Release'] = build_info.pop(0) + tmp['Codename'] = build_info.pop(0) + tmp['Marketing Name'] = build_info.pop(0) + tmp['Notes'] = build_info.pop(0) + + # Set name + tmp['Name'] = tmp['ProductName'] + if tmp['Release']: + tmp['Name'] += ' {}'.format(tmp['Release']) + if tmp['Codename']: + tmp['Name'] += ' "{}"'.format(tmp['Codename']) + if tmp['Marketing Name']: + tmp['Name'] += ' / "{}"'.format(tmp['Marketing Name']) + tmp['Name'] = re.sub(r'\s+', ' ', tmp['Name']) + + # Set display name + tmp['DisplayName'] = '{} x{}'.format(tmp['Name'], tmp['Arch']) + if tmp['Notes']: + tmp['DisplayName'] += ' ({})'.format(tmp['Notes']) + + global_vars['OS'] = tmp + + +def check_tools(): + """Set tool variables based on OS bit-depth and tool availability.""" + if global_vars['OS'].get('Arch', 32) == 64: + global_vars['Tools'] = { + k: v.get('64', v.get('32')) for (k, v) in TOOLS.items()} + else: + global_vars['Tools'] = {k: v.get('32') for (k, v) in TOOLS.items()} + + # Fix paths + global_vars['Tools'] = {k: os.path.join(global_vars['BinDir'], v) + for (k, v) in global_vars['Tools'].items()} + + +def clean_env_vars(): + """Remove conflicting global_vars and env variables. + + This fixes an issue where both global_vars and + global_vars['Env'] are expanded at the same time.""" + for key in global_vars.keys(): + global_vars['Env'].pop(key, None) + + +def find_bin(): + """Find .bin folder in the cwd or it's parents.""" + wd = os.getcwd() + base = None + while base is None: + if os.path.exists('.bin'): + base = os.getcwd() + break + if re.fullmatch(r'\w:\\', os.getcwd()): + break + os.chdir('..') + os.chdir(wd) + if base is None: + raise BinNotFoundError + global_vars['BaseDir'] = base + + +def generate_global_vars_report(): + """Build readable string from global_vars, returns str.""" + report = ['global_vars: {'] + for k, v in sorted(global_vars.items()): + if k == 'Env': + continue + if isinstance(v, list): + report.append(' {}: ['.format(str(k))) + for item in v: + report.append(' {}'.format(str(v))) + report.append(' ]') + elif isinstance(v, dict): + report.append(' {}: {{'.format(str(k))) + for item_k, item_v in sorted(v.items()): + report.append(' {:<15} {}'.format( + str(item_k)+':', str(item_v))) + report.append(' }') + else: + report.append(' {:<18}{}'.format(str(k)+':', str(v))) + report.append(' Env:') + for k, v in sorted(global_vars.get('Env', {}).items()): + report.append(' {:<15} {}'.format( + str(k)+':', str(v))) + report.append('}') + + return '\n'.join(report) + + +def make_tmp_dirs(): + """Make temp directories.""" + os.makedirs(global_vars['BackupDir'], exist_ok=True) + os.makedirs(global_vars['LogDir'], exist_ok=True) + os.makedirs(r'{}\{}'.format( + global_vars['LogDir'], KIT_NAME_FULL), exist_ok=True) + os.makedirs(r'{}\Tools'.format(global_vars['LogDir']), exist_ok=True) + os.makedirs(global_vars['TmpDir'], exist_ok=True) + + +def set_common_vars(): + """Set common variables.""" + global_vars['Date'] = time.strftime("%Y-%m-%d") + global_vars['Date-Time'] = time.strftime("%Y-%m-%d_%H%M_%z") + global_vars['Env'] = os.environ.copy() + + global_vars['ArchivePassword'] = ARCHIVE_PASSWORD + global_vars['BinDir'] = r'{BaseDir}\.bin'.format(**global_vars) + global_vars['CBinDir'] = 
r'{BaseDir}\.cbin'.format(**global_vars) + global_vars['ClientDir'] = r'{SYSTEMDRIVE}\{prefix}'.format( + prefix=KIT_NAME_SHORT, **global_vars['Env']) + global_vars['BackupDir'] = r'{ClientDir}\Backups'.format(**global_vars) + global_vars['LogDir'] = r'{ClientDir}\Logs\{Date}'.format(**global_vars) + global_vars['QuarantineDir'] = r'{ClientDir}\Quarantine'.format(**global_vars) + global_vars['TmpDir'] = r'{BinDir}\tmp'.format(**global_vars) + + +def set_linux_vars(): + """Set common variables in a Linux environment. + + These assume we're running under a WK-Linux build.""" + result = run_program(['mktemp', '-d']) + global_vars['TmpDir'] = result.stdout.decode().strip() + global_vars['Date'] = time.strftime("%Y-%m-%d") + global_vars['Date-Time'] = time.strftime("%Y-%m-%d_%H%M_%z") + global_vars['Env'] = os.environ.copy() + global_vars['BinDir'] = '/usr/local/bin' + global_vars['LogDir'] = '{}/Logs'.format(global_vars['Env']['HOME']) + global_vars['Tools'] = { + 'wimlib-imagex': 'wimlib-imagex', + 'SevenZip': '7z', + } + + +if __name__ == '__main__': + print("This file is not meant to be called directly.") + +# vim: sts=2 sw=2 ts=2 diff --git a/.bin/Scripts/functions/data.py b/scripts/wk.prev/functions/data.py similarity index 100% rename from .bin/Scripts/functions/data.py rename to scripts/wk.prev/functions/data.py diff --git a/.bin/Scripts/functions/ddrescue.py b/scripts/wk.prev/functions/ddrescue.py similarity index 100% rename from .bin/Scripts/functions/ddrescue.py rename to scripts/wk.prev/functions/ddrescue.py diff --git a/.bin/Scripts/functions/disk.py b/scripts/wk.prev/functions/disk.py similarity index 100% rename from .bin/Scripts/functions/disk.py rename to scripts/wk.prev/functions/disk.py diff --git a/.bin/Scripts/functions/hw_diags.py b/scripts/wk.prev/functions/hw_diags.py similarity index 100% rename from .bin/Scripts/functions/hw_diags.py rename to scripts/wk.prev/functions/hw_diags.py diff --git a/.bin/Scripts/functions/info.py b/scripts/wk.prev/functions/info.py similarity index 100% rename from .bin/Scripts/functions/info.py rename to scripts/wk.prev/functions/info.py diff --git a/.bin/Scripts/functions/json.py b/scripts/wk.prev/functions/json.py similarity index 100% rename from .bin/Scripts/functions/json.py rename to scripts/wk.prev/functions/json.py diff --git a/.bin/Scripts/functions/product_keys.py b/scripts/wk.prev/functions/product_keys.py similarity index 100% rename from .bin/Scripts/functions/product_keys.py rename to scripts/wk.prev/functions/product_keys.py diff --git a/.bin/Scripts/functions/repairs.py b/scripts/wk.prev/functions/repairs.py similarity index 100% rename from .bin/Scripts/functions/repairs.py rename to scripts/wk.prev/functions/repairs.py diff --git a/.bin/Scripts/functions/safemode.py b/scripts/wk.prev/functions/safemode.py similarity index 100% rename from .bin/Scripts/functions/safemode.py rename to scripts/wk.prev/functions/safemode.py diff --git a/.bin/Scripts/functions/sensors.py b/scripts/wk.prev/functions/sensors.py similarity index 100% rename from .bin/Scripts/functions/sensors.py rename to scripts/wk.prev/functions/sensors.py diff --git a/.bin/Scripts/functions/setup.py b/scripts/wk.prev/functions/setup.py similarity index 100% rename from .bin/Scripts/functions/setup.py rename to scripts/wk.prev/functions/setup.py diff --git a/.bin/Scripts/functions/sw_diags.py b/scripts/wk.prev/functions/sw_diags.py similarity index 100% rename from .bin/Scripts/functions/sw_diags.py rename to scripts/wk.prev/functions/sw_diags.py diff --git 
a/.bin/Scripts/functions/threading.py b/scripts/wk.prev/functions/threading.py similarity index 100% rename from .bin/Scripts/functions/threading.py rename to scripts/wk.prev/functions/threading.py diff --git a/.bin/Scripts/functions/tmux.py b/scripts/wk.prev/functions/tmux.py similarity index 100% rename from .bin/Scripts/functions/tmux.py rename to scripts/wk.prev/functions/tmux.py diff --git a/.bin/Scripts/functions/update.py b/scripts/wk.prev/functions/update.py similarity index 100% rename from .bin/Scripts/functions/update.py rename to scripts/wk.prev/functions/update.py diff --git a/.bin/Scripts/functions/windows_setup.py b/scripts/wk.prev/functions/windows_setup.py similarity index 100% rename from .bin/Scripts/functions/windows_setup.py rename to scripts/wk.prev/functions/windows_setup.py diff --git a/.bin/Scripts/functions/windows_updates.py b/scripts/wk.prev/functions/windows_updates.py similarity index 100% rename from .bin/Scripts/functions/windows_updates.py rename to scripts/wk.prev/functions/windows_updates.py diff --git a/.bin/Scripts/functions/winpe_menus.py b/scripts/wk.prev/functions/winpe_menus.py similarity index 100% rename from .bin/Scripts/functions/winpe_menus.py rename to scripts/wk.prev/functions/winpe_menus.py diff --git a/.bin/Scripts/settings/browsers.py b/scripts/wk.prev/settings/browsers.py similarity index 100% rename from .bin/Scripts/settings/browsers.py rename to scripts/wk.prev/settings/browsers.py diff --git a/.bin/Scripts/settings/cleanup.py b/scripts/wk.prev/settings/cleanup.py similarity index 100% rename from .bin/Scripts/settings/cleanup.py rename to scripts/wk.prev/settings/cleanup.py diff --git a/.bin/Scripts/settings/data.py b/scripts/wk.prev/settings/data.py similarity index 100% rename from .bin/Scripts/settings/data.py rename to scripts/wk.prev/settings/data.py diff --git a/.bin/Scripts/settings/ddrescue.py b/scripts/wk.prev/settings/ddrescue.py similarity index 100% rename from .bin/Scripts/settings/ddrescue.py rename to scripts/wk.prev/settings/ddrescue.py diff --git a/.bin/Scripts/settings/hw_diags.py b/scripts/wk.prev/settings/hw_diags.py similarity index 98% rename from .bin/Scripts/settings/hw_diags.py rename to scripts/wk.prev/settings/hw_diags.py index 048f489b..7957fda2 100644 --- a/.bin/Scripts/settings/hw_diags.py +++ b/scripts/wk.prev/settings/hw_diags.py @@ -94,9 +94,10 @@ ATTRIBUTES = { }, } ATTRIBUTE_COLORS = ( + # NOTE: The order here is important; least important to most important. 
+ ('Warning', 'YELLOW'), ('Error', 'RED'), ('Maximum', 'PURPLE'), - ('Warning', 'YELLOW'), ) KEY_NVME = 'nvme_smart_health_information_log' KEY_SMART = 'ata_smart_attributes' diff --git a/.bin/Scripts/settings/info.py b/scripts/wk.prev/settings/info.py similarity index 100% rename from .bin/Scripts/settings/info.py rename to scripts/wk.prev/settings/info.py diff --git a/.bin/Scripts/settings/launchers.py b/scripts/wk.prev/settings/launchers.py similarity index 100% rename from .bin/Scripts/settings/launchers.py rename to scripts/wk.prev/settings/launchers.py diff --git a/.bin/Scripts/settings/main.py b/scripts/wk.prev/settings/main.py similarity index 100% rename from .bin/Scripts/settings/main.py rename to scripts/wk.prev/settings/main.py diff --git a/.bin/Scripts/settings/music.py b/scripts/wk.prev/settings/music.py similarity index 100% rename from .bin/Scripts/settings/music.py rename to scripts/wk.prev/settings/music.py diff --git a/.bin/Scripts/settings/partition_uids.py b/scripts/wk.prev/settings/partition_uids.py similarity index 100% rename from .bin/Scripts/settings/partition_uids.py rename to scripts/wk.prev/settings/partition_uids.py diff --git a/.bin/Scripts/settings/sensors.py b/scripts/wk.prev/settings/sensors.py similarity index 100% rename from .bin/Scripts/settings/sensors.py rename to scripts/wk.prev/settings/sensors.py diff --git a/.bin/Scripts/settings/setup.py b/scripts/wk.prev/settings/setup.py similarity index 100% rename from .bin/Scripts/settings/setup.py rename to scripts/wk.prev/settings/setup.py diff --git a/.bin/Scripts/settings/sources.py b/scripts/wk.prev/settings/sources.py similarity index 98% rename from .bin/Scripts/settings/sources.py rename to scripts/wk.prev/settings/sources.py index 2abf079f..1605d5f2 100644 --- a/.bin/Scripts/settings/sources.py +++ b/scripts/wk.prev/settings/sources.py @@ -12,7 +12,7 @@ SOURCE_URLS = { 'BlueScreenView32': 'http://www.nirsoft.net/utils/bluescreenview.zip', 'BlueScreenView64': 'http://www.nirsoft.net/utils/bluescreenview-x64.zip', 'Caffeine': 'http://www.zhornsoftware.co.uk/caffeine/caffeine.zip', - 'ClassicStartSkin': 'http://www.classicshell.net/forum/download/file.php?id=3001&sid=9a195960d98fd754867dcb63d9315335', + 'ClassicStartSkin': 'https://coddec.github.io/Classic-Shell/www.classicshell.net/forum/download/fileb1ba.php?id=3001', 'Du': 'https://download.sysinternals.com/files/DU.zip', 'ERUNT': 'http://www.aumha.org/downloads/erunt.zip', 'Everything32': 'https://www.voidtools.com/Everything-1.4.1.935.x86.en-US.zip', diff --git a/.bin/Scripts/settings/sw_diags.py b/scripts/wk.prev/settings/sw_diags.py similarity index 100% rename from .bin/Scripts/settings/sw_diags.py rename to scripts/wk.prev/settings/sw_diags.py diff --git a/.bin/Scripts/settings/tools.py b/scripts/wk.prev/settings/tools.py similarity index 100% rename from .bin/Scripts/settings/tools.py rename to scripts/wk.prev/settings/tools.py diff --git a/.bin/Scripts/settings/windows_builds.py b/scripts/wk.prev/settings/windows_builds.py similarity index 100% rename from .bin/Scripts/settings/windows_builds.py rename to scripts/wk.prev/settings/windows_builds.py diff --git a/.bin/Scripts/settings/windows_setup.py b/scripts/wk.prev/settings/windows_setup.py similarity index 100% rename from .bin/Scripts/settings/windows_setup.py rename to scripts/wk.prev/settings/windows_setup.py diff --git a/.bin/Scripts/settings/winpe.py b/scripts/wk.prev/settings/winpe.py similarity index 100% rename from .bin/Scripts/settings/winpe.py rename to 
scripts/wk.prev/settings/winpe.py diff --git a/scripts/wk/__init__.py b/scripts/wk/__init__.py new file mode 100644 index 00000000..b6a11b56 --- /dev/null +++ b/scripts/wk/__init__.py @@ -0,0 +1,36 @@ +"""WizardKit: wk module init""" +# vim: sts=2 sw=2 ts=2 + +from sys import version_info as version + +from wk import cfg +from wk import debug +from wk import exe +from wk import graph +from wk import hw +from wk import io +from wk import kit +from wk import log +from wk import net +from wk import os +from wk import std +from wk import sw +from wk import tmux + + +# Check env +if version < (3, 7): + # Unsupported + raise RuntimeError( + f'This package is unsupported on Python {version.major}.{version.minor}' + ) + +# Init +try: + log.start() +except UserWarning as err: + std.print_warning(err) + + +if __name__ == '__main__': + print("This file is not meant to be called directly.") diff --git a/.bin/Scripts/borrowed/acpi.py b/scripts/wk/borrowed/acpi.py similarity index 100% rename from .bin/Scripts/borrowed/acpi.py rename to scripts/wk/borrowed/acpi.py diff --git a/.bin/Scripts/borrowed/knownpaths-LICENSE.txt b/scripts/wk/borrowed/knownpaths-LICENSE.txt similarity index 100% rename from .bin/Scripts/borrowed/knownpaths-LICENSE.txt rename to scripts/wk/borrowed/knownpaths-LICENSE.txt diff --git a/.bin/Scripts/borrowed/knownpaths.py b/scripts/wk/borrowed/knownpaths.py similarity index 100% rename from .bin/Scripts/borrowed/knownpaths.py rename to scripts/wk/borrowed/knownpaths.py diff --git a/scripts/wk/cfg/__init__.py b/scripts/wk/cfg/__init__.py new file mode 100644 index 00000000..23ca608f --- /dev/null +++ b/scripts/wk/cfg/__init__.py @@ -0,0 +1,8 @@ +"""WizardKit: cfg module init""" + +from wk.cfg import ddrescue +from wk.cfg import hw +from wk.cfg import log +from wk.cfg import main +from wk.cfg import net +from wk.cfg import ufd diff --git a/scripts/wk/cfg/ddrescue.py b/scripts/wk/cfg/ddrescue.py new file mode 100644 index 00000000..c0d9f6dc --- /dev/null +++ b/scripts/wk/cfg/ddrescue.py @@ -0,0 +1,67 @@ +"""WizardKit: Config - ddrescue""" +# pylint: disable=bad-whitespace,line-too-long +# vim: sts=2 sw=2 ts=2 + +from collections import OrderedDict + + +# Layout +TMUX_SIDE_WIDTH = 21 +TMUX_LAYOUT = OrderedDict({ + 'Source': {'height': 2, 'Check': True}, + 'Started': {'width': TMUX_SIDE_WIDTH, 'Check': True}, + 'Progress': {'width': TMUX_SIDE_WIDTH, 'Check': True}, +}) + +# ddrescue +AUTO_PASS_THRESHOLDS = { + # NOTE: The scrape key is set to infinity to force a break + 'read': 95, + 'trim': 98, + 'scrape': float('inf'), + } +DDRESCUE_SETTINGS = { + 'Default': { + '--binary-prefixes': {'Selected': True, 'Hidden': True, }, + '--data-preview': {'Selected': True, 'Value': '5', 'Hidden': True, }, + '--idirect': {'Selected': True, }, + '--odirect': {'Selected': True, }, + '--max-error-rate': {'Selected': True, 'Value': '100MiB', }, + '--max-read-rate': {'Selected': False, 'Value': '1MiB', }, + '--min-read-rate': {'Selected': True, 'Value': '64KiB', }, + '--reopen-on-error': {'Selected': True, }, + '--retry-passes': {'Selected': True, 'Value': '0', }, + '--reverse': {'Selected': False, }, + '--test-mode': {'Selected': False, 'Value': 'test.map', }, + '--timeout': {'Selected': True, 'Value': '30m', }, + '-vvvv': {'Selected': True, 'Hidden': True, }, + }, + 'Fast': { + '--max-error-rate': {'Selected': True, 'Value': '32MiB', }, + '--min-read-rate': {'Selected': True, 'Value': '1MiB', }, + '--reopen-on-error': {'Selected': False, }, + '--timeout': {'Selected': True, 'Value': '5m', }, + }, + 
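  # 'Safe' (below) favors recovery over speed compared to 'Fast': a lower
  # minimum read rate (1KiB), --reopen-on-error enabled, and no timeout selected.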
'Safe': { + '--max-read-rate': {'Selected': True, 'Value': '64MiB', }, + '--min-read-rate': {'Selected': True, 'Value': '1KiB', }, + '--reopen-on-error': {'Selected': True, }, + '--timeout': {'Selected': False, 'Value': '30m', }, + }, + } +PARTITION_TYPES = { + 'GPT': { + 'NTFS': 'EBD0A0A2-B9E5-4433-87C0-68B6B72699C7', # Basic Data Partition + 'VFAT': 'EBD0A0A2-B9E5-4433-87C0-68B6B72699C7', # Basic Data Partition + 'EXFAT': 'EBD0A0A2-B9E5-4433-87C0-68B6B72699C7', # Basic Data Partition + }, + 'MBR': { + 'EXFAT': 7, # 0x7 + 'NTFS': 7, # 0x7 + 'VFAT': 11, # 0xb + }, + } + + +if __name__ == '__main__': + print("This file is not meant to be called directly.") diff --git a/scripts/wk/cfg/hw.py b/scripts/wk/cfg/hw.py new file mode 100644 index 00000000..59cd7248 --- /dev/null +++ b/scripts/wk/cfg/hw.py @@ -0,0 +1,140 @@ +"""WizardKit: Config - Hardware""" +# pylint: disable=bad-whitespace,line-too-long +# vim: sts=2 sw=2 ts=2 + +import re + +from collections import OrderedDict + + +# STATIC VARIABLES +ATTRIBUTE_COLORS = ( + # NOTE: Ordered by ascending importance + ('Warning', 'YELLOW'), + ('Error', 'RED'), + ('Maximum', 'PURPLE'), + ) +# NOTE: Force 4K read block size for disks >= 3TB +BADBLOCKS_LARGE_DISK = 3 * 1024**4 +CPU_CRITICAL_TEMP = 99 +CPU_FAILURE_TEMP = 90 +CPU_TEST_MINUTES = 7 +KEY_NVME = 'nvme_smart_health_information_log' +KEY_SMART = 'ata_smart_attributes' +KNOWN_DISK_ATTRIBUTES = { + # NVMe + 'critical_warning': {'Blocking': True, 'Warning': None, 'Error': 1, 'Maximum': None, }, + 'media_errors': {'Blocking': False, 'Warning': None, 'Error': 1, 'Maximum': None, }, + 'power_on_hours': {'Blocking': False, 'Warning': 17532, 'Error': 26298, 'Maximum': 100000,}, + 'unsafe_shutdowns': {'Blocking': False, 'Warning': 1, 'Error': None, 'Maximum': None, }, + # SMART + 5: {'Hex': '05', 'Blocking': True, 'Warning': None, 'Error': 1, 'Maximum': None, }, + 9: {'Hex': '09', 'Blocking': False, 'Warning': 17532, 'Error': 26298, 'Maximum': 100000,}, + 10: {'Hex': '10', 'Blocking': False, 'Warning': 1, 'Error': 10, 'Maximum': 10000, }, + 184: {'Hex': 'B8', 'Blocking': False, 'Warning': 1, 'Error': 10, 'Maximum': 10000, }, + 187: {'Hex': 'BB', 'Blocking': False, 'Warning': 1, 'Error': 10, 'Maximum': 10000, }, + 188: {'Hex': 'BC', 'Blocking': False, 'Warning': 1, 'Error': 10, 'Maximum': 10000, }, + 196: {'Hex': 'C4', 'Blocking': False, 'Warning': 1, 'Error': 10, 'Maximum': 10000, }, + 197: {'Hex': 'C5', 'Blocking': True, 'Warning': None, 'Error': 1, 'Maximum': None, }, + 198: {'Hex': 'C6', 'Blocking': True, 'Warning': None, 'Error': 1, 'Maximum': None, }, + 199: {'Hex': 'C7', 'Blocking': False, 'Warning': None, 'Error': 1, 'Maximum': None, }, + 201: {'Hex': 'C9', 'Blocking': False, 'Warning': None, 'Error': 1, 'Maximum': 10000, }, + } +KNOWN_DISK_MODELS = { + # model_regex: model_attributes + r'CT(250|500|1000|2000)MX500SSD(1|4)': { + 197: {'Warning': 1, 'Error': 2, 'Note': '(MX500 thresholds)',}, + }, + } +KNOWN_RAM_VENDOR_IDS = { + # https://github.com/hewigovens/hewigovens.github.com/wiki/Memory-vendor-code + '0x014F': 'Transcend', + '0x2C00': 'Micron', + '0x802C': 'Micron', + '0x80AD': 'Hynix', + '0x80CE': 'Samsung', + '0xAD00': 'Hynix', + '0xCE00': 'Samsung', + } +REGEX_POWER_ON_TIME = re.compile( + r'^(\d+)([Hh].*|\s+\(\d+\s+\d+\s+\d+\).*)' + ) +SMC_IDS = { + # Sources: https://github.com/beltex/SMCKit/blob/master/SMCKit/SMC.swift + # http://www.opensource.apple.com/source/net_snmp/ + # https://github.com/jedda/OSX-Monitoring-Tools + 'TA0P': {'CPU Temp': False, 'Source': 'Ambient temp'}, + 
'TA0S': {'CPU Temp': False, 'Source': 'PCIE Slot 1 Ambient'}, + 'TA1P': {'CPU Temp': False, 'Source': 'Ambient temp'}, + 'TA1S': {'CPU Temp': False, 'Source': 'PCIE Slot 1 PCB'}, + 'TA2S': {'CPU Temp': False, 'Source': 'PCIE Slot 2 Ambient'}, + 'TA3S': {'CPU Temp': False, 'Source': 'PCIE Slot 2 PCB'}, + 'TC0C': {'CPU Temp': True, 'Source': 'CPU Core 0'}, + 'TC0D': {'CPU Temp': True, 'Source': 'CPU die temp'}, + 'TC0H': {'CPU Temp': True, 'Source': 'CPU heatsink temp'}, + 'TC0P': {'CPU Temp': True, 'Source': 'CPU Ambient 1'}, + 'TC1C': {'CPU Temp': True, 'Source': 'CPU Core 1'}, + 'TC1P': {'CPU Temp': True, 'Source': 'CPU Ambient 2'}, + 'TC2C': {'CPU Temp': True, 'Source': 'CPU B Core 0'}, + 'TC2P': {'CPU Temp': True, 'Source': 'CPU B Ambient 1'}, + 'TC3C': {'CPU Temp': True, 'Source': 'CPU B Core 1'}, + 'TC3P': {'CPU Temp': True, 'Source': 'CPU B Ambient 2'}, + 'TCAC': {'CPU Temp': True, 'Source': 'CPU core from PCECI'}, + 'TCAH': {'CPU Temp': True, 'Source': 'CPU HeatSink'}, + 'TCBC': {'CPU Temp': True, 'Source': 'CPU B core from PCECI'}, + 'TCBH': {'CPU Temp': True, 'Source': 'CPU HeatSink'}, + 'Te1P': {'CPU Temp': False, 'Source': 'PCIE ambient temp'}, + 'Te1S': {'CPU Temp': False, 'Source': 'PCIE slot 1'}, + 'Te2S': {'CPU Temp': False, 'Source': 'PCIE slot 2'}, + 'Te3S': {'CPU Temp': False, 'Source': 'PCIE slot 3'}, + 'Te4S': {'CPU Temp': False, 'Source': 'PCIE slot 4'}, + 'TG0C': {'CPU Temp': False, 'Source': 'Mezzanine GPU Core'}, + 'TG0P': {'CPU Temp': False, 'Source': 'Mezzanine GPU Exhaust'}, + 'TH0P': {'CPU Temp': False, 'Source': 'Drive Bay 0'}, + 'TH1P': {'CPU Temp': False, 'Source': 'Drive Bay 1'}, + 'TH2P': {'CPU Temp': False, 'Source': 'Drive Bay 2'}, + 'TH3P': {'CPU Temp': False, 'Source': 'Drive Bay 3'}, + 'TH4P': {'CPU Temp': False, 'Source': 'Drive Bay 4'}, + 'TM0P': {'CPU Temp': False, 'Source': 'CPU DIMM Exit Ambient'}, + 'Tp0C': {'CPU Temp': False, 'Source': 'PSU1 Inlet Ambient'}, + 'Tp0P': {'CPU Temp': False, 'Source': 'PSU1 Inlet Ambient'}, + 'Tp1C': {'CPU Temp': False, 'Source': 'PSU1 Secondary Component'}, + 'Tp1P': {'CPU Temp': False, 'Source': 'PSU1 Primary Component'}, + 'Tp2P': {'CPU Temp': False, 'Source': 'PSU1 Secondary Component'}, + 'Tp3P': {'CPU Temp': False, 'Source': 'PSU2 Inlet Ambient'}, + 'Tp4P': {'CPU Temp': False, 'Source': 'PSU2 Primary Component'}, + 'Tp5P': {'CPU Temp': False, 'Source': 'PSU2 Secondary Component'}, + 'TS0C': {'CPU Temp': False, 'Source': 'CPU B DIMM Exit Ambient'}, + } +TEMP_COLORS = { + float('-inf'): 'CYAN', + 00: 'BLUE', + 60: 'GREEN', + 70: 'YELLOW', + 80: 'ORANGE', + 90: 'RED', + 100: 'ORANGE_RED', + } +# THRESHOLDS: Rates used to determine HDD/SSD pass/fail +THRESH_HDD_MIN = 50 * 1024**2 +THRESH_HDD_AVG_HIGH = 75 * 1024**2 +THRESH_HDD_AVG_LOW = 65 * 1024**2 +THRESH_SSD_MIN = 90 * 1024**2 +THRESH_SSD_AVG_HIGH = 135 * 1024**2 +THRESH_SSD_AVG_LOW = 100 * 1024**2 +TMUX_SIDE_WIDTH = 20 +TMUX_LAYOUT = OrderedDict({ + 'Top': {'height': 2, 'Check': True}, + 'Started': {'width': TMUX_SIDE_WIDTH, 'Check': True}, + 'Progress': {'width': TMUX_SIDE_WIDTH, 'Check': True}, + # Testing panes + 'Temps': {'height': 1000, 'Check': False}, + 'Prime95': {'height': 11, 'Check': False}, + 'SMART': {'height': 3, 'Check': True}, + 'badblocks': {'height': 5, 'Check': True}, + 'I/O Benchmark': {'height': 1000, 'Check': False}, + }) + + +if __name__ == '__main__': + print("This file is not meant to be called directly.") diff --git a/scripts/wk/cfg/log.py b/scripts/wk/cfg/log.py new file mode 100644 index 00000000..f724b011 --- /dev/null +++ 
b/scripts/wk/cfg/log.py @@ -0,0 +1,18 @@ +"""WizardKit: Config - Log""" +# vim: sts=2 sw=2 ts=2 + + +DEBUG = { + 'level': 'DEBUG', + 'format': '[%(asctime)s %(levelname)s] [%(name)s.%(funcName)s] %(message)s', + 'datefmt': '%Y-%m-%d %H%M%S%z', + } +DEFAULT = { + 'level': 'INFO', + 'format': '[%(asctime)s %(levelname)s] %(message)s', + 'datefmt': '%Y-%m-%d %H%M%z', + } + + +if __name__ == '__main__': + print("This file is not meant to be called directly.") diff --git a/scripts/wk/cfg/main.py b/scripts/wk/cfg/main.py new file mode 100644 index 00000000..99a3b2a1 --- /dev/null +++ b/scripts/wk/cfg/main.py @@ -0,0 +1,36 @@ +"""WizardKit: Config - Main + +NOTE: Non-standard formating is used for BASH/BATCH/PYTHON compatibility +""" +# pylint: disable=bad-whitespace +# vim: sts=2 sw=2 ts=2 + + +# Features +ENABLED_OPEN_LOGS=False +ENABLED_TICKET_NUMBERS=False +ENABLED_UPLOAD_DATA=False + +# Main Kit +ARCHIVE_PASSWORD='Abracadabra' +KIT_NAME_FULL='WizardKit' +KIT_NAME_SHORT='WK' +SUPPORT_MESSAGE='Please let 2Shirt know by opening an issue on GitHub' + +# Text Formatting +INDENT=4 +WIDTH=32 + +# Live Linux +ROOT_PASSWORD='Abracadabra' +TECH_PASSWORD='Abracadabra' + +# Time Zones +## See 'timedatectl list-timezones' for valid Linux values +## See 'tzutil /l' for valid Windows values +LINUX_TIME_ZONE='America/Denver' +WINDOWS_TIME_ZONE='Mountain Standard Time' + + +if __name__ == '__main__': + print("This file is not meant to be called directly.") diff --git a/scripts/wk/cfg/net.py b/scripts/wk/cfg/net.py new file mode 100644 index 00000000..f530bb6e --- /dev/null +++ b/scripts/wk/cfg/net.py @@ -0,0 +1,35 @@ +"""WizardKit: Config - Net""" +# pylint: disable=bad-whitespace +# vim: sts=2 sw=2 ts=2 + + +# Servers +BACKUP_SERVERS = { + #'Server One': { + # 'Address': '10.0.0.10', + # 'Share': 'Backups', + # 'RO-User': 'restore', + # 'RO-Pass': 'Abracadabra', + # 'RW-User': 'backup', + # 'RW-Pass': 'Abracadabra', + # }, + #'Server Two': { + # 'Address': 'servertwo.example.com', + # 'Share': 'Backups', + # 'RO-User': 'restore', + # 'RO-Pass': 'Abracadabra', + # 'RW-User': 'backup', + # 'RW-Pass': 'Abracadabra', + # }, + } +CRASH_SERVER = { + #'Name': 'CrashServer', + #'Url': '', + #'User': '', + #'Pass': '', + #'Headers': {'X-Requested-With': 'XMLHttpRequest'}, + } + + +if __name__ == '__main__': + print("This file is not meant to be called directly.") diff --git a/.bin/Scripts/settings/ufd.py b/scripts/wk/cfg/ufd.py similarity index 61% rename from .bin/Scripts/settings/ufd.py rename to scripts/wk/cfg/ufd.py index b157e392..a22f0db7 100644 --- a/.bin/Scripts/settings/ufd.py +++ b/scripts/wk/cfg/ufd.py @@ -1,40 +1,15 @@ -'''Wizard Kit: Settings - UFD''' -# pylint: disable=C0326,E0611 +"""WizardKit: Config - UFD""" +# pylint: disable=bad-whitespace # vim: sts=2 sw=2 ts=2 from collections import OrderedDict -from settings.main import KIT_NAME_FULL,KIT_NAME_SHORT + +from wk.cfg.main import KIT_NAME_FULL + # General -DOCSTRING = '''WizardKit: Build UFD - -Usage: - build-ufd [options] --ufd-device PATH --linux PATH - [--linux-minimal PATH] - [--main-kit PATH] - [--winpe PATH] - [--extra-dir PATH] - build-ufd (-h | --help) - -Options: - -d PATH, --linux-dgpu PATH - -e PATH, --extra-dir PATH - -k PATH, --main-kit PATH - -l PATH, --linux PATH - -m PATH, --linux-minimal PATH - -u PATH, --ufd-device PATH - -w PATH, --winpe PATH - - -h --help Show this page - -M --use-mbr Use real MBR instead of GPT w/ Protective MBR - -F --force Bypass all confirmation messages. USE WITH EXTREME CAUTION! 
- -U --update Don't format device, just update -''' -ISO_LABEL = '{}_LINUX'.format(KIT_NAME_SHORT) -UFD_LABEL = '{}_UFD'.format(KIT_NAME_SHORT) -UFD_SOURCES = OrderedDict({ +SOURCES = OrderedDict({ 'Linux': {'Arg': '--linux', 'Type': 'ISO'}, - 'Linux (dGPU)': {'Arg': '--linux-dgpu', 'Type': 'ISO'}, 'Linux (Minimal)': {'Arg': '--linux-minimal', 'Type': 'ISO'}, 'WinPE': {'Arg': '--winpe', 'Type': 'ISO'}, 'Main Kit': {'Arg': '--main-kit', 'Type': 'KIT'}, @@ -45,7 +20,6 @@ UFD_SOURCES = OrderedDict({ BOOT_ENTRIES = { # Path to check: Comment to remove '/arch_minimal': 'UFD-MINIMAL', - '/dgpu': 'UFD-DGPU', '/sources/boot.wim': 'UFD-WINPE', } BOOT_FILES = { @@ -67,12 +41,6 @@ ITEMS = { ('/EFI/boot', '/EFI/'), ('/EFI/memtest86', '/EFI/'), ), - 'Linux (dGPU)': ( - ('/arch/boot/x86_64/archiso.img', '/dgpu/'), - ('/arch/boot/x86_64/vmlinuz', '/dgpu/'), - ('/arch/pkglist.x86_64.txt', '/dgpu/'), - ('/arch/x86_64', '/dgpu/'), - ), 'Linux (Minimal)': ( ('/arch/boot/x86_64/archiso.img', '/arch_minimal/'), ('/arch/boot/x86_64/vmlinuz', '/arch_minimal/'), @@ -80,7 +48,7 @@ ITEMS = { ('/arch/x86_64', '/arch_minimal/'), ), 'Main Kit': ( - ('/', '/{}/'.format(KIT_NAME_FULL)), + ('/', f'/{KIT_NAME_FULL}/'), ), 'WinPE': ( ('/bootmgr', '/'), @@ -99,12 +67,11 @@ ITEMS_HIDDEN = ( # Linux (all versions) 'arch', 'arch_minimal', - 'dgpu', 'EFI', 'isolinux', # Main Kit - '{}/.bin'.format(KIT_NAME_FULL), - '{}/.cbin'.format(KIT_NAME_FULL), + f'{KIT_NAME_FULL}/.bin', + f'{KIT_NAME_FULL}/.cbin', # WinPE 'boot', 'bootmgr', @@ -114,5 +81,6 @@ ITEMS_HIDDEN = ( 'sources', ) + if __name__ == '__main__': print("This file is not meant to be called directly.") diff --git a/scripts/wk/debug.py b/scripts/wk/debug.py new file mode 100644 index 00000000..437ab0f8 --- /dev/null +++ b/scripts/wk/debug.py @@ -0,0 +1,45 @@ +"""WizardKit: Debug Functions""" +# pylint: disable=invalid-name +# vim: sts=2 sw=2 ts=2 + + +# Classes +class Debug(): + # pylint: disable=too-few-public-methods + """Object used when dumping debug data.""" + def method(self): + """Dummy method used to identify functions vs data.""" + + +# STATIC VARIABLES +DEBUG_CLASS = Debug() +METHOD_TYPE = type(DEBUG_CLASS.method) + + +# Functions +def generate_object_report(obj, indent=0): + """Generate debug report for obj, returns list.""" + report = [] + + # Dump object data + for name in dir(obj): + attr = getattr(obj, name) + + # Skip methods and private attributes + if isinstance(attr, METHOD_TYPE) or name.startswith('_'): + continue + + # Add attribute to report (expanded if necessary) + if isinstance(attr, dict): + report.append(f'{name}:') + for key, value in sorted(attr.items()): + report.append(f'{" "*(indent+1)}{key}: {str(value)}') + else: + report.append(f'{" "*indent}{name}: {str(attr)}') + + # Done + return report + + +if __name__ == '__main__': + print("This file is not meant to be called directly.") diff --git a/scripts/wk/exe.py b/scripts/wk/exe.py new file mode 100644 index 00000000..fe2f1b0d --- /dev/null +++ b/scripts/wk/exe.py @@ -0,0 +1,222 @@ +"""WizardKit: Execution functions""" +#vim: sts=2 sw=2 ts=2 + +import json +import logging +import re +import subprocess + +from threading import Thread +from queue import Queue, Empty + +import psutil + + +# STATIC VARIABLES +LOG = logging.getLogger(__name__) + + +# Classes +class NonBlockingStreamReader(): + """Class to allow non-blocking reads from a stream.""" + # pylint: disable=too-few-public-methods + # Credits: + ## https://gist.github.com/EyalAr/7915597 + ## https://stackoverflow.com/a/4896288 + + def 
__init__(self, stream):
+    self.stream = stream
+    self.queue = Queue()
+
+    def populate_queue(stream, queue):
+      """Collect lines from stream and put them in queue."""
+      while True:
+        line = stream.read(1)
+        if line:
+          queue.put(line)
+
+    self.thread = start_thread(
+      populate_queue,
+      args=(self.stream, self.queue),
+      )
+
+  def read(self, timeout=None):
+    """Read from queue if possible, returns item from queue."""
+    try:
+      return self.queue.get(block=timeout is not None, timeout=timeout)
+    except Empty:
+      return None
+
+  def save_to_file(self, proc, out_path):
+    """Continuously save output to file while proc is running."""
+    while proc.poll() is None:
+      out = b''
+      out_bytes = b''
+      while out is not None:
+        out = self.read(0.1)
+        if out:
+          out_bytes += out
+      with open(out_path, 'a') as _f:
+        _f.write(out_bytes.decode('utf-8', errors='ignore'))
+
+
+# Functions
+def build_cmd_kwargs(cmd, minimized=False, pipe=True, shell=False, **kwargs):
+  """Build kwargs for use by subprocess functions, returns dict.
+
+  Specifically subprocess.run() and subprocess.Popen().
+  NOTE: If no encoding specified then UTF-8 will be used.
+  """
+  LOG.debug(
+    'cmd: %s, minimized: %s, pipe: %s, shell: %s',
+    cmd, minimized, pipe, shell,
+    )
+  LOG.debug('kwargs: %s', kwargs)
+  cmd_kwargs = {
+    'args': cmd,
+    'shell': shell,
+    }
+
+  # Add additional kwargs if applicable
+  for key in 'check cwd encoding errors stderr stdin stdout'.split():
+    if key in kwargs:
+      cmd_kwargs[key] = kwargs[key]
+
+  # Default to UTF-8 encoding
+  if not ('encoding' in cmd_kwargs or 'errors' in cmd_kwargs):
+    cmd_kwargs['encoding'] = 'utf-8'
+    cmd_kwargs['errors'] = 'ignore'
+
+  # Start minimized
+  if minimized:
+    startupinfo = subprocess.STARTUPINFO()
+    startupinfo.dwFlags = subprocess.STARTF_USESHOWWINDOW
+    startupinfo.wShowWindow = 6
+    cmd_kwargs['startupinfo'] = startupinfo
+
+
+  # Pipe output
+  if pipe:
+    cmd_kwargs['stderr'] = subprocess.PIPE
+    cmd_kwargs['stdout'] = subprocess.PIPE
+
+  # Done
+  LOG.debug('cmd_kwargs: %s', cmd_kwargs)
+  return cmd_kwargs
+
+
+def get_json_from_command(cmd, check=True, encoding='utf-8', errors='ignore'):
+  """Capture JSON content from cmd output, returns dict.
+
+  If the data can't be decoded then either an exception is raised
+  or an empty dict is returned depending on errors.
+  """
+  json_data = {}
+
+  try:
+    proc = run_program(cmd, check=check, encoding=encoding, errors=errors)
+    json_data = json.loads(proc.stdout)
+  except (subprocess.CalledProcessError, json.decoder.JSONDecodeError):
+    if errors != 'ignore':
+      raise
+
+  return json_data
+
+
+def get_procs(name, exact=True):
+  """Get process object(s) based on name, returns list of proc objects."""
+  LOG.debug('name: %s, exact: %s', name, exact)
+  processes = []
+  regex = f'^{name}$' if exact else name
+
+  # Iterate over all processes
+  for proc in psutil.process_iter():
+    if re.search(regex, proc.name(), re.IGNORECASE):
+      processes.append(proc)
+
+  # Done
+  return processes
+
+
+def kill_procs(name, exact=True, force=False, timeout=30):
+  """Kill all processes matching name (case-insensitively).
+
+  NOTE: Under Posix systems this will send SIGTERM to allow processes
+  to gracefully exit.
+
+  If force is True then it will wait until the timeout specified and then
+  send SIGKILL to any processes still alive.
+  """
+  LOG.debug(
+    'name: %s, exact: %s, force: %s, timeout: %s',
+    name, exact, force, timeout,
+    )
+  target_procs = get_procs(name, exact=exact)
+  for proc in target_procs:
+    proc.terminate()
+
+  # Force kill if necessary
+  if force:
+    results = psutil.wait_procs(target_procs, timeout=timeout)
+    for proc in results[1]:  # Alive processes
+      proc.kill()
+
+
+def popen_program(cmd, minimized=False, pipe=False, shell=False, **kwargs):
+  """Run program and return a subprocess.Popen object."""
+  LOG.debug(
+    'cmd: %s, minimized: %s, pipe: %s, shell: %s',
+    cmd, minimized, pipe, shell,
+    )
+  LOG.debug('kwargs: %s', kwargs)
+  cmd_kwargs = build_cmd_kwargs(
+    cmd,
+    minimized=minimized,
+    pipe=pipe,
+    shell=shell,
+    **kwargs)
+
+  # Ready to run program
+  return subprocess.Popen(**cmd_kwargs)
+
+
+def run_program(cmd, check=True, pipe=True, shell=False, **kwargs):
+  # pylint: disable=subprocess-run-check
+  """Run program and return a subprocess.CompletedProcess object."""
+  LOG.debug(
+    'cmd: %s, check: %s, pipe: %s, shell: %s',
+    cmd, check, pipe, shell,
+    )
+  LOG.debug('kwargs: %s', kwargs)
+  cmd_kwargs = build_cmd_kwargs(
+    cmd,
+    check=check,
+    pipe=pipe,
+    shell=shell,
+    **kwargs)
+
+  # Ready to run program
+  return subprocess.run(**cmd_kwargs)
+
+
+def start_thread(function, args=None, daemon=True):
+  """Run function as thread in background, returns Thread object."""
+  args = args if args else []
+  thread = Thread(target=function, args=args, daemon=daemon)
+  thread.start()
+  return thread
+
+
+def wait_for_procs(name, exact=True, timeout=None):
+  """Wait for all processes matching name."""
+  LOG.debug('name: %s, exact: %s, timeout: %s', name, exact, timeout)
+  target_procs = get_procs(name, exact=exact)
+  results = psutil.wait_procs(target_procs, timeout=timeout)
+
+  # Raise exception if necessary
+  if results[1]:  # Alive processes
+    raise psutil.TimeoutExpired(name=name, seconds=timeout)
+
+
+if __name__ == '__main__':
+  print("This file is not meant to be called directly.")
diff --git a/scripts/wk/graph.py b/scripts/wk/graph.py
new file mode 100644
index 00000000..1bcb9c27
--- /dev/null
+++ b/scripts/wk/graph.py
@@ -0,0 +1,151 @@
+"""WizardKit: Graph Functions"""
+# pylint: disable=bad-whitespace
+# vim: sts=2 sw=2 ts=2
+
+import logging
+
+from wk.std import color_string
+
+
+# STATIC VARIABLES
+LOG = logging.getLogger(__name__)
+GRAPH_HORIZONTAL = ('▁', '▂', '▃', '▄', '▅', '▆', '▇', '█')
+GRAPH_VERTICAL = (
+  '▏', '▎', '▍', '▌',
+  '▋', '▊', '▉', '█',
+  '█▏', '█▎', '█▍', '█▌',
+  '█▋', '█▊', '█▉', '██',
+  '██▏', '██▎', '██▍', '██▌',
+  '██▋', '██▊', '██▉', '███',
+  '███▏', '███▎', '███▍', '███▌',
+  '███▋', '███▊', '███▉', '████',
+  )
+# SCALE_STEPS: These scales allow showing differences between HDDs and SSDs
+# on the same graph.
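+# Illustrative math (rough, hand-computed values): for the 8-step scale each
+# step x begins at 2**(0.56*(x+1)) + 16*(x+1) MiB/s, i.e. step 0 starts near
+# 17.5 MiB/s and step 7 near 150 MiB/s, so slow HDD rates and fast SSD rates
+# still land on visibly different bar heights.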
+SCALE_STEPS = { + 8: [2**(0.56*(x+1))+(16*(x+1)) for x in range(8)], + 16: [2**(0.56*(x+1))+(16*(x+1)) for x in range(16)], + 32: [2**(0.56*(x+1)/2)+(16*(x+1)/2) for x in range(32)], + } +# THRESHOLDS: These are the rate_list (in MB/s) used to color graphs +THRESH_FAIL = 65 * 1024**2 +THRESH_WARN = 135 * 1024**2 +THRESH_GREAT = 750 * 1024**2 + + +# Functions +def generate_horizontal_graph(rate_list, graph_width=40, oneline=False): + """Generate horizontal graph from rate_list, returns list.""" + graph = ['', '', '', ''] + scale = 8 if oneline else 32 + + # Build graph + for rate in merge_rates(rate_list, graph_width=graph_width): + step = get_graph_step(rate, scale=scale) + + # Set color + rate_color = None + if rate < THRESH_FAIL: + rate_color = 'RED' + elif rate < THRESH_WARN: + rate_color = 'YELLOW' + elif rate > THRESH_GREAT: + rate_color = 'GREEN' + + # Build graph + full_block = color_string((GRAPH_HORIZONTAL[-1],), (rate_color,)) + if step >= 24: + graph[0] += color_string((GRAPH_HORIZONTAL[step-24],), (rate_color,)) + graph[1] += full_block + graph[2] += full_block + graph[3] += full_block + elif step >= 16: + graph[0] += ' ' + graph[1] += color_string((GRAPH_HORIZONTAL[step-16],), (rate_color,)) + graph[2] += full_block + graph[3] += full_block + elif step >= 8: + graph[0] += ' ' + graph[1] += ' ' + graph[2] += color_string((GRAPH_HORIZONTAL[step-8],), (rate_color,)) + graph[3] += full_block + else: + graph[0] += ' ' + graph[1] += ' ' + graph[2] += ' ' + graph[3] += color_string((GRAPH_HORIZONTAL[step],), (rate_color,)) + + # Done + if oneline: + graph = graph[-1:] + return graph + + +def get_graph_step(rate, scale=16): + """Get graph step based on rate and scale, returns int.""" + rate_in_mb = rate / (1024**2) + step = 0 + + # Iterate over scale_steps backwards + for _r in range(scale-1, -1, -1): + if rate_in_mb >= SCALE_STEPS[scale][_r]: + step = _r + break + + # Done + return step + + +def merge_rates(rates, graph_width=40): + """Merge rates to have entries equal to the width, returns list.""" + merged_rates = [] + offset = 0 + slice_width = int(len(rates) / graph_width) + + # Merge rates + for _i in range(graph_width): + merged_rates.append(sum(rates[offset:offset+slice_width])/slice_width) + offset += slice_width + + # Done + return merged_rates + + +def vertical_graph_line(percent, rate, scale=32): + """Build colored graph string using thresholds, returns str.""" + color_bar = None + color_rate = None + step = get_graph_step(rate, scale=scale) + + # Set colors + if rate < THRESH_FAIL: + color_bar = 'RED' + color_rate = 'YELLOW' + elif rate < THRESH_WARN: + color_bar = 'YELLOW' + color_rate = 'YELLOW' + elif rate > THRESH_GREAT: + color_bar = 'GREEN' + color_rate = 'GREEN' + + # Build string + line = color_string( + strings=( + f'{percent:5.1f}%', + f'{GRAPH_VERTICAL[step]:<4}', + f'{rate/(1000**2):6.1f} MB/s', + ), + colors=( + None, + color_bar, + color_rate, + ), + sep=' ', + ) + + # Done + return line + + +if __name__ == '__main__': + print("This file is not meant to be called directly.") diff --git a/scripts/wk/hw/__init__.py b/scripts/wk/hw/__init__.py new file mode 100644 index 00000000..ad2b04cb --- /dev/null +++ b/scripts/wk/hw/__init__.py @@ -0,0 +1,6 @@ +"""WizardKit: hw module init""" + +from wk.hw import ddrescue +from wk.hw import diags +from wk.hw import obj +from wk.hw import sensors diff --git a/scripts/wk/hw/ddrescue.py b/scripts/wk/hw/ddrescue.py new file mode 100644 index 00000000..abd63955 --- /dev/null +++ b/scripts/wk/hw/ddrescue.py @@ -0,0 +1,2061 @@ 
+"""WizardKit: ddrescue TUI"""
+# pylint: disable=too-many-lines
+# vim: sts=2 sw=2 ts=2
+
+import atexit
+import datetime
+import json
+import logging
+import math
+import os
+import pathlib
+import plistlib
+import re
+import shutil
+import subprocess
+import time
+
+from collections import OrderedDict
+from docopt import docopt
+
+import psutil
+import pytz
+
+from wk import cfg, debug, exe, io, log, net, std, tmux
+from wk.hw import obj as hw_obj
+
+
+# STATIC VARIABLES
+DOCSTRING = f'''{cfg.main.KIT_NAME_FULL}: ddrescue TUI
+
+Usage:
+  ddrescue-tui
+  ddrescue-tui [options] (clone|image) [<source> [<destination>]]
+  ddrescue-tui (-h | --help)
+
+Options:
+  -h --help           Show this page
+  -s --dry-run        Print commands to be used instead of running them
+  --force-local-map   Skip mounting shares and save map to local drive
+  --start-fresh       Ignore previous runs and start new recovery
+'''
+CLONE_SETTINGS = {
+  'Source': None,
+  'Destination': None,
+  'Create Boot Partition': False,
+  'First Run': True,
+  'Needs Format': False,
+  'Table Type': None,
+  'Partition Mapping': [
+    # (5, 1) ## Clone source partition #5 to destination partition #1
+    ],
+  }
+DDRESCUE_LOG_REGEX = re.compile(
+  r'^\s*(?P<key>\S+):\s+'
+  r'(?P<size>\d+)\s+'
+  r'(?P<unit>[PTGMKB]i?B?)'
+  r'.*\(\s*(?P<percent>\d+\.?\d*)%\)$',
+  re.IGNORECASE,
+  )
+REGEX_REMAINING_TIME = re.compile(
+  r'remaining time:'
+  r'\s*((?P<days>\d+)d)?'
+  r'\s*((?P<hours>\d+)h)?'
+  r'\s*((?P<minutes>\d+)m)?'
+  r'\s*((?P<seconds>\d+)s)?'
+  r'\s*(?P<na>n/a)?',
+  re.IGNORECASE
+  )
+LOG = logging.getLogger(__name__)
+MENU_ACTIONS = (
+  'Start',
+  f'Change settings {std.color_string("(experts only)", "YELLOW")}',
+  'Quit')
+MENU_TOGGLES = {
+  'Auto continue (if recovery % over threshold)': True,
+  'Retry (mark non-rescued sectors "non-tried")': False,
+  }
+PANE_RATIOS = (
+  12,  # SMART
+  22,  # ddrescue progress
+  4,   # Journal (kernel messages)
+  )
+PLATFORM = std.PLATFORM
+RECOMMENDED_FSTYPES = re.compile(r'^(ext[234]|ntfs|xfs)$')
+RECOMMENDED_MAP_FSTYPES = re.compile(r'^(cifs|ext[234]|ntfs|vfat|xfs)$')
+SETTING_PRESETS = (
+  'Default',
+  'Fast',
+  'Safe',
+  )
+STATUS_COLORS = {
+  'Passed': 'GREEN',
+  'Aborted': 'YELLOW',
+  'Skipped': 'YELLOW',
+  'Working': 'YELLOW',
+  'ERROR': 'RED',
+  }
+TIMEZONE = pytz.timezone(cfg.main.LINUX_TIME_ZONE)
+
+
+# Classes
+class BlockPair():
+  """Object for tracking source to dest recovery data."""
+  def __init__(self, source, destination, model, working_dir):
+    """Initialize BlockPair()
+
+    NOTE: source should be a wk.hw.obj.Disk() object
+    and destination should be a pathlib.Path() object.
+    """
+    self.source = source.path
+    self.destination = destination
+    self.map_data = {}
+    self.map_path = None
+    self.size = source.details['size']
+    self.status = OrderedDict({
+      'read': 'Pending',
+      'trim': 'Pending',
+      'scrape': 'Pending',
+      })
+
+    # Set map file
+    # e.g.
'(Clone|Image)_Model[_p#]_Size[_Label].map' + map_name = model if model else 'None' + if source.details['bus'] == 'Image': + map_name = 'Image' + if source.details['parent']: + part_num = re.sub(r"^.*?(\d+)$", r"\1", source.path.name) + map_name += f'_p{part_num}' + size_str = std.bytes_to_string( + size=source.details["size"], + use_binary=False, + ) + map_name += f'_{size_str.replace(" ", "")}' + if source.details.get('label', ''): + map_name += f'_{source.details["label"]}' + map_name = map_name.replace(' ', '_') + map_name = map_name.replace('/', '_') + if destination.is_dir(): + # Imaging + self.map_path = pathlib.Path(f'{destination}/Image_{map_name}.map') + self.destination = self.map_path.with_suffix('.dd') + self.destination.touch() + else: + # Cloning + self.map_path = pathlib.Path(f'{working_dir}/Clone_{map_name}.map') + self.map_path.touch() + + # Set initial status + self.set_initial_status() + + def get_percent_recovered(self): + """Get percent rescued from map_data, returns float.""" + return 100 * self.map_data.get('rescued', 0) / self.size + + def get_rescued_size(self): + """Get rescued size using map data. + + NOTE: Returns 0 if no map data is available. + """ + self.load_map_data() + return self.map_data.get('rescued', 0) + + def load_map_data(self): + """Load map data from file. + + NOTE: If the file is missing it is assumed that recovery hasn't + started yet so default values will be returned instead. + """ + data = {'full recovery': False, 'pass completed': False} + + # Get output from ddrescuelog + cmd = [ + 'ddrescuelog', + '--binary-prefixes', + '--show-status', + self.map_path, + ] + proc = exe.run_program(cmd, check=False) + + # Parse output + for line in proc.stdout.splitlines(): + _r = DDRESCUE_LOG_REGEX.search(line) + if _r: + if _r.group('key') == 'rescued' and _r.group('percent') == '100': + # Fix rounding errors from ddrescuelog output + data['rescued'] = self.size + else: + data[_r.group('key')] = std.string_to_bytes( + f'{_r.group("size")} {_r.group("unit")}', + ) + data['pass completed'] = 'current status: finished' in line.lower() + + # Check if 100% done (only if map is present and non-zero size + # NOTE: ddrescuelog returns 0 (i.e. 
100% done) for empty files + if self.map_path.exists() and self.map_path.stat().st_size != 0: + cmd = [ + 'ddrescuelog', + '--done-status', + self.map_path, + ] + proc = exe.run_program(cmd, check=False) + data['full recovery'] = proc.returncode == 0 + + # Done + self.map_data.update(data) + + def pass_complete(self, pass_name): + """Check if pass_num is complete based on map data, returns bool.""" + complete = False + pending_size = 0 + + # Check map data + if self.map_data.get('full recovery', False): + complete = True + elif 'non-tried' not in self.map_data: + # Assuming recovery has not been attempted yet + complete = False + else: + # Check that current and previous passes are complete + pending_size = self.map_data['non-tried'] + if pass_name in ('trim', 'scrape'): + pending_size += self.map_data['non-trimmed'] + if pass_name == 'scrape': + pending_size += self.map_data['non-scraped'] + if pending_size == 0: + complete = True + + # Done + return complete + + def safety_check(self): + """Run safety check and abort if necessary.""" + dest_size = -1 + if self.destination.exists(): + dest_obj = hw_obj.Disk(self.destination) + dest_size = dest_obj.details['size'] + del dest_obj + + # Check destination size if cloning + if not self.destination.is_file() and dest_size < self.size: + std.print_error(f'Invalid destination: {self.destination}') + raise std.GenericAbort() + + def set_initial_status(self): + """Read map data and set initial statuses.""" + self.load_map_data() + percent = self.get_percent_recovered() + for name in self.status.keys(): + if self.pass_complete(name): + self.status[name] = percent + else: + # Stop checking + if percent > 0: + self.status[name] = percent + break + + def skip_pass(self, pass_name): + """Mark pass as skipped if applicable.""" + if self.status[pass_name] == 'Pending': + self.status[pass_name] = 'Skipped' + + def update_progress(self, pass_name): + """Update progress via map data.""" + self.load_map_data() + + # Update status + percent = self.get_percent_recovered() + if percent > 0: + self.status[pass_name] = percent + + # Mark future passes as skipped if applicable + if percent == 100: + if pass_name == 'read': + self.status['trim'] = 'Skipped' + if pass_name in ('read', 'trim'): + self.status['scrape'] = 'Skipped' + + +class State(): + """Object for tracking hardware diagnostic data.""" + def __init__(self): + self.block_pairs = [] + self.destination = None + self.log_dir = None + self.mode = None + self.panes = {} + self.source = None + self.working_dir = None + + # Start a background process to maintain layout + self._init_tmux() + exe.start_thread(self._fix_tmux_layout_loop) + + def _add_block_pair(self, source, destination): + """Add BlockPair object and run safety checks.""" + self.block_pairs.append( + BlockPair( + source=source, + destination=destination, + model=self.source.details['model'], + working_dir=self.working_dir, + )) + + def _fix_tmux_layout(self, forced=True): + """Fix tmux layout based on cfg.ddrescue.TMUX_LAYOUT.""" + layout = cfg.ddrescue.TMUX_LAYOUT + needs_fixed = tmux.layout_needs_fixed(self.panes, layout) + + # Main layout fix + try: + tmux.fix_layout(self.panes, layout, forced=forced) + except RuntimeError: + # Assuming self.panes changed while running + pass + + # Source/Destination + if forced or needs_fixed: + self.update_top_panes() + + # Return if Progress pane not present + if 'Progress' not in self.panes: + return + + # SMART/Journal + if forced or needs_fixed: + height = tmux.get_pane_size(self.panes['Progress'])[1] 
- 2 + p_ratios = [int((x/sum(PANE_RATIOS)) * height) for x in PANE_RATIOS] + if 'SMART' in self.panes: + tmux.resize_pane(self.panes['SMART'], height=p_ratios[0]) + tmux.resize_pane(height=p_ratios[1]) + if 'Journal' in self.panes: + tmux.resize_pane(self.panes['Journal'], height=p_ratios[2]) + + def _fix_tmux_layout_loop(self): + """Fix tmux layout on a loop. + + NOTE: This should be called as a thread. + """ + while True: + self._fix_tmux_layout(forced=False) + std.sleep(1) + + def _init_tmux(self): + """Initialize tmux layout.""" + tmux.kill_all_panes() + + # Source (placeholder) + self.panes['Source'] = tmux.split_window( + behind=True, + lines=2, + text=' ', + vertical=True, + ) + + # Started + self.panes['Started'] = tmux.split_window( + lines=cfg.ddrescue.TMUX_SIDE_WIDTH, + target_id=self.panes['Source'], + text=std.color_string( + ['Started', time.strftime("%Y-%m-%d %H:%M %Z")], + ['BLUE', None], + sep='\n', + ), + ) + + # Source / Dest + self.update_top_panes() + + def add_clone_block_pairs(self): + """Add device to device block pairs and set settings if necessary.""" + source_sep = get_partition_separator(self.source.path.name) + dest_sep = get_partition_separator(self.destination.path.name) + settings = {} + source_parts = [] + + # Clone settings + settings = self.load_settings(discard_unused_settings=True) + + # Add pairs + if settings['Partition Mapping']: + # Resume previous run, load pairs from settings file + for part_map in settings['Partition Mapping']: + bp_source = hw_obj.Disk( + f'{self.source.path}{source_sep}{part_map[0]}', + ) + bp_dest = pathlib.Path( + f'{self.destination.path}{dest_sep}{part_map[1]}', + ) + self._add_block_pair(bp_source, bp_dest) + else: + source_parts = select_disk_parts('Clone', self.source) + if self.source.path.samefile(source_parts[0].path): + # Whole disk (or single partition via args), skip settings + bp_dest = self.destination.path + self._add_block_pair(self.source, bp_dest) + else: + # New run, use new settings file + settings['Needs Format'] = True + offset = 0 + user_choice = std.choice( + ['G', 'M', 'S'], + 'Format clone using GPT, MBR, or match Source type?', + ) + if user_choice == 'G': + settings['Table Type'] = 'GPT' + elif user_choice == 'M': + settings['Table Type'] = 'MBR' + else: + # Match source type + settings['Table Type'] = get_table_type(self.source) + if std.ask('Create an empty Windows boot partition on the clone?'): + settings['Create Boot Partition'] = True + offset = 2 if settings['Table Type'] == 'GPT' else 1 + + # Add pairs + for dest_num, part in enumerate(source_parts): + dest_num += offset + 1 + bp_dest = pathlib.Path( + f'{self.destination.path}{dest_sep}{dest_num}', + ) + self._add_block_pair(part, bp_dest) + + # Add to settings file + source_num = re.sub(r'^.*?(\d+)$', r'\1', part.path.name) + settings['Partition Mapping'].append([source_num, dest_num]) + + # Save settings + self.save_settings(settings) + + # Done + return source_parts + + def add_image_block_pairs(self, source_parts): + """Add device to image file block pairs.""" + for part in source_parts: + bp_dest = self.destination + self._add_block_pair(part, bp_dest) + + def confirm_selections(self, prompt, source_parts=None): + """Show selection details and prompt for confirmation.""" + report = [] + + # Source + report.append(std.color_string('Source', 'GREEN')) + report.extend(build_object_report(self.source)) + report.append(' ') + + # Destination + report.append(std.color_string('Destination', 'GREEN')) + if self.mode == 'Clone': + report[-1] += 
std.color_string(' (ALL DATA WILL BE DELETED)', 'RED')
+    report.extend(build_object_report(self.destination))
+    report.append(' ')
+
+    # Show deletion warning if necessary
+    # NOTE: The check for block_pairs is to limit this section
+    # to the second confirmation
+    if self.mode == 'Clone' and self.block_pairs:
+      report.append(std.color_string('WARNING', 'YELLOW'))
+      report.append(
+        'All data will be deleted from the destination listed above.',
+        )
+      report.append(
+        std.color_string(
+          ['This is irreversible and will lead to', 'DATA LOSS.'],
+          ['YELLOW', 'RED'],
+          ),
+        )
+      report.append(' ')
+
+    # Block pairs
+    if self.block_pairs:
+      report.extend(
+        build_block_pair_report(
+          self.block_pairs,
+          self.load_settings() if self.mode == 'Clone' else {},
+          ),
+        )
+      report.append(' ')
+
+    # Map dir
+    if self.working_dir:
+      report.append(std.color_string('Map Save Directory', 'GREEN'))
+      report.append(f'{self.working_dir}/')
+      report.append(' ')
+      if not fstype_is_ok(self.working_dir, map_dir=True):
+        report.append(
+          std.color_string(
+            'Map file(s) are being saved to a non-recommended filesystem.',
+            'YELLOW',
+            ),
+          )
+        report.append(
+          std.color_string(
+            ['This is strongly discouraged and may lead to', 'DATA LOSS'],
+            [None, 'RED'],
+            ),
+          )
+        report.append(' ')
+
+    # Source part(s) selected
+    if source_parts:
+      report.append(std.color_string('Source Part(s) selected', 'GREEN'))
+      if self.source.path.samefile(source_parts[0].path):
+        report.append('Whole Disk')
+      else:
+        report.append(std.color_string(f'{"NAME":<9} SIZE', 'BLUE'))
+        for part in source_parts:
+          report.append(
+            f'{part.path.name:<9} '
+            f'{std.bytes_to_string(part.details["size"], use_binary=False)}'
+            )
+      report.append(' ')
+
+    # Prompt user
+    std.clear_screen()
+    std.print_report(report)
+    if not std.ask(prompt):
+      raise std.GenericAbort()
+
+  def get_percent_recovered(self):
+    """Get total percent rescued from block_pairs, returns float."""
+    total_rescued = self.get_rescued_size()
+    total_size = sum([pair.size for pair in self.block_pairs])
+    return 100 * total_rescued / total_size
+
+  def get_rescued_size(self):
+    """Get total rescued size from all block pairs, returns int."""
+    return sum([pair.get_rescued_size() for pair in self.block_pairs])
+
+  def init_recovery(self, docopt_args):
+    """Select source/dest and set env."""
+    std.clear_screen()
+    source_parts = []
+
+    # Set log
+    self.log_dir = log.format_log_path()
+    self.log_dir = pathlib.Path(
+      f'{self.log_dir.parent}/'
+      f'ddrescue-TUI_{time.strftime("%Y-%m-%d_%H%M%S%z")}/'
+      )
+    log.update_log_path(
+      dest_dir=self.log_dir,
+      dest_name='main',
+      keep_history=True,
+      timestamp=False,
+      )
+
+    # Set mode
+    self.mode = set_mode(docopt_args)
+
+    # Select source
+    self.source = get_object(docopt_args['<source>'])
+    if not self.source:
+      self.source = select_disk('Source')
+    self.update_top_panes()
+
+    # Select destination
+    self.destination = get_object(docopt_args['<destination>'])
+    if not self.destination:
+      if self.mode == 'Clone':
+        self.destination = select_disk('Destination', self.source)
+      elif self.mode == 'Image':
+        self.destination = select_path('Destination')
+    self.update_top_panes()
+
+    # Confirmation #1
+    self.confirm_selections(
+      prompt='Are these selections correct?',
+      source_parts=source_parts,
+      )
+
+    # Update panes
+    self.panes['Progress'] = tmux.split_window(
+      lines=cfg.ddrescue.TMUX_SIDE_WIDTH,
+      watch_file=f'{self.log_dir}/progress.out',
+      )
+    self.update_progress_pane('Idle')
+
+    # Set working dir
+    self.working_dir = get_working_dir(
+      self.mode,
+
self.destination, + force_local=docopt_args['--force-local-map'], + ) + + # Start fresh if requested + if docopt_args['--start-fresh']: + clean_working_dir(self.working_dir) + + # Add block pairs + if self.mode == 'Clone': + source_parts = self.add_clone_block_pairs() + else: + source_parts = select_disk_parts(self.mode, self.source) + self.add_image_block_pairs(source_parts) + + # Safety Checks #1 + if self.mode == 'Clone': + self.safety_check_destination() + self.safety_check_size() + + # Confirmation #2 + self.update_progress_pane('Idle') + self.confirm_selections('Start recovery?') + + # Prep destination + if self.mode == 'Clone': + self.prep_destination(source_parts, dry_run=docopt_args['--dry-run']) + + # Safety Checks #2 + if not docopt_args['--dry-run']: + for pair in self.block_pairs: + pair.safety_check() + + def load_settings(self, discard_unused_settings=False): + """Load settings from previous run, returns dict.""" + settings = {} + settings_file = pathlib.Path( + f'{self.working_dir}/Clone_{self.source.details["model"]}.json', + ) + + # Try loading JSON data + if settings_file.exists(): + with open(settings_file, 'r') as _f: + try: + settings = json.loads(_f.read()) + except (OSError, json.JSONDecodeError): + LOG.error('Failed to load clone settings') + std.print_error('Invalid clone settings detected.') + raise std.GenericAbort() + + # Check settings + if settings: + if settings['First Run'] and discard_unused_settings: + # Previous run aborted before starting recovery, discard settings + settings = {} + else: + bail = False + for key in ('model', 'serial'): + if settings['Source'][key] != self.source.details[key]: + std.print_error(f"Clone settings don't match source {key}") + bail = True + if settings['Destination'][key] != self.destination.details[key]: + std.print_error(f"Clone settings don't match destination {key}") + bail = True + if bail: + raise std.GenericAbort() + + # Update settings + if not settings: + settings = CLONE_SETTINGS.copy() + if not settings['Source']: + settings['Source'] = { + 'model': self.source.details['model'], + 'serial': self.source.details['serial'], + } + if not settings['Destination']: + settings['Destination'] = { + 'model': self.destination.details['model'], + 'serial': self.destination.details['serial'], + } + + # Done + return settings + + def mark_started(self): + """Edit clone settings, if applicable, to mark recovery as started.""" + # Skip if not cloning + if self.mode != 'Clone': + return + + # Skip if not using settings + # i.e. 
Cloning whole disk (or single partition via args)
+    if self.source.path.samefile(self.block_pairs[0].source):
+      return
+
+    # Update settings
+    settings = self.load_settings()
+    if settings.get('First Run', False):
+      settings['First Run'] = False
+      self.save_settings(settings)
+
+  def pass_above_threshold(self, pass_name):
+    """Check if all block_pairs meet the pass threshold, returns bool."""
+    threshold = cfg.ddrescue.AUTO_PASS_THRESHOLDS[pass_name]
+    return all(
+      [p.get_percent_recovered() >= threshold for p in self.block_pairs],
+      )
+
+  def pass_complete(self, pass_name):
+    """Check if all block_pairs completed pass_name, returns bool."""
+    return all([p.pass_complete(pass_name) for p in self.block_pairs])
+
+  def prep_destination(self, source_parts, dry_run=True):
+    """Prep destination as necessary."""
+    # TODO: Split into Linux and macOS
+    # logical sector size is not easily found under macOS
+    # It might be easier to rewrite this section using macOS tools
+    dest_prefix = str(self.destination.path)
+    dest_prefix += get_partition_separator(self.destination.path.name)
+    esp_type = 'C12A7328-F81F-11D2-BA4B-00A0C93EC93B'
+    msr_type = 'E3C9E316-0B5C-4DB8-817D-F92DF00215AE'
+    part_num = 0
+    sfdisk_script = []
+    settings = self.load_settings()
+
+    # Bail early
+    if not settings['Needs Format']:
+      return
+
+    # Add partition table settings
+    if settings['Table Type'] == 'GPT':
+      sfdisk_script.append('label: gpt')
+    else:
+      sfdisk_script.append('label: dos')
+    sfdisk_script.append('unit: sectors')
+    sfdisk_script.append('')
+
+    # Add boot partition if requested
+    if settings['Create Boot Partition']:
+      if settings['Table Type'] == 'GPT':
+        part_num += 1
+        sfdisk_script.append(
+          build_sfdisk_partition_line(
+            table_type='GPT',
+            dev_path=f'{dest_prefix}{part_num}',
+            size='384MiB',
+            details={'parttype': esp_type, 'partlabel': 'EFI System'},
+            ),
+          )
+        part_num += 1
+        sfdisk_script.append(
+          build_sfdisk_partition_line(
+            table_type=settings['Table Type'],
+            dev_path=f'{dest_prefix}{part_num}',
+            size='16MiB',
+            details={'parttype': msr_type, 'partlabel': 'Microsoft Reserved'},
+            ),
+          )
+      elif settings['Table Type'] == 'MBR':
+        part_num += 1
+        sfdisk_script.append(
+          build_sfdisk_partition_line(
+            table_type='MBR',
+            dev_path=f'{dest_prefix}{part_num}',
+            size='100MiB',
+            details={'parttype': '0x7', 'partlabel': 'System Reserved'},
+            ),
+          )
+
+    # Add selected partition(s)
+    for part in source_parts:
+      num_sectors = part.details['size'] / self.destination.details['log-sec']
+      num_sectors = math.ceil(num_sectors)
+      part_num += 1
+      sfdisk_script.append(
+        build_sfdisk_partition_line(
+          table_type=settings['Table Type'],
+          dev_path=f'{dest_prefix}{part_num}',
+          size=num_sectors,
+          details=part.details,
+          ),
+        )
+
+    # Save sfdisk script
+    script_path = (
+      f'{self.working_dir}/'
+      f'sfdisk_{self.destination.path.name}.script'
+      )
+    with open(script_path, 'w') as _f:
+      _f.write('\n'.join(sfdisk_script))
+
+    # Skip real format for dry runs
+    if dry_run:
+      LOG.info('Dry run, refusing to format destination')
+      return
+
+    # Format disk
+    LOG.warning('Formatting destination: %s', self.destination.path)
+    with open(script_path, 'r') as _f:
+      proc = exe.run_program(
+        cmd=['sudo', 'sfdisk', self.destination.path],
+        stdin=_f,
+        check=False,
+        )
+    if proc.returncode != 0:
+      std.print_error('Error(s) encountered while formatting destination')
+      raise std.GenericAbort()
+
+    # Update settings
+    settings['Needs Format'] = False
+    self.save_settings(settings)
+
+  def retry_all_passes(self):
+    """Prep
block_pairs for a retry recovery attempt.""" + bad_statuses = ('*', '/', '-') + LOG.warning('Updating block_pairs for retry') + + # Update all block_pairs + for pair in self.block_pairs: + map_data = [] + + # Reset status strings + for name in pair.status.keys(): + pair.status[name] = 'Pending' + + # Mark all non-trimmed, non-scraped, and bad areas as non-tried + with open(pair.map_path, 'r') as _f: + for line in _f.readlines(): + line = line.strip() + if line.startswith('0x') and line.endswith(bad_statuses): + line = f'{line[:-1]}?' + map_data.append(line) + + # Save updated map + with open(pair.map_path, 'w') as _f: + _f.write('\n'.join(map_data)) + + # Reinitialize status + pair.set_initial_status() + + def safety_check_destination(self): + """Run safety checks for destination and abort if necessary.""" + try: + self.destination.safety_checks() + except hw_obj.CriticalHardwareError: + std.print_error( + f'Critical error(s) detected for: {self.destination.path}', + ) + raise std.GenericAbort() + + def safety_check_size(self): + """Run size safety check and abort if necessary.""" + required_size = sum([pair.size for pair in self.block_pairs]) + settings = self.load_settings() if self.mode == 'Clone' else {} + + # Increase required_size if necessary + if self.mode == 'Clone' and settings.get('Needs Format', False): + if settings['Table Type'] == 'GPT': + # Below is the size calculation for the GPT + # 1 LBA for the protective MBR + # 33 LBAs each for the primary and backup GPT tables + # Source: https://en.wikipedia.org/wiki/GUID_Partition_Table + required_size += (1 + 33 + 33) * self.destination.details['phy-sec'] + if settings['Create Boot Partition']: + # 384MiB EFI System Partition and a 16MiB MS Reserved partition + required_size += (384 + 16) * 1024**2 + else: + # MBR only requires one LBA but adding a full 4096 bytes anyway + required_size += 4096 + if settings['Create Boot Partition']: + # 100MiB System Reserved partition + required_size += 100 * 1024**2 + + # Reduce required_size if necessary + if self.mode == 'Image': + for pair in self.block_pairs: + if pair.destination.exists(): + # NOTE: This uses the "max space" of the destination + # i.e. not the apparent size which is smaller for sparse files + # While this can result in an out-of-space error it's better + # than nothing. 
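+          # (stat().st_size is the file's logical size, which for a sparse
+          #  image may be larger than its on-disk usage, so this subtraction
+          #  is only a best-effort estimate.)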
+ required_size -= pair.destination.stat().st_size + + # Check destination size + if self.mode == 'Clone': + destination_size = self.destination.details['size'] + error_msg = 'A larger destination disk is required' + else: + # NOTE: Adding an extra 5% here to better ensure it will fit + destination_size = psutil.disk_usage(self.destination).free + destination_size *= 1.05 + error_msg = 'Not enough free space on the destination' + if required_size > destination_size: + std.print_error(error_msg) + raise std.GenericAbort() + + def save_debug_reports(self): + """Save debug reports to disk.""" + LOG.info('Saving debug reports') + debug_dir = pathlib.Path(f'{self.log_dir}/debug') + if not debug_dir.exists(): + debug_dir.mkdir() + + # State (self) + with open(f'{debug_dir}/state.report', 'a') as _f: + _f.write('[Debug report]\n') + _f.write('\n'.join(debug.generate_object_report(self))) + _f.write('\n') + + # Block pairs + for _bp in self.block_pairs: + with open(f'{debug_dir}/block_pairs.report', 'a') as _f: + _f.write('[Debug report]\n') + _f.write('\n'.join(debug.generate_object_report(_bp))) + _f.write('\n') + + def save_settings(self, settings): + """Save settings for future runs.""" + settings_file = pathlib.Path( + f'{self.working_dir}/Clone_{self.source.details["model"]}.json', + ) + + # Try saving JSON data + try: + with open(settings_file, 'w') as _f: + json.dump(settings, _f) + except OSError: + std.print_error('Failed to save clone settings') + raise std.GenericAbort() + + def skip_pass(self, pass_name): + """Mark block_pairs as skipped if applicable.""" + for pair in self.block_pairs: + if pair.status[pass_name] == 'Pending': + pair.status[pass_name] = 'Skipped' + + def update_progress_pane(self, overall_status): + """Update progress pane.""" + report = [] + separator = '─────────────────────' + width = cfg.ddrescue.TMUX_SIDE_WIDTH + + # Status + report.append(std.color_string(f'{"Status":^{width}}', 'BLUE')) + if 'NEEDS ATTENTION' in overall_status: + report.append( + std.color_string(f'{overall_status:^{width}}', 'YELLOW_BLINK'), + ) + else: + report.append(f'{overall_status:^{width}}') + report.append(separator) + + # Overall progress + if self.block_pairs: + total_rescued = self.get_rescued_size() + percent = self.get_percent_recovered() + report.append(std.color_string('Overall Progress', 'BLUE')) + report.append( + f'Rescued: {format_status_string(percent, width=width-9)}', + ) + report.append( + std.color_string( + [f'{std.bytes_to_string(total_rescued, decimals=2):>{width}}'], + [get_percent_color(percent)], + ), + ) + report.append(separator) + + # Block pair progress + for pair in self.block_pairs: + report.append(std.color_string(pair.source, 'BLUE')) + for name, status in pair.status.items(): + name = name.title() + report.append( + f'{name}{format_status_string(status, width=width-len(name))}', + ) + report.append(' ') + + # EToC + if overall_status in ('Active', 'NEEDS ATTENTION'): + etoc = get_etoc() + report.append(separator) + report.append(std.color_string('Estimated Pass Finish', 'BLUE')) + if overall_status == 'NEEDS ATTENTION' or etoc == 'N/A': + report.append(std.color_string('N/A', 'YELLOW')) + else: + report.append(etoc) + + # Write to progress file + out_path = pathlib.Path(f'{self.log_dir}/progress.out') + with open(out_path, 'w') as _f: + _f.write('\n'.join(report)) + + def update_top_panes(self): + """(Re)create top source/destination panes.""" + width = tmux.get_pane_size()[0] + width = int(width / 2) - 1 + + def _format_string(obj, width): + """Format 
source/dest string using obj and width, returns str.""" + string = '' + + # Build base string + if isinstance(obj, hw_obj.Disk): + string = f'{obj.path} {obj.description}' + elif obj.is_dir(): + string = f'{obj}/' + elif obj.is_file(): + size_str = std.bytes_to_string( + obj.stat().st_size, + decimals=0, + use_binary=False) + string = f'{obj.name} {size_str}' + + # Adjust for width + if len(string) > width: + if hasattr(obj, 'is_dir') and obj.is_dir(): + string = f'...{string[-width+3:]}' + else: + string = f'{string[:width-3]}...' + + # Done + return string + + # Kill destination pane + if 'Destination' in self.panes: + tmux.kill_pane(self.panes.pop('Destination')) + + # Source + source_str = ' ' + if self.source: + source_str = _format_string(self.source, width) + tmux.respawn_pane( + self.panes['Source'], + text=std.color_string( + ['Source', source_str], + ['BLUE', None], + sep='\n', + ), + ) + + # Destination + dest_str = '' + if self.destination: + dest_str = _format_string(self.destination, width) + self.panes['Destination'] = tmux.split_window( + percent=50, + vertical=False, + target_id=self.panes['Source'], + text=std.color_string( + ['Destination', dest_str], + ['BLUE', None], + sep='\n', + ), + ) + + +# Functions +def build_block_pair_report(block_pairs, settings): + """Build block pair report, returns list.""" + report = [] + notes = [] + if block_pairs: + report.append(std.color_string('Block Pairs', 'GREEN')) + else: + # Bail early + return report + + # Show block pair mapping + if settings and settings['Create Boot Partition']: + if settings['Table Type'] == 'GPT': + report.append(f'{" —— ":<9} --> EFI System Partition') + report.append(f'{" —— ":<9} --> Microsoft Reserved Partition') + elif settings['Table Type'] == 'MBR': + report.append(f'{" —— ":<9} --> System Reserved') + for pair in block_pairs: + report.append(f'{pair.source.name:<9} --> {pair.destination.name}') + + # Show resume messages as necessary + if settings: + if not settings['First Run']: + notes.append( + std.color_string( + ['NOTE:', 'Clone settings loaded from previous run.'], + ['BLUE', None], + ), + ) + if settings['Needs Format'] and settings['Table Type']: + msg = f'Destination will be formatted using {settings["Table Type"]}' + notes.append( + std.color_string( + ['NOTE:', msg], + ['BLUE', None], + ), + ) + if any([pair.get_rescued_size() > 0 for pair in block_pairs]): + notes.append( + std.color_string( + ['NOTE:', 'Resume data loaded from map file(s).'], + ['BLUE', None], + ), + ) + + # Add notes to report + if notes: + report.append(' ') + report.extend(notes) + + # Done + return report + + +def build_ddrescue_cmd(block_pair, pass_name, settings): + """Build ddrescue cmd using passed details, returns list.""" + cmd = ['sudo', 'ddrescue'] + if (block_pair.destination.is_block_device() + or block_pair.destination.is_char_device()): + cmd.append('--force') + if pass_name == 'read': + cmd.extend(['--no-trim', '--no-scrape']) + elif pass_name == 'trim': + # Allow trimming + cmd.append('--no-scrape') + elif pass_name == 'scrape': + # Allow trimming and scraping + pass + cmd.extend(settings) + cmd.append(block_pair.source) + cmd.append(block_pair.destination) + cmd.append(block_pair.map_path) + + # Done + LOG.debug('ddrescue cmd: %s', cmd) + return cmd + + +def build_directory_report(path): + """Build directory report, returns list.""" + path = f'{path}/' + report = [] + + # Get details + if PLATFORM == 'Linux': + cmd = [ + 'findmnt', + '--output', 'SIZE,AVAIL,USED,FSTYPE,OPTIONS', + '--target', path, + 
] + proc = exe.run_program(cmd) + width = len(path) + 1 + for line in proc.stdout.splitlines(): + line = line.replace('\n', '') + if 'FSTYPE' in line: + line = std.color_string(f'{"PATH":<{width}}{line}', 'BLUE') + else: + line = f'{path:<{width}}{line}' + report.append(line) + else: + # TODO Get dir details under macOS + report.append(std.color_string('PATH', 'BLUE')) + report.append(str(path)) + + # Done + return report + + +def build_disk_report(dev): + """Build device report, returns list.""" + children = dev.details.get('children', []) + report = [] + + # Get widths + widths = { + 'fstype': max(6, len(str(dev.details.get('fstype', '')))), + 'label': max(5, len(str(dev.details.get('label', '')))), + 'name': max(4, len(dev.path.name)), + } + for child in children: + widths['fstype'] = max(widths['fstype'], len(str(child['fstype']))) + widths['label'] = max(widths['label'], len(str(child['label']))) + widths['name'] = max( + widths['name'], + len(child['name'].replace('/dev/', '')), + ) + widths = {k: v+1 for k, v in widths.items()} + + # Disk details + report.append(f'{dev.path.name} {dev.description}') + report.append(' ') + dev_fstype = dev.details.get('fstype', '') + dev_label = dev.details.get('label', '') + dev_name = dev.path.name + dev_size = std.bytes_to_string(dev.details["size"], use_binary=False) + + # Partition details + report.append( + std.color_string( + ( + f'{"NAME":<{widths["name"]}}' + f'{" " if children else ""}' + f'{"SIZE":<7}' + f'{"FSTYPE":<{widths["fstype"]}}' + f'{"LABEL":<{widths["label"]}}' + ), + 'BLUE', + ), + ) + report.append( + f'{dev_name if dev_name else "":<{widths["name"]}}' + f'{" " if children else ""}' + f'{dev_size:>6} ' + f'{dev_fstype if dev_fstype else "":<{widths["fstype"]}}' + f'{dev_label if dev_label else "":<{widths["label"]}}' + ) + for child in children: + fstype = child['fstype'] + label = child['label'] + name = child['name'].replace('/dev/', '') + size = std.bytes_to_string(child["size"], use_binary=False) + report.append( + f'{name if name else "":<{widths["name"]}}' + f'{size:>6} ' + f'{fstype if fstype else "":<{widths["fstype"]}}' + f'{label if label else "":<{widths["label"]}}' + ) + + # Indent children + if len(children) > 1: + report = [ + *report[:4], + *[f'├─{line}' for line in report[4:-1]], + f'└─{report[-1]}', + ] + elif len(children) == 1: + report[-1] = f'└─{report[-1]}' + + # Done + return report + + +def build_main_menu(): + """Build main menu, returns wk.std.Menu.""" + menu = std.Menu(title=std.color_string('ddrescue TUI: Main Menu', 'GREEN')) + menu.separator = ' ' + + # Add actions, options, etc + for action in MENU_ACTIONS: + menu.add_action(action) + for toggle, selected in MENU_TOGGLES.items(): + menu.add_toggle(toggle, {'Selected': selected}) + + # Done + return menu + + +def build_object_report(obj): + """Build object report, returns list.""" + report = [] + + # Get details based on object given + if hasattr(obj, 'is_dir') and obj.is_dir(): + # Directory report + report = build_directory_report(obj) + else: + # Device report + report = build_disk_report(obj) + + # Done + return report + + +def build_settings_menu(silent=True): + """Build settings menu, returns wk.std.Menu.""" + title_text = [ + std.color_string('ddrescue TUI: Expert Settings', 'GREEN'), + ' ', + std.color_string( + ['These settings can cause', 'MAJOR DAMAGE', 'to drives'], + ['YELLOW', 'RED', 'YELLOW'], + ), + 'Please read the manual before making changes', + ] + menu = std.Menu(title='\n'.join(title_text)) + menu.separator = ' ' + preset = 
'Default' + if not silent: + # Ask which preset to use + print(f'Available ddrescue presets: {" / ".join(SETTING_PRESETS)}') + preset = std.choice(SETTING_PRESETS, 'Please select a preset:') + + # Fix selection + for _p in SETTING_PRESETS: + if _p.startswith(preset): + preset = _p + + # Add default settings + menu.add_action('Load Preset') + menu.add_action('Main Menu') + for name, details in cfg.ddrescue.DDRESCUE_SETTINGS['Default'].items(): + menu.add_option(name, details.copy()) + + # Update settings using preset + if preset != 'Default': + for name, details in cfg.ddrescue.DDRESCUE_SETTINGS[preset].items(): + menu.options[name].update(details.copy()) + + # Done + return menu + + +def build_sfdisk_partition_line(table_type, dev_path, size, details): + """Build sfdisk partition line using passed details, returns str.""" + line = f'{dev_path} : size={size}' + dest_type = '' + source_filesystem = str(details.get('fstype', '')).upper() + source_table_type = '' + source_type = details.get('parttype', '') + + # Set dest type + if re.match(r'^0x\w+$', source_type): + # Both source and dest are MBR + source_table_type = 'MBR' + if table_type == 'MBR': + dest_type = source_type.replace('0x', '').lower() + elif re.match(r'^\w{8}-\w{4}-\w{4}-\w{4}-\w{12}$', source_type): + # Source is a GPT type + source_table_type = 'GPT' + if table_type == 'GPT': + dest_type = source_type.upper() + if not dest_type: + # Assuming changing table types, set based on FS + if source_filesystem in cfg.ddrescue.PARTITION_TYPES.get(table_type, {}): + dest_type = cfg.ddrescue.PARTITION_TYPES[table_type][source_filesystem] + line += f', type={dest_type}' + + # Safety Check + if not dest_type: + std.print_error(f'Failed to determine partition type for: {dev_path}') + raise std.GenericAbort() + + # Add extra details + if details.get('partlabel', ''): + line += f', name="{details["partlabel"]}"' + if details.get('partuuid', '') and source_table_type == table_type: + # Only add UUID if source/dest table types match + line += f', uuid={details["partuuid"].upper()}' + + # Done + return line + + +def clean_working_dir(working_dir): + """Clean working directory to ensure a fresh recovery session. + + NOTE: Data from previous sessions will be preserved + in a backup directory. 
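+  The preserved files are the previous session's *.dd, *.json, and *.map
+  entries, which are moved into a 'prev' subdirectory of working_dir
+  (a non-clobbering variant of that name is used if it already exists).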
+ """ + backup_dir = pathlib.Path(f'{working_dir}/prev') + backup_dir = io.non_clobber_path(backup_dir) + backup_dir.mkdir() + + # Move settings, maps, etc to backup_dir + for entry in os.scandir(working_dir): + if entry.name.endswith(('.dd', '.json', '.map')): + new_path = f'{backup_dir}/{entry.name}' + new_path = io.non_clobber_path(new_path) + shutil.move(entry.path, new_path) + + +def format_status_string(status, width): + """Format colored status string, returns str.""" + color = None + percent = -1 + status_str = str(status) + + # Check if status is percentage + try: + percent = float(status_str) + except ValueError: + # Assuming status is text + pass + + # Format status + if percent >= 0: + # Percentage + color = get_percent_color(percent) + status_str = f'{percent:{width-2}.2f} %' + if '100.00' in status_str and percent < 100: + # Always round down to 99.99% + LOG.warning('Rounding down to 99.99 from %s', percent) + status_str = f'{"99.99 %":>{width}}' + else: + # Text + color = STATUS_COLORS.get(status_str, None) + status_str = f'{status_str:>{width}}' + + # Add color if necessary + if color: + status_str = std.color_string(status_str, color) + + # Done + return status_str + + +def fstype_is_ok(path, map_dir=False): + """Check if filesystem type is acceptable, returns bool.""" + is_ok = False + fstype = None + + # Get fstype + if PLATFORM == 'Darwin': + # TODO: Determine fstype under macOS + pass + elif PLATFORM == 'Linux': + cmd = [ + 'findmnt', + '--noheadings', + '--output', 'FSTYPE', + '--target', path, + ] + proc = exe.run_program(cmd, check=False) + fstype = proc.stdout + fstype = fstype.strip().lower() + + # Check fstype + if map_dir: + is_ok = RECOMMENDED_MAP_FSTYPES.match(fstype) + else: + is_ok = RECOMMENDED_FSTYPES.match(fstype) + + # Done + return is_ok + + +def get_ddrescue_settings(settings_menu): + """Get ddrescue settings from menu selections, returns list.""" + settings = [] + + # Check menu selections + for name, details in settings_menu.options.items(): + if details['Selected']: + if 'Value' in details: + settings.append(f'{name}={details["Value"]}') + else: + settings.append(name) + + # Done + return settings + + +def get_etoc(): + """Get EToC from ddrescue output, returns str.""" + delta = None + delta_dict = {} + etoc = 'Unknown' + now = datetime.datetime.now(tz=TIMEZONE) + output = tmux.capture_pane() + + # Search for EToC delta + matches = re.findall(f'remaining time:.*$', output, re.MULTILINE) + if matches: + match = REGEX_REMAINING_TIME.search(matches[-1]) + if match.group('na'): + etoc = 'N/A' + else: + for key in ('days', 'hours', 'minutes', 'seconds'): + delta_dict[key] = match.group(key) + delta_dict = {k: int(v) if v else 0 for k, v in delta_dict.items()} + delta = datetime.timedelta(**delta_dict) + + # Calc EToC if delta found + if delta: + etoc_datetime = now + delta + etoc = etoc_datetime.strftime('%Y-%m-%d %H:%M %Z') + + # Done + return etoc + + +def get_object(path): + """Get object based on path, returns obj.""" + obj = None + + # Bail early + if not path: + return obj + + # Check path + path = pathlib.Path(path).resolve() + if path.is_block_device() or path.is_char_device(): + obj = hw_obj.Disk(path) + + # Child/Parent check + parent = obj.details['parent'] + if parent: + std.print_warning(f'"{obj.path}" is a child device') + if std.ask(f'Use parent device "{parent}" instead?'): + obj = hw_obj.Disk(parent) + elif path.is_dir(): + obj = path + elif path.is_file(): + # Assuming file is a raw image, mounting + loop_path = mount_raw_image(path) + 
obj = hw_obj.Disk(loop_path) + + # Abort if obj not set + if not obj: + std.print_error(f'Invalid source/dest path: {path}') + raise std.GenericAbort() + + # Done + return obj + + +def get_partition_separator(name): + """Get partition separator based on device name, returns str.""" + separator = '' + if re.search(r'(loop|mmc|nvme)', name, re.IGNORECASE): + separator = 'p' + + return separator + + +def get_percent_color(percent): + """Get color based on percentage, returns str.""" + color = None + if percent > 100: + color = 'PURPLE' + elif percent >= 99: + color = 'GREEN' + elif percent >= 90: + color = 'YELLOW' + elif percent > 0: + color = 'RED' + + # Done + return color + + +def get_table_type(disk): + """Get disk partition table type, returns str. + + NOTE: If resulting table type is not GPT or MBR + then an exception is raised. + """ + table_type = str(disk.details.get('pttype', '')).upper() + table_type = table_type.replace('DOS', 'MBR') + + # Check type + if table_type not in ('GPT', 'MBR'): + std.print_error(f'Unsupported partition table type: {table_type}') + raise std.GenericAbort() + + # Done + return table_type + + +def get_working_dir(mode, destination, force_local=False): + """Get working directory using mode and destination, returns path.""" + ticket_id = None + working_dir = None + + # Set ticket ID + while ticket_id is None: + ticket_id = std.input_text( + prompt='Please enter ticket ID:', + allow_empty_response=False, + ) + ticket_id = ticket_id.replace(' ', '_') + if not re.match(r'^\d+', ticket_id): + ticket_id = None + + # Use preferred path if possible + if mode == 'Image': + try: + path = pathlib.Path(destination).resolve() + except TypeError: + std.print_error(f'Invalid destination: {destination}') + raise std.GenericAbort() + if path.exists() and fstype_is_ok(path, map_dir=False): + working_dir = path + elif mode == 'Clone' and not force_local: + std.print_info('Mounting backup shares...') + net.mount_backup_shares(read_write=True) + for server in cfg.net.BACKUP_SERVERS: + path = pathlib.Path(f'/Backups/{server}') + if path.exists() and fstype_is_ok(path, map_dir=True): + # Acceptable path found + working_dir = path + break + + # Default to current dir if necessary + if not working_dir: + LOG.error('Failed to set preferred working directory') + working_dir = pathlib.Path(os.getcwd()) + + # Set subdir using ticket ID + if mode == 'Clone': + working_dir = working_dir.joinpath(ticket_id) + + # Create directory + working_dir.mkdir(parents=True, exist_ok=True) + os.chdir(working_dir) + + # Done + LOG.info('Set working directory to: %s', working_dir) + return working_dir + + +def main(): + """Main function for ddrescue TUI.""" + args = docopt(DOCSTRING) + log.update_log_path(dest_name='ddrescue-TUI', timestamp=True) + + # Check if running inside tmux + if 'TMUX' not in os.environ: + LOG.error('tmux session not found') + raise RuntimeError('tmux session not found') + + # Init + atexit.register(tmux.kill_all_panes) + main_menu = build_main_menu() + settings_menu = build_settings_menu() + state = State() + try: + state.init_recovery(args) + except std.GenericAbort: + std.abort() + + # Show menu + while True: + action = None + selection = main_menu.advanced_select() + + # Change settings + if 'Change settings' in selection[0]: + while True: + selection = settings_menu.settings_select() + if 'Load Preset' in selection: + # Rebuild settings menu using preset + settings_menu = build_settings_menu(silent=False) + else: + break + + # Start recovery + if 'Start' in selection: + 
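# run_recovery() iterates the read, trim, and scrape passes over every
# block pair and honors the --dry-run flag parsed above.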
std.clear_screen() + run_recovery(state, main_menu, settings_menu, dry_run=args['--dry-run']) + + # Quit + if 'Quit' in selection: + total_percent = state.get_percent_recovered() + if total_percent == 100: + break + + # Recovey < 100% + std.print_warning('Recovery is less than 100%') + if std.ask('Are you sure you want to quit?'): + break + + +def mount_raw_image(path): + """Mount raw image using OS specific methods, returns pathlib.Path.""" + loopback_path = None + + if PLATFORM == 'Darwin': + loopback_path = mount_raw_image_macos(path) + elif PLATFORM == 'Linux': + loopback_path = mount_raw_image_linux(path) + + # Check + if not loopback_path: + std.print_error(f'Failed to mount image: {path}') + + # Register unmount atexit + atexit.register(unmount_loopback_device, loopback_path) + + # Done + return loopback_path + + +def mount_raw_image_linux(path): + """Mount raw image using losetup, returns pathlib.Path.""" + loopback_path = None + + # Mount using losetup + cmd = [ + 'sudo', + 'losetup', + '--find', + '--partscan', + '--show', + path, + ] + proc = exe.run_program(cmd, check=False) + + # Check result + if proc.returncode == 0: + loopback_path = proc.stdout.strip() + + # Done + return loopback_path + +def mount_raw_image_macos(path): + """Mount raw image using hdiutil, returns pathlib.Path.""" + loopback_path = None + plist_data = {} + + # Mount using hdiutil + # plistdata['system-entities'][{}...] + cmd = [ + 'hdiutil', 'attach', + '-imagekey', 'diskimage-class=CRawDiskImage', + '-nomount', + '-plist', + '-readonly', + path, + ] + proc = exe.run_program(cmd, check=False, encoding=None, errors=None) + + # Check result + try: + plist_data = plistlib.loads(proc.stdout) + except plistlib.InvalidFileException: + return None + for dev in plist_data.get('system-entities', []): + dev_path = dev.get('dev-entry', '') + if re.match(r'^/dev/disk\d+$', dev_path): + loopback_path = dev_path + + # Done + return loopback_path + + +def run_ddrescue(state, block_pair, pass_name, settings, dry_run=True): + """Run ddrescue using passed settings.""" + cmd = build_ddrescue_cmd(block_pair, pass_name, settings) + state.update_progress_pane('Active') + std.clear_screen() + warning_message = '' + + def _update_smart_pane(): + """Update SMART pane every 30 seconds.""" + state.source.update_smart_details() + now = datetime.datetime.now(tz=TIMEZONE).strftime('%Y-%m-%d %H:%M %Z') + with open(f'{state.log_dir}/smart.out', 'w') as _f: + _f.write( + std.color_string( + ['SMART Attributes', f'Updated: {now}\n'], + ['BLUE', 'YELLOW'], + sep='\t\t', + ), + ) + _f.write('\n'.join(state.source.generate_report(header=False))) + + # Dry run + if dry_run: + LOG.info('ddrescue cmd: %s', cmd) + return + + # Start ddrescue + proc = exe.popen_program(cmd) + + # ddrescue loop + _i = 0 + while True: + if _i % 30 == 0: + # Update SMART pane + _update_smart_pane() + if _i % 60 == 0: + # Clear ddrescue pane + tmux.clear_pane() + _i += 1 + + # Update progress + block_pair.update_progress(pass_name) + state.update_progress_pane('Active') + + # Check if complete + try: + proc.wait(timeout=1) + break + except KeyboardInterrupt: + # Wait a bit to let ddrescue exit safely + LOG.warning('ddrescue stopped by user') + warning_message = 'Aborted' + std.sleep(2) + exe.run_program(['sudo', 'kill', str(proc.pid)], check=False) + break + except subprocess.TimeoutExpired: + # Continue to next loop to update panes + pass + else: + # Done + std.sleep(1) + break + + # Update progress + # NOTE: Using 'Active' here to avoid flickering between block 
pairs + block_pair.update_progress(pass_name) + state.update_progress_pane('Active') + + # Check result + if proc.poll(): + # True if return code is non-zero (poll() returns None if still running) + warning_message = 'Error(s) encountered, see message above' + if warning_message: + print(' ') + print(' ') + std.print_error('DDRESCUE PROCESS HALTED') + print(' ') + std.print_warning(warning_message) + + # Needs attention? + if str(proc.poll()) != '0': + state.update_progress_pane('NEEDS ATTENTION') + std.pause('Press Enter to return to main menu...') + raise std.GenericAbort() + + +def run_recovery(state, main_menu, settings_menu, dry_run=True): + """Run recovery passes.""" + atexit.register(state.save_debug_reports) + attempted_recovery = False + auto_continue = False + + # Get settings + for name, details in main_menu.toggles.items(): + if 'Auto continue' in name and details['Selected']: + auto_continue = True + if 'Retry' in name and details['Selected']: + details['Selected'] = False + state.retry_all_passes() + settings = get_ddrescue_settings(settings_menu) + + # Start SMART/Journal + state.panes['SMART'] = tmux.split_window( + behind=True, lines=12, vertical=True, + watch_file=f'{state.log_dir}/smart.out', + ) + state.panes['Journal'] = tmux.split_window( + lines=4, vertical=True, cmd='journalctl --dmesg --follow', + ) + + # Run pass(es) + for pass_name in ('read', 'trim', 'scrape'): + abort = False + + # Skip to next pass + if state.pass_complete(pass_name): + # NOTE: This bypasses auto_continue + state.skip_pass(pass_name) + continue + + # Run ddrescue + for pair in state.block_pairs: + if not pair.pass_complete(pass_name): + attempted_recovery = True + state.mark_started() + try: + run_ddrescue(state, pair, pass_name, settings, dry_run=dry_run) + except (KeyboardInterrupt, std.GenericAbort): + abort = True + break + + # Continue or return to menu + all_complete = state.pass_complete(pass_name) + all_above_threshold = state.pass_above_threshold(pass_name) + if abort or not (all_complete and all_above_threshold and auto_continue): + LOG.warning('Recovery halted') + break + + # Stop SMART/Journal + for pane in ('SMART', 'Journal'): + if pane in state.panes: + tmux.kill_pane(state.panes.pop(pane)) + + # Show warning if nothing was done + if not attempted_recovery: + std.print_warning('No actions performed') + std.print_standard(' ') + std.pause('Press Enter to return to main menu...') + + # Done + state.save_debug_reports() + atexit.unregister(state.save_debug_reports) + state.update_progress_pane('Idle') + + +def select_disk(prompt, skip_disk=None): + """Select disk from list, returns Disk().""" + std.print_info('Scanning disks...') + disks = hw_obj.get_disks() + menu = std.Menu( + title=std.color_string(f'ddrescue TUI: {prompt} Selection', 'GREEN'), + ) + menu.disabled_str = 'Already selected' + menu.separator = ' ' + menu.add_action('Quit') + for disk in disks: + disable_option = False + size = disk.details["size"] + + # Check if option should be disabled + if skip_disk: + parent = skip_disk.details.get('parent', None) + if (disk.path.samefile(skip_disk.path) + or (parent and disk.path.samefile(parent))): + disable_option = True + + # Add to menu + menu.add_option( + name=( + f'{str(disk.path):<12} ' + f'{disk.details["bus"]:<5} ' + f'{std.bytes_to_string(size, decimals=1, use_binary=False):<8} ' + f'{disk.details["model"]} ' + f'{disk.details["serial"]}' + ), + details={'Disabled': disable_option, 'Object': disk}, + ) + + # Get selection + selection = menu.simple_select() + if 
'Quit' in selection: + raise std.GenericAbort() + + # Done + return selection[-1]['Object'] + + +def select_disk_parts(prompt, disk): + """Select disk parts from list, returns list of Disk().""" + title = std.color_string(f'ddrescue TUI: Partition Selection', 'GREEN') + title += f'\n\nDisk: {disk.path} {disk.description}' + menu = std.Menu(title) + menu.separator = ' ' + menu.add_action('All') + menu.add_action('None') + menu.add_action('Proceed', {'Separator': True}) + menu.add_action('Quit') + object_list = [] + + def _select_parts(menu): + """Loop over selection menu until at least one partition selected.""" + while True: + selection = menu.advanced_select( + f'Please select the parts to {prompt.lower()}: ', + ) + if 'All' in selection: + for option in menu.options.values(): + option['Selected'] = True + elif 'None' in selection: + for option in menu.options.values(): + option['Selected'] = False + elif 'Proceed' in selection: + if any([option['Selected'] for option in menu.options.values()]): + # At least one partition/device selected/device selected + break + elif 'Quit' in selection: + raise std.GenericAbort() + + # Bail early if child device selected + if disk.details.get('parent', False): + return [disk] + + # Add parts + whole_disk_str = f'{str(disk.path):<14} (Whole device)' + for part in disk.details.get('children', []): + size = part["size"] + name = ( + f'{str(part["path"]):<14} ' + f'({std.bytes_to_string(size, decimals=1, use_binary=False):>6})' + ) + menu.add_option(name, details={'Selected': True, 'Path': part['path']}) + + # Add whole disk if necessary + if not menu.options: + menu.add_option(whole_disk_str, {'Selected': True, 'Path': disk.path}) + menu.title += '\n\n' + menu.title += std.color_string(' No partitions detected.', 'YELLOW') + + # Get selection + _select_parts(menu) + + # Build list of Disk() object_list + for option in menu.options.values(): + if option['Selected']: + object_list.append(option['Path']) + + # Check if whole disk selected + if len(object_list) == len(disk.details.get('children', [])): + # NOTE: This is not true if the disk has no partitions + msg = f'Preserve partition table and unused space in {prompt.lower()}?' 
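The length check above encodes a simple heuristic: if every partition was selected, it is usually better to operate on the parent device so the partition table and any unallocated space are captured as well. A standalone sketch of that decision follows; the helper name and example paths are purely illustrative and not part of the kit:

# Illustrative helper, not part of the module above.
def prefer_whole_device(selected_paths, partition_paths, disk_path):
  """Return the paths to operate on, preferring the whole device."""
  if partition_paths and len(selected_paths) == len(partition_paths):
    # Every partition was selected: the parent device covers the same
    # data plus the partition table and unused space.
    return [disk_path]
  return selected_paths

# e.g. prefer_whole_device(['/dev/sda1', '/dev/sda2'],
#                          ['/dev/sda1', '/dev/sda2'],
#                          '/dev/sda')  ->  ['/dev/sda']

In select_disk_parts() itself this choice is confirmed with std.ask() rather than applied automatically.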
+ if std.ask(msg): + # Replace part list with whole disk obj + object_list = [disk.path] + + # Convert object_list to hw_obj.Disk() objects + print(' ') + std.print_info('Getting disk/partition details...') + object_list = [hw_obj.Disk(path) for path in object_list] + + # Done + return object_list + + +def select_path(prompt): + """Select path, returns pathlib.Path.""" + invalid = False + menu = std.Menu( + title=std.color_string(f'ddrescue TUI: {prompt} Path Selection', 'GREEN'), + ) + menu.separator = ' ' + menu.add_action('Quit') + menu.add_option(f'Current directory') + menu.add_option('Enter manually') + path = None + + # Make selection + selection = menu.simple_select() + if 'Current directory' in selection: + path = os.getcwd() + elif 'Enter manually' in selection: + path = std.input_text('Please enter path: ') + elif 'Quit' in selection: + raise std.GenericAbort() + + # Check + try: + path = pathlib.Path(path).resolve() + except TypeError: + invalid = True + if invalid or not path.is_dir(): + std.print_error(f'Invalid path: {path}') + raise std.GenericAbort() + + # Done + return path + + +def set_mode(docopt_args): + """Set mode from docopt_args or user selection, returns str.""" + mode = None + + # Check docopt_args + if docopt_args['clone']: + mode = 'Clone' + elif docopt_args['image']: + mode = 'Image' + + # Ask user if necessary + if not mode: + answer = std.choice(['C', 'I'], 'Are we cloning or imaging?') + if answer == 'C': + mode = 'Clone' + else: + mode = 'Image' + + # Done + return mode + + +def unmount_loopback_device(path): + """Unmount loopback device using OS specific methods.""" + cmd = [] + + # Build OS specific cmd + if PLATFORM == 'Darwin': + cmd = ['hdiutil', 'detach', path] + elif PLATFORM == 'Linux': + cmd = ['sudo', 'losetup', '--detach', path] + + # Unmount loopback device + exe.run_program(cmd, check=False) + + +if __name__ == '__main__': + print("This file is not meant to be called directly.") diff --git a/scripts/wk/hw/diags.py b/scripts/wk/hw/diags.py new file mode 100644 index 00000000..32264b08 --- /dev/null +++ b/scripts/wk/hw/diags.py @@ -0,0 +1,1369 @@ +"""WizardKit: Hardware diagnostics""" +# pylint: disable=too-many-lines +# vim: sts=2 sw=2 ts=2 + +import atexit +import logging +import os +import pathlib +import re +import subprocess +import time + +from collections import OrderedDict +from docopt import docopt + +from wk import cfg, debug, exe, graph, log, net, std, tmux +from wk.hw import obj as hw_obj +from wk.hw import sensors as hw_sensors + + +# STATIC VARIABLES +DOCSTRING = f'''{cfg.main.KIT_NAME_FULL}: Hardware Diagnostics + +Usage: + hw-diags [options] + hw-diags (-h | --help) + +Options: + -c --cli Force CLI mode + -h --help Show this page + -q --quick Skip menu and perform a quick check +''' +LOG = logging.getLogger(__name__) +BADBLOCKS_REGEX = re.compile( + r'^Pass completed, (\d+) bad blocks found. 
.(\d+)/(\d+)/(\d+) errors', + re.IGNORECASE, + ) +IO_GRAPH_WIDTH = 40 +IO_ALT_TEST_SIZE_FACTOR = 0.01 +IO_BLOCK_SIZE = 512 * 1024 +IO_CHUNK_SIZE = 32 * 1024**2 +IO_MINIMUM_TEST_SIZE = 10 * 1024**3 +IO_RATE_REGEX = re.compile( + r'(?P\d+) bytes.* (?P\S+) s(?:,|ecs )', + ) +MENU_ACTIONS = ( + 'Audio Test', + 'Keyboard Test', + 'Network Test', + 'Start', + 'Quit') +MENU_ACTIONS_SECRET = ( + 'Matrix', + 'Tubes', + ) +MENU_OPTIONS = ( + 'CPU & Cooling', + 'Disk Attributes', + 'Disk Self-Test', + 'Disk Surface Scan', + 'Disk I/O Benchmark', +) +MENU_OPTIONS_QUICK = ('Disk Attributes',) +MENU_SETS = { + 'Full Diagnostic': (*MENU_OPTIONS,), + 'Disk Diagnostic': ( + 'Disk Attributes', + 'Disk Self-Test', + 'Disk Surface Scan', + 'Disk I/O Benchmark', + ), + 'Disk Diagnostic (Quick)': ('Disk Attributes',), +} +MENU_TOGGLES = ( + 'Skip USB Benchmarks', + ) +PLATFORM = std.PLATFORM +STATUS_COLORS = { + 'Passed': 'GREEN', + 'Aborted': 'YELLOW', + 'N/A': 'YELLOW', + 'Skipped': 'YELLOW', + 'Unknown': 'YELLOW', + 'Working': 'YELLOW', + 'Denied': 'RED', + 'ERROR': 'RED', + 'Failed': 'RED', + 'TimedOut': 'RED', + } + + +# Error Classes +class DeviceTooSmallError(RuntimeError): + """Raised when a device is too small to test.""" + + +# Classes +class State(): + """Object for tracking hardware diagnostic data.""" + def __init__(self): + self.cpu = None + self.disks = [] + self.layout = cfg.hw.TMUX_LAYOUT.copy() + self.log_dir = None + self.panes = {} + self.tests = OrderedDict({ + 'CPU & Cooling': { + 'Enabled': False, + 'Function': cpu_mprime_test, + 'Objects': [], + }, + 'Disk Attributes': { + 'Enabled': False, + 'Function': disk_attribute_check, + 'Objects': [], + }, + 'Disk Self-Test': { + 'Enabled': False, + 'Function': disk_self_test, + 'Objects': [], + }, + 'Disk Surface Scan': { + 'Enabled': False, + 'Function': disk_surface_scan, + 'Objects': [], + }, + 'Disk I/O Benchmark': { + 'Enabled': False, + 'Function': disk_io_benchmark, + 'Objects': [], + }, + }) + self.top_text = std.color_string('Hardware Diagnostics', 'GREEN') + + # Init tmux and start a background process to maintain layout + self.init_tmux() + exe.start_thread(self.fix_tmux_layout_loop) + + def abort_testing(self): + """Set unfinished tests as aborted and cleanup tmux panes.""" + for details in self.tests.values(): + for test in details['Objects']: + if test.status in ('Pending', 'Working'): + test.set_status('Aborted') + + # Cleanup tmux + self.panes.pop('Current', None) + for key, pane_ids in self.panes.copy().items(): + if key in ('Top', 'Started', 'Progress'): + continue + if isinstance(pane_ids, str): + tmux.kill_pane(self.panes.pop(key)) + else: + for _id in pane_ids: + tmux.kill_pane(_id) + self.panes.pop(key) + + def disk_safety_checks(self, prep=False, wait_for_self_tests=True): + # pylint: disable=too-many-branches + """Run disk safety checks.""" + self_tests_in_progress = False + for disk in self.disks: + disable_tests = False + + # Skip already disabled devices + if all([test.disabled for test in disk.tests.values()]): + continue + + try: + disk.safety_checks() + except hw_obj.CriticalHardwareError: + disable_tests = True + disk.add_note('Critical hardware error detected.', 'RED') + if 'Disk Attributes' in disk.tests: + disk.tests['Disk Attributes'].failed = True + disk.tests['Disk Attributes'].set_status('Failed') + except hw_obj.SMARTSelfTestInProgressError: + if prep: + std.print_warning(f'SMART self-test(s) in progress for {disk.path}') + if std.ask('Continue with all tests disabled for this device?'): + disable_tests = 
True + else: + std.print_standard('Diagnostics aborted.') + std.print_standard(' ') + std.pause('Press Enter to exit...') + raise SystemExit(1) + elif wait_for_self_tests: + self_tests_in_progress = True + else: + # Other tests will NOT be disabled + LOG.warning('SMART data may not be reliable for: %s', disk.path) + # Add note to report + if 'Disk Self-Test' in disk.tests: + disk.tests['Disk Self-Test'].failed = True + disk.tests['Disk Self-Test'].report.append( + std.color_string('Please manually review SMART data', 'YELLOW'), + ) + + # Disable tests if necessary + if disable_tests: + disk.disable_disk_tests() + + # Wait for self-test(s) + if self_tests_in_progress: + std.print_warning('SMART self-test(s) in progress') + std.print_standard('Waiting 60 seconds before continuing...') + std.sleep(60) + self.disk_safety_checks(wait_for_self_tests=False) + + def fix_tmux_layout(self, forced=True): + # pylint: disable=unused-argument + """Fix tmux layout based on cfg.hw.TMUX_LAYOUT.""" + try: + tmux.fix_layout(self.panes, self.layout, forced=forced) + except RuntimeError: + # Assuming self.panes changed while running + pass + + def fix_tmux_layout_loop(self): + """Fix tmux layout on a loop. + + NOTE: This should be called as a thread. + """ + while True: + self.fix_tmux_layout(forced=False) + std.sleep(1) + + def init_diags(self, menu): + """Initialize diagnostic pass.""" + + # Reset objects + self.disks.clear() + self.layout.clear() + self.layout.update(cfg.hw.TMUX_LAYOUT) + for test_data in self.tests.values(): + test_data['Objects'].clear() + + # Set log + self.log_dir = log.format_log_path() + self.log_dir = pathlib.Path( + f'{self.log_dir.parent}/' + f'Hardware-Diagnostics_{time.strftime("%Y-%m-%d_%H%M%S%z")}/' + ) + log.update_log_path( + dest_dir=self.log_dir, + dest_name='main', + keep_history=False, + timestamp=False, + ) + std.print_info('Starting Hardware Diagnostics') + + # Progress Pane + self.update_progress_pane() + tmux.respawn_pane( + pane_id=self.panes['Progress'], + watch_file=f'{self.log_dir}/progress.out', + ) + + # Add HW Objects + self.cpu = hw_obj.CpuRam() + self.disks = hw_obj.get_disks(skip_kits=True) + + # Add test objects + for name, details in menu.options.items(): + self.tests[name]['Enabled'] = details['Selected'] + if not details['Selected']: + continue + if 'CPU' in name: + # Create two Test objects which will both be used by cpu_mprime_test + # NOTE: Prime95 should be added first + test_mprime_obj = hw_obj.Test(dev=self.cpu, label='Prime95') + test_cooling_obj = hw_obj.Test(dev=self.cpu, label='Cooling') + self.cpu.tests[test_mprime_obj.label] = test_mprime_obj + self.cpu.tests[test_cooling_obj.label] = test_cooling_obj + self.tests[name]['Objects'].append(test_mprime_obj) + self.tests[name]['Objects'].append(test_cooling_obj) + elif 'Disk' in name: + for disk in self.disks: + test_obj = hw_obj.Test(dev=disk, label=disk.path.name) + disk.tests[name] = test_obj + self.tests[name]['Objects'].append(test_obj) + + # Run safety checks + self.disk_safety_checks(prep=True) + + def init_tmux(self): + """Initialize tmux layout.""" + tmux.kill_all_panes() + + # Top + self.panes['Top'] = tmux.split_window( + behind=True, + lines=2, + vertical=True, + text=f'{self.top_text}\nMain Menu', + ) + + # Started + self.panes['Started'] = tmux.split_window( + lines=cfg.hw.TMUX_SIDE_WIDTH, + target_id=self.panes['Top'], + text=std.color_string( + ['Started', time.strftime("%Y-%m-%d %H:%M %Z")], + ['BLUE', None], + sep='\n', + ), + ) + + # Progress + self.panes['Progress'] = 
tmux.split_window( + lines=cfg.hw.TMUX_SIDE_WIDTH, + text=' ', + ) + + def save_debug_reports(self): + """Save debug reports to disk.""" + LOG.info('Saving debug reports') + debug_dir = pathlib.Path(f'{self.log_dir}/debug') + if not debug_dir.exists(): + debug_dir.mkdir() + + # State (self) + with open(f'{debug_dir}/state.report', 'a') as _f: + _f.write('\n'.join(debug.generate_object_report(self))) + + # CPU/RAM + with open(f'{debug_dir}/cpu.report', 'a') as _f: + _f.write('\n'.join(debug.generate_object_report(self.cpu))) + _f.write('\n\n[Tests]') + for name, test in self.cpu.tests.items(): + _f.write(f'\n{name}:\n') + _f.write('\n'.join(debug.generate_object_report(test, indent=1))) + + # Disks + for disk in self.disks: + with open(f'{debug_dir}/disk_{disk.path.name}.report', 'a') as _f: + _f.write('\n'.join(debug.generate_object_report(disk))) + _f.write('\n\n[Tests]') + for name, test in disk.tests.items(): + _f.write(f'\n{name}:\n') + _f.write('\n'.join(debug.generate_object_report(test, indent=1))) + + def update_progress_pane(self): + """Update progress pane.""" + report = [] + width = cfg.hw.TMUX_SIDE_WIDTH + + for name, details in self.tests.items(): + if not details['Enabled']: + continue + + # Add test details + report.append(std.color_string(name, 'BLUE')) + for test_obj in details['Objects']: + report.append(std.color_string( + [test_obj.label, f'{test_obj.status:>{width-len(test_obj.label)}}'], + [None, STATUS_COLORS.get(test_obj.status, None)], + sep='', + )) + + # Add spacer + report.append(' ') + + # Write to progress file + out_path = pathlib.Path(f'{self.log_dir}/progress.out') + with open(out_path, 'w') as _f: + _f.write('\n'.join(report)) + + def update_top_pane(self, text): + """Update top pane with text.""" + tmux.respawn_pane(self.panes['Top'], text=f'{self.top_text}\n{text}') + + +# Functions +def audio_test(): + """Run an OS-specific audio test.""" + if PLATFORM == 'Linux': + audio_test_linux() + # TODO: Add tests for other OS + + +def audio_test_linux(): + """Run an audio test using amixer and speaker-test.""" + LOG.info('Audio Test') + + # Set volume + for source in ('Master', 'PCM'): + cmd = f'amixer -q set "{source}" 80% unmute'.split() + exe.run_program(cmd, check=False) + + # Run audio tests + for mode in ('pink', 'wav'): + cmd = f'speaker-test -c 2 -l 1 -t {mode}'.split() + exe.run_program(cmd, check=False, pipe=False) + + +def build_menu(cli_mode=False, quick_mode=False): + """Build main menu, returns wk.std.Menu.""" + menu = std.Menu(title=None) + + # Add actions, options, etc + for action in MENU_ACTIONS: + menu.add_action(action) + for action in MENU_ACTIONS_SECRET: + menu.add_action(action, {'Hidden': True}) + for option in MENU_OPTIONS: + menu.add_option(option, {'Selected': True}) + for toggle in MENU_TOGGLES: + menu.add_toggle(toggle, {'Selected': True}) + for name, targets in MENU_SETS.items(): + menu.add_set(name, {'Targets': targets}) + menu.actions['Start']['Separator'] = True + + # Update default selections for quick mode if necessary + if quick_mode: + for name in menu.options: + # Only select quick option(s) + menu.options[name]['Selected'] = name in MENU_OPTIONS_QUICK + + # Add CLI actions if necessary + if cli_mode or 'DISPLAY' not in os.environ: + menu.add_action('Reboot') + menu.add_action('Power Off') + + # Compatibility checks + if PLATFORM != 'Linux': + for name in ('Audio Test', 'Keyboard Test', 'Network Test'): + menu.actions[name]['Disabled'] = True + if PLATFORM not in ('Darwin', 'Linux'): + for name in ('Matrix', 'Tubes'): + 
menu.actions[name]['Disabled'] = True + + # Done + return menu + + +def calc_io_dd_values(dev_size): + """Calculate I/O benchmark dd values, returns dict. + + Calculations: + The minimum dev size is IO_GRAPH_WIDTH * IO_CHUNK_SIZE + (e.g. 1.25 GB for a width of 40 and a chunk size of 32MB) + + read_total is the area to be read in bytes + If the dev is < IO_MINIMUM_TEST_SIZE then it's the whole dev + Else it's the larger of IO_MINIMUM_TEST_SIZE or the alt test size + (determined by dev * IO_ALT_TEST_SIZE_FACTOR) + + read_chunks is the number of groups of IO_CHUNK_SIZE in test_obj.dev + This number is reduced to a multiple of IO_GRAPH_WIDTH in order + to allow for the data to be condensed cleanly + + read_blocks is the chunk size in number of blocks + (e.g. 64 if block size is 512KB and chunk size is 32MB + + skip_total is the number of IO_BLOCK_SIZE groups not tested + skip_blocks is the number of blocks to skip per IO_CHUNK_SIZE + skip_extra_rate is how often to add an additional skip block + This is needed to ensure an even testing across the dev + This is calculated by using the fractional amount left off + of the skip_blocks variable + """ + read_total = min(IO_MINIMUM_TEST_SIZE, dev_size) + read_total = max(read_total, dev_size*IO_ALT_TEST_SIZE_FACTOR) + read_chunks = int(read_total // IO_CHUNK_SIZE) + read_chunks -= read_chunks % IO_GRAPH_WIDTH + if read_chunks < IO_GRAPH_WIDTH: + raise DeviceTooSmallError + read_blocks = int(IO_CHUNK_SIZE / IO_BLOCK_SIZE) + read_total = read_chunks * IO_CHUNK_SIZE + skip_total = int((dev_size - read_total) // IO_BLOCK_SIZE) + skip_blocks = int((skip_total / read_chunks) // 1) + skip_extra_rate = 0 + try: + skip_extra_rate = 1 + int(1 / ((skip_total / read_chunks) % 1)) + except ZeroDivisionError: + # skip_extra_rate == 0 is fine + pass + + # Done + return { + 'Read Chunks': read_chunks, + 'Read Blocks': read_blocks, + 'Skip Blocks': skip_blocks, + 'Skip Extra': skip_extra_rate, + } + + +def check_cooling_results(test_obj, sensors): + """Check cooling results and update test_obj.""" + max_temp = sensors.cpu_max_temp() + + # Check temps + if not max_temp: + test_obj.set_status('Unknown') + elif max_temp >= cfg.hw.CPU_FAILURE_TEMP: + test_obj.failed = True + test_obj.set_status('Failed') + elif 'Aborted' not in test_obj.status: + test_obj.passed = True + test_obj.set_status('Passed') + + # Add temps to report + for line in sensors.generate_report( + 'Idle', 'Max', 'Cooldown', only_cpu=True): + test_obj.report.append(f' {line}') + + +def check_io_benchmark_results(test_obj, rate_list, graph_width): + """Generate colored report using rate_list, returns list of str.""" + avg_read = sum(rate_list) / len(rate_list) + min_read = min(rate_list) + max_read = max(rate_list) + if test_obj.dev.details['ssd']: + thresh_min = cfg.hw.THRESH_SSD_MIN + thresh_avg_high = cfg.hw.THRESH_SSD_AVG_HIGH + thresh_avg_low = cfg.hw.THRESH_SSD_AVG_LOW + else: + thresh_min = cfg.hw.THRESH_HDD_MIN + thresh_avg_high = cfg.hw.THRESH_HDD_AVG_HIGH + thresh_avg_low = cfg.hw.THRESH_HDD_AVG_LOW + + # Add horizontal graph to report + for line in graph.generate_horizontal_graph(rate_list, graph_width): + if not std.strip_colors(line).strip(): + # Skip empty lines + continue + test_obj.report.append(line) + + # Add read rates to report + test_obj.report.append( + f'Read speeds avg: {avg_read/(1000**2):3.1f}' + f' min: {min_read/(1000**2):3.1f}' + f' max: {max_read/(1000**2):3.1f}' + ) + + # Compare against thresholds + if min_read <= thresh_min and avg_read <= thresh_avg_high: + 
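# Classification sketch (the thresholds come from wk.cfg.hw and differ for
# SSDs and HDDs): the benchmark fails if the slowest chunk is at or below
# thresh_min while the average is no better than thresh_avg_high, or if the
# average alone is at or below thresh_avg_low; otherwise it passes. A single
# slow chunk on an otherwise fast drive therefore does not fail the test.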
test_obj.failed = True + elif avg_read <= thresh_avg_low: + test_obj.failed = True + else: + test_obj.passed = True + + # Set status + if test_obj.failed: + test_obj.set_status('Failed') + elif test_obj.passed: + test_obj.set_status('Passed') + else: + test_obj.set_status('Unknown') + + +def check_mprime_results(test_obj, working_dir): + """Check mprime log files and update test_obj.""" + passing_lines = {} + warning_lines = {} + + def _read_file(log_name): + """Read file and split into lines, returns list.""" + lines = [] + try: + with open(f'{working_dir}/{log_name}', 'r') as _f: + lines = _f.readlines() + except FileNotFoundError: + # File may be missing on older systems + lines = [] + + return lines + + # results.txt (check if failed) + for line in _read_file('results.txt'): + line = line.strip() + if re.search(r'(error|fail)', line, re.IGNORECASE): + warning_lines[line] = None + + # print.log (check if passed) + for line in _read_file('prime.log'): + line = line.strip() + match = re.search( + r'(completed.*(\d+) errors, (\d+) warnings)', line, re.IGNORECASE) + if match: + if int(match.group(2)) + int(match.group(3)) > 0: + # Errors and/or warnings encountered + warning_lines[match.group(1).capitalize()] = None + else: + # No errors/warnings + passing_lines[match.group(1).capitalize()] = None + + # Update status + if warning_lines: + test_obj.failed = True + test_obj.set_status('Failed') + elif passing_lines and 'Aborted' not in test_obj.status: + test_obj.passed = True + test_obj.set_status('Passed') + else: + test_obj.set_status('Unknown') + + # Update report + for line in passing_lines: + test_obj.report.append(f' {line}') + for line in warning_lines: + test_obj.report.append(std.color_string(f' {line}', 'YELLOW')) + if not (passing_lines or warning_lines): + test_obj.report.append(std.color_string(' Unknown result', 'YELLOW')) + + +def check_self_test_results(test_obj, aborted=False): + """Check SMART self-test results.""" + test_obj.report.append(std.color_string('Self-Test', 'BLUE')) + if test_obj.disabled or test_obj.status == 'Denied': + test_obj.report.append(std.color_string(f' {test_obj.status}', 'RED')) + elif test_obj.status == 'N/A' or not test_obj.dev.attributes: + test_obj.report.append(std.color_string(f' {test_obj.status}', 'YELLOW')) + else: + # Not updating SMART data here to preserve the test status for the report + # For instance if the test was aborted the report should inlcude the last + # known progress instead of just "was aborted buy host" + test_details = test_obj.dev.get_smart_self_test_details() + test_result = test_details.get('status', {}).get('string', 'Unknown') + test_obj.report.append(f' {test_result}') + if aborted and not (test_obj.passed or test_obj.failed): + test_obj.report.append(std.color_string(' Aborted', 'YELLOW')) + test_obj.set_status('Aborted') + elif test_obj.status == 'TimedOut': + test_obj.report.append(std.color_string(' TimedOut', 'YELLOW')) + test_obj.set_status('TimedOut') + else: + test_obj.failed = not test_obj.passed + if test_obj.failed: + test_obj.set_status('Failed') + + +def cpu_mprime_test(state, test_objects): + # pylint: disable=too-many-statements + """CPU & cooling check using Prime95.""" + LOG.info('CPU Test (Prime95)') + aborted = False + prime_log = pathlib.Path(f'{state.log_dir}/prime.log') + sensors_out = pathlib.Path(f'{state.log_dir}/sensors.out') + test_mprime_obj, test_cooling_obj = test_objects + + # Bail early + if test_cooling_obj.disabled or test_mprime_obj.disabled: + return + + # Prep + 
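# Overall flow of this test, for orientation: record idle temps, stress the
# CPU with Prime95 for cfg.hw.CPU_TEST_MINUTES while a background sensors
# monitor watches for critical temps (its thermal_action is set to interrupt
# mprime), then record cooldown temps and grade both the Prime95 log and the
# temperature data via check_mprime_results() and check_cooling_results().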
state.update_top_pane(test_mprime_obj.dev.description) + test_cooling_obj.set_status('Working') + test_mprime_obj.set_status('Working') + + # Start sensors monitor + sensors = hw_sensors.Sensors() + sensors.start_background_monitor( + sensors_out, + thermal_action=('killall', 'mprime', '-INT'), + ) + + # Create monitor and worker panes + state.update_progress_pane() + state.panes['Prime95'] = tmux.split_window( + lines=10, vertical=True, watch_file=prime_log) + if PLATFORM == 'Darwin': + state.panes['Temps'] = tmux.split_window( + behind=True, percent=80, vertical=True, cmd='./hw-sensors') + elif PLATFORM == 'Linux': + state.panes['Temps'] = tmux.split_window( + behind=True, percent=80, vertical=True, watch_file=sensors_out) + tmux.resize_pane(height=3) + state.panes['Current'] = '' + state.layout['Current'] = {'height': 3, 'Check': True} + + # Get idle temps + std.print_standard('Saving idle temps...') + sensors.save_average_temps(temp_label='Idle', seconds=5) + + # Stress CPU + std.print_info('Starting stress test') + set_apple_fan_speed('max') + proc_mprime = start_mprime(state.log_dir, prime_log) + + # Show countdown + print('') + try: + print_countdown(proc=proc_mprime, seconds=cfg.hw.CPU_TEST_MINUTES*60) + except KeyboardInterrupt: + aborted = True + + # Stop Prime95 + stop_mprime(proc_mprime) + + # Update progress if necessary + if sensors.cpu_reached_critical_temp() or aborted: + test_cooling_obj.set_status('Aborted') + test_mprime_obj.set_status('Aborted') + state.update_progress_pane() + + # Get cooldown temp + std.clear_screen() + std.print_standard('Letting CPU cooldown...') + std.sleep(5) + std.print_standard('Saving cooldown temps...') + sensors.save_average_temps(temp_label='Cooldown', seconds=5) + + # Check Prime95 results + test_mprime_obj.report.append(std.color_string('Prime95', 'BLUE')) + check_mprime_results(test_obj=test_mprime_obj, working_dir=state.log_dir) + + # Check Cooling results + test_cooling_obj.report.append(std.color_string('Temps', 'BLUE')) + check_cooling_results(test_obj=test_cooling_obj, sensors=sensors) + + # Cleanup + state.update_progress_pane() + sensors.stop_background_monitor() + state.panes.pop('Current', None) + tmux.kill_pane(state.panes.pop('Prime95', None)) + tmux.kill_pane(state.panes.pop('Temps', None)) + + # Done + if aborted: + raise std.GenericAbort('Aborted') + + +def disk_attribute_check(state, test_objects): + """Disk attribute check.""" + LOG.info('Disk Attribute Check') + for test in test_objects: + if not test.dev.attributes: + # No NVMe/SMART data + test.set_status('N/A') + continue + + if test.dev.check_attributes(): + test.set_status('Passed') + else: + test.set_status('Failed') + + # Done + state.update_progress_pane() + + +def disk_io_benchmark(state, test_objects, skip_usb=True): + # pylint: disable=too-many-statements + """Disk I/O benchmark using dd.""" + LOG.info('Disk I/O Benchmark (dd)') + aborted = False + + def _run_io_benchmark(test_obj, log_path): + """Run I/O benchmark and handle exceptions.""" + dev_path = test_obj.dev.path + if PLATFORM == 'Darwin': + # Use "RAW" disks under macOS + dev_path = dev_path.with_name(f'r{dev_path.name}') + offset = 0 + read_rates = [] + test_obj.report.append(std.color_string('I/O Benchmark', 'BLUE')) + + # Get dd values or bail + try: + dd_values = calc_io_dd_values(test_obj.dev.details['size']) + except DeviceTooSmallError: + test_obj.set_status('N/A') + test_obj.report.append( + std.color_string('Disk too small to test', 'YELLOW'), + ) + return + + # Run dd read tests + for _i in 
range(dd_values['Read Chunks']): + _i += 1 + + # Build cmd + skip = dd_values['Skip Blocks'] + if dd_values['Skip Extra'] and _i % dd_values['Skip Extra'] == 0: + skip += 1 + cmd = [ + 'sudo', 'dd', + f'bs={IO_BLOCK_SIZE}', + f'skip={offset+skip}', + f'count={dd_values["Read Blocks"]}', + f'if={dev_path}', + 'of=/dev/null', + ] + if PLATFORM == 'Linux': + cmd.append('iflag=direct') + + # Run and get read rate + try: + proc = exe.run_program( + cmd, + pipe=False, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + except PermissionError: + # Since we're using sudo we can't kill dd + # Assuming this happened during a CTRL+c + raise KeyboardInterrupt + match = IO_RATE_REGEX.search(proc.stdout) + if match: + read_rates.append( + int(match.group('bytes')) / float(match.group('seconds')), + ) + match.group(1) + + # Show progress + with open(log_path, 'a') as _f: + if _i % 5 == 0: + percent = (_i / dd_values['Read Chunks']) * 100 + _f.write(f' {graph.vertical_graph_line(percent, read_rates[-1])}\n') + + # Update offset + offset += dd_values['Read Blocks'] + skip + + # Check results + check_io_benchmark_results(test_obj, read_rates, IO_GRAPH_WIDTH) + + # Run benchmarks + state.update_top_pane( + f'Disk I/O Benchmark{"s" if len(test_objects) > 1 else ""}', + ) + state.panes['I/O Benchmark'] = tmux.split_window( + percent=75, + vertical=True, + text=' ', + ) + for test in test_objects: + if test.disabled: + # Skip + continue + + # Skip USB devices if requested + if skip_usb and test.dev.details['bus'] == 'USB': + test.set_status('Skipped') + continue + + # Start benchmark + if not aborted: + std.clear_screen() + std.print_report(test.dev.generate_report()) + test.set_status('Working') + test_log = f'{state.log_dir}/{test.dev.path.name}_benchmark.out' + tmux.respawn_pane( + state.panes['I/O Benchmark'], + watch_cmd='tail', + watch_file=test_log, + ) + state.update_progress_pane() + try: + _run_io_benchmark(test, test_log) + except KeyboardInterrupt: + aborted = True + except (subprocess.CalledProcessError, TypeError, ValueError) as err: + # Something went wrong + LOG.error('%s', err) + test.set_status('ERROR') + test.report.append(std.color_string(' Unknown Error', 'RED')) + + # Mark test(s) aborted if necessary + if aborted: + test.set_status('Aborted') + test.report.append(std.color_string(' Aborted', 'YELLOW')) + + # Update progress after each test + state.update_progress_pane() + + # Cleanup + state.update_progress_pane() + tmux.kill_pane(state.panes.pop('I/O Benchmark', None)) + + # Done + if aborted: + raise std.GenericAbort('Aborted') + + +def disk_self_test(state, test_objects): + # pylint: disable=too-many-statements + """Disk self-test if available.""" + LOG.info('Disk Self-Test(s)') + aborted = False + threads = [] + state.panes['SMART'] = [] + + def _run_self_test(test_obj, log_path): + """Run self-test and handle exceptions.""" + result = None + + try: + test_obj.passed = test_obj.dev.run_self_test(log_path) + except TimeoutError: + test_obj.failed = True + result = 'TimedOut' + except hw_obj.SMARTNotSupportedError: + result = 'N/A' + + # Set status + if result: + test_obj.set_status(result) + else: + if test_obj.failed: + test_obj.set_status('Failed') + elif test_obj.passed: + test_obj.set_status('Passed') + else: + test_obj.set_status('Unknown') + + # Run self-tests + state.update_top_pane( + f'Disk self-test{"s" if len(test_objects) > 1 else ""}', + ) + std.print_info(f'Starting self-test{"s" if len(test_objects) > 1 else ""}') + for test in reversed(test_objects): + if 
test.disabled: + # Skip + continue + + # Start thread + test.set_status('Working') + test_log = f'{state.log_dir}/{test.dev.path.name}_selftest.log' + threads.append(exe.start_thread(_run_self_test, args=(test, test_log))) + + # Show progress + if threads[-1].is_alive(): + state.panes['SMART'].append( + tmux.split_window(lines=3, vertical=True, watch_file=test_log), + ) + + # Wait for all tests to complete + state.update_progress_pane() + try: + while True: + if any([t.is_alive() for t in threads]): + std.sleep(1) + else: + break + except KeyboardInterrupt: + aborted = True + for test in test_objects: + test.dev.abort_self_test() + std.sleep(0.5) + + # Save report(s) + for test in test_objects: + check_self_test_results(test, aborted=aborted) + + # Cleanup + state.update_progress_pane() + for pane in state.panes['SMART']: + tmux.kill_pane(pane) + state.panes.pop('SMART', None) + + # Done + if aborted: + raise std.GenericAbort('Aborted') + + +def disk_surface_scan(state, test_objects): + # pylint: disable=too-many-statements + """Read-only disk surface scan using badblocks.""" + LOG.info('Disk Surface Scan (badblocks)') + aborted = False + threads = [] + state.panes['badblocks'] = [] + + def _run_surface_scan(test_obj, log_path): + """Run surface scan and handle exceptions.""" + block_size = '1024' + dev = test_obj.dev + test_obj.report.append(std.color_string('badblocks', 'BLUE')) + test_obj.set_status('Working') + + # Increase block size if necessary + if (dev.details['phy-sec'] == 4096 + or dev.details['size'] >= cfg.hw.BADBLOCKS_LARGE_DISK): + block_size = '4096' + + # Start scan + cmd = ['sudo', 'badblocks', '-sv', '-b', block_size, '-e', '1', dev.path] + with open(log_path, 'a') as _f: + size_str = std.bytes_to_string(dev.details["size"], use_binary=False) + _f.write( + std.color_string( + ['[', dev.path.name, ' ', size_str, ']\n'], + [None, 'BLUE', None, 'CYAN', None], + sep='', + ), + ) + _f.flush() + exe.run_program( + cmd, + check=False, + pipe=False, + stderr=subprocess.STDOUT, + stdout=_f, + ) + + # Check results + with open(log_path, 'r') as _f: + for line in _f.readlines(): + line = std.strip_colors(line.strip()) + if not line or line.startswith('Checking') or line.startswith('['): + # Skip + continue + match = BADBLOCKS_REGEX.search(line) + if match: + if all([s == '0' for s in match.groups()]): + test_obj.passed = True + test_obj.report.append(f' {line}') + test_obj.set_status('Passed') + else: + test_obj.failed = True + test_obj.report.append(f' {std.color_string(line, "YELLOW")}') + test_obj.set_status('Failed') + else: + test_obj.report.append(f' {std.color_string(line, "YELLOW")}') + if not (test_obj.passed or test_obj.failed): + test_obj.set_status('Unknown') + + # Run surface scans + state.update_top_pane( + f'Disk Surface Scan{"s" if len(test_objects) > 1 else ""}', + ) + std.print_info( + f'Starting disk surface scan{"s" if len(test_objects) > 1 else ""}', + ) + for test in reversed(test_objects): + if test.disabled: + # Skip + continue + + # Start thread + test_log = f'{state.log_dir}/{test.dev.path.name}_badblocks.log' + threads.append(exe.start_thread(_run_surface_scan, args=(test, test_log))) + + # Show progress + if threads[-1].is_alive(): + state.panes['badblocks'].append( + tmux.split_window( + lines=5, + vertical=True, + watch_cmd='tail', + watch_file=test_log, + ), + ) + + # Wait for all tests to complete + try: + while True: + if any([t.is_alive() for t in threads]): + state.update_progress_pane() + std.sleep(5) + else: + break + except KeyboardInterrupt: + 
aborted = True + std.sleep(0.5) + # Handle aborts + for test in test_objects: + if not (test.disabled or test.passed or test.failed): + test.set_status('Aborted') + test.report.append(std.color_string(' Aborted', 'YELLOW')) + + # Cleanup + state.update_progress_pane() + for pane in state.panes['badblocks']: + tmux.kill_pane(pane) + state.panes.pop('badblocks', None) + + # Done + if aborted: + raise std.GenericAbort('Aborted') + + +def keyboard_test(): + """Test keyboard using xev.""" + LOG.info('Keyboard Test (xev)') + cmd = ['xev', '-event', 'keyboard'] + exe.run_program(cmd, check=False, pipe=False) + + +def main(): + # pylint: disable=too-many-branches + """Main function for hardware diagnostics.""" + args = docopt(DOCSTRING) + log.update_log_path(dest_name='Hardware-Diagnostics', timestamp=True) + + # Safety check + if 'TMUX' not in os.environ: + LOG.error('tmux session not found') + raise RuntimeError('tmux session not found') + + # Init + atexit.register(tmux.kill_all_panes) + menu = build_menu(cli_mode=args['--cli'], quick_mode=args['--quick']) + state = State() + + # Quick Mode + if args['--quick']: + run_diags(state, menu, quick_mode=True) + return + + # Show menu + while True: + action = None + selection = menu.advanced_select() + + # Set action + if 'Audio Test' in selection: + action = audio_test + elif 'Keyboard Test' in selection: + action = keyboard_test + elif 'Network Test' in selection: + action = network_test + + # Run simple test + if action: + state.update_top_pane(selection[0]) + try: + action() + except KeyboardInterrupt: + std.print_warning('Aborted.') + std.print_standard('') + std.pause('Press Enter to return to main menu...') + + # Secrets + if 'Matrix' in selection: + screensaver('matrix') + elif 'Tubes' in selection: + # Tubes ≈≈ Pipes? 
+ screensaver('pipes') + + # Quit + if 'Reboot' in selection: + cmd = ['/usr/local/bin/wk-power-command', 'reboot'] + exe.run_program(cmd, check=False) + elif 'Power Off' in selection: + cmd = ['/usr/local/bin/wk-power-command', 'poweroff'] + exe.run_program(cmd, check=False) + elif 'Quit' in selection: + break + + # Start diagnostics + if 'Start' in selection: + run_diags(state, menu, quick_mode=False) + + # Reset top pane + state.update_top_pane('Main Menu') + + +def network_test(): + """Run network tests.""" + LOG.info('Network Test') + try_and_print = std.TryAndPrint() + result = try_and_print.run( + message='Network connection...', + function=net.connected_to_private_network, + msg_good='OK', + raise_on_error=True, + ) + + # Bail if not connected + if result['Failed']: + std.print_warning('Please connect to a network and try again') + std.pause('Press Enter to return to main menu...') + return + + # Show IP address(es) + net.show_valid_addresses() + + # Ping tests + try_and_print.run( + 'Internet connection...', net.ping, msg_good='OK', addr='8.8.8.8') + try_and_print.run( + 'DNS resolution...', net.ping, msg_good='OK', addr='google.com') + + # Speedtest + try_and_print.run('Speedtest...', net.speedtest) + + # Done + std.pause('Press Enter to return to main menu...') + + +def print_countdown(proc, seconds): + """Print countdown to screen while proc is alive.""" + for i in range(seconds): + sec_left = (seconds - i) % 60 + min_left = int((seconds - i) / 60) + + out_str = '\r ' + if min_left: + out_str += f'{min_left} minute{"s" if min_left != 1 else ""}, ' + out_str += f'{sec_left} second{"s" if sec_left != 1 else ""}' + out_str += ' remaining' + + print(f'{out_str:<42}', end='', flush=True) + try: + proc.wait(1) + except subprocess.TimeoutExpired: + # proc still going, continue + pass + if proc.poll() is not None: + # proc exited, stop countdown + break + + # Done + print('') + + +def run_diags(state, menu, quick_mode=False): + """Run selected diagnostics.""" + aborted = False + atexit.register(state.save_debug_reports) + state.init_diags(menu) + + # Just return if no tests were selected + if not any([details['Enabled'] for details in state.tests.values()]): + std.print_warning('No tests selected?') + std.pause() + return + + # Run tests + for name, details in state.tests.items(): + if not details['Enabled']: + # Skip disabled tests + continue + + # Run test(s) + function = details['Function'] + args = [details['Objects']] + if name == 'Disk I/O Benchmark': + args.append(menu.toggles['Skip USB Benchmarks']['Selected']) + std.clear_screen() + try: + function(state, *args) + except (KeyboardInterrupt, std.GenericAbort): + aborted = True + state.abort_testing() + state.update_progress_pane() + break + + # Run safety checks + if name.startswith('Disk'): + state.disk_safety_checks(wait_for_self_tests=name != 'Disk Attributes') + + # Handle aborts + if aborted: + for details in state.tests.values(): + for test_obj in details['Objects']: + if test_obj.status == 'Pending': + test_obj.set_status('Aborted') + + # Show results + show_results(state) + + # Done + state.save_debug_reports() + atexit.unregister(state.save_debug_reports) + if quick_mode: + std.pause('Press Enter to exit...') + else: + std.pause('Press Enter to return to main menu...') + + +def screensaver(name): + """Show screensaver""" + LOG.info('Screensaver (%s)', name) + if name == 'matrix': + cmd = ['cmatrix', '-abs'] + elif name == 'pipes': + cmd = [ + 'pipes' if PLATFORM == 'Linux' else 'pipes.sh', + '-t', '0', + '-t', '1', + 
'-t', '2', + '-t', '3', + '-t', '5', + '-R', '-r', '4000', + ] + + # Switch pane to fullscreen and start screensaver + tmux.zoom_pane() + exe.run_program(cmd, check=False, pipe=False) + tmux.zoom_pane() + + +def set_apple_fan_speed(speed): + """Set Apple fan speed.""" + cmd = None + + # Check + if speed not in ('auto', 'max'): + raise RuntimeError(f'Invalid speed {speed}') + + # Set cmd + if PLATFORM == 'Linux': + cmd = ['apple-fans', speed] + #TODO: Add method for use under macOS + + # Run cmd + if cmd: + exe.run_program(cmd, check=False) + + +def show_results(state): + """Show test results by device.""" + std.sleep(0.5) + std.clear_screen() + state.update_top_pane('Results') + + # CPU Tests + cpu_tests_enabled = [data['Enabled'] for name, data in state.tests.items() + if name.startswith('CPU')] + if any(cpu_tests_enabled): + std.print_success('CPU:') + std.print_report(state.cpu.generate_report()) + std.print_standard(' ') + + # Disk Tests + disk_tests_enabled = [data['Enabled'] for name, data in state.tests.items() + if name.startswith('Disk')] + if any(disk_tests_enabled): + std.print_success(f'Disk{"s" if len(state.disks) > 1 else ""}:') + for disk in state.disks: + std.print_report(disk.generate_report()) + std.print_standard(' ') + if not state.disks: + std.print_warning('No devices') + std.print_standard(' ') + + +def start_mprime(working_dir, log_path): + """Start mprime and save filtered output to log, returns Popen object.""" + set_apple_fan_speed('max') + proc_mprime = subprocess.Popen( + ['mprime', '-t'], + cwd=working_dir, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + ) + proc_grep = subprocess.Popen( + 'grep --ignore-case --invert-match --line-buffered stress.txt'.split(), + stdin=proc_mprime.stdout, + stdout=subprocess.PIPE, + ) + proc_mprime.stdout.close() + save_nsbr = exe.NonBlockingStreamReader(proc_grep.stdout) + exe.start_thread( + save_nsbr.save_to_file, + args=(proc_grep, log_path), + ) + + # Return objects + return proc_mprime + + +def stop_mprime(proc_mprime): + """Stop mprime gracefully, then forcefully as needed.""" + proc_mprime.terminate() + try: + proc_mprime.wait(timeout=5) + except subprocess.TimeoutExpired: + proc_mprime.kill() + set_apple_fan_speed('auto') + + +if __name__ == '__main__': + print("This file is not meant to be called directly.") diff --git a/scripts/wk/hw/obj.py b/scripts/wk/hw/obj.py new file mode 100644 index 00000000..205d857a --- /dev/null +++ b/scripts/wk/hw/obj.py @@ -0,0 +1,844 @@ +"""WizardKit: Hardware objects (mostly)""" +# vim: sts=2 sw=2 ts=2 + +import logging +import pathlib +import plistlib +import re + +from collections import OrderedDict + +from wk.cfg.hw import ( + ATTRIBUTE_COLORS, + KEY_NVME, + KEY_SMART, + KNOWN_DISK_ATTRIBUTES, + KNOWN_DISK_MODELS, + KNOWN_RAM_VENDOR_IDS, + REGEX_POWER_ON_TIME, + ) +from wk.cfg.main import KIT_NAME_SHORT +from wk.exe import get_json_from_command, run_program +from wk.std import ( + PLATFORM, + bytes_to_string, + color_string, + sleep, + string_to_bytes, + ) + + +# STATIC VARIABLES +LOG = logging.getLogger(__name__) +NVME_WARNING_KEYS = ( + 'spare_below_threshold', + 'reliability_degraded', + 'volatile_memory_backup_failed', + ) +WK_LABEL_REGEX = re.compile( + fr'{KIT_NAME_SHORT}_(LINUX|UFD)', + re.IGNORECASE, + ) + + +# Exception Classes +class CriticalHardwareError(RuntimeError): + """Exception used for critical hardware failures.""" + +class SMARTNotSupportedError(TypeError): + """Exception used for disks lacking SMART support.""" + +class 
SMARTSelfTestInProgressError(RuntimeError): + """Exception used when a SMART self-test is in progress.""" + + +# Classes +class BaseObj(): + """Base object for tracking device data.""" + def __init__(self): + self.tests = OrderedDict() + + def all_tests_passed(self): + """Check if all tests passed, returns bool.""" + return all([results.passed for results in self.tests.values()]) + + def any_test_failed(self): + """Check if any test failed, returns bool.""" + return any([results.failed for results in self.tests.values()]) + + +class CpuRam(BaseObj): + """Object for tracking CPU & RAM specific data.""" + def __init__(self): + super().__init__() + self.description = 'Unknown' + self.details = {} + self.ram_total = 'Unknown' + self.ram_dimms = [] + self.tests = OrderedDict() + + # Update details + self.get_cpu_details() + self.get_ram_details() + + def generate_report(self): + """Generate CPU & RAM report, returns list.""" + report = [] + report.append(color_string('Device', 'BLUE')) + report.append(f' {self.description}') + + # Include RAM details + report.append(color_string('RAM', 'BLUE')) + report.append(f' {self.ram_total} ({", ".join(self.ram_dimms)})') + + # Tests + for test in self.tests.values(): + report.extend(test.report) + + return report + + def get_cpu_details(self): + """Get CPU details using OS specific methods.""" + if PLATFORM == 'Darwin': + cmd = 'sysctl -n machdep.cpu.brand_string'.split() + proc = run_program(cmd, check=False) + self.description = re.sub(r'\s+', ' ', proc.stdout.strip()) + elif PLATFORM == 'Linux': + cmd = ['lscpu', '--json'] + json_data = get_json_from_command(cmd) + for line in json_data.get('lscpu', [{}]): + _field = line.get('field', '').replace(':', '') + _data = line.get('data', '') + if not (_field or _data): + # Skip + continue + self.details[_field] = _data + + self.description = self.details.get('Model name', '') + + # Replace empty description + if not self.description: + self.description = 'Unknown CPU' + + def get_ram_details(self): + """Get RAM details using OS specific methods.""" + if PLATFORM == 'Darwin': + dimm_list = get_ram_list_macos() + elif PLATFORM == 'Linux': + dimm_list = get_ram_list_linux() + + details = {'Total': 0} + for dimm_details in dimm_list: + size, manufacturer = dimm_details + if size <= 0: + # Skip empty DIMMs + continue + description = f'{bytes_to_string(size)} {manufacturer}' + details['Total'] += size + if description in details: + details[description] += 1 + else: + details[description] = 1 + + # Save details + self.ram_total = bytes_to_string(details.pop('Total', 0)) + self.ram_dimms = [ + f'{count}x {desc}' for desc, count in sorted(details.items()) + ] + + +class Disk(BaseObj): + """Object for tracking disk specific data.""" + def __init__(self, path): + super().__init__() + self.attributes = {} + self.description = 'Unknown' + self.details = {} + self.notes = [] + self.path = pathlib.Path(path).resolve() + self.smartctl = {} + self.tests = OrderedDict() + + # Update details + self.get_details() + self.enable_smart() + self.update_smart_details() + if not self.is_4k_aligned(): + self.add_note('One or more partitions are not 4K aligned', 'YELLOW') + + def abort_self_test(self): + """Abort currently running non-captive self-test.""" + cmd = ['sudo', 'smartctl', '--abort', self.path] + run_program(cmd, check=False) + + def add_note(self, note, color=None): + """Add note that will be included in the disk report.""" + if color: + note = color_string(note, color) + if note not in self.notes: + 
self.notes.append(note) + self.notes.sort() + + def check_attributes(self, only_blocking=False): + """Check if any known attributes are failing, returns bool.""" + attributes_ok = True + known_attributes = get_known_disk_attributes(self.details['model']) + for attr, value in self.attributes.items(): + # Skip unknown attributes + if attr not in known_attributes: + continue + + # Get thresholds + blocking_attribute = known_attributes[attr].get('Blocking', False) + err_thresh = known_attributes[attr].get('Error', None) + max_thresh = known_attributes[attr].get('Maximum', None) + if not max_thresh: + max_thresh = float('inf') + + # Skip non-blocking attributes if necessary + if only_blocking and not blocking_attribute: + continue + + # Skip informational attributes + if not err_thresh: + continue + + # Check attribute + if err_thresh <= value['raw'] < max_thresh: + attributes_ok = False + + # Done + return attributes_ok + + def disable_disk_tests(self): + """Disable all tests.""" + LOG.warning('Disabling all tests for: %s', self.path) + for test in self.tests.values(): + if test.status in ('Pending', 'Working'): + test.set_status('Denied') + test.disabled = True + + def enable_smart(self): + """Try enabling SMART for this disk.""" + cmd = [ + 'sudo', + 'smartctl', + '--tolerance=permissive', + '--smart=on', + self.path, + ] + run_program(cmd, check=False) + + def generate_attribute_report(self): + """Generate attribute report, returns list.""" + known_attributes = get_known_disk_attributes(self.details['model']) + report = [] + for attr, value in sorted(self.attributes.items()): + note = '' + value_color = 'GREEN' + + # Skip attributes not in our list + if attr not in known_attributes: + continue + + # Check for attribute note + note = known_attributes[attr].get('Note', '') + + # ID / Name + label = f'{attr:>3}' + if isinstance(attr, int): + # Assuming SMART, include hex ID and name + label += f' / {str(hex(attr))[2:].upper():0>2}: {value["name"]}' + label = f' {label.replace("_", " "):38}' + + # Value color + for threshold, color in ATTRIBUTE_COLORS: + threshold_val = known_attributes[attr].get(threshold, None) + if threshold_val and value['raw'] >= threshold_val: + value_color = color + if threshold == 'Error': + note = '(failed)' + elif threshold == 'Maximum': + note = '(invalid?)' + + # 199/C7 warning + if str(attr) == '199' and value['raw'] > 0: + note = '(bad cable?)' + + # Build colored string and append to report + line = color_string( + [label, value['raw_str'], note], + [None, value_color, 'YELLOW'], + ) + report.append(line) + + # Done + return report + + def generate_report(self, header=True): + """Generate Disk report, returns list.""" + report = [] + if header: + report.append(color_string(f'Device ({self.path.name})', 'BLUE')) + report.append(f' {self.description}') + + # Attributes + if self.attributes: + if header: + report.append(color_string('Attributes', 'BLUE')) + report.extend(self.generate_attribute_report()) + + # Notes + if self.notes: + report.append(color_string('Notes', 'BLUE')) + for note in self.notes: + report.append(f' {note}') + + # Tests + for test in self.tests.values(): + report.extend(test.report) + + return report + + def get_details(self): + """Get disk details using OS specific methods. + + Required details default to generic descriptions + and are converted to the correct type. 
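    For example, after this method runs, self.description follows the
    '{size} ({bus}) {model} {serial}' pattern used at the end of this
    method, e.g. '500 GB (NVMe) ExampleModel ExampleSerial' (values here
    are hypothetical).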
+ """ + if PLATFORM == 'Darwin': + self.details = get_disk_details_macos(self.path) + elif PLATFORM == 'Linux': + self.details = get_disk_details_linux(self.path) + + # Set necessary details + self.details['bus'] = str(self.details.get('bus', '???')).upper() + self.details['bus'] = self.details['bus'].replace('IMAGE', 'Image') + self.details['bus'] = self.details['bus'].replace('NVME', 'NVMe') + self.details['log-sec'] = self.details.get('log-sec', 512) + self.details['model'] = self.details.get('model', 'Unknown Model') + self.details['name'] = self.details.get('name', self.path) + self.details['phy-sec'] = self.details.get('phy-sec', 512) + self.details['serial'] = self.details.get('serial', 'Unknown Serial') + self.details['size'] = self.details.get('size', -1) + self.details['ssd'] = self.details.get('ssd', False) + + # Ensure certain attributes types + for attr in ['bus', 'model', 'name', 'serial']: + if not isinstance(self.details[attr], str): + self.details[attr] = str(self.details[attr]) + for attr in ['phy-sec', 'size']: + if not isinstance(self.details[attr], int): + try: + self.details[attr] = int(self.details[attr]) + except (TypeError, ValueError): + LOG.error('Invalid disk %s: %s', attr, self.details[attr]) + self.details[attr] = -1 + + # Set description + self.description = '{size_str} ({bus}) {model} {serial}'.format( + size_str=bytes_to_string(self.details['size'], use_binary=False), + **self.details, + ) + + def get_labels(self): + """Build list of labels for this disk, returns list.""" + labels = [] + + # Add all labels from lsblk + for disk in [self.details, *self.details.get('children', [])]: + labels.append(disk.get('label', '')) + labels.append(disk.get('partlabel', '')) + + # Remove empty labels + labels = [str(label) for label in labels if label] + + # Done + return labels + + def get_smart_self_test_details(self): + """Shorthand to get deeply nested self-test details, returns dict.""" + details = {} + try: + details = self.smartctl['ata_smart_data']['self_test'] + except (KeyError, TypeError): + # Assuming disk lacks SMART support, ignore and return empty dict. 
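The try/except just above is a compact way to reach smartctl's nested JSON; for dict-shaped data an equivalent spelling with chained .get() calls would look roughly like this (sketch only, not a drop-in replacement):

def self_test_details(smartctl_data):
  """Chained-lookup version of the shorthand above (sketch only)."""
  ata = smartctl_data.get('ata_smart_data') or {}
  return ata.get('self_test') or {}

assert self_test_details({}) == {}
assert self_test_details(
  {'ata_smart_data': {'self_test': {'status': {'passed': True}}}},
  )['status']['passed']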
+ pass + + # Done + return details + + def is_4k_aligned(self): + """Check that all disk partitions are aligned, returns bool.""" + aligned = True + if PLATFORM == 'Darwin': + aligned = is_4k_aligned_macos(self.details) + elif PLATFORM == 'Linux': + aligned = is_4k_aligned_linux(self.path, self.details['phy-sec']) + #TODO: Add checks for other OS + + return aligned + + def safety_checks(self): + """Run safety checks and raise an exception if necessary.""" + blocking_event_encountered = False + self.update_smart_details() + + # Attributes + if not self.check_attributes(only_blocking=True): + blocking_event_encountered = True + LOG.error('%s: Blocked for failing attribute(s)', self.path) + + # NVMe status + nvme_status = self.smartctl.get('smart_status', {}).get('nvme', {}) + if nvme_status.get('media_read_only', False): + blocking_event_encountered = True + msg = 'Media has been placed in read-only mode' + self.add_note(msg, 'RED') + LOG.error('%s %s', self.path, msg) + for key in NVME_WARNING_KEYS: + if nvme_status.get(key, False): + msg = key.replace('_', ' ') + self.add_note(msg, 'YELLOW') + LOG.warning('%s %s', self.path, msg) + + # SMART overall assessment + smart_passed = True + try: + smart_passed = self.smartctl['smart_status']['passed'] + except (KeyError, TypeError): + # Assuming disk doesn't support SMART overall assessment + pass + if not smart_passed: + blocking_event_encountered = True + msg = 'SMART overall self-assessment: Failed' + self.add_note(msg, 'RED') + LOG.error('%s %s', self.path, msg) + + # Raise blocking exception if necessary + if blocking_event_encountered: + raise CriticalHardwareError(f'Critical error(s) for: {self.path}') + + # SMART self-test status + test_details = self.get_smart_self_test_details() + if 'remaining_percent' in test_details.get('status', ''): + msg = f'SMART self-test in progress for: {self.path}' + LOG.error(msg) + raise SMARTSelfTestInProgressError(msg) + + def run_self_test(self, log_path): + """Run disk self-test and check if it passed, returns bool. + + NOTE: This function is here to reserve a place for future + NVMe self-tests announced in NVMe spec v1.3. + """ + result = self.run_smart_self_test(log_path) + return result + + def run_smart_self_test(self, log_path): + """Run SMART self-test and check if it passed, returns bool. + + NOTE: An exception will be raised if the disk lacks SMART support. + """ + finished = False + result = None + started = False + status_str = 'Starting self-test...' 
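The rest of run_smart_self_test() is essentially a poll-until-done loop around smartctl; stripped of the SMART specifics, the pattern looks roughly like this (poll_fn, the status keys, and the interval are placeholders, not part of this module):

import time

def poll_until_done(poll_fn, timeout_minutes, interval=5):
  """Generic polling sketch: call poll_fn until it reports completion."""
  deadline = time.monotonic() + timeout_minutes * 60
  while time.monotonic() < deadline:
    status = poll_fn()  # e.g. re-read the SMART self-test status
    if status.get('finished'):
      return status.get('passed', False)
    time.sleep(interval)
  raise TimeoutError('Self-test did not finish in time')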
+ test_details = self.get_smart_self_test_details() + test_minutes = 15 + + # Check if disk supports self-tests + if not test_details: + raise SMARTNotSupportedError( + f'SMART self-test not supported for {self.path}') + + # Get real test length + test_minutes = test_details.get('polling_minutes', {}).get('short', 5) + test_minutes = int(test_minutes) + 10 + + # Start test + cmd = [ + 'sudo', + 'smartctl', + '--tolerance=normal', + '--test=short', + self.path, + ] + run_program(cmd, check=False) + + # Monitor progress (in five second intervals) + for _i in range(int(test_minutes*60/5)): + sleep(5) + + # Update status + self.update_smart_details() + test_details = self.get_smart_self_test_details() + + # Check test progress + if started: + status_str = test_details.get('status', {}).get('string', 'Unknown') + status_str = status_str.capitalize() + + # Update log + with open(log_path, 'w') as _f: + _f.write(f'SMART self-test status for {self.path}:\n {status_str}') + + # Check if finished + if 'remaining_percent' not in test_details['status']: + finished = True + break + + elif 'remaining_percent' in test_details['status']: + started = True + + # Check result + if finished: + result = test_details.get('status', {}).get('passed', False) + elif started: + raise TimeoutError(f'SMART self-test timed out for {self.path}') + + # Done + return result + + def update_smart_details(self): + """Update SMART details via smartctl.""" + self.attributes = {} + cmd = [ + 'sudo', + 'smartctl', + '--tolerance=verypermissive', + '--all', + '--json', + self.path, + ] + self.smartctl = get_json_from_command(cmd, check=False) + + # Check for attributes + if KEY_NVME in self.smartctl: + for name, value in self.smartctl[KEY_NVME].items(): + try: + self.attributes[name] = { + 'name': name, + 'raw': int(value), + 'raw_str': str(value), + } + except ValueError: + # Ignoring invalid attribute + LOG.error('Invalid NVMe attribute: %s %s', name, value) + elif KEY_SMART in self.smartctl: + for attribute in self.smartctl[KEY_SMART].get('table', {}): + try: + _id = int(attribute['id']) + except (KeyError, ValueError): + # Ignoring invalid attribute + LOG.error('Invalid SMART attribute: %s', attribute) + continue + name = str(attribute.get('name', 'Unknown')).replace('_', ' ').title() + raw = int(attribute.get('raw', {}).get('value', -1)) + raw_str = attribute.get('raw', {}).get('string', 'Unknown') + + # Fix power-on time + match = REGEX_POWER_ON_TIME.match(raw_str) + if _id == 9 and match: + raw = int(match.group(1)) + + # Add to dict + self.attributes[_id] = { + 'name': name, 'raw': raw, 'raw_str': raw_str} + + # Add note if necessary + if not self.attributes: + self.add_note('No NVMe or SMART data available', 'YELLOW') + + +class Test(): + # pylint: disable=too-few-public-methods + """Object for tracking test specific data.""" + def __init__(self, dev, label): + self.dev = dev + self.disabled = False + self.failed = False + self.label = label + self.passed = False + self.report = [] + self.status = 'Pending' + + def set_status(self, status): + """Update status string.""" + if self.disabled: + # Don't change status if disabled + return + + self.status = status + + +# Functions +def get_disk_details_linux(path): + """Get disk details using lsblk, returns dict.""" + cmd = ['lsblk', '--bytes', '--json', '--output-all', '--paths', path] + json_data = get_json_from_command(cmd, check=False) + details = json_data.get('blockdevices', [{}])[0] + + # Fix details + for dev in [details, *details.get('children', [])]: + dev['bus'] = 
dev.pop('tran', '???') + dev['parent'] = dev.pop('pkname', None) + dev['ssd'] = not dev.pop('rota', True) + if 'loop' in str(path) and dev['bus'] is None: + dev['bus'] = 'Image' + dev['model'] = '' + dev['serial'] = '' + + # Done + return details + + +def get_disk_details_macos(path): + """Get disk details using diskutil, returns dict.""" + details = {} + + # Get "list" details + cmd = ['diskutil', 'list', '-plist', path] + proc = run_program(cmd, check=False, encoding=None, errors=None) + try: + plist_data = plistlib.loads(proc.stdout) + except (TypeError, ValueError): + # Invalid / corrupt plist data? return empty dict to avoid crash + LOG.error('Failed to get diskutil list for %s', path) + return details + + # Parse "list" details + details = plist_data.get('AllDisksAndPartitions', [{}])[0] + details['children'] = details.pop('Partitions', []) + details['path'] = path + for child in details['children']: + child['path'] = path.with_name(child.get('DeviceIdentifier', 'null')) + + # Get "info" details + for dev in [details, *details['children']]: + cmd = ['diskutil', 'info', '-plist', dev['path']] + proc = run_program(cmd, check=False, encoding=None, errors=None) + try: + plist_data = plistlib.loads(proc.stdout) + except (TypeError, ValueError): + LOG.error('Failed to get diskutil info for %s', path) + continue #Skip + + # Parse "info" details + dev.update(plist_data) + dev['bus'] = dev.pop('BusProtocol', '???') + dev['fstype'] = dev.pop('FilesystemType', '') + dev['label'] = dev.pop('VolumeName', '') + dev['model'] = dev.pop('MediaName', 'Unknown') + dev['mountpoint'] = dev.pop('MountPoint', '') + dev['phy-sec'] = dev.pop('DeviceBlockSize', 512) + dev['serial'] = get_disk_serial_macos(dev['path']) + dev['size'] = dev.pop('Size', -1) + dev['ssd'] = dev.pop('SolidState', False) + dev['vendor'] = '' + if not dev.get('WholeDisk', True): + dev['parent'] = dev.pop('ParentWholeDisk', None) + + # Done + return details + + +def get_disk_serial_macos(path): + """Get disk serial using system_profiler, returns str.""" + cmd = ['sudo', 'smartctl', '--info', '--json', path] + smart_info = get_json_from_command(cmd) + return smart_info.get('serial_number', 'Unknown Serial') + + +def get_disks(skip_kits=False): + """Get disks using OS-specific methods, returns list.""" + disks = [] + if PLATFORM == 'Darwin': + disks = get_disks_macos() + elif PLATFORM == 'Linux': + disks = get_disks_linux() + + # Skip WK disks + if skip_kits: + disks = [ + disk_obj for disk_obj in disks + if not any( + [WK_LABEL_REGEX.search(label) for label in disk_obj.get_labels()] + ) + ] + + # Done + return disks + + +def get_disks_linux(): + """Get disks via lsblk, returns list.""" + cmd = ['lsblk', '--json', '--nodeps', '--paths'] + disks = [] + + # Add valid disks + json_data = get_json_from_command(cmd) + for disk in json_data.get('blockdevices', []): + disk_obj = Disk(disk['name']) + + # Skip loopback devices, optical devices, etc + if disk_obj.details['type'] != 'disk': + continue + + # Add disk + disks.append(disk_obj) + + # Done + return disks + + +def get_disks_macos(): + """Get disks via diskutil, returns list.""" + cmd = ['diskutil', 'list', '-plist', 'physical'] + disks = [] + + # Get info from diskutil + proc = run_program(cmd, encoding=None, errors=None) + try: + plist_data = plistlib.loads(proc.stdout) + except (TypeError, ValueError): + # Invalid / corrupt plist data? 
return empty list to avoid crash + LOG.error('Failed to get diskutil list') + return disks + + # Add valid disks + for disk in plist_data['WholeDisks']: + disks.append(Disk(f'/dev/{disk}')) + + # Done + return disks + + +def get_known_disk_attributes(model): + """Get known NVMe/SMART attributes (model specific), returns str.""" + known_attributes = KNOWN_DISK_ATTRIBUTES.copy() + + # Apply model-specific data + for regex, data in KNOWN_DISK_MODELS.items(): + if re.search(regex, model): + for attr, thresholds in data.items(): + if attr in known_attributes: + known_attributes[attr].update(thresholds) + else: + known_attributes[attr] = thresholds + + # Done + return known_attributes + + +def get_ram_list_linux(): + """Get RAM list using dmidecode.""" + cmd = ['sudo', 'dmidecode', '--type', 'memory'] + dimm_list = [] + manufacturer = 'Unknown' + size = 0 + + # Get DMI data + proc = run_program(cmd) + dmi_data = proc.stdout.splitlines() + + # Parse data + for line in dmi_data: + line = line.strip() + if line == 'Memory Device': + # Reset vars + manufacturer = 'Unknown' + size = 0 + elif line.startswith('Size:'): + size = line.replace('Size: ', '') + try: + size = string_to_bytes(size, assume_binary=True) + except ValueError: + # Assuming empty module + size = 0 + elif line.startswith('Manufacturer:'): + manufacturer = line.replace('Manufacturer: ', '') + dimm_list.append([size, manufacturer]) + + # Save details + return dimm_list + + +def get_ram_list_macos(): + """Get RAM list using system_profiler.""" + dimm_list = [] + + # Get and parse plist data + cmd = [ + 'system_profiler', + '-xml', + 'SPMemoryDataType', + ] + proc = run_program(cmd, check=False, encoding=None, errors=None) + try: + plist_data = plistlib.loads(proc.stdout) + except (TypeError, ValueError): + # Ignore and return an empty list + return dimm_list + + # Check DIMM data + dimm_details = plist_data[0].get('_items', [{}])[0].get('_items', []) + for dimm in dimm_details: + manufacturer = dimm.get('dimm_manufacturer', None) + manufacturer = KNOWN_RAM_VENDOR_IDS.get( + manufacturer, + f'Unknown ({manufacturer})') + size = dimm.get('dimm_size', '0 GB') + try: + size = string_to_bytes(size, assume_binary=True) + except ValueError: + # Empty DIMM? 
+      LOG.error('Invalid DIMM size: %s', size)
+      continue
+    dimm_list.append([size, manufacturer])
+
+  # Save details
+  return dimm_list
+
+
+def is_4k_aligned_macos(disk_details):
+  """Check partition alignment using diskutil info, returns bool."""
+  aligned = True
+
+  # Check partitions
+  for part in disk_details.get('children', []):
+    offset = part.get('PartitionMapPartitionOffset', 0)
+    if not offset:
+      # Assuming offset couldn't be found and it defaulted to 0
+      # NOTE: Just logging the error, not bailing
+      LOG.error('Failed to get partition offset for %s', part['path'])
+    aligned = aligned and offset >= 0 and offset % 4096 == 0
+
+  # Done
+  return aligned
+
+
+def is_4k_aligned_linux(dev_path, physical_sector_size):
+  """Check partition alignment using sfdisk, returns bool."""
+  aligned = True
+  cmd = [
+    'sudo',
+    'sfdisk',
+    '--json',
+    dev_path,
+    ]
+
+  # Get partition details
+  json_data = get_json_from_command(cmd)
+
+  # Check partitions
+  for part in json_data.get('partitiontable', {}).get('partitions', []):
+    offset = physical_sector_size * part.get('start', -1)
+    aligned = aligned and offset >= 0 and offset % 4096 == 0
+
+  # Done
+  return aligned
+
+
+if __name__ == '__main__':
+  print("This file is not meant to be called directly.")
diff --git a/scripts/wk/hw/sensors.py b/scripts/wk/hw/sensors.py
new file mode 100644
index 00000000..77cbcfa3
--- /dev/null
+++ b/scripts/wk/hw/sensors.py
@@ -0,0 +1,412 @@
+"""WizardKit: Hardware sensors"""
+# vim: sts=2 sw=2 ts=2
+
+import json
+import logging
+import pathlib
+import re
+
+from subprocess import CalledProcessError
+
+from wk.cfg.hw import CPU_CRITICAL_TEMP, SMC_IDS, TEMP_COLORS
+from wk.exe import run_program, start_thread
+from wk.std import PLATFORM, color_string, sleep
+
+
+# STATIC VARIABLES
+LOG = logging.getLogger(__name__)
+LM_SENSORS_CPU_REGEX = re.compile(r'(core|k\d+)temp', re.IGNORECASE)
+SMC_REGEX = re.compile(
+  r'^\s*(?P<ID>\w{4})'
+  r'\s+\[(?P<Type>.*)\]'
+  r'\s+(?P<Value>.*?)'
+  r'\s*\(bytes (?P<Bytes>.*)\)$'
+  )
+SENSOR_SOURCE_WIDTH = 25 if PLATFORM == 'Darwin' else 20
+
+
+# Error Classes
+class ThermalLimitReachedError(RuntimeError):
+  """Raised when the thermal threshold is reached."""
+
+
+# Classes
+class Sensors():
+  """Class for holding sensor specific data."""
+  def __init__(self):
+    self.background_thread = None
+    self.data = get_sensor_data()
+    self.out_path = None
+
+  def clear_temps(self):
+    """Clear saved temps but keep structure."""
+    for adapters in self.data.values():
+      for sources in adapters.values():
+        for source_data in sources.values():
+          source_data['Temps'] = []
+
+  def cpu_max_temp(self):
+    """Get max temp from any CPU source, returns float.
+
+    NOTE: If no temps are found this returns zero.
+ """ + max_temp = 0.0 + + # Check all CPU Temps + for section, adapters in self.data.items(): + if not section.startswith('CPU'): + continue + for sources in adapters.values(): + for source_data in sources.values(): + max_temp = max(max_temp, source_data.get('Max', 0)) + + # Done + return max_temp + + def cpu_reached_critical_temp(self): + """Check if CPU reached CPU_CRITICAL_TEMP, returns bool.""" + for section, adapters in self.data.items(): + if not section.startswith('CPU'): + # Limit to CPU temps + continue + + # Ugly section + for sources in adapters.values(): + for source_data in sources.values(): + if source_data.get('Max', -1) >= CPU_CRITICAL_TEMP: + return True + + # Didn't return above so temps are within the threshold + return False + + def generate_report(self, *temp_labels, colored=True, only_cpu=False): + """Generate report based on given temp_labels, returns list.""" + report = [] + + for section, adapters in sorted(self.data.items()): + if only_cpu and not section.startswith('CPU'): + continue + + # Ugly section + for adapter, sources in sorted(adapters.items()): + report.append(fix_sensor_name(adapter)) + for source, source_data in sorted(sources.items()): + line = f'{fix_sensor_name(source):{SENSOR_SOURCE_WIDTH}} ' + for label in temp_labels: + if label != 'Current': + line += f' {label.lower()}: ' + line += get_temp_str( + source_data.get(label, '???'), + colored=colored, + ) + report.append(line) + if not only_cpu: + report.append('') + + # Handle empty reports + if not report: + report = [ + color_string('WARNING: No sensors found', 'YELLOW'), + '', + 'Please monitor temps manually', + ] + + # Done + return report + + def monitor_to_file( + self, out_path, + exit_on_thermal_limit=True, temp_labels=None, thermal_action=None): + """Write report to path every second until stopped. + + thermal_action is a cmd to run if ThermalLimitReachedError is caught. + """ + stop_path = pathlib.Path(out_path).resolve().with_suffix('.stop') + if not temp_labels: + temp_labels = ('Current', 'Max') + + # Start loop + while True: + try: + self.update_sensor_data(exit_on_thermal_limit) + except ThermalLimitReachedError: + if thermal_action: + run_program(thermal_action, check=False) + report = self.generate_report(*temp_labels) + with open(out_path, 'w') as _f: + _f.write('\n'.join(report)) + + # Check if we should stop + if stop_path.exists(): + break + + # Sleep before next loop + sleep(0.5) + + def save_average_temps(self, temp_label, seconds=10): + # pylint: disable=unused-variable + """Save average temps under temp_label over provided seconds..""" + self.clear_temps() + + # Get temps + for i in range(seconds): + self.update_sensor_data() + sleep(1) + + # Calculate averages + for adapters in self.data.values(): + for sources in adapters.values(): + for source_data in sources.values(): + temps = source_data['Temps'] + source_data[temp_label] = sum(temps) / len(temps) + + def start_background_monitor( + self, out_path, + exit_on_thermal_limit=True, temp_labels=None, thermal_action=None): + """Start background thread to save report to file. + + thermal_action is a cmd to run if ThermalLimitReachedError is caught. 
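A possible call pattern for the background monitor defined here; the output path, the wall-based thermal_action, and the workload placeholder are examples only:

from wk.hw.sensors import Sensors

sensors = Sensors()
sensors.start_background_monitor(
  '/tmp/hw-diags-temps.out',                                    # example path
  temp_labels=('Current', 'Max'),
  thermal_action=['wall', 'CPU reached critical temperature'],  # example cmd
  )
try:
  pass  # ...run the CPU/disk workload being monitored here...
finally:
  sensors.stop_background_monitor()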
+ """ + if self.background_thread: + raise RuntimeError('Background thread already running') + + self.out_path = pathlib.Path(out_path) + self.background_thread = start_thread( + self.monitor_to_file, + args=(out_path, exit_on_thermal_limit, temp_labels, thermal_action), + ) + + def stop_background_monitor(self): + """Stop background thread.""" + self.out_path.with_suffix('.stop').touch() + self.background_thread.join() + + # Reset vars to None + self.background_thread = None + self.out_path = None + + def update_sensor_data(self, exit_on_thermal_limit=True): + """Update sensor data via OS-specific means.""" + if PLATFORM == 'Darwin': + self.update_sensor_data_macos(exit_on_thermal_limit) + elif PLATFORM == 'Linux': + self.update_sensor_data_linux(exit_on_thermal_limit) + + def update_sensor_data_linux(self, exit_on_thermal_limit=True): + """Update sensor data via lm_sensors.""" + lm_sensor_data = get_sensor_data_lm() + for section, adapters in self.data.items(): + for adapter, sources in adapters.items(): + for source, source_data in sources.items(): + try: + label = source_data['Label'] + temp = lm_sensor_data[adapter][source][label] + source_data['Current'] = temp + source_data['Max'] = max(temp, source_data['Max']) + source_data['Temps'].append(temp) + except KeyError: + # Dumb workaround for Dell sensors with changing source names + pass + + # Raise exception if thermal limit reached + if exit_on_thermal_limit and section == 'CPUTemps': + if source_data['Current'] >= CPU_CRITICAL_TEMP: + raise ThermalLimitReachedError('CPU temps reached limit') + + def update_sensor_data_macos(self, exit_on_thermal_limit=True): + """Update sensor data via SMC.""" + for section, adapters in self.data.items(): + for sources in adapters.values(): + for source_data in sources.values(): + cmd = ['smc', '-k', source_data['Label'], '-r'] + proc = run_program(cmd) + match = SMC_REGEX.match(proc.stdout.strip()) + try: + temp = float(match.group('Value')) + except (TypeError, ValueError): + LOG.error('Failed to update temp %s', source_data['Label']) + continue + + # Update source + source_data['Current'] = temp + source_data['Max'] = max(temp, source_data['Max']) + source_data['Temps'].append(temp) + + # Raise exception if thermal limit reached + if exit_on_thermal_limit and section == 'CPUTemps': + if source_data['Current'] >= CPU_CRITICAL_TEMP: + raise ThermalLimitReachedError('CPU temps reached limit') + + +# Functions +def fix_sensor_name(name): + """Cleanup sensor name, returns str.""" + name = re.sub(r'^(\w+)-(\w+)-(\w+)', r'\1 (\2 \3)', name, re.IGNORECASE) + name = name.title() + name = name.replace('Acpi', 'ACPI') + name = name.replace('ACPItz', 'ACPI TZ') + name = name.replace('Coretemp', 'CoreTemp') + name = name.replace('Cpu', 'CPU') + name = name.replace('Id ', 'ID ') + name = name.replace('Isa ', 'ISA ') + name = name.replace('Pci ', 'PCI ') + name = name.replace('Smc', 'SMC') + name = re.sub(r'(\D+)(\d+)', r'\1 \2', name, re.IGNORECASE) + name = re.sub(r'^K (\d+)Temp', r'AMD K\1 Temps', name, re.IGNORECASE) + name = re.sub(r'T(ctl|die)', r'CPU (T\1)', name, re.IGNORECASE) + name = re.sub(r'\s+', ' ', name) + return name + + +def get_sensor_data(): + """Get sensor data via OS-specific means, returns dict.""" + sensor_data = {} + if PLATFORM == 'Darwin': + sensor_data = get_sensor_data_macos() + elif PLATFORM == 'Linux': + sensor_data = get_sensor_data_linux() + + return sensor_data + + +def get_sensor_data_linux(): + """Get sensor data via lm_sensors, returns dict.""" + raw_lm_sensor_data = 
get_sensor_data_lm() + sensor_data = {'CPUTemps': {}, 'Others': {}} + + # Parse lm_sensor data + for adapter, sources in raw_lm_sensor_data.items(): + section = 'Others' + if LM_SENSORS_CPU_REGEX.search(adapter): + section = 'CPUTemps' + sensor_data[section][adapter] = {} + sources.pop('Adapter', None) + + # Find current temp and add to dict + ## current temp is labeled xxxx_input + for source, labels in sources.items(): + for label, temp in labels.items(): + if label.startswith('fan') or label.startswith('in'): + # Skip fan RPMs and voltages + continue + if 'input' in label: + sensor_data[section][adapter][source] = { + 'Current': temp, + 'Label': label, + 'Max': temp, + 'Temps': [temp], + } + + # Remove empty adapters + if not sensor_data[section][adapter]: + sensor_data[section].pop(adapter) + + # Remove empty sections + for adapters in sensor_data.values(): + adapters = {source: source_data for source, source_data in adapters.items() + if source_data} + + # Done + return sensor_data + + +def get_sensor_data_lm(): + """Get raw sensor data via lm_sensors, returns dict.""" + raw_lm_sensor_data = {} + cmd = ['sensors', '-j'] + + # Get raw data + try: + proc = run_program(cmd) + except CalledProcessError: + # Assuming no sensors available, return empty dict + return {} + + # Workaround for bad sensors + raw_data = [] + for line in proc.stdout.splitlines(): + if line.strip() == ',': + # Assuming malformatted line caused by missing data + continue + raw_data.append(line) + + # Parse JSON data + try: + raw_lm_sensor_data = json.loads('\n'.join(raw_data)) + except json.JSONDecodeError: + # Still broken, just return the empty dict + pass + + # Done + return raw_lm_sensor_data + + +def get_sensor_data_macos(): + """Get sensor data via SMC, returns dict. + + NOTE: The data is structured like the lm_sensor data. + """ + cmd = ['smc', '-l'] + sensor_data = {'CPUTemps': {'SMC (CPU)': {}}, 'Others': {'SMC (Other)': {}}} + + # Parse SMC data + proc = run_program(cmd) + for line in proc.stdout.splitlines(): + tmp = SMC_REGEX.match(line.strip()) + if tmp: + value = tmp.group('Value') + try: + LOG.debug('Invalid sensor: %s', tmp.group('ID')) + value = float(value) + except (TypeError, ValueError): + # Skip this sensor + continue + + # Only add known sensor IDs + sensor_id = tmp.group('ID') + if sensor_id not in SMC_IDS: + continue + + # Add to dict + section = 'Others' + adapter = 'SMC (Other)' + if SMC_IDS[sensor_id].get('CPU Temp', False): + section = 'CPUTemps' + adapter = 'SMC (CPU)' + source = SMC_IDS[sensor_id]['Source'] + sensor_data[section][adapter][source] = { + 'Current': value, + 'Label': sensor_id, + 'Max': value, + 'Temps': [value], + } + + # Done + return sensor_data + + +def get_temp_str(temp, colored=True): + """Get colored string based on temp, returns str.""" + temp_color = None + + # Safety check + try: + temp = float(temp) + except (TypeError, ValueError): + # Invalid temp? 
+ return color_string(temp, 'PURPLE') + + # Determine color + if colored: + for threshold, color in sorted(TEMP_COLORS.items(), reverse=True): + if temp >= threshold: + temp_color = color + break + + # Done + return color_string(f'{"-" if temp < 0 else ""}{temp:2.0f}°C', temp_color) + + + +if __name__ == '__main__': + print("This file is not meant to be called directly.") diff --git a/scripts/wk/io.py b/scripts/wk/io.py new file mode 100644 index 00000000..a29ccee6 --- /dev/null +++ b/scripts/wk/io.py @@ -0,0 +1,196 @@ +"""WizardKit: I/O Functions""" +# vim: sts=2 sw=2 ts=2 + +import logging +import os +import pathlib +import re +import shutil + + +# STATIC VARIABLES +LOG = logging.getLogger(__name__) + + +# Functions +def case_insensitive_path(path): + """Find path case-insensitively, returns pathlib.Path obj.""" + given_path = pathlib.Path(path).resolve() + real_path = None + + # Quick check + if given_path.exists(): + return given_path + + # Search for real path + parts = list(given_path.parts) + real_path = parts.pop(0) + for part in parts: + try: + real_path = case_insensitive_search(real_path, part) + except NotADirectoryError: + # Reclassify error + raise FileNotFoundError(given_path) + real_path = pathlib.Path(real_path) + + # Done + return real_path + + +def case_insensitive_search(path, item): + """Search path for item case insensitively, returns pathlib.Path obj.""" + path = pathlib.Path(path).resolve() + given_path = path.joinpath(item) + real_path = None + regex = fr'^{item}' + + # Quick check + if given_path.exists(): + return given_path + + # Check all items in path + for entry in os.scandir(path): + if re.match(regex, entry.name, re.IGNORECASE): + real_path = path.joinpath(entry.name) + + # Raise exception if necessary + if not real_path: + raise FileNotFoundError(given_path) + + # Done + return real_path + + +def delete_empty_folders(path): + """Recursively delete all empty folders in path.""" + LOG.debug('path: %s', path) + + # Delete empty subfolders first + for item in os.scandir(path): + if item.is_dir(): + delete_empty_folders(item.path) + + # Attempt to remove (top) path + try: + delete_folder(path, force=False) + except OSError: + # Assuming it's not empty + pass + + +def delete_folder(path, force=False, ignore_errors=False): + """Delete folder if empty or if forced. + + NOTE: Exceptions are not caught by this function, + ignore_errors is passed to shutil.rmtree to allow partial deletions. + """ + LOG.debug( + 'path: %s, force: %s, ignore_errors: %s', + path, force, ignore_errors, + ) + + if force: + shutil.rmtree(path, ignore_errors=ignore_errors) + else: + os.rmdir(path) + + +def delete_item(path, force=False, ignore_errors=False): + """Delete file or folder, optionally recursively. + + NOTE: Exceptions are not caught by this function, + ignore_errors is passed to delete_folder to allow partial deletions. 
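A short usage sketch for case_insensitive_path() defined earlier in this file; the UFD path is only an example borrowed from the kit scripts, not a guaranteed location:

from wk.io import case_insensitive_path

try:
  syslinux_dir = case_insensitive_path('/mnt/ufd/arch/boot/syslinux')
except FileNotFoundError:
  syslinux_dir = None  # No entry matches under any casing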
+ """ + LOG.debug( + 'path: %s, force: %s, ignore_errors: %s', + path, force, ignore_errors, + ) + + path = pathlib.Path(path) + if path.is_dir(): + delete_folder(path, force=force, ignore_errors=ignore_errors) + else: + os.remove(path) + + +def non_clobber_path(path): + """Update path as needed to non-existing path, returns pathlib.Path.""" + LOG.debug('path: %s', path) + path = pathlib.Path(path) + name = path.name + new_path = None + suffix = ''.join(path.suffixes) + name = name.replace(suffix, '') + + # Bail early + if not path.exists(): + return path + + # Find non-existant path + for _i in range(1000): + test_path = path.with_name(f'{name}_{_i}').with_suffix(suffix) + if not test_path.exists(): + new_path = test_path + break + + # Raise error if viable path not found + if not new_path: + raise FileExistsError(new_path) + + # Done + LOG.debug('new path: %s', new_path) + return new_path + + +def recursive_copy(source, dest, overwrite=False): + """Copy source to dest recursively. + + NOTE: This uses rsync style source/dest syntax. + If the source has a trailing slash then it's contents are copied, + otherwise the source itself is copied. + + Examples assuming "ExDir/ExFile.txt" exists: + recursive_copy("ExDir", "Dest/") results in "Dest/ExDir/ExFile.txt" + recursive_copy("ExDir/", "Dest/") results in "Dest/ExFile.txt" + + NOTE 2: dest does not use find_path because it might not exist. + """ + copy_contents = str(source).endswith(('/', '\\')) + source = case_insensitive_path(source) + dest = pathlib.Path(dest).resolve().joinpath(source.name) + os.makedirs(dest.parent, exist_ok=True) + + # Recursively copy source to dest + if source.is_dir(): + if copy_contents: + # Trailing slash syntax + for item in os.scandir(source): + recursive_copy(item.path, dest.parent, overwrite=overwrite) + elif not dest.exists(): + # No conflict, copying whole tree (no merging needed) + shutil.copytree(source, dest) + elif not dest.is_dir(): + # Refusing to replace file with dir + raise FileExistsError(f'Refusing to replace file: {dest}') + else: + # Dest exists and is a dir, merge dirs + for item in os.scandir(source): + recursive_copy(item.path, dest, overwrite=overwrite) + elif source.is_file(): + if not dest.exists(): + # No conflict, copying file + shutil.copy2(source, dest) + elif not dest.is_file(): + # Refusing to replace dir with file + raise FileExistsError(f'Refusing to replace dir: {dest}') + elif overwrite: + # Dest file exists, deleting and replacing file + os.remove(dest) + shutil.copy2(source, dest) + else: + # Refusing to delete file when overwrite=False + raise FileExistsError(f'Refusing to delete file: {dest}') + + +if __name__ == '__main__': + print("This file is not meant to be called directly.") diff --git a/scripts/wk/kit/__init__.py b/scripts/wk/kit/__init__.py new file mode 100644 index 00000000..0869d9c6 --- /dev/null +++ b/scripts/wk/kit/__init__.py @@ -0,0 +1,7 @@ +"""WizardKit: kit module init""" +# vim: sts=2 sw=2 ts=2 + +import platform + +if platform.system() == 'Linux': + from wk.kit import ufd diff --git a/scripts/wk/kit/ufd.py b/scripts/wk/kit/ufd.py new file mode 100644 index 00000000..f432a3f3 --- /dev/null +++ b/scripts/wk/kit/ufd.py @@ -0,0 +1,468 @@ +"""WizardKit: UFD Functions""" +# vim: sts=2 sw=2 ts=2 +# TODO: Replace some lsblk usage with hw_obj? 
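For orientation while reading the functions below: the SOURCES and ITEMS constants imported from wk.cfg.ufd are not part of this hunk, so the shapes shown here are guesses inferred from how verify_sources() and copy_source() consume them, not the shipped values:

# Illustrative shapes only -- the real constants live in wk/cfg/ufd.py.
EXAMPLE_SOURCES = {
  # label: {'Arg': docopt option name, 'Type': code checked by is_valid_path()}
  'Linux': {'Arg': '--linux', 'Type': 'ISO'},
  'Main Kit': {'Arg': '--main-kit', 'Type': 'KIT'},
  }
EXAMPLE_ITEMS = {
  # label: [(path inside the source, destination path on the UFD), ...]
  'Linux': [('/arch', '/arch')],
  'Main Kit': [('/', '/')],
  }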
+# TODO: Reduce imports if possible +# TODO: Needs testing + +import logging +import os +import shutil + +from collections import OrderedDict +from docopt import docopt + +from wk import io, log, std +from wk.cfg.main import KIT_NAME_FULL, KIT_NAME_SHORT +from wk.cfg.ufd import BOOT_ENTRIES, BOOT_FILES, ITEMS, ITEMS_HIDDEN, SOURCES +from wk.exe import run_program +from wk.os import linux + + +# STATIC VARIABLES +DOCSTRING = '''WizardKit: Build UFD + +Usage: + build-ufd [options] --ufd-device PATH --linux PATH + [--linux-minimal PATH] + [--main-kit PATH] + [--winpe PATH] + [--extra-dir PATH] + build-ufd (-h | --help) + +Options: + -e PATH, --extra-dir PATH + -k PATH, --main-kit PATH + -l PATH, --linux PATH + -m PATH, --linux-minimal PATH + -u PATH, --ufd-device PATH + -w PATH, --winpe PATH + + -h --help Show this page + -M --use-mbr Use real MBR instead of GPT w/ Protective MBR + -F --force Bypass all confirmation messages. USE WITH EXTREME CAUTION! + -U --update Don't format device, just update +''' +LOG = logging.getLogger(__name__) +ISO_LABEL = f'{KIT_NAME_SHORT}_LINUX' +UFD_LABEL = f'{KIT_NAME_SHORT}_UFD' + + +# Functions +def build_ufd(): + """Build UFD using selected sources.""" + args = docopt(DOCSTRING) + log.update_log_path(dest_name='build-ufd', timestamp=True) + try_print = std.TryAndPrint() + try_print.indent = 2 + + # Check if running with root permissions + if not linux.running_as_root(): + std.print_error('This script is meant to be run as root') + std.abort() + + # Show header + std.print_success(KIT_NAME_FULL) + std.print_warning('UFD Build Tool') + std.print_warning(' ') + + # Verify selections + ufd_dev = verify_ufd(args['--ufd-device']) + sources = verify_sources(args, SOURCES) + show_selections(args, sources, ufd_dev, SOURCES) + if not args['--force']: + confirm_selections(update=args['--update']) + + # Prep UFD + if not args['--update']: + std.print_info('Prep UFD') + prep_device(ufd_dev, UFD_LABEL, use_mbr=args['--use-mbr']) + + # Mount UFD + try_print.run( + message='Mounting UFD...', + function=linux.mount, + mount_source=find_first_partition(ufd_dev), + mount_point='/mnt/UFD', + read_write=True, + ) + + # Remove Arch folder + if args['--update']: + try_print.run( + message='Removing Linux...', + function=remove_arch, + ) + + # Copy sources + std.print_standard(' ') + std.print_info('Copy Sources') + for s_label, s_path in sources.items(): + try_print.run( + message='Copying {}...'.format(s_label), + function=copy_source, + source=s_path, + items=ITEMS[s_label], + overwrite=True, + ) + + # Update boot entries + std.print_standard(' ') + std.print_info('Boot Setup') + try_print.run( + message='Updating boot entries...', + function=update_boot_entries, + ) + + # Install syslinux (to partition) + try_print.run( + message='Syslinux (partition)...', + function=install_syslinux_to_partition, + partition=find_first_partition(ufd_dev), + ) + + # Unmount UFD + try_print.run( + message='Unmounting UFD...', + function=linux.unmount, + mount_point='/mnt/UFD', + ) + + # Install syslinux (to device) + try_print.run( + message='Syslinux (device)...', + function=install_syslinux_to_dev, + ufd_dev=ufd_dev, + use_mbr=args['--use-mbr'], + ) + + # Hide items + std.print_standard(' ') + std.print_info('Final Touches') + try_print.run( + message='Hiding items...', + function=hide_items, + ufd_dev=ufd_dev, + items=ITEMS_HIDDEN, + ) + + # Done + std.print_standard('\nDone.') + if not args['--force']: + std.pause('Press Enter to exit...') + + +def confirm_selections(update=False): + """Ask 
tech to confirm selections, twice if necessary.""" + if not std.ask('Is the above information correct?'): + std.abort() + + # Safety check + if not update: + std.print_standard(' ') + std.print_warning('SAFETY CHECK') + std.print_standard( + 'All data will be DELETED from the disk and partition(s) listed above.') + std.print_colored( + ['This is irreversible and will lead to', 'DATA LOSS'], + [None, 'RED'], + ) + if not std.ask('Asking again to confirm, is this correct?'): + std.abort() + + std.print_standard(' ') + + +def copy_source(source, items, overwrite=False): + """Copy source items to /mnt/UFD.""" + is_image = source.is_file() + + # Mount source if necessary + if is_image: + linux.mount(source, '/mnt/Source') + + # Copy items + for i_source, i_dest in items: + i_source = f'{"/mnt/Source" if is_image else source}{i_source}' + i_dest = f'/mnt/UFD{i_dest}' + try: + io.recursive_copy(i_source, i_dest, overwrite=overwrite) + except FileNotFoundError: + # Going to assume (hope) that this is fine + pass + + # Unmount source if necessary + if is_image: + linux.unmount('/mnt/Source') + + +def find_first_partition(dev_path): + """Find path to first partition of dev, returns str. + + NOTE: This assumes the dev was just partitioned with + a single partition. + """ + cmd = [ + 'lsblk', + '--list', + '--noheadings', + '--output', 'name', + '--paths', + dev_path, + ] + + # Run cmd + proc = run_program(cmd) + part_path = proc.stdout.splitlines()[-1].strip() + + # Done + return part_path + + +def hide_items(ufd_dev, items): + """Set FAT32 hidden flag for items.""" + first_partition = find_first_partition(ufd_dev) + with open('/root/.mtoolsrc', 'w') as _f: + _f.write(f'drive U: file="{first_partition}"\n') + _f.write('mtools_skip_check=1\n') + + # Hide items + for item in items: + cmd = [f'yes | mattrib +h "U:/{item}"'] + run_program(cmd, check=False, shell=True) + + +def install_syslinux_to_dev(ufd_dev, use_mbr): + """Install Syslinux to UFD (dev).""" + cmd = [ + 'dd', + 'bs=440', + 'count=1', + f'if=/usr/lib/syslinux/bios/{"mbr" if use_mbr else "gptmbr"}.bin', + f'of={ufd_dev}', + ] + run_program(cmd) + + +def install_syslinux_to_partition(partition): + """Install Syslinux to UFD (partition).""" + cmd = [ + 'syslinux', + '--install', + '--directory', + '/arch/boot/syslinux/', + partition, + ] + run_program(cmd) + + +def is_valid_path(path_obj, path_type): + """Verify path_obj is valid by type, returns bool.""" + valid_path = False + if path_type == 'DIR': + valid_path = path_obj.is_dir() + elif path_type == 'KIT': + valid_path = path_obj.is_dir() and path_obj.joinpath('.bin').exists() + elif path_type == 'IMG': + valid_path = path_obj.is_file() and path_obj.suffix.lower() == '.img' + elif path_type == 'ISO': + valid_path = path_obj.is_file() and path_obj.suffix.lower() == '.iso' + elif path_type == 'UFD': + valid_path = path_obj.is_block_device() + + return valid_path + + +def prep_device(dev_path, label, use_mbr=False): + """Format device in preparation for applying the WizardKit components + + This is done is four steps: + 1. Zero-out first 64MB (this deletes the partition table and/or bootloader) + 2. Create a new partition table (GPT by default, optionally MBR) + 3. Set boot flag + 4. 
Format partition (FAT32, 4K aligned)
+  """
+  try_print = std.TryAndPrint()
+  try_print.indent = 2
+
+  # Zero-out first 64MB
+  cmd = [
+    'dd',
+    'bs=4M',
+    'count=16',
+    'if=/dev/zero',
+    f'of={dev_path}',
+    ]
+  try_print.run(
+    message='Zeroing first 64MiB...',
+    function=run_program,
+    cmd=cmd,
+    )
+
+  # Create partition table
+  cmd = [
+    'parted', dev_path,
+    '--script',
+    '--',
+    'mklabel', 'msdos' if use_mbr else 'gpt',
+    'mkpart', 'primary', 'fat32', '4MiB',
+    '-1s' if use_mbr else '-4MiB',
+    ]
+  try_print.run(
+    message='Creating partition table...',
+    function=run_program,
+    cmd=cmd,
+    )
+
+  # Set boot flag
+  cmd = [
+    'parted', dev_path,
+    'set', '1',
+    'boot' if use_mbr else 'legacy_boot',
+    'on',
+    ]
+  try_print.run(
+    message='Setting boot flag...',
+    function=run_program,
+    cmd=cmd,
+    )
+
+  # Format partition
+  cmd = [
+    'mkfs.vfat',
+    '-F', '32',
+    '-n', label,
+    find_first_partition(dev_path),
+    ]
+  try_print.run(
+    message='Formatting partition...',
+    function=run_program,
+    cmd=cmd,
+    )
+
+
+def remove_arch():
+  """Remove arch dir from UFD.
+
+  This ensures a clean installation to the UFD and resets the boot files.
+  """
+  shutil.rmtree(io.case_insensitive_path('/mnt/UFD/arch'))
+
+
+def show_selections(args, sources, ufd_dev, ufd_sources):
+  """Show selections including non-specified options."""
+
+  # Sources
+  std.print_info('Sources')
+  for label in ufd_sources.keys():
+    if label in sources:
+      std.print_standard(f'  {label+":":<18} {sources[label]}')
+    else:
+      std.print_colored(
+        [f'  {label+":":<18}', 'Not Specified'],
+        [None, 'YELLOW'],
+        )
+  std.print_standard(' ')
+
+  # Destination
+  std.print_info('Destination')
+  cmd = [
+    'lsblk', '--nodeps', '--noheadings', '--paths',
+    '--output', 'NAME,FSTYPE,TRAN,SIZE,VENDOR,MODEL,SERIAL',
+    ufd_dev,
+    ]
+  proc = run_program(cmd, check=False)
+  std.print_standard(proc.stdout.strip())
+  cmd = [
+    'lsblk', '--noheadings', '--paths',
+    '--output', 'NAME,SIZE,FSTYPE,LABEL,MOUNTPOINT',
+    ufd_dev,
+    ]
+  proc = run_program(cmd, check=False)
+  for line in proc.stdout.splitlines()[1:]:
+    std.print_standard(line)
+
+  # Notes
+  if args['--update']:
+    std.print_warning('Updating kit in-place')
+  elif args['--use-mbr']:
+    std.print_warning('Formatting using legacy MBR')
+  std.print_standard(' ')
+
+
+def update_boot_entries():
+  """Update boot files for UFD usage."""
+  configs = []
+
+  # Find config files
+  for c_path, c_ext in BOOT_FILES.items():
+    c_path = io.case_insensitive_path(f'/mnt/UFD{c_path}')
+    for item in os.scandir(c_path):
+      if item.name.lower().endswith(c_ext.lower()):
+        configs.append(item.path)
+
+  # Update Linux labels
+  cmd = [
+    'sed',
+    '--in-place',
+    '--regexp-extended',
+    f's/{ISO_LABEL}/{UFD_LABEL}/',
+    *configs,
+    ]
+  run_program(cmd)
+
+  # Uncomment extra entries if present
+  for b_path, b_comment in BOOT_ENTRIES.items():
+    try:
+      io.case_insensitive_path(f'/mnt/UFD{b_path}')
+    except (FileNotFoundError, NotADirectoryError):
+      # Entry not found, continue to next entry
+      continue
+
+    # Entry found, update config files
+    cmd = [
+      'sed',
+      '--in-place',
+      f's/#{b_comment}#//',
+      *configs,
+      ]
+    run_program(cmd, check=False)
+
+
+def verify_sources(args, ufd_sources):
+  """Check all sources and abort if necessary, returns dict."""
+  sources = OrderedDict()
+
+  for label, data in ufd_sources.items():
+    s_path = args[data['Arg']]
+    if s_path:
+      try:
+        s_path_obj = io.case_insensitive_path(s_path)
+      except FileNotFoundError:
+        std.print_error(f'ERROR: {label} not found: {s_path}')
+        std.abort()
+      if not is_valid_path(s_path_obj, data['Type']):
std.print_error(f'ERROR: Invalid {label} source: {s_path}') + std.abort() + sources[label] = s_path_obj + + return sources + + +def verify_ufd(dev_path): + """Check that dev_path is a valid UFD, returns pathlib.Path obj.""" + ufd_dev = None + + try: + ufd_dev = io.case_insensitive_path(dev_path) + except FileNotFoundError: + std.print_error(f'ERROR: UFD device not found: {dev_path}') + std.abort() + + if not is_valid_path(ufd_dev, 'UFD'): + std.print_error(f'ERROR: Invalid UFD device: {ufd_dev}') + std.abort() + + return ufd_dev + + +if __name__ == '__main__': + print("This file is not meant to be called directly.") diff --git a/scripts/wk/log.py b/scripts/wk/log.py new file mode 100644 index 00000000..ee512c95 --- /dev/null +++ b/scripts/wk/log.py @@ -0,0 +1,154 @@ +"""WizardKit: Log Functions""" +# vim: sts=2 sw=2 ts=2 + +import atexit +import logging +import os +import pathlib +import shutil +import time + +from wk import cfg +from wk.io import non_clobber_path + + +# STATIC VARIABLES +if os.name == 'nt': + # Example: "C:\WK\1955-11-05\WizardKit" + DEFAULT_LOG_DIR = ( + f'{os.environ.get("SYSTEMDRIVE", "C:")}/' + f'{cfg.main.KIT_NAME_SHORT}/' + f'{time.strftime("%Y-%m-%d")}' + ) +else: + # Example: "/home/tech/Logs" + DEFAULT_LOG_DIR = f'{os.path.expanduser("~")}/Logs' +DEFAULT_LOG_NAME = cfg.main.KIT_NAME_FULL + + +# Functions +def enable_debug_mode(): + """Configures logging for better debugging.""" + root_logger = logging.getLogger() + for handler in root_logger.handlers: + formatter = logging.Formatter( + datefmt=cfg.log.DEBUG['datefmt'], + fmt=cfg.log.DEBUG['format'], + ) + handler.setFormatter(formatter) + root_logger.setLevel('DEBUG') + + +def format_log_path( + log_dir=None, log_name=None, timestamp=False, + kit=False, tool=False): + """Format path based on args passed, returns pathlib.Path obj.""" + log_path = pathlib.Path( + f'{log_dir if log_dir else DEFAULT_LOG_DIR}/' + f'{cfg.main.KIT_NAME_FULL+"/" if kit else ""}' + f'{"Tools/" if tool else ""}' + f'{log_name if log_name else DEFAULT_LOG_NAME}' + f'{"_" if timestamp else ""}' + f'{time.strftime("%Y-%m-%d_%H%M%S%z") if timestamp else ""}' + '.log' + ) + log_path = log_path.resolve() + + # Avoid clobbering + log_path = non_clobber_path(log_path) + + # Done + return log_path + + +def get_root_logger_path(): + """Get path to log file from root logger, returns pathlib.Path obj.""" + log_path = None + root_logger = logging.getLogger() + + # Check all handlers and use the first fileHandler found + for handler in root_logger.handlers: + if isinstance(handler, logging.FileHandler): + log_path = pathlib.Path(handler.baseFilename).resolve() + break + + # Done + return log_path + + +def remove_empty_log(): + """Remove log if empty.""" + is_empty = False + + # Check if log is empty + log_path = get_root_logger_path() + try: + is_empty = log_path and log_path.exists() and log_path.stat().st_size == 0 + except (FileNotFoundError, AttributeError): + # File doesn't exist or couldn't verify it's empty + pass + + # Delete log + if is_empty: + log_path.unlink() + + +def start(config=None): + """Configure and start logging using safe defaults.""" + log_path = format_log_path(timestamp=os.name != 'nt') + root_logger = logging.getLogger() + + # Safety checks + if not config: + config = cfg.log.DEFAULT + if root_logger.hasHandlers(): + raise UserWarning('Logging already started, results may be unpredictable.') + + # Create log_dir + os.makedirs(log_path.parent, exist_ok=True) + + # Config logger + logging.basicConfig(filename=log_path, **config) 
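For reference, the intended call pattern from a script entry point appears to be the following, mirroring build_ufd() earlier in this diff; the 'hw-diags' name is just an example:

from wk import log

log.start()                                                # default dir + name
log.update_log_path(dest_name='hw-diags', timestamp=True)  # per-script log file
# log.enable_debug_mode()  # optional: switch to the verbose DEBUG format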
+ + # Register shutdown to run atexit + atexit.register(remove_empty_log) + atexit.register(logging.shutdown) + + +def update_log_path( + dest_dir=None, dest_name=None, keep_history=True, timestamp=True): + """Moves current log file to new path and updates the root logger.""" + root_logger = logging.getLogger() + cur_handler = None + cur_path = get_root_logger_path() + new_path = format_log_path(dest_dir, dest_name, timestamp=timestamp) + os.makedirs(new_path.parent, exist_ok=True) + + # Get current logging file handler + for handler in root_logger.handlers: + if isinstance(handler, logging.FileHandler): + cur_handler = handler + break + if not cur_handler: + raise RuntimeError('Logging FileHandler not found') + + # Copy original log to new location + if keep_history: + if new_path.exists(): + raise FileExistsError(f'Refusing to clobber: {new_path}') + shutil.move(cur_path, new_path) + + # Remove old log if empty + remove_empty_log() + + # Create new cur_handler (preserving formatter settings) + new_handler = logging.FileHandler(new_path, mode='a') + new_handler.setFormatter(cur_handler.formatter) + + # Replace current handler + root_logger.removeHandler(cur_handler) + root_logger.addHandler(new_handler) + + +if __name__ == '__main__': + print("This file is not meant to be called directly.") diff --git a/scripts/wk/net.py b/scripts/wk/net.py new file mode 100644 index 00000000..8b930c3b --- /dev/null +++ b/scripts/wk/net.py @@ -0,0 +1,260 @@ +"""WizardKit: Net Functions""" +# vim: sts=2 sw=2 ts=2 + +import os +import pathlib +import re + +import psutil + +from wk.exe import get_json_from_command, run_program +from wk.std import PLATFORM, GenericError, show_data + +from wk.cfg.net import BACKUP_SERVERS + + +# REGEX +REGEX_VALID_IP = re.compile( + r'(10.\d+.\d+.\d+' + r'|172.(1[6-9]|2\d|3[0-1])' + r'|192.168.\d+.\d+)', + re.IGNORECASE) + + +# Functions +def connected_to_private_network(raise_on_error=False): + """Check if connected to a private network, returns bool. + + This checks for a valid private IP assigned to this system. + + NOTE: If one isn't found and raise_on_error=True then an exception is raised. + NOTE 2: If one is found and raise_on_error=True then None is returned. 
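A quick sanity check of the private-address test defined above and used by connected_to_private_network() below; the sample addresses are arbitrary:

from wk.net import REGEX_VALID_IP

assert REGEX_VALID_IP.search('192.168.1.23')
assert REGEX_VALID_IP.search('10.0.5.17')
assert REGEX_VALID_IP.search('172.16.4.2')
assert not REGEX_VALID_IP.search('8.8.8.8')  # Public addresses don't match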
+ """ + connected = False + + # Check IPs + devs = psutil.net_if_addrs() + for dev in devs.values(): + for family in dev: + if REGEX_VALID_IP.search(family.address): + # Valid IP found + connected = True + break + if connected: + break + + # No valid IP found + if not connected and raise_on_error: + raise GenericError('Not connected to a network') + + # Done + if raise_on_error: + connected = None + return connected + + +def mount_backup_shares(read_write=False): + """Mount backup shares using OS specific methods.""" + report = [] + for name, details in BACKUP_SERVERS.items(): + mount_point = None + mount_str = f'{name} (//{details["Address"]}/{details["Share"]})' + + # Prep mount point + if PLATFORM in ('Darwin', 'Linux'): + mount_point = pathlib.Path(f'/Backups/{name}') + try: + if not mount_point.exists(): + # Script should be run as user so sudo is required + run_program(['sudo', 'mkdir', '-p', mount_point]) + except OSError: + # Assuming permission denied under macOS + pass + if mount_point: + mount_str += f' to {mount_point}' + + # Check if already mounted + if share_is_mounted(details): + report.append(f'(Already) Mounted {mount_str}') + # Skip to next share + continue + + # Mount share + proc = mount_network_share(details, mount_point, read_write=read_write) + if proc.returncode: + report.append(f'Failed to Mount {mount_str}') + else: + report.append(f'Mounted {mount_str}') + + # Done + return report + + +def mount_network_share(details, mount_point=None, read_write=False): + """Mount network share using OS specific methods.""" + cmd = None + address = details['Address'] + share = details['Share'] + username = details['RO-User'] + password = details['RO-Pass'] + if read_write: + username = details['RW-User'] + password = details['RW-Pass'] + + # Network check + if not connected_to_private_network(): + raise RuntimeError('Not connected to a network') + + # Build OS-specific command + if PLATFORM == 'Darwin': + cmd = [ + 'sudo', + 'mount', + '-t', 'smbfs', + '-o', f'{"rw" if read_write else "ro"}', + f'//{username}:{password}@{address}/{share}', + mount_point, + ] + elif PLATFORM == 'Linux': + cmd = [ + 'sudo', + 'mount', + '-t', 'cifs', + '-o', ( + f'{"rw" if read_write else "ro"}' + f',uid={os.getuid()}' + f',gid={os.getgid()}' + f',username={username}' + f',{"password=" if password else "guest"}{password}' + ), + f'//{address}/{share}', + mount_point + ] + elif PLATFORM == 'Windows': + cmd = ['net', 'use'] + if mount_point: + cmd.append(f'{mount_point}:') + cmd.append(f'/user:{username}') + cmd.append(fr'\\{address}\{share}') + cmd.append(password) + + # Mount share + return run_program(cmd, check=False) + + +def ping(addr='google.com'): + """Attempt to ping addr.""" + cmd = ( + 'ping', + '-n' if psutil.WINDOWS else '-c', + '2', + addr, + ) + run_program(cmd) + + +def share_is_mounted(details): + """Check if dev/share/etc is mounted, returns bool.""" + mounted = False + + if PLATFORM == 'Darwin': + # Weak and naive text search + proc = run_program(['mount'], check=False) + for line in proc.stdout.splitlines(): + if f'{details["Address"]}/{details["Share"]}' in line: + mounted = True + break + elif PLATFORM == 'Linux': + cmd = [ + 'findmnt', + '--list', + '--json', + '--invert', + '--types', ( + 'autofs,binfmt_misc,bpf,cgroup,cgroup2,configfs,debugfs,devpts,' + 'devtmpfs,hugetlbfs,mqueue,proc,pstore,securityfs,sysfs,tmpfs' + ), + '--output', 'SOURCE', + ] + mount_data = get_json_from_command(cmd) + for row in mount_data.get('filesystems', []): + if row['source'] == 
f'//{details["Address"]}/{details["Share"]}': + mounted = True + break + #TODO: Check mount status under Windows + #elif PLATFORM == 'Windows': + + # Done + return mounted + + +def show_valid_addresses(): + """Show all valid private IP addresses assigned to the system.""" + devs = psutil.net_if_addrs() + for dev, families in sorted(devs.items()): + for family in families: + if REGEX_VALID_IP.search(family.address): + # Valid IP found + show_data(message=dev, data=family.address) + + +def speedtest(): + """Run a network speedtest using speedtest-cli.""" + cmd = ['speedtest-cli', '--simple'] + proc = run_program(cmd, check=False) + output = [line.strip() for line in proc.stdout.splitlines() if line.strip()] + output = [line.split() for line in output] + output = [(a, float(b), c) for a, b, c in output] + return [f'{a:<10}{b:6.2f} {c}' for a, b, c in output] + + +def unmount_backup_shares(): + """Unmount backup shares.""" + report = [] + for name, details in BACKUP_SERVERS.items(): + kwargs = {} + source_str = f'{name} (//{details["Address"]}/{details["Share"]})' + + # Check if mounted + if not share_is_mounted(details): + report.append(f'Not mounted {source_str}') + continue + + # Build OS specific kwargs + if PLATFORM in ('Darwin', 'Linux'): + kwargs['mount_point'] = f'/Backups/{name}' + elif PLATFORM == 'Windows': + kwargs['details'] = details + + # Unmount and add to report + proc = unmount_network_share(**kwargs) + if proc.returncode: + report.append(f'Failed to unmount {source_str}') + else: + report.append(f'Unmounted {source_str}') + + # Done + return report + + +def unmount_network_share(details=None, mount_point=None): + """Unmount network share""" + cmd = [] + + # Build OS specific command + if PLATFORM in ('Darwin', 'Linux'): + cmd = ['sudo', 'umount', mount_point] + elif PLATFORM == 'Windows': + cmd = ['net', 'use'] + if mount_point: + cmd.append(f'{mount_point}:') + elif details: + cmd.append(fr'\\{details["Address"]}\{details["Share"]}') + cmd.append('/delete') + + # Unmount share + return run_program(cmd, check=False) + + +if __name__ == '__main__': + print("This file is not meant to be called directly.") diff --git a/scripts/wk/os/__init__.py b/scripts/wk/os/__init__.py new file mode 100644 index 00000000..1b9b21bd --- /dev/null +++ b/scripts/wk/os/__init__.py @@ -0,0 +1,10 @@ +"""WizardKit: os module init""" +# vim: sts=2 sw=2 ts=2 + +import platform + +#if platform.system() == 'Darwin': +if platform.system() == 'Linux': + from wk.os import linux +if platform.system() == 'Windows': + from wk.os import win diff --git a/scripts/wk/os/linux.py b/scripts/wk/os/linux.py new file mode 100644 index 00000000..253b1dd0 --- /dev/null +++ b/scripts/wk/os/linux.py @@ -0,0 +1,240 @@ +"""WizardKit: Linux Functions""" +# vim: sts=2 sw=2 ts=2 + +import logging +import os +import pathlib +import re +import subprocess + +from wk import std +from wk.exe import popen_program, run_program +from wk.hw.obj import Disk + + +# STATIC VARIABLES +LOG = logging.getLogger(__name__) +UUID_CORESTORAGE = '53746f72-6167-11aa-aa11-00306543ecac' + + +# Functions +def get_user_home(user): + """Get path to user's home dir, returns pathlib.Path obj.""" + home = None + + # Get path from user details + cmd = ['getent', 'passwd', user] + proc = run_program(cmd, check=False) + try: + home = proc.stdout.split(':')[5] + except IndexError: + # Try using environment variable + home = os.environ.get('HOME') + + # Raise exception if necessary + if not home: + raise RuntimeError(f'Failed to find home for: {user}') + + # 
Done + return pathlib.Path(home) + + +def get_user_name(): + """Get real user name, returns str.""" + user = None + + # Query environment + user = os.environ.get('SUDO_USER') + if not user: + user = os.environ.get('USER') + + # Raise exception if necessary + if not user: + raise RuntimeError('Failed to determine user') + + # Done + return user + + +def make_temp_file(): + """Make temporary file, returns pathlib.Path() obj.""" + proc = run_program(['mktemp'], check=False) + return pathlib.Path(proc.stdout.strip()) + + +def mount(source, mount_point=None, read_write=False): + """Mount source (on mount_point if provided). + + NOTE: If not running_as_root() then udevil will be used. + """ + cmd = [ + 'mount', + '-o', 'rw' if read_write else 'ro', + source, + ] + if not running_as_root(): + cmd.insert(0, 'udevil') + if mount_point: + cmd.append(mount_point) + + # Run mount command + proc = run_program(cmd, check=False) + if not proc.returncode == 0: + raise RuntimeError(f'Failed to mount: {source} on {mount_point}') + + +def mount_volumes(device_path=None, read_write=False, scan_corestorage=False): + """Mount all detected volumes, returns list. + + NOTE: If device_path is specified then only volumes + under that path will be mounted. + """ + report = [] + volumes = [] + containers = [] + + # Get list of volumes + cmd = [ + 'lsblk', + '--list', + '--noheadings', + '--output=name', + '--paths', + ] + if device_path: + cmd.append(device_path) + proc = run_program(cmd, check=False) + for line in sorted(proc.stdout.splitlines()): + volumes.append(Disk(line.strip())) + + # Get list of CoreStorage containers + containers = [ + vol for vol in volumes if vol.details.get('parttype', '') == UUID_CORESTORAGE + ] + + # Scan CoreStorage containers + if scan_corestorage: + if containers: + std.print_warning( + f'Detected CoreStorage container{"s" if len(containers) > 1 else ""}', + ) + std.print_standard('Scanning for inner volume(s)...') + for container in containers: + volumes.extend(scan_corestorage_container(container)) + + # Mount volumes + for vol in volumes: + already_mounted = vol.details.get('mountpoint', '') + result = f'{vol.details["name"].replace("/dev/mapper/", ""):<20}' + + # Parent devices + if vol.details.get('children', False): + if vol.details.get('fstype', ''): + result += vol.details['fstype'] + if vol.details.get('label', ''): + result += f' "{vol.details["label"]}"' + report.append(std.color_string(result, 'BLUE')) + continue + + # Attempt to mount volume + if not already_mounted: + mount(vol.path, read_write=read_write) + proc = run_program(cmd, check=False) + if proc.returncode: + result += 'Failed to mount' + report.append(std.color_string(result, 'RED')) + continue + + # Add size to result + vol.get_details() + vol.details['fsused'] = vol.details.get('fsused', -1) + vol.details['fsavail'] = vol.details.get('fsavail', -1) + result += f'{"Mounted on "+vol.details.get("mountpoint", "?"):<40}' + result = ( + f'{result} ({vol.details.get("fstype", "Unknown FS")+",":<5} ' + f'{std.bytes_to_string(vol.details["fsused"], decimals=1):>9} used, ' + f'{std.bytes_to_string(vol.details["fsavail"], decimals=1):>9} free)' + ) + report.append( + std.color_string( + result, + 'YELLOW' if already_mounted else None, + ), + ) + + # Done + return report + + +def running_as_root(): + """Check if running with effective UID of 0, returns bool.""" + return os.geteuid() == 0 + + +def scan_corestorage_container(container, timeout=300): + """Scan CoreStorage container for inner volumes, returns list.""" + # 
TODO: Test Scanning CoreStorage containers + detected_volumes = {} + inner_volumes = [] + log_path = make_temp_file() + + # Run scan via TestDisk + cmd = [ + 'sudo', 'testdisk', + '/logname', log_path, + '/debug', + '/log', + '/cmd', container.path, 'partition_none,analyze', + ] + proc = popen_program(cmd) + try: + proc.wait(timeout=timeout) + except subprocess.TimeoutExpired: + # Failed to find any volumes, stop scan + run_program(['sudo', 'kill', proc.pid], check=False) + + # Check results + if proc.returncode == 0 and log_path.exists(): + results = log_path.read_text(encoding='utf-8', errors='ignore') + for line in results.splitlines(): + line = line.lower().strip() + match = re.match(r'^.*echo "([^"]+)" . dmsetup create test(\d)$', line) + if match: + cs_name = f'CoreStorage_{container.path.name}_{match.group(2)}' + detected_volumes[cs_name] = match.group(1) + + # Create mapper device(s) if necessary + for name, cmd in detected_volumes.items(): + cmd_file = make_temp_file() + cmd_file.write_text(cmd) + proc = run_program( + cmd=['sudo', 'dmsetup', 'create', name, cmd_file], + check=False, + ) + if proc.returncode == 0: + inner_volumes.append(Disk(f'/dev/mapper/{name}')) + + # Done + return inner_volumes + + +def unmount(source_or_mountpoint): + """Unmount source_or_mountpoint. + + NOTE: If not running_as_root() then udevil will be used. + """ + cmd = [ + 'umount', + source_or_mountpoint, + ] + if not running_as_root(): + cmd.insert(0, 'udevil') + + # Run unmount command + proc = run_program(cmd, check=False) + if not proc.returncode == 0: + raise RuntimeError(f'Failed to unmount: {source_or_mountpoint}') + + +if __name__ == '__main__': + print("This file is not meant to be called directly.") diff --git a/scripts/wk/os/win.py b/scripts/wk/os/win.py new file mode 100644 index 00000000..f367e40f --- /dev/null +++ b/scripts/wk/os/win.py @@ -0,0 +1,181 @@ +"""WizardKit: Windows Functions""" +# vim: sts=2 sw=2 ts=2 + +import logging +import os +import pathlib +import platform + +from wk.borrowed import acpi +from wk.exe import run_program +from wk.io import non_clobber_path +from wk.log import format_log_path +from wk.std import GenericError, GenericWarning, sleep + + +# STATIC VARIABLES +LOG = logging.getLogger(__name__) +OS_VERSION = float(platform.win32_ver()[0]) # TODO: Check if Win8.1 returns '8' +REG_MSISERVER = r'HKLM\SYSTEM\CurrentControlSet\Control\SafeBoot\Network\MSIServer' +SLMGR = pathlib.Path(f'{os.environ.get("SYSTEMROOT")}/System32/slmgr.vbs') + + +# Functions +def activate_with_bios(): + """Attempt to activate Windows with a key stored in the BIOS.""" + # Code borrowed from https://github.com/aeruder/get_win8key + ##################################################### + #script to query windows 8.x OEM key from PC firmware + #ACPI -> table MSDM -> raw content -> byte offset 56 to end + #ck, 03-Jan-2014 (christian@korneck.de) + ##################################################### + bios_key = None + table = b"MSDM" + if acpi.FindAcpiTable(table) is True: + rawtable = acpi.GetAcpiTable(table) + #http://msdn.microsoft.com/library/windows/hardware/hh673514 + #byte offset 36 from beginning + # = Microsoft 'software licensing data structure' + # / 36 + 20 bytes offset from beginning = Win Key + bios_key = rawtable[56:len(rawtable)].decode("utf-8") + if not bios_key: + raise GenericError('BIOS key not found.') + + # Check if activation is needed + if is_activated(): + raise GenericWarning('System already activated') + + # Install Key + cmd = ['cscript', '//nologo', SLMGR, '/ipk', 
bios_key] + run_program(cmd, check=False) + sleep(5) + + # Attempt activation + cmd = ['cscript', '//nologo', SLMGR, '/ato'] + run_program(cmd, check=False) + sleep(5) + + # Check status + if not is_activated(): + raise GenericError('Activation Failed') + + +def disable_safemode(): + """Edit BCD to remove safeboot value.""" + cmd = ['bcdedit', '/deletevalue', '{default}', 'safeboot'] + run_program(cmd) + + +def disable_safemode_msi(): + """Disable MSI access under safemode.""" + cmd = ['reg', 'delete', REG_MSISERVER, '/f'] + run_program(cmd) + + +def enable_safemode(): + """Edit BCD to set safeboot as default.""" + cmd = ['bcdedit', '/set', '{default}', 'safeboot', 'network'] + run_program(cmd) + + +def enable_safemode_msi(): + """Enable MSI access under safemode.""" + cmd = ['reg', 'add', REG_MSISERVER, '/f'] + run_program(cmd) + cmd = [ + 'reg', 'add', REG_MSISERVER, '/ve', + '/t', 'REG_SZ', + '/d', 'Service', '/f', + ] + run_program(cmd) + + +def get_activation_string(): + """Get activation status, returns str.""" + cmd = ['cscript', '//nologo', SLMGR, '/xpr'] + proc = run_program(cmd, check=False) + act_str = proc.stdout + act_str = act_str.splitlines()[1] + act_str = act_str.strip() + return act_str + + +def is_activated(): + """Check if Windows is activated via slmgr.vbs and return bool.""" + act_str = get_activation_string() + + # Check result. + return act_str and 'permanent' in act_str + + +def run_chkdsk_offline(): + """Set filesystem 'dirty bit' to force a CHKDSK during startup.""" + cmd = f'fsutil dirty set {os.environ.get("SYSTEMDRIVE")}' + proc = run_program(cmd.split(), check=False) + + # Check result + if proc.returncode > 0: + raise GenericError('Failed to set dirty bit.') + + +def run_chkdsk_online(): + """Run CHKDSK in a split window. + + NOTE: If run on Windows 8+ online repairs are attempted. 
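As a rough illustration of how this check is meant to surface its results, a minimal sketch using the TryAndPrint helper added later in this patch; the message string is illustrative and the wk package is assumed to be importable:

  from wk import std
  from wk.os import win

  try_print = std.TryAndPrint()
  # returncode 1 raises GenericWarning ('Repaired (or manually aborted)'),
  # higher return codes raise GenericError ('Issue(s) detected')
  result = try_print.run('CHKDSK (online)...', win.run_chkdsk_online)
  if result['Failed']:
    print(result['Exception'])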
+ """ + cmd = ['CHKDSK', os.environ.get('SYSTEMDRIVE', 'C:')] + if OS_VERSION >= 8: + cmd.extend(['/scan', '/perf']) + log_path = format_log_path(log_name='CHKDSK', tool=True) + err_path = log_path.with_suffix('.err') + + # Run scan + proc = run_program(cmd, check=False) + + # Check result + if proc.returncode == 1: + raise GenericWarning('Repaired (or manually aborted)') + if proc.returncode > 1: + raise GenericError('Issue(s) detected') + + # Save output + os.makedirs(log_path.parent, exist_ok=True) + with open(log_path, 'w') as _f: + _f.write(proc.stdout) + with open(err_path, 'w') as _f: + _f.write(proc.stderr) + + +def run_sfc_scan(): + """Run SFC and save results.""" + cmd = ['sfc', '/scannow'] + log_path = format_log_path(log_name='SFC', tool=True) + err_path = log_path.with_suffix('.err') + + # Run SFC + proc = run_program(cmd, check=False, encoding='utf-16') + + # Fix paths + log_path = non_clobber_path(log_path) + err_path = non_clobber_path(err_path) + + # Save output + os.makedirs(log_path.parent, exist_ok=True) + with open(log_path, 'w') as _f: + _f.write(proc.stdout) + with open(err_path, 'w') as _f: + _f.write(proc.stderr) + + # Check result + if 'did not find any integrity violations' in proc.stdout: + pass + elif 'successfully repaired' in proc.stdout: + raise GenericWarning('Repaired') + elif 'found corrupt files' in proc.stdout: + raise GenericError('Corruption detected') + else: + raise OSError + + +if __name__ == '__main__': + print("This file is not meant to be called directly.") diff --git a/scripts/wk/std.py b/scripts/wk/std.py new file mode 100644 index 00000000..ba44f18e --- /dev/null +++ b/scripts/wk/std.py @@ -0,0 +1,1061 @@ +"""WizardKit: Standard Functions""" +# pylint: disable=too-many-lines +# vim: sts=2 sw=2 ts=2 + +import itertools +import logging +import lzma +import os +import pathlib +import platform +import re +import socket +import subprocess +import sys +import time +import traceback + +from collections import OrderedDict + +import requests + +from wk.cfg.main import ( + ENABLED_UPLOAD_DATA, + INDENT, + SUPPORT_MESSAGE, + WIDTH, + ) +from wk.cfg.net import CRASH_SERVER + + +# STATIC VARIABLES +COLORS = { + 'CLEAR': '\033[0m', + 'RED': '\033[31m', + 'RED_BLINK': '\033[31;5m', + 'ORANGE': '\033[31;1m', + 'ORANGE_RED': '\033[1;31;41m', + 'GREEN': '\033[32m', + 'YELLOW': '\033[33m', + 'YELLOW_BLINK': '\033[33;5m', + 'BLUE': '\033[34m', + 'PURPLE': '\033[35m', + 'CYAN': '\033[36m', + } +LOG = logging.getLogger(__name__) +PLATFORM = platform.system() +REGEX_SIZE_STRING = re.compile( + r'(?P\-?\d+\.?\d*)\s*(?P[PTGMKB])(?PI?)B?' + ) + + +# Exception Classes +class GenericAbort(Exception): + """Exception used for aborts selected by the user at runtime.""" + +class GenericError(Exception): + """Exception used when the built-in exceptions don't fit.""" + +class GenericWarning(Exception): + """Exception used to highlight non-critical events. + + NOTE: Avoiding built-in warning exceptions in case the + warnings filter has been changed from the default. + """ + + +# Classes +class Menu(): + """Object for tracking menu specific data and methods. + + Menu items are added to an OrderedDict so the order is preserved. + + ASSUMPTIONS: + 1. All entry names are unique. + 2. All action entry names start with different letters. 
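For orientation, a short usage sketch; the title and entry names are made up for illustration and wk.std is assumed to be importable:

  from wk import std

  menu = std.Menu(title='Example Menu')
  menu.add_option('Ping test')
  menu.add_option('Speedtest')
  menu.add_action('Quit')
  # Blocks for input; returns a (name, details) tuple for the chosen entry
  name, details = menu.simple_select()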
+ """ + def __init__(self, title='[Untitled Menu]'): + self.actions = OrderedDict() + self.options = OrderedDict() + self.sets = OrderedDict() + self.toggles = OrderedDict() + self.disabled_str = 'Disabled' + self.separator = '─' + self.title = title + + def _generate_menu_text(self): + """Generate menu text, returns str.""" + separator_string = self._get_separator_string() + menu_lines = [self.title, separator_string] if self.title else [] + + # Sets & toggles + for section in (self.sets, self.toggles): + for details in section.values(): + if details.get('Hidden', False): + continue + if details.get('Separator', False): + menu_lines.append(separator_string) + menu_lines.append(details['Display Name']) + if self.sets or self.toggles: + menu_lines.append(separator_string) + + # Options + for details in self.options.values(): + if details.get('Hidden', False): + continue + if details.get('Separator', False): + menu_lines.append(separator_string) + menu_lines.append(details['Display Name']) + if self.options: + menu_lines.append(separator_string) + + # Actions + for details in self.actions.values(): + if details.get('Hidden', False): + continue + if details.get('Separator', False): + menu_lines.append(separator_string) + menu_lines.append(details['Display Name']) + + # Show menu + menu_lines.append('') + menu_lines = [str(line) for line in menu_lines] + return '\n'.join(menu_lines) + + def _get_display_name( + self, name, details, + index=None, no_checkboxes=True, setting_item=False): + # pylint: disable=no-self-use,too-many-arguments + """Format display name based on details and args, returns str.""" + disabled = details.get('Disabled', False) + if setting_item and not details['Selected']: + # Display item in YELLOW + disabled = True + checkmark = '*' + if 'DISPLAY' in os.environ or PLATFORM == 'Darwin': + checkmark = '✓' + display_name = f'{index if index else name[:1].upper()}: ' + if not (index and index >= 10): + display_name = f' {display_name}' + if setting_item and 'Value' in details: + name = f'{name} = {details["Value"]}' + + # Add enabled status if necessary + if not no_checkboxes: + display_name += f'[{checkmark if details["Selected"] else " "}] ' + + # Add name + if disabled: + display_name += color_string(f'{name} ({self.disabled_str})', 'YELLOW') + else: + display_name += name + + # Done + return display_name + + def _get_separator_string(self): + """Format separator length based on name lengths, returns str.""" + separator_length = 0 + + # Check title line(s) + if self.title: + for line in self.title.split('\n'): + separator_length = max(separator_length, len(strip_colors(line))) + + # Loop over all item names + for section in (self.actions, self.options, self.sets, self.toggles): + for details in section.values(): + if details.get('Hidden', False): + # Skip hidden lines + continue + line = strip_colors(details['Display Name']) + separator_length = max(separator_length, len(line)) + separator_length += 1 + + # Done + return self.separator * separator_length + + def _get_valid_answers(self): + """Get valid answers based on menu items, returns list.""" + valid_answers = [] + + # Numbered items + index = 0 + for section in (self.sets, self.toggles, self.options): + for details in section.values(): + if details.get('Hidden', False): + # Don't increment index or add to valid_answers + continue + index += 1 + if not details.get('Disabled', False): + valid_answers.append(str(index)) + + # Action items + for name, details in self.actions.items(): + if not details.get('Disabled', 
False): + valid_answers.append(name[:1].upper()) + + # Done + return valid_answers + + def _resolve_selection(self, selection): + """Get menu item based on user selection, returns tuple.""" + offset = 1 + resolved_selection = None + if selection.isnumeric(): + # Enumerate over numbered entries + entries = [ + *self.sets.items(), + *self.toggles.items(), + *self.options.items(), + ] + for _i, details in enumerate(entries): + if details[1].get('Hidden', False): + offset -= 1 + elif str(_i+offset) == selection: + resolved_selection = (details) + break + else: + # Just check actions + for action, details in self.actions.items(): + if action.lower().startswith(selection.lower()): + resolved_selection = (action, details) + break + + # Done + return resolved_selection + + def _update(self, single_selection=True, settings_mode=False): + """Update menu items in preparation for printing to screen.""" + index = 0 + + # Fix selection status for sets + for set_details in self.sets.values(): + set_selected = True + set_targets = set_details['Targets'] + for option, option_details in self.options.items(): + if option in set_targets and not option_details['Selected']: + set_selected = False + elif option not in set_targets and option_details['Selected']: + set_selected = False + set_details['Selected'] = set_selected + + # Numbered sections + for section in (self.sets, self.toggles, self.options): + for name, details in section.items(): + if details.get('Hidden', False): + # Skip hidden lines and don't increment index + continue + index += 1 + details['Display Name'] = self._get_display_name( + name, + details, + index=index, + no_checkboxes=single_selection, + setting_item=settings_mode, + ) + + # Actions + for name, details in self.actions.items(): + details['Display Name'] = self._get_display_name( + name, + details, + no_checkboxes=True, + ) + + def _update_entry_selection_status(self, entry, toggle=True, status=None): + """Update entry selection status either directly or by toggling.""" + if entry in self.sets: + # Update targets not the set itself + new_status = not self.sets[entry]['Selected'] if toggle else status + targets = self.sets[entry]['Targets'] + self._update_set_selection_status(targets, new_status) + for section in (self.toggles, self.options, self.actions): + if entry in section: + if toggle: + section[entry]['Selected'] = not section[entry]['Selected'] + else: + section[entry]['Selected'] = status + + def _update_set_selection_status(self, targets, status): + """Select or deselect options based on targets and status.""" + for option, details in self.options.items(): + # If (new) status is True and this option is a target then select + # Otherwise deselect + details['Selected'] = status and option in targets + + def _user_select(self, prompt): + """Show menu and select an entry, returns str.""" + menu_text = self._generate_menu_text() + valid_answers = self._get_valid_answers() + + # Menu loop + while True: + clear_screen() + print(menu_text) + sleep(0.01) + answer = input_text(prompt).strip() + if answer.upper() in valid_answers: + break + + # Done + return answer + + def add_action(self, name, details=None): + """Add action to menu.""" + details = details if details else {} + details['Selected'] = details.get('Selected', False) + self.actions[name] = details + + def add_option(self, name, details=None): + """Add option to menu.""" + details = details if details else {} + details['Selected'] = details.get('Selected', False) + self.options[name] = details + + def add_set(self, name, 
details=None): + """Add set to menu.""" + details = details if details else {} + details['Selected'] = details.get('Selected', False) + + # Safety check + if 'Targets' not in details: + raise KeyError('Menu set has no targets') + + # Add set + self.sets[name] = details + + def add_toggle(self, name, details=None): + """Add toggle to menu.""" + details = details if details else {} + details['Selected'] = details.get('Selected', False) + self.toggles[name] = details + + def advanced_select(self, prompt='Please make a selection: '): + """Display menu and make multiple selections, returns tuple. + + NOTE: Menu is displayed until an action entry is selected. + """ + while True: + self._update(single_selection=False) + user_selection = self._user_select(prompt) + selected_entry = self._resolve_selection(user_selection) + if user_selection.isnumeric(): + # Update selection(s) + self._update_entry_selection_status(selected_entry[0]) + else: + # Action selected + break + + # Done + return selected_entry + + def settings_select(self, prompt='Please make a selection: '): + """Display menu and make multiple selections, returns tuple. + + NOTE: Menu is displayed until an action entry is selected. + """ + choice_kwargs = { + 'choices': ['T', 'C'], + 'prompt': 'Toggle or change value?', + } + + while True: + self._update(single_selection=True, settings_mode=True) + user_selection = self._user_select(prompt) + selected_entry = self._resolve_selection(user_selection) + if user_selection.isnumeric(): + if 'Value' in selected_entry[-1] and choice(**choice_kwargs) == 'C': + # Change + selected_entry[-1]['Value'] = input_text('Enter new value: ') + else: + # Toggle + self._update_entry_selection_status(selected_entry[0]) + else: + # Action selected + break + + # Done + return selected_entry + + def simple_select(self, prompt='Please make a selection: '): + """Display menu and make a single selection, returns tuple.""" + self._update() + user_selection = self._user_select(prompt) + return self._resolve_selection(user_selection) + + +class TryAndPrint(): + """Object used to standardize running functions and returning the result. + + The errors and warning attributes are used to allow fine-tuned results + based on exception names. 
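A brief usage sketch; the message and file name are fictional, and only methods defined on this class plus a built-in exception are used:

  from wk import std

  try_print = std.TryAndPrint()
  # Downgrade missing-file problems from errors (RED) to warnings (YELLOW)
  try_print.add_warning('FileNotFoundError')
  result = try_print.run('Loading config...', open, 'missing-example.cfg')
  print(result['Failed'], result['Exception'])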
+ """ + def __init__(self, msg_bad='FAILED', msg_good='SUCCESS'): + self.indent = INDENT + self.msg_bad = msg_bad + self.msg_good = msg_good + self.width = WIDTH + self.list_errors = ['GenericError'] + self.list_warnings = ['GenericWarning'] + + def _format_exception_message(self, _exception): + """Format using the exception's args or name, returns str.""" + LOG.debug( + 'Formatting exception: %s', + _exception.__class__.__name__, + ) + message = None + + # Use known argument index or first string found + try: + if isinstance(_exception, subprocess.CalledProcessError): + message = _exception.stderr + if not isinstance(message, str): + message = message.decode('utf-8') + message = message.strip() + elif isinstance(_exception, FileNotFoundError): + message = _exception.args[1] + elif isinstance(_exception, ZeroDivisionError): + message = 'ZeroDivisionError' + else: + for arg in _exception.args: + if isinstance(arg, str): + message = arg + break + except Exception: # pylint: disable=broad-except + # Just use the exception name instead + pass + + # Safety check + if not message: + try: + message = _exception.__class__.__name__ + except Exception: # pylint: disable=broad-except + message = 'UNKNOWN ERROR' + + # Fix multi-line messages + if '\n' in message: + try: + lines = [ + f'{" "*(self.indent+self.width)}{line.strip()}' + for line in message.splitlines() if line.strip() + ] + lines[0] = lines[0].strip() + message = '\n'.join(lines) + except Exception: # pylint: disable=broad-except + pass + + # Done + return message + + def _format_function_output(self, output): + """Format function output for use in try_and_print(), returns str.""" + LOG.debug('Formatting output: %s', output) + + if not output: + raise GenericWarning('No output') + + # Ensure we're working with a list + if isinstance(output, subprocess.CompletedProcess): + stdout = output.stdout + if not isinstance(stdout, str): + stdout = stdout.decode('utf8') + output = stdout.strip().splitlines() + else: + try: + output = list(output) + except TypeError: + output = [output] + + # Safety check + if not output: + # Going to ignore empty function output for now + LOG.error('Output is empty') + return 'UNKNOWN' + + # Build result_msg + result_msg = f'{output.pop(0)}' + if output: + output = [f'{" "*(self.indent+self.width)}{line}' for line in output] + result_msg += '\n' + '\n'.join(output) + + # Done + return result_msg + + def _get_exception(self, name): + # pylint: disable=no-self-use + """Get exception by name, returns exception object. 
+ + [Doctest] + >>> self._get_exception('AttributeError') + + >>> self._get_exception('CalledProcessError') + + >>> self._get_exception('GenericError') + + """ + LOG.debug('Getting exception: %s', name) + try: + obj = getattr(sys.modules[__name__], name) + except AttributeError: + # Try builtin classes + obj = getattr(sys.modules['builtins'], name) + return obj + + def _log_result(self, message, result_msg): + """Log result text without color formatting.""" + log_text = f'{" "*self.indent}{message:<{self.width}}{result_msg}' + for line in log_text.splitlines(): + line = strip_colors(line) + LOG.info(line) + + def add_error(self, exception_name): + """Add exception name to error list.""" + if exception_name not in self.list_errors: + self.list_errors.append(exception_name) + + def add_warning(self, exception_name): + """Add exception name to warning list.""" + if exception_name not in self.list_warnings: + self.list_warnings.append(exception_name) + + def run( + self, message, function, *args, + catch_all=True, msg_good=None, verbose=False, **kwargs): + # pylint: disable=catching-non-exception + """Run a function and print the results, returns results as dict. + + If catch_all is True then (nearly) all exceptions will be caught. + Otherwise if an exception occurs that wasn't specified it will be + re-raised. + + If the function returns data it will be used instead of msg_good, + msg_bad, or exception text. + The output should be a list or a subprocess.CompletedProcess object. + + If msg_good is passed it will override self.msg_good for this call. + + If verbose is True then exception names or messages will be used for + the result message. Otherwise it will simply be set to result_bad. + + args and kwargs are passed to the function. + """ + LOG.debug('function: %s.%s', function.__module__, function.__name__) + LOG.debug('args: %s', args) + LOG.debug('kwargs: %s', kwargs) + LOG.debug( + 'catch_all: %s, msg_good: %s, verbose: %s', + catch_all, + msg_good, + verbose, + ) + f_exception = None + output = None + result_msg = 'UNKNOWN' + + # Build exception tuples + e_exceptions = tuple(self._get_exception(e) for e in self.list_errors) + w_exceptions = tuple(self._get_exception(e) for e in self.list_warnings) + + # Run function and catch exceptions + print(f'{" "*self.indent}{message:<{self.width}}', end='', flush=True) + LOG.debug('Running function: %s.%s', function.__module__, function.__name__) + try: + output = function(*args, **kwargs) + except w_exceptions as _exception: + # Warnings + result_msg = self._format_exception_message(_exception) + print_warning(result_msg, log=False) + f_exception = _exception + except e_exceptions as _exception: + # Exceptions + result_msg = self._format_exception_message(_exception) + print_error(result_msg, log=False) + f_exception = _exception + except Exception as _exception: # pylint: disable=broad-except + # Unexpected exceptions + if verbose: + result_msg = self._format_exception_message(_exception) + else: + result_msg = self.msg_bad + print_error(result_msg, log=False) + f_exception = _exception + if not catch_all: + # Re-raise error as necessary + raise + else: + # Success + if output: + result_msg = self._format_function_output(output) + print(result_msg) + else: + result_msg = msg_good if msg_good else self.msg_good + print_success(result_msg, log=False) + + # Done + self._log_result(message, result_msg) + return { + 'Failed': bool(f_exception), + 'Exception': f_exception, + 'Output': output, + } + + +# Functions +def abort(prompt='Aborted.', 
show_prompt=True, return_code=1): + """Abort script.""" + print_warning(prompt) + if show_prompt: + sleep(0.5) + pause(prompt='Press Enter to exit... ') + sys.exit(return_code) + + +def ask(prompt='Kotaero!'): + """Prompt the user with a Y/N question, returns bool.""" + answer = None + prompt = f'{prompt} [Y/N]: ' + + # Loop until acceptable answer is given + while answer is None: + tmp = input_text(prompt) + if re.search(r'^y(es|up|)$', tmp, re.IGNORECASE): + answer = True + elif re.search(r'^n(o|ope|)$', tmp, re.IGNORECASE): + answer = False + + # Done + LOG.info('%s%s', prompt, 'Yes' if answer else 'No') + return answer + + +def beep(repeat=1): + """Play system bell with optional repeat.""" + # TODO: Verify Windows functionality + while repeat >= 1: + # Print bell char without a newline + print('\a', end='', flush=True) + sleep(0.5) + repeat -= 1 + + +def bytes_to_string(size, decimals=0, use_binary=True): + """Convert size into a human-readable format, returns str. + + [Doctest] + >>> bytes_to_string(10) + '10 B' + >>> bytes_to_string(10_000_000) + '10 MiB' + >>> bytes_to_string(10_000_000, decimals=2) + '9.54 MiB' + >>> bytes_to_string(10_000_000, decimals=2, use_binary=False) + '10.00 MB' + >>> bytes_to_string(-10_000_000, decimals=4) + '-9.5367 MiB' + """ + LOG.debug( + 'size: %s, decimals: %s, use_binary: %s', + size, + decimals, + use_binary, + ) + size = float(size) + abs_size = abs(size) + + # Set scale + scale = 1000 + suffix = 'B' + if use_binary: + scale = 1024 + suffix = 'iB' + + # Convert to sensible units + if abs_size >= scale ** 5: + size /= scale ** 5 + units = 'P' + suffix + elif abs_size >= scale ** 4: + size /= scale ** 4 + units = 'T' + suffix + elif abs_size >= scale ** 3: + size /= scale ** 3 + units = 'G' + suffix + elif abs_size >= scale ** 2: + size /= scale ** 2 + units = 'M' + suffix + elif abs_size >= scale ** 1: + size /= scale ** 1 + units = 'K' + suffix + else: + size /= scale ** 0 + units = f' {" " if use_binary else ""}B' + size = f'{size:0.{decimals}f} {units}' + + # Done + LOG.debug('string: %s', size) + return size + + +def choice(choices, prompt='答えろ!'): + """Choose an option from a provided list, returns str. + + Choices provided will be converted to uppercase and returned as such. + Similar to the commands choice (Windows) and select (Linux). 
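A small interactive sketch; the prompt and option names are illustrative:

  from wk.std import choice

  answer = choice(['Retry', 'Skip', 'Quit'], prompt='File copy failed.')
  # Only the first letter of each choice is kept, so this prompts with
  # 'File copy failed. [R/S/Q]' and returns 'R', 'S', or 'Q' in uppercase
  if answer == 'Q':
    raise SystemExit(1)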
+ """ + LOG.debug('choices: %s, prompt: %s', choices, prompt) + answer = None + choices = [str(c).upper()[:1] for c in choices] + prompt = f'{prompt} [{"/".join(choices)}]' + regex = f'^({"|".join(choices)})$' + + # Loop until acceptable answer is given + while answer is None: + tmp = input_text(prompt=prompt) + if re.search(regex, tmp, re.IGNORECASE): + answer = tmp.upper() + + # Done + LOG.info('%s %s', prompt, answer) + return answer + + +def clear_screen(): + """Simple wrapper for clear/cls.""" + cmd = 'cls' if os.name == 'nt' else 'clear' + subprocess.run(cmd, check=False, shell=True, stderr=subprocess.PIPE) + + +def color_string(strings, colors, sep=' '): + """Build colored string using ANSI escapes, returns str.""" + clear_code = COLORS['CLEAR'] + msg = [] + + # Convert to tuples if necessary + if isinstance(strings, (str, pathlib.Path)): + strings = (strings,) + if isinstance(colors, (str, pathlib.Path)): + colors = (colors,) + + # Convert to strings if necessary + try: + iter(strings) + except TypeError: + # Assuming single element passed, convert to string + strings = (str(strings),) + try: + iter(colors) + except TypeError: + # Assuming single element passed, convert to string + colors = (str(colors),) + + # Build new string with color escapes added + for string, color in itertools.zip_longest(strings, colors): + color_code = COLORS.get(color, clear_code) + msg.append(f'{color_code}{string}{clear_code}') + + # Done + return sep.join(msg) + + +def generate_debug_report(): + """Generate debug report, returns str.""" + platform_function_list = ( + 'architecture', + 'machine', + 'platform', + 'python_version', + ) + report = [] + + # Logging data + log_path = get_log_filepath() + if log_path: + report.append('------ Start Log -------') + report.append('') + with open(log_path, 'r') as log_file: + report.extend(log_file.read().splitlines()) + report.append('') + report.append('------- End Log --------') + + # System + report.append('--- Start debug info ---') + report.append('') + report.append('[System]') + report.append(f' {"FQDN":<24} {socket.getfqdn()}') + for func in platform_function_list: + func_name = func.replace('_', ' ').capitalize() + func_result = getattr(platform, func)() + report.append(f' {func_name:<24} {func_result}') + report.append(f' {"Python sys.argv":<24} {sys.argv}') + report.append('') + + # Environment + report.append('[Environment Variables]') + for key, value in sorted(os.environ.items()): + report.append(f' {key:<24} {value}') + report.append('') + + # Done + report.append('---- End debug info ----') + return '\n'.join(report) + + +def get_log_filepath(): + """Get the log filepath from the root logger, returns pathlib.Path obj. + + NOTE: This will use the first handler baseFilename it finds (if any). 
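A minimal sketch of that lookup, using only the standard logging module; the path is illustrative:

  import logging

  logging.getLogger().addHandler(logging.FileHandler('/tmp/example.log'))
  # Returns the handler's baseFilename as a resolved pathlib.Path object
  log_path = get_log_filepath()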
+ """ + log_filepath = None + root_logger = logging.getLogger() + + # Check handlers + for handler in root_logger.handlers: + if hasattr(handler, 'baseFilename'): + log_filepath = pathlib.Path(handler.baseFilename).resolve() + break + + # Done + return log_filepath + + +def input_text(prompt='Enter text', allow_empty_response=True): + """Get text from user, returns string.""" + prompt = str(prompt) + response = None + if prompt[-1:] != ' ': + prompt += ' ' + print(prompt, end='', flush=True) + + while response is None: + try: + response = input() + LOG.debug('%s%s', prompt, response) + except EOFError: + # Ignore and try again + LOG.warning('Exception occured', exc_info=True) + print('', flush=True) + if not allow_empty_response: + if response is None or not response.strip(): + # The None check here is used to avoid a TypeError if response is None + print(f'\r{prompt}', end='', flush=True) + response = None + + return response + + +def major_exception(): + """Display traceback, optionally upload detailes, and exit.""" + LOG.critical('Major exception encountered', exc_info=True) + print_error('Major exception', log=False) + print_warning(SUPPORT_MESSAGE) + print(traceback.format_exc()) + + # Build report + report = generate_debug_report() + + # Upload details + prompt = f'Upload details to {CRASH_SERVER.get("Name", "?")}?' + if ENABLED_UPLOAD_DATA and ask(prompt): + print('Uploading... ', end='', flush=True) + try: + upload_debug_report(report, reason='CRASH') + except Exception: #pylint: disable=broad-except + print_error('FAILED', log=False) + LOG.error('Upload failed', exc_info=True) + else: + print_success('SUCCESS', log=False) + LOG.info('Upload successful') + + # Done + pause('Press Enter to exit... ') + raise SystemExit(1) + + +def pause(prompt='Press Enter to continue... 
'): + """Simple pause implementation.""" + input_text(prompt) + + +def print_colored(strings, colors, log=False, sep=' ', **kwargs): + """Prints strings in the colors specified.""" + LOG.debug( + 'strings: %s, colors: %s, sep: %s, kwargs: %s', + strings, colors, sep, kwargs, + ) + msg = color_string(strings, colors, sep=sep) + print_options = { + 'end': kwargs.get('end', '\n'), + 'file': kwargs.get('file', sys.stdout), + 'flush': kwargs.get('flush', False), + } + + print(msg, **print_options) + if log: + LOG.info(strip_colors(msg)) + + +def print_error(msg, log=True, **kwargs): + """Prints message in RED and log as ERROR.""" + if 'file' not in kwargs: + # Only set if not specified + kwargs['file'] = sys.stderr + print_colored(msg, 'RED', **kwargs) + if log: + LOG.error(msg) + + +def print_info(msg, log=True, **kwargs): + """Prints message in BLUE and log as INFO.""" + print_colored(msg, 'BLUE', **kwargs) + if log: + LOG.info(msg) + + +def print_report(report, indent=None, log=True): + """Print report to screen and optionally to log.""" + for line in report: + if indent: + line = f'{" "*indent}{line}' + print(line) + if log: + LOG.info(strip_colors(line)) + + +def print_standard(msg, log=True, **kwargs): + """Prints message and log as INFO.""" + print(msg, **kwargs) + if log: + LOG.info(msg) + + +def print_success(msg, log=True, **kwargs): + """Prints message in GREEN and log as INFO.""" + print_colored(msg, 'GREEN', **kwargs) + if log: + LOG.info(msg) + + +def print_warning(msg, log=True, **kwargs): + """Prints message in YELLOW and log as WARNING.""" + if 'file' not in kwargs: + # Only set if not specified + kwargs['file'] = sys.stderr + print_colored(msg, 'YELLOW', **kwargs) + if log: + LOG.warning(msg) + + +def set_title(title): + """Set window title.""" + LOG.debug('title: %s', title) + if os.name == 'nt': + os.system(f'title {title}') + else: + print_error('Setting the title is only supported under Windows.') + + +def show_data(message, data, color=None): + """Display info using standard WIDTH and INDENT.""" + colors = (None, color if color else None) + print_colored( + (f'{" "*INDENT}{message:<{WIDTH}}', data), + colors, + log=True, + sep='', + ) + + +def sleep(seconds=2): + """Simple wrapper for time.sleep.""" + time.sleep(seconds) + + +def string_to_bytes(size, assume_binary=False): + """Convert human-readable size str to bytes and return an int.""" + LOG.debug('size: %s, assume_binary: %s', size, assume_binary) + scale = 1000 + size = str(size) + tmp = REGEX_SIZE_STRING.search(size.upper()) + + # Raise exception if string can't be parsed as a size + if not tmp: + raise ValueError(f'Invalid size string: {size}') + + # Set scale + if tmp.group('binary') or assume_binary: + scale = 1024 + + # Convert to bytes + size = float(tmp.group('size')) + units = tmp.group('units') + if units == 'P': + size *= scale ** 5 + if units == 'T': + size *= scale ** 4 + elif units == 'G': + size *= scale ** 3 + elif units == 'M': + size *= scale ** 2 + elif units == 'K': + size *= scale ** 1 + elif units == 'B': + size *= scale ** 0 + size = int(size) + + # Done + LOG.debug('bytes: %s', size) + return size + + +def strip_colors(string): + """Strip known ANSI color escapes from string, returns str.""" + LOG.debug('string: %s', string) + for color in COLORS.values(): + string = string.replace(color, '') + return string + + +def upload_debug_report(report, compress=True, reason='DEBUG'): + """Upload debug report to CRASH_SERVER as specified in wk.cfg.main.""" + LOG.info('Uploading debug report to %s', 
CRASH_SERVER.get('Name', '?')) + headers = CRASH_SERVER.get('Headers', {'X-Requested-With': 'XMLHttpRequest'}) + if compress: + headers['Content-Type'] = 'application/octet-stream' + + # Check if the required server details are available + if not all(CRASH_SERVER.get(key, False) for key in ('Name', 'Url', 'User')): + msg = 'Server details missing, aborting upload.' + print_error(msg) + raise RuntimeError(msg) + + # Set filename (based on the logging config if possible) + filename = 'Unknown' + log_path = get_log_filepath() + if log_path: + # Strip everything but the prefix + filename = re.sub(r'^(.*)_(\d{4}-\d{2}-\d{2}.*)', r'\1', log_path.name) + filename = f'{filename}_{reason}_{time.strftime("%Y-%m-%d_%H%M%S%z")}.log' + LOG.debug('filename: %s', filename) + + # Compress report + if compress: + filename += '.xz' + xz_report = lzma.compress(report.encode('utf8')) + + # Upload report + url = f'{CRASH_SERVER["Url"]}/{filename}' + response = requests.put( + url, + data=xz_report if compress else report, + headers=headers, + auth=(CRASH_SERVER['User'], CRASH_SERVER.get('Pass', '')), + ) + + # Check response + if not response.ok: + raise RuntimeError('Failed to upload report') + + +if __name__ == '__main__': + print("This file is not meant to be called directly.") diff --git a/scripts/wk/sw/__init__.py b/scripts/wk/sw/__init__.py new file mode 100644 index 00000000..1cc6236f --- /dev/null +++ b/scripts/wk/sw/__init__.py @@ -0,0 +1 @@ +"""WizardKit: sw module init""" diff --git a/scripts/wk/tmux.py b/scripts/wk/tmux.py new file mode 100644 index 00000000..b6d32848 --- /dev/null +++ b/scripts/wk/tmux.py @@ -0,0 +1,287 @@ +"""WizardKit: tmux Functions""" +# vim: sts=2 sw=2 ts=2 + +import logging +import pathlib + +from wk.exe import run_program +from wk.std import PLATFORM + + +# STATIC_VARIABLES +LOG = logging.getLogger(__name__) + + +# Functions +def capture_pane(pane_id=None): + """Capture text from current or target pane, returns str.""" + cmd = ['tmux', 'capture-pane', '-p'] + if pane_id: + cmd.extend(['-t', pane_id]) + + # Capture and return + proc = run_program(cmd, check=False) + return proc.stdout.strip() + + +def clear_pane(pane_id=None): + """Clear pane buffer for current or target pane.""" + cmd = ['tmux', 'send-keys', '-R'] + if pane_id: + cmd.extend(['-t', pane_id]) + + # Clear pane + run_program(cmd, check=False) + + +def fix_layout(panes, layout, forced=False): + """Fix pane sizes based on layout.""" + if not (forced or layout_needs_fixed(panes, layout)): + # Layout should be fine + return + + # Update panes + for name, data in layout.items(): + # Skip missing panes + if name not in panes: + continue + + # Resize pane(s) + pane_list = panes[name] + if isinstance(pane_list, str): + pane_list = [pane_list] + for pane_id in pane_list: + if name == 'Current': + pane_id = None + try: + resize_pane(pane_id, **data) + except RuntimeError: + # Assuming pane was closed just before resizing + pass + + +def get_pane_size(pane_id=None): + """Get current or target pane size, returns tuple.""" + cmd = ['tmux', 'display', '-p'] + if pane_id: + cmd.extend(['-t', pane_id]) + cmd.append('#{pane_width} #{pane_height}') + + # Get resolution + proc = run_program(cmd, check=False) + width, height = proc.stdout.strip().split() + width = int(width) + height = int(height) + + # Done + return (width, height) + + +def kill_all_panes(pane_id=None): + """Kill all panes except for the current or target pane.""" + cmd = ['tmux', 'kill-pane', '-a'] + if pane_id: + cmd.extend(['-t', pane_id]) + + # Kill + 
run_program(cmd, check=False) + + +def kill_pane(*pane_ids): + """Kill pane(s) by id.""" + cmd = ['tmux', 'kill-pane', '-t'] + + # Iterate over all passed pane IDs + for pane_id in pane_ids: + run_program(cmd+[pane_id], check=False) + + +def layout_needs_fixed(panes, layout): + """Check if layout needs fixed, returns bool.""" + needs_fixed = False + + # Check panes + for name, data in layout.items(): + # Skip unpredictably sized panes + if not data.get('Check', False): + continue + + # Skip missing panes + if name not in panes: + continue + + # Check pane size(s) + pane_list = panes[name] + if isinstance(pane_list, str): + pane_list = [pane_list] + for pane_id in pane_list: + try: + width, height = get_pane_size(pane_id) + except ValueError: + # Pane may have disappeared during this loop + continue + if data.get('width', False) and data['width'] != width: + needs_fixed = True + if data.get('height', False) and data['height'] != height: + needs_fixed = True + + # Done + return needs_fixed + + +def poll_pane(pane_id): + """Check if pane exists, returns bool.""" + cmd = ['tmux', 'list-panes', '-F', '#D'] + + # Get list of panes + proc = run_program(cmd, check=False) + existant_panes = proc.stdout.splitlines() + + # Check if pane exists + return pane_id in existant_panes + + +def prep_action( + cmd=None, working_dir=None, text=None, watch_file=None, watch_cmd='cat'): + """Prep action to perform during a tmux call, returns list. + + This will prep for running a basic command, displaying text on screen, + or monitoring a file. The last option uses cat by default but can be + overridden by using the watch_cmd. + """ + action_cmd = [] + if working_dir: + action_cmd.extend(['-c', working_dir]) + + if cmd: + # Basic command + action_cmd.append(cmd) + elif text: + # Display text + echo_cmd = ['echo'] + if PLATFORM == 'Linux': + echo_cmd.append('-e') + action_cmd.extend([ + 'watch', + '--color', + '--exec', + '--no-title', + '--interval', '1', + ]) + action_cmd.extend(echo_cmd) + action_cmd.append(text) + elif watch_file: + # Monitor file + prep_file(watch_file) + if watch_cmd == 'cat': + action_cmd.extend([ + 'watch', + '--color', + '--no-title', + '--interval', '1', + 'cat', + ]) + elif watch_cmd == 'tail': + action_cmd.extend(['tail', '-f']) + action_cmd.append(watch_file) + else: + LOG.error('No action specified') + raise RuntimeError('No action specified') + + # Done + return action_cmd + + +def prep_file(path): + """Check if file exists and create empty file if not.""" + path = pathlib.Path(path).resolve() + try: + path.touch(exist_ok=False) + except FileExistsError: + # Leave existing files alone + pass + + +def resize_pane(pane_id=None, width=None, height=None, **kwargs): + # pylint: disable=unused-argument + """Resize current or target pane. + + NOTE: kwargs is only here to make calling this function easier + by dropping any extra kwargs passed. 
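A brief sketch of why stray kwargs are dropped: fix_layout() passes each layout entry straight through to this function, including bookkeeping keys such as 'Check'. The pane id and sizes are illustrative and assume a running tmux session:

  layout = {'Progress': {'width': 30, 'Check': True}}
  panes = {'Progress': '%5'}
  # Ends up calling resize_pane('%5', width=30, Check=True); 'Check' is ignored
  fix_layout(panes, layout, forced=True)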
+ """ + cmd = ['tmux', 'resize-pane'] + + # Safety checks + if not (width or height): + LOG.error('Neither width nor height specified') + raise RuntimeError('Neither width nor height specified') + + # Finish building cmd + if pane_id: + cmd.extend(['-t', pane_id]) + if width: + cmd.extend(['-x', str(width)]) + if height: + cmd.extend(['-y', str(height)]) + + # Resize + run_program(cmd, check=False) + + +def split_window( + lines=None, percent=None, + behind=False, vertical=False, + target_id=None, **action): + """Split tmux window, run action, and return pane_id as str.""" + cmd = ['tmux', 'split-window', '-d', '-PF', '#D'] + + # Safety checks + if not (lines or percent): + LOG.error('Neither lines nor percent specified') + raise RuntimeError('Neither lines nor percent specified') + + # New pane placement + if behind: + cmd.append('-b') + if vertical: + cmd.append('-v') + else: + cmd.append('-h') + if target_id: + cmd.extend(['-t', target_id]) + + # New pane size + if lines: + cmd.extend(['-l', str(lines)]) + elif percent: + cmd.extend(['-p', str(percent)]) + + # New pane action + cmd.extend(prep_action(**action)) + + # Run and return pane_id + proc = run_program(cmd, check=False) + return proc.stdout.strip() + + +def respawn_pane(pane_id, **action): + """Respawn pane with action.""" + cmd = ['tmux', 'respawn-pane', '-k', '-t', pane_id] + cmd.extend(prep_action(**action)) + + # Respawn + run_program(cmd, check=False) + + +def zoom_pane(pane_id=None): + """Toggle zoom status for current or target pane.""" + cmd = ['tmux', 'resize-pane', '-Z'] + if pane_id: + cmd.extend(['-t', pane_id]) + + # Toggle + run_program(cmd, check=False) + + +if __name__ == '__main__': + print("This file is not meant to be called directly.") diff --git a/Build Linux b/setup/build_linux similarity index 91% rename from Build Linux rename to setup/build_linux index f9acd897..50f84190 100755 --- a/Build Linux +++ b/setup/build_linux @@ -11,10 +11,10 @@ set -o pipefail DATE="$(date +%F)" DATETIME="$(date +%F_%H%M)" ROOT_DIR="$(realpath $(dirname "$0"))" -BUILD_DIR="$ROOT_DIR/BUILD_LINUX" +BUILD_DIR="$ROOT_DIR/setup/BUILD_LINUX" LIVE_DIR="$BUILD_DIR/live" LOG_DIR="$BUILD_DIR/logs" -OUT_DIR="$ROOT_DIR/OUT_LINUX" +OUT_DIR="$ROOT_DIR/setup/OUT_LINUX" REPO_DIR="$BUILD_DIR/repo" SKEL_DIR="$LIVE_DIR/airootfs/etc/skel" TEMP_DIR="$BUILD_DIR/temp" @@ -57,7 +57,7 @@ function cleanup() { function fix_kit_permissions() { # GitHub zip archives don't preserve the correct permissions - for d in .bin .cbin .kit_items .linux_items .pe_items Images; do + for d in docs images scripts setup; do find "$ROOT_DIR/$d" -type d -exec chmod 755 "{}" \; done } @@ -80,7 +80,7 @@ function load_settings() { if [[ "${1:-}" == "--edit" ]]; then # Copy settings if [[ ! 
-e "$BUILD_DIR/main.py" ]] || ask "Overwrite main.py?"; then - cp -bv "$ROOT_DIR/.bin/Scripts/settings/main.py" "$BUILD_DIR/main.py" + cp -bv "$ROOT_DIR/scripts/wk/cfg/main.py" "$BUILD_DIR/main.py" dos2unix "$BUILD_DIR/main.py" fi @@ -89,7 +89,7 @@ function load_settings() { "$EDITOR" "$BUILD_DIR/main.py" else # Load settings from $LIVE_DIR - _main_path="$LIVE_DIR/airootfs/usr/local/bin/settings/main.py" + _main_path="$LIVE_DIR/airootfs/usr/local/bin/wk/cfg/main.py" fi # Load settings @@ -118,13 +118,13 @@ function copy_live_env() { rm "$LIVE_DIR/syslinux"/*.cfg "$LIVE_DIR/syslinux"/*.png # Add items - rsync -aI "$ROOT_DIR/.linux_items/include/" "$LIVE_DIR/" + rsync -aI "$ROOT_DIR/setup/linux/include/" "$LIVE_DIR/" if [[ "${1:-}" != "--minimal" ]]; then - rsync -aI "$ROOT_DIR/.linux_items/include_x/" "$LIVE_DIR/" + rsync -aI "$ROOT_DIR/setup/linux/include_x/" "$LIVE_DIR/" fi mkdir -p "$LIVE_DIR/airootfs/usr/local/bin" - rsync -aI "$ROOT_DIR/.bin/Scripts/" "$LIVE_DIR/airootfs/usr/local/bin/" - cp -a "$BUILD_DIR/main.py" "$LIVE_DIR/airootfs/usr/local/bin/settings/" + rsync -aI "$ROOT_DIR/scripts/" "$LIVE_DIR/airootfs/usr/local/bin/" + cp -a "$BUILD_DIR/main.py" "$LIVE_DIR/airootfs/usr/local/bin/wk/cfg/" } function run_elevated() { @@ -155,8 +155,8 @@ function update_live_env() { # Boot config (legacy) mkdir -p "$LIVE_DIR/arch" - cp "$ROOT_DIR/Images/Pxelinux.png" "$LIVE_DIR/arch/pxelinux.png" - cp "$ROOT_DIR/Images/Syslinux.png" "$LIVE_DIR/arch/syslinux.png" + cp "$ROOT_DIR/images/Pxelinux.png" "$LIVE_DIR/arch/pxelinux.png" + cp "$ROOT_DIR/images/Syslinux.png" "$LIVE_DIR/arch/syslinux.png" sed -i -r "s/_+/$KIT_NAME_FULL/" "$LIVE_DIR/syslinux/wk_head.cfg" mkdir -p "$TEMP_DIR" 2>/dev/null curl -Lo "$TEMP_DIR/wimboot.zip" "http://git.ipxe.org/releases/wimboot/wimboot-latest.zip" @@ -165,7 +165,7 @@ function update_live_env() { # Boot config (UEFI) mkdir -p "$LIVE_DIR/EFI/boot" cp "/usr/share/refind/refind_x64.efi" "$LIVE_DIR/EFI/boot/bootx64.efi" - cp "$ROOT_DIR/Images/rEFInd.png" "$LIVE_DIR/EFI/boot/rEFInd.png" + cp "$ROOT_DIR/images/rEFInd.png" "$LIVE_DIR/EFI/boot/rEFInd.png" rsync -aI "/usr/share/refind/drivers_x64/" "$LIVE_DIR/EFI/boot/drivers_x64/" rsync -aI "/usr/share/refind/icons/" "$LIVE_DIR/EFI/boot/icons/" --exclude "/usr/share/refind/icons/svg" sed -i "s/%ARCHISO_LABEL%/${label}/" "$LIVE_DIR/EFI/boot/refind.conf" @@ -199,12 +199,12 @@ function update_live_env() { # Live packages while read -r p; do sed -i "/$p/d" "$LIVE_DIR/packages.x86_64" - done < "$ROOT_DIR/.linux_items/packages/live_remove" - cat "$ROOT_DIR/.linux_items/packages/live_add" >> "$LIVE_DIR/packages.x86_64" + done < "$ROOT_DIR/setup/linux/packages/live_remove" + cat "$ROOT_DIR/setup/linux/packages/live_add" >> "$LIVE_DIR/packages.x86_64" if [[ "${1:-}" == "--minimal" ]]; then - cat "$ROOT_DIR/.linux_items/packages/live_add_min" >> "$LIVE_DIR/packages.x86_64" + cat "$ROOT_DIR/setup/linux/packages/live_add_min" >> "$LIVE_DIR/packages.x86_64" else - cat "$ROOT_DIR/.linux_items/packages/live_add_x" >> "$LIVE_DIR/packages.x86_64" + cat "$ROOT_DIR/setup/linux/packages/live_add_x" >> "$LIVE_DIR/packages.x86_64" fi echo "[custom]" >> "$LIVE_DIR/pacman.conf" echo "SigLevel = Optional TrustAll" >> "$LIVE_DIR/pacman.conf" @@ -246,7 +246,7 @@ function update_live_env() { echo 'rm /root/.zlogin' >> "$LIVE_DIR/airootfs/root/customize_airootfs.sh" sed -i -r '/.*PermitRootLogin.*/d' "$LIVE_DIR/airootfs/root/customize_airootfs.sh" echo "sed -i -r '/.*PermitRootLogin.*/d' /etc/ssh/sshd_config" >> 
"$LIVE_DIR/airootfs/root/customize_airootfs.sh" - cp "$ROOT_DIR/.linux_items/authorized_keys" "$SKEL_DIR/.ssh/authorized_keys" + cp "$ROOT_DIR/setup/linux/authorized_keys" "$SKEL_DIR/.ssh/authorized_keys" # Root user echo "echo 'root:$ROOT_PASSWORD' | chpasswd" >> "$LIVE_DIR/airootfs/root/customize_airootfs.sh" @@ -279,11 +279,11 @@ function update_live_env() { # Wallpaper mkdir -p "$LIVE_DIR/airootfs/usr/share/wallpaper" - cp "$ROOT_DIR/Images/Linux.png" "$LIVE_DIR/airootfs/usr/share/wallpaper/burned.in" + cp "$ROOT_DIR/images/Linux.png" "$LIVE_DIR/airootfs/usr/share/wallpaper/burned.in" fi # WiFi - cp "$ROOT_DIR/.linux_items/known_networks" "$LIVE_DIR/airootfs/root/known_networks" + cp "$ROOT_DIR/setup/linux/known_networks" "$LIVE_DIR/airootfs/root/known_networks" echo "add-known-networks --user=$username" >> "$LIVE_DIR/airootfs/root/customize_airootfs.sh" } @@ -315,7 +315,7 @@ function update_repo() { makepkg -d popd >/dev/null mv -n $p/*xz "$REPO_DIR"/ - done < "$ROOT_DIR/.linux_items/packages/aur" + done < "$ROOT_DIR/setup/linux/packages/aur" popd >/dev/null # Build custom repo database @@ -329,7 +329,7 @@ function install_deps() { packages= while read -r line; do packages="$packages $line" - done < "$ROOT_DIR/.linux_items/packages/dependencies" + done < "$ROOT_DIR/setup/linux/packages/dependencies" run_elevated pacman -Syu --needed --noconfirm $packages } @@ -347,7 +347,7 @@ function build_iso() { chmod 600 "$LIVE_DIR/airootfs/etc/skel/.ssh/id_rsa" # Removing cached (and possibly outdated) custom repo packages - for package in $(cat "$ROOT_DIR/.linux_items/packages/aur"); do + for package in $(cat "$ROOT_DIR/setup/linux/packages/aur"); do for p in /var/cache/pacman/pkg/*${package}*; do if [[ -f "${p}" ]]; then rm "${p}" diff --git a/Build PE.cmd b/setup/build_pe.cmd similarity index 100% rename from Build PE.cmd rename to setup/build_pe.cmd diff --git a/Build Kit.cmd b/setup/build_windows.cmd similarity index 100% rename from Build Kit.cmd rename to setup/build_windows.cmd diff --git a/.linux_items/authorized_keys b/setup/linux/authorized_keys similarity index 100% rename from .linux_items/authorized_keys rename to setup/linux/authorized_keys diff --git a/.linux_items/build_additions.txt b/setup/linux/build_additions.txt similarity index 100% rename from .linux_items/build_additions.txt rename to setup/linux/build_additions.txt diff --git a/.linux_items/include/EFI/boot/icons/dgpu.png b/setup/linux/include/EFI/boot/icons/dgpu.png similarity index 100% rename from .linux_items/include/EFI/boot/icons/dgpu.png rename to setup/linux/include/EFI/boot/icons/dgpu.png diff --git a/.linux_items/include/EFI/boot/icons/wk_arch.png b/setup/linux/include/EFI/boot/icons/wk_arch.png similarity index 100% rename from .linux_items/include/EFI/boot/icons/wk_arch.png rename to setup/linux/include/EFI/boot/icons/wk_arch.png diff --git a/.linux_items/include/EFI/boot/icons/wk_memtest.png b/setup/linux/include/EFI/boot/icons/wk_memtest.png similarity index 100% rename from .linux_items/include/EFI/boot/icons/wk_memtest.png rename to setup/linux/include/EFI/boot/icons/wk_memtest.png diff --git a/.linux_items/include/EFI/boot/icons/wk_win.png b/setup/linux/include/EFI/boot/icons/wk_win.png similarity index 100% rename from .linux_items/include/EFI/boot/icons/wk_win.png rename to setup/linux/include/EFI/boot/icons/wk_win.png diff --git a/.linux_items/include/EFI/boot/refind.conf b/setup/linux/include/EFI/boot/refind.conf similarity index 100% rename from .linux_items/include/EFI/boot/refind.conf rename to 
setup/linux/include/EFI/boot/refind.conf diff --git a/.linux_items/include/EFI/boot/selection_big.png b/setup/linux/include/EFI/boot/selection_big.png similarity index 100% rename from .linux_items/include/EFI/boot/selection_big.png rename to setup/linux/include/EFI/boot/selection_big.png diff --git a/.linux_items/include/EFI/boot/selection_small.png b/setup/linux/include/EFI/boot/selection_small.png similarity index 100% rename from .linux_items/include/EFI/boot/selection_small.png rename to setup/linux/include/EFI/boot/selection_small.png diff --git a/.linux_items/include/airootfs/etc/default/ufw b/setup/linux/include/airootfs/etc/default/ufw similarity index 100% rename from .linux_items/include/airootfs/etc/default/ufw rename to setup/linux/include/airootfs/etc/default/ufw diff --git a/.linux_items/include/airootfs/etc/hostname b/setup/linux/include/airootfs/etc/hostname similarity index 100% rename from .linux_items/include/airootfs/etc/hostname rename to setup/linux/include/airootfs/etc/hostname diff --git a/.linux_items/include/airootfs/etc/hosts b/setup/linux/include/airootfs/etc/hosts similarity index 100% rename from .linux_items/include/airootfs/etc/hosts rename to setup/linux/include/airootfs/etc/hosts diff --git a/.linux_items/include/airootfs/etc/locale.conf b/setup/linux/include/airootfs/etc/locale.conf similarity index 100% rename from .linux_items/include/airootfs/etc/locale.conf rename to setup/linux/include/airootfs/etc/locale.conf diff --git a/.linux_items/include/airootfs/etc/locale.gen b/setup/linux/include/airootfs/etc/locale.gen similarity index 100% rename from .linux_items/include/airootfs/etc/locale.gen rename to setup/linux/include/airootfs/etc/locale.gen diff --git a/.linux_items/include/airootfs/etc/motd b/setup/linux/include/airootfs/etc/motd similarity index 100% rename from .linux_items/include/airootfs/etc/motd rename to setup/linux/include/airootfs/etc/motd diff --git a/.linux_items/include/airootfs/etc/polkit-1/rules.d/49-nopasswd_global.rules b/setup/linux/include/airootfs/etc/polkit-1/rules.d/49-nopasswd_global.rules similarity index 100% rename from .linux_items/include/airootfs/etc/polkit-1/rules.d/49-nopasswd_global.rules rename to setup/linux/include/airootfs/etc/polkit-1/rules.d/49-nopasswd_global.rules diff --git a/.linux_items/include/airootfs/etc/skel/.aliases b/setup/linux/include/airootfs/etc/skel/.aliases similarity index 100% rename from .linux_items/include/airootfs/etc/skel/.aliases rename to setup/linux/include/airootfs/etc/skel/.aliases diff --git a/.linux_items/include/airootfs/etc/skel/.bashrc b/setup/linux/include/airootfs/etc/skel/.bashrc similarity index 80% rename from .linux_items/include/airootfs/etc/skel/.bashrc rename to setup/linux/include/airootfs/etc/skel/.bashrc index cb37e84b..0c12a187 100644 --- a/.linux_items/include/airootfs/etc/skel/.bashrc +++ b/setup/linux/include/airootfs/etc/skel/.bashrc @@ -12,3 +12,6 @@ PS1='[\u@\h \W]\$ ' # Update LS_COLORS eval $(dircolors ~/.dircolors) + +# WizardKit +export PYTHONPATH='/usr/local/bin' diff --git a/.linux_items/include/airootfs/etc/skel/.dircolors b/setup/linux/include/airootfs/etc/skel/.dircolors similarity index 100% rename from .linux_items/include/airootfs/etc/skel/.dircolors rename to setup/linux/include/airootfs/etc/skel/.dircolors diff --git a/.linux_items/include/airootfs/etc/skel/.rsync_exclusions b/setup/linux/include/airootfs/etc/skel/.rsync_exclusions similarity index 100% rename from .linux_items/include/airootfs/etc/skel/.rsync_exclusions rename to 
setup/linux/include/airootfs/etc/skel/.rsync_exclusions diff --git a/.linux_items/include/airootfs/etc/skel/.tmux.conf b/setup/linux/include/airootfs/etc/skel/.tmux.conf similarity index 100% rename from .linux_items/include/airootfs/etc/skel/.tmux.conf rename to setup/linux/include/airootfs/etc/skel/.tmux.conf diff --git a/.linux_items/include/airootfs/etc/skel/.update_network b/setup/linux/include/airootfs/etc/skel/.update_network similarity index 100% rename from .linux_items/include/airootfs/etc/skel/.update_network rename to setup/linux/include/airootfs/etc/skel/.update_network diff --git a/.linux_items/include/airootfs/etc/skel/.vimrc b/setup/linux/include/airootfs/etc/skel/.vimrc similarity index 100% rename from .linux_items/include/airootfs/etc/skel/.vimrc rename to setup/linux/include/airootfs/etc/skel/.vimrc diff --git a/.linux_items/include/airootfs/etc/skel/.zlogin b/setup/linux/include/airootfs/etc/skel/.zlogin similarity index 100% rename from .linux_items/include/airootfs/etc/skel/.zlogin rename to setup/linux/include/airootfs/etc/skel/.zlogin diff --git a/.linux_items/include/airootfs/etc/skel/.zshrc b/setup/linux/include/airootfs/etc/skel/.zshrc similarity index 87% rename from .linux_items/include/airootfs/etc/skel/.zshrc rename to setup/linux/include/airootfs/etc/skel/.zshrc index 59a747eb..a5a2298b 100644 --- a/.linux_items/include/airootfs/etc/skel/.zshrc +++ b/setup/linux/include/airootfs/etc/skel/.zshrc @@ -9,3 +9,4 @@ source $ZSH/oh-my-zsh.sh # Wizard Kit . $HOME/.aliases eval $(dircolors ~/.dircolors) +export PYTHONPATH="/usr/local/bin" diff --git a/.linux_items/include/airootfs/etc/systemd/system/multi-user.target.wants/NetworkManager.service b/setup/linux/include/airootfs/etc/systemd/system/multi-user.target.wants/NetworkManager.service similarity index 100% rename from .linux_items/include/airootfs/etc/systemd/system/multi-user.target.wants/NetworkManager.service rename to setup/linux/include/airootfs/etc/systemd/system/multi-user.target.wants/NetworkManager.service diff --git a/.linux_items/include/airootfs/etc/systemd/system/multi-user.target.wants/rngd.service b/setup/linux/include/airootfs/etc/systemd/system/multi-user.target.wants/rngd.service similarity index 100% rename from .linux_items/include/airootfs/etc/systemd/system/multi-user.target.wants/rngd.service rename to setup/linux/include/airootfs/etc/systemd/system/multi-user.target.wants/rngd.service diff --git a/.linux_items/include/airootfs/etc/systemd/system/multi-user.target.wants/sshd.service b/setup/linux/include/airootfs/etc/systemd/system/multi-user.target.wants/sshd.service similarity index 100% rename from .linux_items/include/airootfs/etc/systemd/system/multi-user.target.wants/sshd.service rename to setup/linux/include/airootfs/etc/systemd/system/multi-user.target.wants/sshd.service diff --git a/.linux_items/include/airootfs/etc/systemd/system/multi-user.target.wants/systemd-timesyncd.service b/setup/linux/include/airootfs/etc/systemd/system/multi-user.target.wants/systemd-timesyncd.service similarity index 100% rename from .linux_items/include/airootfs/etc/systemd/system/multi-user.target.wants/systemd-timesyncd.service rename to setup/linux/include/airootfs/etc/systemd/system/multi-user.target.wants/systemd-timesyncd.service diff --git a/.linux_items/include/airootfs/etc/systemd/system/multi-user.target.wants/ufw.service b/setup/linux/include/airootfs/etc/systemd/system/multi-user.target.wants/ufw.service similarity index 100% rename from 
.linux_items/include/airootfs/etc/systemd/system/multi-user.target.wants/ufw.service rename to setup/linux/include/airootfs/etc/systemd/system/multi-user.target.wants/ufw.service diff --git a/.linux_items/include/airootfs/etc/udev/rules.d/99-udisks2.rules b/setup/linux/include/airootfs/etc/udev/rules.d/99-udisks2.rules similarity index 100% rename from .linux_items/include/airootfs/etc/udev/rules.d/99-udisks2.rules rename to setup/linux/include/airootfs/etc/udev/rules.d/99-udisks2.rules diff --git a/.linux_items/include/airootfs/etc/udevil/udevil.conf b/setup/linux/include/airootfs/etc/udevil/udevil.conf similarity index 100% rename from .linux_items/include/airootfs/etc/udevil/udevil.conf rename to setup/linux/include/airootfs/etc/udevil/udevil.conf diff --git a/.linux_items/include/airootfs/etc/ufw/ufw.conf b/setup/linux/include/airootfs/etc/ufw/ufw.conf similarity index 100% rename from .linux_items/include/airootfs/etc/ufw/ufw.conf rename to setup/linux/include/airootfs/etc/ufw/ufw.conf diff --git a/.linux_items/include/airootfs/etc/ufw/user.rules b/setup/linux/include/airootfs/etc/ufw/user.rules similarity index 100% rename from .linux_items/include/airootfs/etc/ufw/user.rules rename to setup/linux/include/airootfs/etc/ufw/user.rules diff --git a/.linux_items/include/airootfs/etc/ufw/user6.rules b/setup/linux/include/airootfs/etc/ufw/user6.rules similarity index 100% rename from .linux_items/include/airootfs/etc/ufw/user6.rules rename to setup/linux/include/airootfs/etc/ufw/user6.rules diff --git a/.linux_items/include/airootfs/etc/vconsole.conf b/setup/linux/include/airootfs/etc/vconsole.conf similarity index 100% rename from .linux_items/include/airootfs/etc/vconsole.conf rename to setup/linux/include/airootfs/etc/vconsole.conf diff --git a/.linux_items/include/isolinux/isolinux.cfg b/setup/linux/include/isolinux/isolinux.cfg similarity index 100% rename from .linux_items/include/isolinux/isolinux.cfg rename to setup/linux/include/isolinux/isolinux.cfg diff --git a/.linux_items/include/syslinux/splash.png b/setup/linux/include/syslinux/splash.png similarity index 100% rename from .linux_items/include/syslinux/splash.png rename to setup/linux/include/syslinux/splash.png diff --git a/.linux_items/include/syslinux/syslinux.cfg b/setup/linux/include/syslinux/syslinux.cfg similarity index 100% rename from .linux_items/include/syslinux/syslinux.cfg rename to setup/linux/include/syslinux/syslinux.cfg diff --git a/.linux_items/include/syslinux/wk.cfg b/setup/linux/include/syslinux/wk.cfg similarity index 100% rename from .linux_items/include/syslinux/wk.cfg rename to setup/linux/include/syslinux/wk.cfg diff --git a/.linux_items/include/syslinux/wk_hdt.cfg b/setup/linux/include/syslinux/wk_hdt.cfg similarity index 100% rename from .linux_items/include/syslinux/wk_hdt.cfg rename to setup/linux/include/syslinux/wk_hdt.cfg diff --git a/.linux_items/include/syslinux/wk_head.cfg b/setup/linux/include/syslinux/wk_head.cfg similarity index 100% rename from .linux_items/include/syslinux/wk_head.cfg rename to setup/linux/include/syslinux/wk_head.cfg diff --git a/.linux_items/include/syslinux/wk_iso.cfg b/setup/linux/include/syslinux/wk_iso.cfg similarity index 100% rename from .linux_items/include/syslinux/wk_iso.cfg rename to setup/linux/include/syslinux/wk_iso.cfg diff --git a/.linux_items/include/syslinux/wk_iso_linux.cfg b/setup/linux/include/syslinux/wk_iso_linux.cfg similarity index 100% rename from .linux_items/include/syslinux/wk_iso_linux.cfg rename to
setup/linux/include/syslinux/wk_iso_linux.cfg diff --git a/.linux_items/include/syslinux/wk_pxe.cfg b/setup/linux/include/syslinux/wk_pxe.cfg similarity index 100% rename from .linux_items/include/syslinux/wk_pxe.cfg rename to setup/linux/include/syslinux/wk_pxe.cfg diff --git a/.linux_items/include/syslinux/wk_pxe_linux.cfg b/setup/linux/include/syslinux/wk_pxe_linux.cfg similarity index 100% rename from .linux_items/include/syslinux/wk_pxe_linux.cfg rename to setup/linux/include/syslinux/wk_pxe_linux.cfg diff --git a/.linux_items/include/syslinux/wk_pxe_winpe.cfg b/setup/linux/include/syslinux/wk_pxe_winpe.cfg similarity index 100% rename from .linux_items/include/syslinux/wk_pxe_winpe.cfg rename to setup/linux/include/syslinux/wk_pxe_winpe.cfg diff --git a/.linux_items/include/syslinux/wk_sys.cfg b/setup/linux/include/syslinux/wk_sys.cfg similarity index 100% rename from .linux_items/include/syslinux/wk_sys.cfg rename to setup/linux/include/syslinux/wk_sys.cfg diff --git a/.linux_items/include/syslinux/wk_sys_linux.cfg b/setup/linux/include/syslinux/wk_sys_linux.cfg similarity index 100% rename from .linux_items/include/syslinux/wk_sys_linux.cfg rename to setup/linux/include/syslinux/wk_sys_linux.cfg diff --git a/.linux_items/include/syslinux/wk_sys_winpe.cfg b/setup/linux/include/syslinux/wk_sys_winpe.cfg similarity index 100% rename from .linux_items/include/syslinux/wk_sys_winpe.cfg rename to setup/linux/include/syslinux/wk_sys_winpe.cfg diff --git a/.linux_items/include/syslinux/wk_tail.cfg b/setup/linux/include/syslinux/wk_tail.cfg similarity index 100% rename from .linux_items/include/syslinux/wk_tail.cfg rename to setup/linux/include/syslinux/wk_tail.cfg diff --git a/.linux_items/include_x/airootfs/etc/oblogout.conf b/setup/linux/include_x/airootfs/etc/oblogout.conf similarity index 100% rename from .linux_items/include_x/airootfs/etc/oblogout.conf rename to setup/linux/include_x/airootfs/etc/oblogout.conf diff --git a/setup/linux/include_x/airootfs/etc/skel/.Xauthority b/setup/linux/include_x/airootfs/etc/skel/.Xauthority new file mode 100644 index 00000000..e69de29b diff --git a/.linux_items/include_x/airootfs/etc/skel/.Xresources b/setup/linux/include_x/airootfs/etc/skel/.Xresources similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.Xresources rename to setup/linux/include_x/airootfs/etc/skel/.Xresources diff --git a/.linux_items/include_x/airootfs/etc/skel/.config/Thunar/accels.scm b/setup/linux/include_x/airootfs/etc/skel/.config/Thunar/accels.scm similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.config/Thunar/accels.scm rename to setup/linux/include_x/airootfs/etc/skel/.config/Thunar/accels.scm diff --git a/.linux_items/include_x/airootfs/etc/skel/.config/Thunar/uca.xml b/setup/linux/include_x/airootfs/etc/skel/.config/Thunar/uca.xml similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.config/Thunar/uca.xml rename to setup/linux/include_x/airootfs/etc/skel/.config/Thunar/uca.xml diff --git a/.linux_items/include_x/airootfs/etc/skel/.config/dunst/dunstrc b/setup/linux/include_x/airootfs/etc/skel/.config/dunst/dunstrc similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.config/dunst/dunstrc rename to setup/linux/include_x/airootfs/etc/skel/.config/dunst/dunstrc diff --git a/.linux_items/include_x/airootfs/etc/skel/.config/gtk-3.0/settings.ini b/setup/linux/include_x/airootfs/etc/skel/.config/gtk-3.0/settings.ini similarity index 100% rename from 
.linux_items/include_x/airootfs/etc/skel/.config/gtk-3.0/settings.ini rename to setup/linux/include_x/airootfs/etc/skel/.config/gtk-3.0/settings.ini diff --git a/.linux_items/include_x/airootfs/etc/skel/.config/i3/config b/setup/linux/include_x/airootfs/etc/skel/.config/i3/config similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.config/i3/config rename to setup/linux/include_x/airootfs/etc/skel/.config/i3/config diff --git a/.linux_items/include_x/airootfs/etc/skel/.config/i3status/config b/setup/linux/include_x/airootfs/etc/skel/.config/i3status/config similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.config/i3status/config rename to setup/linux/include_x/airootfs/etc/skel/.config/i3status/config diff --git a/.linux_items/include_x/airootfs/etc/skel/.config/mimeapps.list b/setup/linux/include_x/airootfs/etc/skel/.config/mimeapps.list similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.config/mimeapps.list rename to setup/linux/include_x/airootfs/etc/skel/.config/mimeapps.list diff --git a/.linux_items/include_x/airootfs/etc/skel/.config/openbox/autostart b/setup/linux/include_x/airootfs/etc/skel/.config/openbox/autostart similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.config/openbox/autostart rename to setup/linux/include_x/airootfs/etc/skel/.config/openbox/autostart diff --git a/.linux_items/include_x/airootfs/etc/skel/.config/openbox/environment b/setup/linux/include_x/airootfs/etc/skel/.config/openbox/environment similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.config/openbox/environment rename to setup/linux/include_x/airootfs/etc/skel/.config/openbox/environment diff --git a/.linux_items/include_x/airootfs/etc/skel/.config/openbox/menu.xml b/setup/linux/include_x/airootfs/etc/skel/.config/openbox/menu.xml similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.config/openbox/menu.xml rename to setup/linux/include_x/airootfs/etc/skel/.config/openbox/menu.xml diff --git a/.linux_items/include_x/airootfs/etc/skel/.config/openbox/rc.xml b/setup/linux/include_x/airootfs/etc/skel/.config/openbox/rc.xml similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.config/openbox/rc.xml rename to setup/linux/include_x/airootfs/etc/skel/.config/openbox/rc.xml diff --git a/.linux_items/include_x/airootfs/etc/skel/.config/rofi/config b/setup/linux/include_x/airootfs/etc/skel/.config/rofi/config similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.config/rofi/config rename to setup/linux/include_x/airootfs/etc/skel/.config/rofi/config diff --git a/.linux_items/include_x/airootfs/etc/skel/.config/systemd/user/timers.target.wants/update-conky.timer b/setup/linux/include_x/airootfs/etc/skel/.config/systemd/user/timers.target.wants/update-conky.timer similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.config/systemd/user/timers.target.wants/update-conky.timer rename to setup/linux/include_x/airootfs/etc/skel/.config/systemd/user/timers.target.wants/update-conky.timer diff --git a/.linux_items/include_x/airootfs/etc/skel/.config/systemd/user/update-conky.service b/setup/linux/include_x/airootfs/etc/skel/.config/systemd/user/update-conky.service similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.config/systemd/user/update-conky.service rename to setup/linux/include_x/airootfs/etc/skel/.config/systemd/user/update-conky.service diff --git 
a/.linux_items/include_x/airootfs/etc/skel/.config/systemd/user/update-conky.timer b/setup/linux/include_x/airootfs/etc/skel/.config/systemd/user/update-conky.timer similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.config/systemd/user/update-conky.timer rename to setup/linux/include_x/airootfs/etc/skel/.config/systemd/user/update-conky.timer diff --git a/.linux_items/include_x/airootfs/etc/skel/.config/tint2/tint2rc b/setup/linux/include_x/airootfs/etc/skel/.config/tint2/tint2rc similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.config/tint2/tint2rc rename to setup/linux/include_x/airootfs/etc/skel/.config/tint2/tint2rc diff --git a/.linux_items/include_x/airootfs/etc/skel/.config/volumeicon/volumeicon b/setup/linux/include_x/airootfs/etc/skel/.config/volumeicon/volumeicon similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.config/volumeicon/volumeicon rename to setup/linux/include_x/airootfs/etc/skel/.config/volumeicon/volumeicon diff --git a/.linux_items/include_x/airootfs/etc/skel/.conky_start b/setup/linux/include_x/airootfs/etc/skel/.conky_start similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.conky_start rename to setup/linux/include_x/airootfs/etc/skel/.conky_start diff --git a/.linux_items/include_x/airootfs/etc/skel/.conkyrc_base b/setup/linux/include_x/airootfs/etc/skel/.conkyrc_base similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.conkyrc_base rename to setup/linux/include_x/airootfs/etc/skel/.conkyrc_base diff --git a/.linux_items/include_x/airootfs/etc/skel/.gtkrc-2.0 b/setup/linux/include_x/airootfs/etc/skel/.gtkrc-2.0 similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.gtkrc-2.0 rename to setup/linux/include_x/airootfs/etc/skel/.gtkrc-2.0 diff --git a/.linux_items/include_x/airootfs/etc/skel/.start_desktop_apps b/setup/linux/include_x/airootfs/etc/skel/.start_desktop_apps similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.start_desktop_apps rename to setup/linux/include_x/airootfs/etc/skel/.start_desktop_apps diff --git a/.linux_items/include_x/airootfs/etc/skel/.update_conky b/setup/linux/include_x/airootfs/etc/skel/.update_conky similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.update_conky rename to setup/linux/include_x/airootfs/etc/skel/.update_conky diff --git a/.linux_items/include_x/airootfs/etc/skel/.update_x b/setup/linux/include_x/airootfs/etc/skel/.update_x similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.update_x rename to setup/linux/include_x/airootfs/etc/skel/.update_x diff --git a/.linux_items/include_x/airootfs/etc/skel/.wallpaper b/setup/linux/include_x/airootfs/etc/skel/.wallpaper similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.wallpaper rename to setup/linux/include_x/airootfs/etc/skel/.wallpaper diff --git a/.linux_items/include_x/airootfs/etc/skel/.xinitrc b/setup/linux/include_x/airootfs/etc/skel/.xinitrc similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.xinitrc rename to setup/linux/include_x/airootfs/etc/skel/.xinitrc diff --git a/.linux_items/include_x/airootfs/etc/skel/.zlogin b/setup/linux/include_x/airootfs/etc/skel/.zlogin similarity index 100% rename from .linux_items/include_x/airootfs/etc/skel/.zlogin rename to setup/linux/include_x/airootfs/etc/skel/.zlogin diff --git a/.linux_items/include_x/airootfs/usr/share/applications/Hardware Diagnostics.desktop 
b/setup/linux/include_x/airootfs/usr/share/applications/Hardware Diagnostics.desktop similarity index 100% rename from .linux_items/include_x/airootfs/usr/share/applications/Hardware Diagnostics.desktop rename to setup/linux/include_x/airootfs/usr/share/applications/Hardware Diagnostics.desktop diff --git a/.linux_items/include_x/airootfs/usr/share/applications/Hardware Information.desktop b/setup/linux/include_x/airootfs/usr/share/applications/Hardware Information.desktop similarity index 100% rename from .linux_items/include_x/airootfs/usr/share/applications/Hardware Information.desktop rename to setup/linux/include_x/airootfs/usr/share/applications/Hardware Information.desktop diff --git a/.linux_items/include_x/airootfs/usr/share/applications/NetworkTest.desktop b/setup/linux/include_x/airootfs/usr/share/applications/NetworkTest.desktop similarity index 100% rename from .linux_items/include_x/airootfs/usr/share/applications/NetworkTest.desktop rename to setup/linux/include_x/airootfs/usr/share/applications/NetworkTest.desktop diff --git a/.linux_items/known_networks b/setup/linux/known_networks similarity index 100% rename from .linux_items/known_networks rename to setup/linux/known_networks diff --git a/.linux_items/packages/aur b/setup/linux/packages/aur similarity index 100% rename from .linux_items/packages/aur rename to setup/linux/packages/aur diff --git a/.linux_items/packages/dependencies b/setup/linux/packages/dependencies similarity index 100% rename from .linux_items/packages/dependencies rename to setup/linux/packages/dependencies diff --git a/.linux_items/packages/live_add b/setup/linux/packages/live_add similarity index 100% rename from .linux_items/packages/live_add rename to setup/linux/packages/live_add diff --git a/.linux_items/packages/live_add_min b/setup/linux/packages/live_add_min similarity index 100% rename from .linux_items/packages/live_add_min rename to setup/linux/packages/live_add_min diff --git a/.linux_items/packages/live_add_x b/setup/linux/packages/live_add_x similarity index 100% rename from .linux_items/packages/live_add_x rename to setup/linux/packages/live_add_x diff --git a/.linux_items/packages/live_remove b/setup/linux/packages/live_remove similarity index 100% rename from .linux_items/packages/live_remove rename to setup/linux/packages/live_remove diff --git a/.pe_items/System32/Winpeshl.ini b/setup/pe/System32/Winpeshl.ini similarity index 100% rename from .pe_items/System32/Winpeshl.ini rename to setup/pe/System32/Winpeshl.ini diff --git a/.pe_items/System32/menu.cmd b/setup/pe/System32/menu.cmd similarity index 100% rename from .pe_items/System32/menu.cmd rename to setup/pe/System32/menu.cmd diff --git a/.pe_items/_include/CPU-Z/cpuz.ini b/setup/pe/bin/CPU-Z/cpuz.ini similarity index 100% rename from .pe_items/_include/CPU-Z/cpuz.ini rename to setup/pe/bin/CPU-Z/cpuz.ini diff --git a/.pe_items/_include/ConEmu/ConEmu.xml b/setup/pe/bin/ConEmu/ConEmu.xml similarity index 100% rename from .pe_items/_include/ConEmu/ConEmu.xml rename to setup/pe/bin/ConEmu/ConEmu.xml diff --git a/.pe_items/_include/HWiNFO/HWiNFO.INI b/setup/pe/bin/HWiNFO/HWiNFO.INI similarity index 100% rename from .pe_items/_include/HWiNFO/HWiNFO.INI rename to setup/pe/bin/HWiNFO/HWiNFO.INI diff --git a/.pe_items/_include/NotepadPlusPlus/config.xml b/setup/pe/bin/NotepadPlusPlus/config.xml similarity index 100% rename from .pe_items/_include/NotepadPlusPlus/config.xml rename to setup/pe/bin/NotepadPlusPlus/config.xml diff --git a/.pe_items/_include/NotepadPlusPlus/npp.cmd 
b/setup/pe/bin/NotepadPlusPlus/npp.cmd similarity index 100% rename from .pe_items/_include/NotepadPlusPlus/npp.cmd rename to setup/pe/bin/NotepadPlusPlus/npp.cmd diff --git a/.pe_items/_include/NotepadPlusPlus/stylers.model.xml b/setup/pe/bin/NotepadPlusPlus/stylers.model.xml similarity index 100% rename from .pe_items/_include/NotepadPlusPlus/stylers.model.xml rename to setup/pe/bin/NotepadPlusPlus/stylers.model.xml diff --git a/.pe_items/_include/Q-Dir/Q-Dir.ini b/setup/pe/bin/Q-Dir/Q-Dir.ini similarity index 100% rename from .pe_items/_include/Q-Dir/Q-Dir.ini rename to setup/pe/bin/Q-Dir/Q-Dir.ini diff --git a/.kit_items/Drivers/Extras/AMD.url b/setup/windows/Drivers/Extras/AMD.url similarity index 100% rename from .kit_items/Drivers/Extras/AMD.url rename to setup/windows/Drivers/Extras/AMD.url diff --git a/.kit_items/Drivers/Extras/Dell (FTP - Browse for Drivers).url b/setup/windows/Drivers/Extras/Dell (FTP - Browse for Drivers).url similarity index 100% rename from .kit_items/Drivers/Extras/Dell (FTP - Browse for Drivers).url rename to setup/windows/Drivers/Extras/Dell (FTP - Browse for Drivers).url diff --git a/.kit_items/Drivers/Extras/Dell (Simplified Interface).url b/setup/windows/Drivers/Extras/Dell (Simplified Interface).url similarity index 100% rename from .kit_items/Drivers/Extras/Dell (Simplified Interface).url rename to setup/windows/Drivers/Extras/Dell (Simplified Interface).url diff --git a/.kit_items/Drivers/Extras/Dell (Support Site).url b/setup/windows/Drivers/Extras/Dell (Support Site).url similarity index 100% rename from .kit_items/Drivers/Extras/Dell (Support Site).url rename to setup/windows/Drivers/Extras/Dell (Support Site).url diff --git a/.kit_items/Drivers/Extras/Device Remover.url b/setup/windows/Drivers/Extras/Device Remover.url similarity index 100% rename from .kit_items/Drivers/Extras/Device Remover.url rename to setup/windows/Drivers/Extras/Device Remover.url diff --git a/.kit_items/Drivers/Extras/Display Driver Uninstaller.url b/setup/windows/Drivers/Extras/Display Driver Uninstaller.url similarity index 100% rename from .kit_items/Drivers/Extras/Display Driver Uninstaller.url rename to setup/windows/Drivers/Extras/Display Driver Uninstaller.url diff --git a/.kit_items/Drivers/Extras/HP.url b/setup/windows/Drivers/Extras/HP.url similarity index 100% rename from .kit_items/Drivers/Extras/HP.url rename to setup/windows/Drivers/Extras/HP.url diff --git a/.kit_items/Drivers/Extras/Intel Driver & Support Assistant.url b/setup/windows/Drivers/Extras/Intel Driver & Support Assistant.url similarity index 100% rename from .kit_items/Drivers/Extras/Intel Driver & Support Assistant.url rename to setup/windows/Drivers/Extras/Intel Driver & Support Assistant.url diff --git a/.kit_items/Drivers/Extras/NVIDIA.url b/setup/windows/Drivers/Extras/NVIDIA.url similarity index 100% rename from .kit_items/Drivers/Extras/NVIDIA.url rename to setup/windows/Drivers/Extras/NVIDIA.url diff --git a/.kit_items/Drivers/Extras/Samsung Tools & Software.url b/setup/windows/Drivers/Extras/Samsung Tools & Software.url similarity index 100% rename from .kit_items/Drivers/Extras/Samsung Tools & Software.url rename to setup/windows/Drivers/Extras/Samsung Tools & Software.url diff --git a/.kit_items/Installers/BackBlaze.url b/setup/windows/Installers/BackBlaze.url similarity index 100% rename from .kit_items/Installers/BackBlaze.url rename to setup/windows/Installers/BackBlaze.url diff --git a/.kit_items/Misc/Fix Missing Optical Drive.reg b/setup/windows/Misc/Fix Missing Optical Drive.reg 
similarity index 100% rename from .kit_items/Misc/Fix Missing Optical Drive.reg rename to setup/windows/Misc/Fix Missing Optical Drive.reg diff --git a/.kit_items/Misc/Nirsoft Utilities - Outlook.url b/setup/windows/Misc/Nirsoft Utilities - Outlook.url similarity index 100% rename from .kit_items/Misc/Nirsoft Utilities - Outlook.url rename to setup/windows/Misc/Nirsoft Utilities - Outlook.url diff --git a/.kit_items/Misc/Nirsoft Utilities - Passwords.url b/setup/windows/Misc/Nirsoft Utilities - Passwords.url similarity index 100% rename from .kit_items/Misc/Nirsoft Utilities - Passwords.url rename to setup/windows/Misc/Nirsoft Utilities - Passwords.url diff --git a/.kit_items/Misc/Sysinternals Suite (Live).url b/setup/windows/Misc/Sysinternals Suite (Live).url similarity index 100% rename from .kit_items/Misc/Sysinternals Suite (Live).url rename to setup/windows/Misc/Sysinternals Suite (Live).url diff --git a/.kit_items/Uninstallers/AV Removal Tools/AV Removal Tools.url b/setup/windows/Uninstallers/AV Removal Tools/AV Removal Tools.url similarity index 100% rename from .kit_items/Uninstallers/AV Removal Tools/AV Removal Tools.url rename to setup/windows/Uninstallers/AV Removal Tools/AV Removal Tools.url diff --git a/.kit_items/Uninstallers/AV Removal Tools/AVG.url b/setup/windows/Uninstallers/AV Removal Tools/AVG.url similarity index 100% rename from .kit_items/Uninstallers/AV Removal Tools/AVG.url rename to setup/windows/Uninstallers/AV Removal Tools/AVG.url diff --git a/.kit_items/Uninstallers/AV Removal Tools/Avast.url b/setup/windows/Uninstallers/AV Removal Tools/Avast.url similarity index 100% rename from .kit_items/Uninstallers/AV Removal Tools/Avast.url rename to setup/windows/Uninstallers/AV Removal Tools/Avast.url diff --git a/.kit_items/Uninstallers/AV Removal Tools/Avira.url b/setup/windows/Uninstallers/AV Removal Tools/Avira.url similarity index 100% rename from .kit_items/Uninstallers/AV Removal Tools/Avira.url rename to setup/windows/Uninstallers/AV Removal Tools/Avira.url diff --git a/.kit_items/Uninstallers/AV Removal Tools/ESET.url b/setup/windows/Uninstallers/AV Removal Tools/ESET.url similarity index 100% rename from .kit_items/Uninstallers/AV Removal Tools/ESET.url rename to setup/windows/Uninstallers/AV Removal Tools/ESET.url diff --git a/.kit_items/Uninstallers/AV Removal Tools/Kaspersky.url b/setup/windows/Uninstallers/AV Removal Tools/Kaspersky.url similarity index 100% rename from .kit_items/Uninstallers/AV Removal Tools/Kaspersky.url rename to setup/windows/Uninstallers/AV Removal Tools/Kaspersky.url diff --git a/.kit_items/Uninstallers/AV Removal Tools/MBAM.url b/setup/windows/Uninstallers/AV Removal Tools/MBAM.url similarity index 100% rename from .kit_items/Uninstallers/AV Removal Tools/MBAM.url rename to setup/windows/Uninstallers/AV Removal Tools/MBAM.url diff --git a/.kit_items/Uninstallers/AV Removal Tools/McAfee.url b/setup/windows/Uninstallers/AV Removal Tools/McAfee.url similarity index 100% rename from .kit_items/Uninstallers/AV Removal Tools/McAfee.url rename to setup/windows/Uninstallers/AV Removal Tools/McAfee.url diff --git a/.kit_items/Uninstallers/AV Removal Tools/Norton.url b/setup/windows/Uninstallers/AV Removal Tools/Norton.url similarity index 100% rename from .kit_items/Uninstallers/AV Removal Tools/Norton.url rename to setup/windows/Uninstallers/AV Removal Tools/Norton.url diff --git a/.bin/ConEmu/ConEmu.xml b/setup/windows/bin/ConEmu/ConEmu.xml similarity index 100% rename from .bin/ConEmu/ConEmu.xml rename to 
setup/windows/bin/ConEmu/ConEmu.xml diff --git a/.bin/HWiNFO/general.ini b/setup/windows/bin/HWiNFO/general.ini similarity index 100% rename from .bin/HWiNFO/general.ini rename to setup/windows/bin/HWiNFO/general.ini diff --git a/.bin/_Drivers/SDIO/sdi.cfg b/setup/windows/bin/_Drivers/SDIO/sdi.cfg similarity index 100% rename from .bin/_Drivers/SDIO/sdi.cfg rename to setup/windows/bin/_Drivers/SDIO/sdi.cfg diff --git a/.cbin/_include/AIDA64/full.rpf b/setup/windows/cbin/_include/AIDA64/full.rpf similarity index 100% rename from .cbin/_include/AIDA64/full.rpf rename to setup/windows/cbin/_include/AIDA64/full.rpf diff --git a/.cbin/_include/AIDA64/installed_programs.rpf b/setup/windows/cbin/_include/AIDA64/installed_programs.rpf similarity index 100% rename from .cbin/_include/AIDA64/installed_programs.rpf rename to setup/windows/cbin/_include/AIDA64/installed_programs.rpf diff --git a/.cbin/_include/AIDA64/licenses.rpf b/setup/windows/cbin/_include/AIDA64/licenses.rpf similarity index 100% rename from .cbin/_include/AIDA64/licenses.rpf rename to setup/windows/cbin/_include/AIDA64/licenses.rpf diff --git a/.cbin/_include/BleachBit/BleachBit.ini b/setup/windows/cbin/_include/BleachBit/BleachBit.ini similarity index 100% rename from .cbin/_include/BleachBit/BleachBit.ini rename to setup/windows/cbin/_include/BleachBit/BleachBit.ini diff --git a/.cbin/_include/NotepadPlusPlus/config.xml b/setup/windows/cbin/_include/NotepadPlusPlus/config.xml similarity index 100% rename from .cbin/_include/NotepadPlusPlus/config.xml rename to setup/windows/cbin/_include/NotepadPlusPlus/config.xml diff --git a/.cbin/_include/XMPlay/xmplay.ini b/setup/windows/cbin/_include/XMPlay/xmplay.ini similarity index 100% rename from .cbin/_include/XMPlay/xmplay.ini rename to setup/windows/cbin/_include/XMPlay/xmplay.ini diff --git a/.cbin/_include/XYplorerFree/Data/XYplorer.ini b/setup/windows/cbin/_include/XYplorerFree/Data/XYplorer.ini similarity index 100% rename from .cbin/_include/XYplorerFree/Data/XYplorer.ini rename to setup/windows/cbin/_include/XYplorerFree/Data/XYplorer.ini diff --git a/.cbin/_include/_Drivers/Intel RST/SetupRST_13.x.txt b/setup/windows/cbin/_include/_Drivers/Intel RST/SetupRST_13.x.txt similarity index 100% rename from .cbin/_include/_Drivers/Intel RST/SetupRST_13.x.txt rename to setup/windows/cbin/_include/_Drivers/Intel RST/SetupRST_13.x.txt diff --git a/.cbin/_include/_Office/2016_hb_32.xml b/setup/windows/cbin/_include/_Office/2016_hb_32.xml similarity index 100% rename from .cbin/_include/_Office/2016_hb_32.xml rename to setup/windows/cbin/_include/_Office/2016_hb_32.xml diff --git a/.cbin/_include/_Office/2016_hb_64.xml b/setup/windows/cbin/_include/_Office/2016_hb_64.xml similarity index 100% rename from .cbin/_include/_Office/2016_hb_64.xml rename to setup/windows/cbin/_include/_Office/2016_hb_64.xml diff --git a/.cbin/_include/_Office/2016_hs_32.xml b/setup/windows/cbin/_include/_Office/2016_hs_32.xml similarity index 100% rename from .cbin/_include/_Office/2016_hs_32.xml rename to setup/windows/cbin/_include/_Office/2016_hs_32.xml diff --git a/.cbin/_include/_Office/2016_hs_64.xml b/setup/windows/cbin/_include/_Office/2016_hs_64.xml similarity index 100% rename from .cbin/_include/_Office/2016_hs_64.xml rename to setup/windows/cbin/_include/_Office/2016_hs_64.xml diff --git a/.cbin/_include/_Office/2019_hb_32.xml b/setup/windows/cbin/_include/_Office/2019_hb_32.xml similarity index 100% rename from .cbin/_include/_Office/2019_hb_32.xml rename to 
setup/windows/cbin/_include/_Office/2019_hb_32.xml diff --git a/.cbin/_include/_Office/2019_hb_64.xml b/setup/windows/cbin/_include/_Office/2019_hb_64.xml similarity index 100% rename from .cbin/_include/_Office/2019_hb_64.xml rename to setup/windows/cbin/_include/_Office/2019_hb_64.xml diff --git a/.cbin/_include/_Office/2019_hs_32.xml b/setup/windows/cbin/_include/_Office/2019_hs_32.xml similarity index 100% rename from .cbin/_include/_Office/2019_hs_32.xml rename to setup/windows/cbin/_include/_Office/2019_hs_32.xml diff --git a/.cbin/_include/_Office/2019_hs_64.xml b/setup/windows/cbin/_include/_Office/2019_hs_64.xml similarity index 100% rename from .cbin/_include/_Office/2019_hs_64.xml rename to setup/windows/cbin/_include/_Office/2019_hs_64.xml diff --git a/.cbin/_include/_Office/365_32.xml b/setup/windows/cbin/_include/_Office/365_32.xml similarity index 100% rename from .cbin/_include/_Office/365_32.xml rename to setup/windows/cbin/_include/_Office/365_32.xml diff --git a/.cbin/_include/_Office/365_64.xml b/setup/windows/cbin/_include/_Office/365_64.xml similarity index 100% rename from .cbin/_include/_Office/365_64.xml rename to setup/windows/cbin/_include/_Office/365_64.xml diff --git a/.cbin/_include/_vcredists/InstallAll.bat b/setup/windows/cbin/_include/_vcredists/InstallAll.bat similarity index 100% rename from .cbin/_include/_vcredists/InstallAll.bat rename to setup/windows/cbin/_include/_vcredists/InstallAll.bat