# Llamafile.py
# Description: Functions relating to Llamafile usage
#
# Imports
#
# External Imports
#
# Local Imports
from App_Function_Libraries.Local_LLM_Inference_Engine_Lib import local_llm_gui_function
#
#

def stop_llamafile():
    # Code to stop llamafile
    # ...
    return "Llamafile stopped"
def start_llamafile(*args):
    # Unpack arguments
    (am_noob, verbose_checked, threads_checked, threads_value, http_threads_checked, http_threads_value,
     model_checked, model_value, hf_repo_checked, hf_repo_value, hf_file_checked, hf_file_value,
     ctx_size_checked, ctx_size_value, ngl_checked, ngl_value, host_checked, host_value, port_checked,
     port_value) = args
    # Construct command based on checked values
    command = []
    # Normalize the am_noob checkbox to a strict boolean; it is only forwarded to the helper below
    am_noob = bool(am_noob)
    if verbose_checked is not None and verbose_checked:
        command.append('-v')                                     # verbose output
    if threads_checked and threads_value is not None:
        command.extend(['-t', str(threads_value)])               # number of CPU threads
    if http_threads_checked and http_threads_value is not None:
        command.extend(['--threads', str(http_threads_value)])   # HTTP thread count as selected in the UI
    if model_checked and model_value is not None:
        model_path = model_value.name                            # model_value is expected to expose the file path via .name
        command.extend(['-m', model_path])                       # path to the model file
    if hf_repo_checked and hf_repo_value is not None:
        command.extend(['-hfr', hf_repo_value])                  # Hugging Face repository to pull the model from
    if hf_file_checked and hf_file_value is not None:
        command.extend(['-hff', hf_file_value])                  # file name within the Hugging Face repository
    if ctx_size_checked and ctx_size_value is not None:
        command.extend(['-c', str(ctx_size_value)])              # context size
    if ngl_checked and ngl_value is not None:
        command.extend(['-ngl', str(ngl_value)])                 # number of layers to offload to the GPU
    if host_checked and host_value is not None:
        command.extend(['--host', host_value])                   # interface for the server to bind to
    if port_checked and port_value is not None:
        command.extend(['--port', str(port_value)])              # port for the server to listen on
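    # Illustrative example (hypothetical values): if only the model, context-size and
    # GPU-layer options were checked, the list built above would be
    #   ['-m', '/path/to/model.llamafile', '-c', '4096', '-ngl', '35']
    # i.e. the flag list reported in the return message below.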
    # Code to start llamafile with the provided configuration
    local_llm_gui_function(am_noob, verbose_checked, threads_checked, threads_value,
                           http_threads_checked, http_threads_value, model_checked,
                           model_value, hf_repo_checked, hf_repo_value, hf_file_checked,
                           hf_file_value, ctx_size_checked, ctx_size_value, ngl_checked,
                           ngl_value, host_checked, host_value, port_checked, port_value)
    # Example command output to verify
    return f"Command built and run: {' '.join(command)}\n\nLlamafile started successfully."