oceansweep committed
Commit 85aa3a1
1 Parent(s): 06143b1

Upload 2 files

App_Function_Libraries/Local_LLM/Llamafile.py ADDED
@@ -0,0 +1,57 @@
+# Llamafile.py
+# Description: Functions relating to Llamafile usage
+#
+# Imports
+#
+# External Imports
+#
+# Local Imports
+from App_Function_Libraries.Local_LLM_Inference_Engine_Lib import local_llm_gui_function
+
+
+def stop_llamafile():
+    # Stub: actual process shutdown is not implemented in this commit
+    return "Llamafile stopped"
+
+
+def start_llamafile(*args):
+    # Unpack arguments
+    (am_noob, verbose_checked, threads_checked, threads_value, http_threads_checked, http_threads_value,
+     model_checked, model_value, hf_repo_checked, hf_repo_value, hf_file_checked, hf_file_value,
+     ctx_size_checked, ctx_size_value, ngl_checked, ngl_value, host_checked, host_value, port_checked,
+     port_value) = args
+
+    # Construct the command from the checked values; am_noob adds no CLI flag
+    # and is passed straight through to local_llm_gui_function below
+    command = []
+    if verbose_checked:
+        command.append('-v')
+    if threads_checked and threads_value is not None:
+        command.extend(['-t', str(threads_value)])
+    if http_threads_checked and http_threads_value is not None:
+        command.extend(['--threads', str(http_threads_value)])
+    if model_checked and model_value is not None:
+        model_path = model_value.name
+        command.extend(['-m', model_path])
+    if hf_repo_checked and hf_repo_value is not None:
+        command.extend(['-hfr', hf_repo_value])
+    if hf_file_checked and hf_file_value is not None:
+        command.extend(['-hff', hf_file_value])
+    if ctx_size_checked and ctx_size_value is not None:
+        command.extend(['-c', str(ctx_size_value)])
+    if ngl_checked and ngl_value is not None:
+        command.extend(['-ngl', str(ngl_value)])
+    if host_checked and host_value is not None:
+        command.extend(['--host', host_value])
+    if port_checked and port_value is not None:
+        command.extend(['--port', str(port_value)])
+
+    # Start llamafile with the provided configuration
+    local_llm_gui_function(am_noob, verbose_checked, threads_checked, threads_value,
+                           http_threads_checked, http_threads_value, model_checked,
+                           model_value, hf_repo_checked, hf_repo_value, hf_file_checked,
+                           hf_file_value, ctx_size_checked, ctx_size_value, ngl_checked,
+                           ngl_value, host_checked, host_value, port_checked, port_value)
+
+    # Report the command that was built so it can be verified
+    return f"Command built and ran: {' '.join(command)}\n\nLlamafile started successfully."
App_Function_Libraries/Local_LLM/__init__.py ADDED
File without changes
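
stop_llamafile is still a stub in this commit. A minimal sketch of one way it could work, assuming the server is launched via subprocess.Popen and the handle is kept at module level; both assumptions, and the llamafile_process name, are hypothetical rather than part of this commit:

import subprocess

llamafile_process = None  # would be set by whatever code actually spawns the server (assumption)

def stop_llamafile():
    global llamafile_process
    if llamafile_process is None:
        return "No Llamafile process is running"
    llamafile_process.terminate()  # polite shutdown (SIGTERM on POSIX)
    try:
        llamafile_process.wait(timeout=10)
    except subprocess.TimeoutExpired:
        llamafile_process.kill()  # escalate if the server refuses to exit
    llamafile_process = None
    return "Llamafile stopped"

The terminate-then-kill pattern gives the server a chance to release its port cleanly before being forced down.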