xSaXx committed on
Commit
f84fe29
•
1 Parent(s): 8dae1ae

Upload 2 files

Files changed (2)
  1. app.py +41 -0
  2. requirements.txt +3 -0
app.py ADDED
@@ -0,0 +1,41 @@
+ import gradio as gr
+ import copy
+ from llama_cpp import Llama
+ from huggingface_hub import hf_hub_download  # pull model files from the Hugging Face Hub
+
+
+ # Download the GGML weights from the Hub; n_ctx=2048 gives a larger context window
+ llm = Llama(model_path=hf_hub_download(repo_id="TheBloke/Llama-2-70B-GGML", filename="llama-2-70b.ggmlv3.q4_K_M.bin"), n_ctx=2048)
+
+ pre_prompt = " The user and the AI are having a conversation : <|endoftext|> \n "
+
+
+ def generate_text(input_text, history):
+     print("history ", history)
+     print("input ", input_text)
+     temp = ""
+     # Rebuild the prompt from the full chat history so earlier turns are not lost
+     input_text_with_history = f"SYSTEM:{pre_prompt}" + "\n"
+     for user_msg, bot_msg in history:
+         input_text_with_history += f"USER: {user_msg}" + "\n" + f"ASSISTANT: {bot_msg}" + "\n"
+     input_text_with_history += f"USER: {input_text}" + "\n" + " ASSISTANT:"
+     print("new input", input_text_with_history)
+     output = llm(input_text_with_history, max_tokens=1024, stop=["<|prompter|>", "<|endoftext|>", "<|endoftext|> \n", "ASSISTANT:", "USER:", "SYSTEM:"], stream=True)
+     # Stream partial completions back to the UI as tokens arrive
+     for out in output:
+         stream = copy.deepcopy(out)
+         print(stream["choices"][0]["text"])
+         temp += stream["choices"][0]["text"]
+         yield temp
+
+
+ demo = gr.ChatInterface(generate_text,
+                         title="LLM on CPU",
+                         description="Running LLM with https://github.com/abetlen/llama-cpp-python. btw the text streaming thing was the hardest thing to implement",
+                         examples=["Hello", "Am I cool?", "Are tomatoes vegetables?"],
+                         cache_examples=True,
+                         retry_btn=None,
+                         undo_btn="Delete Previous",
+                         clear_btn="Clear")
+ demo.queue(concurrency_count=1, max_size=5)
+ demo.launch()
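
The description above singles out token streaming; for reference, here is a minimal standalone sketch of the pattern app.py relies on: with stream=True, llama-cpp-python returns an iterator of partial-completion dicts, and Gradio re-renders each string the generator yields. "model.bin" below is a placeholder path, not a file from this commit:

    from llama_cpp import Llama

    llm = Llama(model_path="model.bin", n_ctx=2048)  # placeholder path; app.py downloads the real file
    text = ""
    for chunk in llm("USER: Hello\nASSISTANT:", max_tokens=64, stream=True):
        # each chunk carries the newest tokens in choices[0]["text"]
        text += chunk["choices"][0]["text"]
        print(text)  # in app.py this accumulated string is yielded to gr.ChatInterface
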
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ llama-cpp-python
+ huggingface_hub
+ gradio_client
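
To try the commit locally (a sketch, not part of the commit: assumes a working pip and enough RAM for the multi-gigabyte 70B q4_K_M weights; on a Space the gradio package itself comes from the SDK runtime, so it is added here as an extra install):

    pip install -r requirements.txt gradio
    python app.py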