manan05 committed
Commit 2dcdd38
1 Parent(s): c2b25e6
Files changed (2):
  1. app.py +27 -2
  2. requirements.txt +2 -0
app.py CHANGED
@@ -2,14 +2,39 @@ import gradio as gr
 import random
 import time
 
+from ctransformers import AutoModelForCausalLM
+
+# Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
+model = AutoModelForCausalLM.from_pretrained(
+    "manan05/mistral-7b-friends-v0.1.gguf",
+    model_file="mistralfriends-7b-v0.1.gguf",
+    model_type="mistral",
+    gpu_layers=0,
+    hf=True
+)
+
+from transformers import AutoTokenizer, pipeline
+
+# Tokenizer
+tokenizer = AutoTokenizer.from_pretrained("manan05/mistral-7b-friends")
+
+# Pipeline
+generator = pipeline(
+    model=model, tokenizer=tokenizer,
+    task='text-generation',
+    max_new_tokens=50,
+    repetition_penalty=1.1
+)
+
 with gr.Blocks() as demo:
     chatbot = gr.Chatbot()
     msg = gr.Textbox()
     clear = gr.ClearButton([msg, chatbot])
 
     def respond(message, chat_history):
-        bot_message = random.choice(["How are you?", "I love you", "I'm very hungry"])
-        chat_history.append((message, bot_message))
+        user_message = "<s>[INST] Given the following conversation context, generate the upcoming dialogue of Joey in his style. \n CONTEXT: Me: " + message + "[/INST]"
+        bot_message = generator(user_message)[0]["generated_text"]
+        chat_history.append((user_message, bot_message))
         time.sleep(2)
         return "", chat_history
 
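The hunk ends inside the with gr.Blocks() block, so the event wiring below it is not part of the diff. A minimal sketch of how respond is typically hooked up in a Gradio Blocks app, assuming this commit leaves those lines untouched:

# Assumed wiring, not shown in this diff: submit the textbox value and the
# current chat history to respond, which returns ("", chat_history).
msg.submit(respond, [msg, chatbot], [msg, chatbot])

demo.launch()

One design note: the transformers text-generation pipeline includes the prompt in generated_text by default, so the full [INST] wrapper will appear in the bot reply; calling the pipeline with return_full_text=False (or slicing the prompt off the output) is a common way to keep only the generated continuation.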
requirements.txt ADDED
@@ -0,0 +1,2 @@
+ctransformers
+transformers
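For a local run outside the Space, the same dependencies can be installed with pip; gradio itself is presumably preinstalled by the Space's Gradio SDK image, which would be why it is not listed here:

pip install ctransformers transformers gradio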