developer3000 committed
Commit 2c68df4 · 1 Parent(s): 28e5049

Add application file

Files changed (1):
  app.py +40 -13
app.py CHANGED
@@ -1,20 +1,47 @@
-from langchain.chat_models import ChatOpenAI
-from langchain.schema import AIMessage, HumanMessage
-import openai
-import os
 import gradio as gr
+import torch
+from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria, StoppingCriteriaList, TextIteratorStreamer
+from threading import Thread
 
-os.environ["OPENAI_API_KEY"] = "sk-..."  # Replace with your key
+tokenizer = AutoTokenizer.from_pretrained("togethercomputer/RedPajama-INCITE-Chat-3B-v1")
+model = AutoModelForCausalLM.from_pretrained("togethercomputer/RedPajama-INCITE-Chat-3B-v1", torch_dtype=torch.float16)
+model = model.to('cuda:0')
 
-llm = ChatOpenAI(temperature=1.0, model='gpt-3.5-turbo-0613')
+class StopOnTokens(StoppingCriteria):
+    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
+        stop_ids = [29, 0]
+        for stop_id in stop_ids:
+            if input_ids[0][-1] == stop_id:
+                return True
+        return False
 
 def predict(message, history):
-    history_langchain_format = []
-    for human, ai in history:
-        history_langchain_format.append(HumanMessage(content=human))
-        history_langchain_format.append(AIMessage(content=ai))
-    history_langchain_format.append(HumanMessage(content=message))
-    gpt_response = llm(history_langchain_format)
-    return gpt_response.content
+    history_transformer_format = history + [[message, ""]]
+    stop = StopOnTokens()
+
+    messages = "".join(["".join(["\n<human>:"+item[0], "\n<bot>:"+item[1]])
+                        for item in history_transformer_format])
+
+    model_inputs = tokenizer([messages], return_tensors="pt").to("cuda")
+    streamer = TextIteratorStreamer(tokenizer, timeout=10., skip_prompt=True, skip_special_tokens=True)
+    generate_kwargs = dict(
+        model_inputs,
+        streamer=streamer,
+        max_new_tokens=1024,
+        do_sample=True,
+        top_p=0.95,
+        top_k=1000,
+        temperature=1.0,
+        num_beams=1,
+        stopping_criteria=StoppingCriteriaList([stop])
+    )
+    t = Thread(target=model.generate, kwargs=generate_kwargs)
+    t.start()
+
+    partial_message = ""
+    for new_token in streamer:
+        if new_token != '<':
+            partial_message += new_token
+            yield partial_message
 
 gr.ChatInterface(predict).launch()
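
For reference, here is a sketch of the prompt string the rewritten predict() assembles: the comprehension flattens the chat history into alternating <human>:/<bot>: turns and leaves a trailing empty <bot>: turn for the model to complete. The history and message values below are hypothetical; the variable names mirror app.py.

# Hypothetical inputs; the names mirror the app code above.
history = [["Hi there", "Hello! How can I help you?"]]
message = "Tell me a joke"

history_transformer_format = history + [[message, ""]]
messages = "".join(["".join(["\n<human>:" + item[0], "\n<bot>:" + item[1]])
                    for item in history_transformer_format])

print(messages)
# <human>:Hi there
# <bot>:Hello! How can I help you?
# <human>:Tell me a joke
# <bot>: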
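Because predict() is now a generator, gr.ChatInterface renders each yielded partial_message as it arrives, so the reply streams while model.generate() runs on the background thread. Generation ends once StopOnTokens sees token id 29 or 0 at the end of the sequence, and the new_token != '<' guard keeps turn-marker fragments out of the displayed text. As a sanity check (not part of the commit), the stop ids can be decoded with the tokenizer loaded above; id 0 should be this GPT-NeoX-style tokenizer's <|endoftext|> token, but verify rather than assume:

# Sanity check, assuming the `tokenizer` object from app.py:
print(tokenizer.convert_ids_to_tokens([29, 0]))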