CogwiseAI committed on
Commit
b63ab56
1 Parent(s): 58a98b6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +106 -56
app.py CHANGED
@@ -1,78 +1,128 @@
1
- import gradio as gr
2
- from transformers import AutoTokenizer, AutoModelForCausalLM
3
- import torch
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
- model = AutoModelForCausalLM.from_pretrained(
6
- "Cogwisechat/falcon-7b-finance",
7
- torch_dtype=torch.bfloat16,
8
- trust_remote_code=True,
9
- device_map="auto",
10
- low_cpu_mem_usage=True,
11
- )
12
- tokenizer = AutoTokenizer.from_pretrained("Cogwisechat/falcon-7b-finance")
13
 
 
 
 
 
14
 
15
- def generate_text(input_text):
16
- global output_text
17
 
18
- input_ids = tokenizer.encode(input_text, return_tensors="pt")
19
- attention_mask = torch.ones(input_ids.shape)
20
 
21
- output = model.generate(
22
- input_ids,
23
- attention_mask=attention_mask,
24
- max_length=200,
25
- do_sample=True,
26
- top_k=10,
27
- num_return_sequences=1,
28
- eos_token_id=tokenizer.eos_token_id,
29
- )
30
-
31
 
32
- output_text = tokenizer.decode(output[0], skip_special_tokens=True)
33
- print(output_text)
34
 
35
- # Remove Prompt Echo from Generated Text
36
-
37
- cleaned_output_text = output_text.replace(input_text, "")
38
- return cleaned_output_text
39
 
40
- block = gr.Blocks()
41
 
42
 
43
- with block:
44
- gr.Markdown("""<h1><center>CogwiseAI falcon7b</center></h1>
45
- """)
46
- # chatbot = gr.Chatbot()
47
- message = gr.Textbox(placeholder='Enter Your Question Here')
48
- state = gr.State()
49
- submit = gr.Button("SEND")
50
- submit.click(generate_text, inputs=[message, state], outputs=[output_text, state])
51
 
52
- block.launch(debug = True)
53
 
54
 
55
 
56
 
57
 
 
58
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
59
 
60
 
61
 
 
 
 
62
 
63
-
64
 
65
- # logo = (
66
- # "<div >"
67
- # "<img src='ai-icon.png'alt='image One'>"
68
- # + "</div>"
69
- # )
70
- # text_generation_interface = gr.Interface(
71
- # fn=generate_text,
72
- # inputs=[
73
- # gr.inputs.Textbox(label="Input Text"),
74
- # ],
75
- # outputs=gr.inputs.Textbox(label="Generated Text"),
76
- # title="Falcon-7B Instruct",
77
- # image=logo
78
- # ).launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # import gradio as gr
2
+ # from transformers import AutoTokenizer, AutoModelForCausalLM
3
+ # import torch
4
+
5
+ # model = AutoModelForCausalLM.from_pretrained(
6
+ # "Cogwisechat/falcon-7b-finance",
7
+ # torch_dtype=torch.bfloat16,
8
+ # trust_remote_code=True,
9
+ # device_map="auto",
10
+ # low_cpu_mem_usage=True,
11
+ # )
12
+ # tokenizer = AutoTokenizer.from_pretrained("Cogwisechat/falcon-7b-finance")
13
+
14
+
15
+ # def generate_text(input_text):
16
+ # global output_text
17
+
18
+ # input_ids = tokenizer.encode(input_text, return_tensors="pt")
19
+ # attention_mask = torch.ones(input_ids.shape)
20
+
21
+ # output = model.generate(
22
+ # input_ids,
23
+ # attention_mask=attention_mask,
24
+ # max_length=200,
25
+ # do_sample=True,
26
+ # top_k=10,
27
+ # num_return_sequences=1,
28
+ # eos_token_id=tokenizer.eos_token_id,
29
+ # )
30
+
31
 
32
+ # output_text = tokenizer.decode(output[0], skip_special_tokens=True)
33
+ # print(output_text)
 
 
 
 
 
 
34
 
35
+ # # Remove Prompt Echo from Generated Text
36
+
37
+ # cleaned_output_text = output_text.replace(input_text, "")
38
+ # return cleaned_output_text
39
 
40
+ # block = gr.Blocks()
 
41
 
 
 
42
 
43
+ # with block:
44
+ # gr.Markdown("""<h1><center>CogwiseAI falcon7b</center></h1>
45
+ # """)
46
+ # # chatbot = gr.Chatbot()
47
+ # message = gr.Textbox(placeholder='Enter Your Question Here')
48
+ # state = gr.State()
49
+ # submit = gr.Button("SEND")
50
+ # submit.click(generate_text, inputs=[message, state], outputs=[output_text, state])
 
 
51
 
52
+ # block.launch(debug = True)
 
53
 
 
 
 
 
54
 
 
55
 
56
 
 
 
 
 
 
 
 
 
57
 
 
58
 
59
 
60
 
61
 
62
 
63
+
64
 
65
+ # # logo = (
66
+ # # "<div >"
67
+ # # "<img src='ai-icon.png'alt='image One'>"
68
+ # # + "</div>"
69
+ # # )
70
+ # # text_generation_interface = gr.Interface(
71
+ # # fn=generate_text,
72
+ # # inputs=[
73
+ # # gr.inputs.Textbox(label="Input Text"),
74
+ # # ],
75
+ # # outputs=gr.inputs.Textbox(label="Generated Text"),
76
+ # # title="Falcon-7B Instruct",
77
+ # # image=logo
78
+ # # ).launch()
79
 
80
 
81
 
82
+ from transformers import AutoModelForCausalLM, AutoTokenizer
83
+ import gradio as gr
84
+ import torch
85
 
 
86
 
87
# UI metadata shown in the Gradio interface.
title = "🤖AI ChatBot"
# FIX: the previous description said "DialoGPT", but the model loaded below is
# Cogwisechat/falcon-7b-finance — corrected so the UI does not mislead users.
description = "A chatbot powered by the Cogwisechat/falcon-7b-finance causal language model"
examples = [["How are you?"]]


# Load the tokenizer and model once, at module import time, so every request
# reuses the same instances.
# NOTE(review): the earlier revision of this file passed
# torch_dtype=torch.bfloat16, trust_remote_code=True, device_map="auto" and
# low_cpu_mem_usage=True when loading this model; Falcon checkpoints commonly
# require trust_remote_code=True — confirm the plain load below actually works
# before deploying.
tokenizer = AutoTokenizer.from_pretrained("Cogwisechat/falcon-7b-finance")
model = AutoModelForCausalLM.from_pretrained("Cogwisechat/falcon-7b-finance")
94
+
95
+
96
def predict(input, history=None):
    """Generate the next chatbot reply and the updated conversation state.

    Args:
        input: The user's new message. (Name shadows the ``input`` builtin,
            but it is kept because it is part of the public signature.)
        history: Flat token-id history of the conversation as produced by a
            previous call (``list``-of-``list`` from ``.tolist()``), or
            ``None``/``[]`` on the first turn.

    Returns:
        A ``(response, history)`` pair: ``response`` is a list of
        ``(user_text, bot_text)`` tuples for the Chatbot component, and
        ``history`` is the updated token-id list to store in the State.
    """
    # FIX: the original signature used a mutable default (history=[]), which
    # is shared across calls and can leak conversation state between users.
    if history is None:
        history = []

    # Tokenize the new user message, terminated with the EOS token so the
    # model can tell where the turn ends.
    new_user_input_ids = tokenizer.encode(
        input + tokenizer.eos_token, return_tensors="pt"
    )

    # Append the new user tokens to the running conversation.
    # NOTE(review): with an empty history this relies on torch.cat accepting
    # an empty LongTensor alongside a (1, n) tensor — verify on the deployed
    # torch version.
    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)

    # Generate a continuation; EOS doubles as the pad token, the usual
    # convention for open-ended causal LMs.
    history = model.generate(
        bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id
    ).tolist()

    # Decode the full conversation and split on the EOS marker; the segments
    # alternate user / bot, so pair them up for the Chatbot widget.
    response = tokenizer.decode(history[0]).split("<|endoftext|>")
    response = [
        (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
    ]
    return response, history
118
+
119
+
120
# Build and launch the Gradio app. The "state" input/output pair carries the
# token-id history between calls; "chatbot" renders (user, bot) message pairs.
chat_ui = gr.Interface(
    fn=predict,
    title=title,
    description=description,
    examples=examples,
    inputs=["text", "state"],
    outputs=["chatbot", "state"],
    theme="finlaymacklon/boxy_violet",
)
chat_ui.launch()