amatiger committed
Commit
87d4fba
0 Parent(s):

Duplicate from amatiger/GODEL-Demo

Files changed (4)
  1. .gitattributes +33 -0
  2. README.md +14 -0
  3. app.py +91 -0
  4. requirements.txt +4 -0
.gitattributes ADDED
@@ -0,0 +1,33 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: GODEL Demo
+ emoji: 🐠
+ colorFrom: yellow
+ colorTo: blue
+ sdk: gradio
+ sdk_version: 3.6
+ app_file: app.py
+ pinned: false
+ license: mit
+ duplicated_from: amatiger/GODEL-Demo
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,91 @@
+ import gradio as gr
+
+ from transformers import (
+     AutoTokenizer,
+     AutoModel,
+     AutoModelForSeq2SeqLM,
+     AutoModelForCausalLM
+ )
+
+ tokenizer = AutoTokenizer.from_pretrained("microsoft/GODEL-v1_1-base-seq2seq")
+ model = AutoModelForSeq2SeqLM.from_pretrained("microsoft/GODEL-v1_1-base-seq2seq")
+
+ preset_examples = [
+     ('Instruction: given a dialog context, you need to response empathically.',
+      '', 'Does money buy happiness?', 'Chitchat'),
+ ]
+
+
+ def generate(instruction, knowledge, dialog, top_p, min_length, max_length):
+     if knowledge != '':
+         knowledge = '[KNOWLEDGE] ' + knowledge
+     dialog = ' EOS '.join(dialog)
+     query = f"{instruction} [CONTEXT] {dialog} {knowledge}"
+
+     input_ids = tokenizer(f"{query}", return_tensors="pt").input_ids
+     outputs = model.generate(input_ids, min_length=int(
+         min_length), max_length=int(max_length), top_p=top_p, do_sample=True)
+     output = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     print(query)
+     print(output)
+     return output
+
+
+ def api_call_generation(instruction, knowledge, query, top_p, min_length, max_length):
+
+     dialog = [
+         query
+     ]
+     response = generate(instruction, knowledge, dialog,
+                         top_p, min_length, max_length)
+
+     return response
+
+
+ def change_example(choice):
+     choice_idx = int(choice.split()[-1]) - 1
+     instruction, knowledge, query, instruction_type = preset_examples[choice_idx]
+     return [gr.update(lines=1, visible=True, value=instruction), gr.update(visible=True, value=knowledge), gr.update(lines=1, visible=True, value=query), gr.update(visible=True, value=instruction_type)]
+
+ def change_textbox(choice):
+     if choice == "Chitchat":
+         return gr.update(lines=1, visible=True, value="Instruction: given a dialog context, you need to response empathically.")
+     elif choice == "Grounded Response Generation":
+         return gr.update(lines=1, visible=True, value="Instruction: given a dialog context and related knowledge, you need to response safely based on the knowledge.")
+     else:
+         return gr.update(lines=1, visible=True, value="Instruction: given a dialog context and related knowledge, you need to answer the question based on the knowledge.")
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown("# The broken God")
+     gr.Markdown('''All hail Mekhane. Reject flesh. Embrace metal''')
+
+     dropdown = gr.Dropdown(
+         [f"Example {i+1}" for i in range(1)], label='Examples')
+
+     radio = gr.Radio(
+         ["Conversational Question Answering", "Chitchat", "Grounded Response Generation"], label="Instruction Type", value='Conversational Question Answering'
+     )
+     instruction = gr.Textbox(lines=1, interactive=True, label="Instruction",
+                              value="Instruction: given a dialog context and related knowledge, you need to answer the question based on the knowledge.")
+     radio.change(fn=change_textbox, inputs=radio, outputs=instruction)
+     knowledge = gr.Textbox(lines=6, label="Knowledge")
+     query = gr.Textbox(lines=1, label="User Query")
+
+     dropdown.change(change_example, dropdown, [instruction, knowledge, query, radio])
+
+     with gr.Row():
+         with gr.Column(scale=1):
+             response = gr.Textbox(label="Response", lines=2)
+
+         with gr.Column(scale=1):
+             top_p = gr.Slider(0, 1, value=0.9, label='top_p')
+             min_length = gr.Number(8, label='min_length')
+             max_length = gr.Number(
+                 64, label='max_length (should be larger than min_length)')
+
+     greet_btn = gr.Button("Generate")
+     greet_btn.click(fn=api_call_generation, inputs=[
+         instruction, knowledge, query, top_p, min_length, max_length], outputs=response)
+
+ demo.launch()
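
For reference, the prompt that app.py assembles can also be exercised outside the Gradio UI. The sketch below only restates what generate() above already does with the same checkpoint, the same [CONTEXT]/[KNOWLEDGE] markers, and the same ' EOS ' turn separator; the sample instruction and dialog strings are taken from the preset example in the file, and the generation settings mirror the UI defaults.

# Minimal sketch: the prompt assembly from app.py's generate(), run as a plain script.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("microsoft/GODEL-v1_1-base-seq2seq")
model = AutoModelForSeq2SeqLM.from_pretrained("microsoft/GODEL-v1_1-base-seq2seq")

# Inputs mirror the preset "Chitchat" example: one dialog turn, no grounding knowledge.
instruction = "Instruction: given a dialog context, you need to response empathically."
knowledge = ""
dialog = ["Does money buy happiness?"]

if knowledge != "":
    knowledge = "[KNOWLEDGE] " + knowledge
query = f"{instruction} [CONTEXT] {' EOS '.join(dialog)} {knowledge}"

input_ids = tokenizer(query, return_tensors="pt").input_ids
outputs = model.generate(input_ids, min_length=8, max_length=64, top_p=0.9, do_sample=True)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))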
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ torch==1.10.1
+ transformers==4.22.2
+ tokenizers==0.11.1
+ pandas==1.5.0