T.Masuda committed on
Commit 080cf4b · 1 Parent(s): 3a90fd0

create app

Files changed (3)
  1. README.md +1 -1
  2. app.py +56 -0
  3. requirements.txt +6 -0
README.md CHANGED
@@ -1,5 +1,5 @@
  ---
- title: Question Answering Ja
+ title: Question Answering JA
  emoji: 🏃
  colorFrom: green
  colorTo: red
app.py ADDED
@@ -0,0 +1,56 @@
+ import gradio as gr
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+ from datetime import datetime
+
+ print('{}:loading...'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
+
+ tokenizer = AutoTokenizer.from_pretrained('line-corporation/japanese-large-lm-1.7b-instruction-sft', use_fast=False)
+ model = AutoModelForCausalLM.from_pretrained('line-corporation/japanese-large-lm-1.7b-instruction-sft')
+ #tokenizer = AutoTokenizer.from_pretrained('line-corporation/japanese-large-lm-3.6b-instruction-sft', use_fast=False)
+ #model = AutoModelForCausalLM.from_pretrained('line-corporation/japanese-large-lm-3.6b-instruction-sft')
+
+ if torch.cuda.is_available():
+     model.half()
+     model = model.to('cuda')
+
+ generator = pipeline('text-generation', model=model, tokenizer=tokenizer, device=model.device)
+ print('{}:done.'.format(datetime.now().strftime('%Y-%m-%d %H:%M:%S')))
+
+ def generate(input_text, maxlen):
+     input = f'ユーザー: {input_text}\nシステム: '
+     output = generator(
+         input,
+         max_length=maxlen,
+         do_sample=True,
+         temperature=0.7,
+         top_p=0.9,
+         top_k=0,
+         repetition_penalty=1.1,
+         num_beams=1,
+         num_return_sequences=1,
+         pad_token_id=tokenizer.pad_token_id,
+         bos_token_id=tokenizer.bos_token_id,
+         eos_token_id=tokenizer.eos_token_id
+     )
+     generated_text = output[0]['generated_text'][len(input) + 1:]
+     return generated_text
+
+ with gr.Blocks(title='question answering ja') as chatbox:
+     gr.Markdown('# Question Answering JA')
+
+     chatbot = gr.Chatbot(label='answer')
+     msg = gr.Textbox(label='question')
+     maxlen = gr.Slider(minimum=30, maximum=256, value=30, step=1, label='max length')
+     clear = gr.ClearButton([msg, chatbot])
+
+     def respond(message, maxlen, chat_history):
+         if message == '':
+             return '', chat_history
+         bot_message = generate(message, maxlen)
+         chat_history.append((message, bot_message))
+         return '', chat_history
+
+     msg.submit(respond, [msg, maxlen, chatbot], [msg, chatbot])
+
+ chatbox.launch()
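For reference, a minimal sketch of how the prompt template and pipeline call from app.py above could be exercised outside of the Gradio UI. The model name, prompt format, and sampling parameters are taken from the diff; the sample question and the reduced set of generation arguments are illustrative assumptions, not part of the commit.

# Standalone sketch (not part of the commit): exercises the same model and
# prompt template as app.py above. The sample question is hypothetical.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_name = 'line-corporation/japanese-large-lm-1.7b-instruction-sft'
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(model_name)
generator = pipeline('text-generation', model=model, tokenizer=tokenizer,
                     device=0 if torch.cuda.is_available() else -1)

prompt = 'ユーザー: 日本の首都はどこですか？\nシステム: '  # hypothetical sample question
output = generator(prompt, max_length=64, do_sample=True, temperature=0.7, top_p=0.9)
# Strip the prompt from the returned text, mirroring generate() in app.py.
print(output[0]['generated_text'][len(prompt):])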
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ gradio
+ torch
+ torchvision
+ torchaudio
+ transformers
+ sentencepiece