hunkim committed
Commit d8e1e44 · 1 Parent(s): b97592a

Update app.py

Files changed (1)
  1. app.py +5 -1
app.py CHANGED

@@ -4,6 +4,7 @@ import streamlit as st
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM
 
+'''
 tokenizer = AutoTokenizer.from_pretrained(
     'kakaobrain/kogpt', revision='KoGPT6B-ryan1.5b',
     bos_token='[BOS]', eos_token='[EOS]', unk_token='[UNK]', pad_token='[PAD]', mask_token='[MASK]'
@@ -17,16 +18,19 @@ model = AutoModelForCausalLM.from_pretrained(
     torch_dtype=torch.float16, low_cpu_mem_usage=False
 ).to(device=device, non_blocking=True)
 _ = model.eval()
-
+'''
 print("Model loading done!")
 
 def gpt(prompt):
+    return prompt
+    '''
     with torch.no_grad():
         tokens = tokenizer.encode(prompt, return_tensors='pt').to(device=device, non_blocking=True)
         gen_tokens = model.generate(tokens, do_sample=True, temperature=0.8, max_length=256)
         generated = tokenizer.batch_decode(gen_tokens)[0]
 
     return generated
+    '''
 
 #prompts
 st.title("여러분들의 문장을 완성해줍니다. 🤖")
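
With this change, the from_pretrained calls sit inside a module-level triple-quoted string literal and are never executed, and gpt() returns before its generation code is reached, so the Space starts without loading the KoGPT checkpoint. A minimal sketch of the resulting behavior, assuming a plain st.text_input/st.write wiring below st.title (that part of app.py lies outside the diff hunks and is hypothetical here):

import streamlit as st

def gpt(prompt):
    # Echo stub from this commit; the original generation code now sits
    # in an unreachable triple-quoted string after this return.
    return prompt

st.title("여러분들의 문장을 완성해줍니다. 🤖")  # "Completes your sentences."
prompt = st.text_input("Prompt")  # hypothetical widget; the real UI code is not shown in the diff
if prompt:
    st.write(gpt(prompt))  # with the stub, the app simply echoes the prompt back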