imperialwool committed
Commit
2a813c3
1 Parent(s): ab733a4

Create app.py

Files changed (1)
app.py +32 -0
app.py ADDED
@@ -0,0 +1,32 @@
+ import torch
+ import random
+ from quart import Quart, request
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+
+ app = Quart(__name__)
+ tokenizer = AutoTokenizer.from_pretrained("OpenBuddy/openbuddy-openllama-3b-v10-bf16")
+ model = AutoModelForCausalLM.from_pretrained("OpenBuddy/openbuddy-openllama-3b-v10-bf16")
+ model.eval()
+
+ # Load the shared system prompt once at startup.
+ with open('../system.prompt', 'r', encoding='utf-8') as f:
+     prompt = f.read()
+
+ @app.post("/echo")
+ async def echo():
+     data = await request.get_json()
+     # Cap the requested generation length at 500 tokens.
+     if data.get("max_tokens") is not None and data.get("max_tokens") > 500:
+         data['max_tokens'] = 500
+     user_prompt = prompt + "\n\nUser: " + data['request'] + "\nAssistant: "
+     input_ids = tokenizer.encode(user_prompt, return_tensors='pt')
+     with torch.no_grad():
+         output_ids = model.generate(
+             input_ids=input_ids,
+             do_sample=random.choice([True, False]),  # randomly toggle sampling on or off
+             temperature=float(random.randint(7, 20)) / 10.0,  # random temperature in [0.7, 2.0]
+             max_new_tokens=data.get("max_tokens") or random.randint(200, 500),
+             eos_token_id=tokenizer.eos_token_id)
+     # Decode only the newly generated tokens, not the echoed prompt.
+     completion = output_ids[0][input_ids.shape[-1]:]
+     return {"output": tokenizer.decode(completion, skip_special_tokens=True)}
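For a quick local smoke test, a client call to the new /echo route could look like the sketch below. It assumes the app is being served on Quart's default development port 5000 (for example via an ASGI server such as Hypercorn); the example prompt and the use of the requests library are illustrative and not part of this commit.

    # Hypothetical client sketch; assumes the server above is running locally on port 5000.
    import requests

    resp = requests.post(
        "http://127.0.0.1:5000/echo",
        json={"request": "Hello! Who are you?", "max_tokens": 300},
    )
    resp.raise_for_status()
    print(resp.json()["output"])

Note that max_tokens is clamped server-side to 500, so any larger value in the request body is silently reduced.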