Yash Sachdeva committed on
Commit
1720d8c
·
1 Parent(s): ea58bd6
Files changed (1) hide show
  1. question_paper.py +5 -2
question_paper.py CHANGED
@@ -10,8 +10,9 @@ app = FastAPI()
10
  MODEL = None
11
  TOKENIZER = None
12
  # ?input=%22Name%203%20shows%22
 
13
  @app.get("/")
14
- def llama(input):
15
  # prompt = [{'role': 'user', 'content': ""+input}]
16
  # inputs = TOKENIZER.apply_chat_template( prompt, add_generation_prompt=True, return_tensors='pt' )
17
 
@@ -30,4 +31,6 @@ def llama(input):
30
  # print("loading model")
31
  # TOKENIZER = AutoTokenizer.from_pretrained('stabilityai/stablelm-zephyr-3b')
32
  # MODEL = AutoModelForCausalLM.from_pretrained('stabilityai/stablelm-zephyr-3b', device_map="auto")
33
- # print("loaded model")
 
 
 
10
  MODEL = None
11
  TOKENIZER = None
12
  # ?input=%22Name%203%20shows%22
13
+ origins = ['https://aiforall.netlify.app/']
14
  @app.get("/")
15
+ def llama():
16
  # prompt = [{'role': 'user', 'content': ""+input}]
17
  # inputs = TOKENIZER.apply_chat_template( prompt, add_generation_prompt=True, return_tensors='pt' )
18
 
 
31
  # print("loading model")
32
  # TOKENIZER = AutoTokenizer.from_pretrained('stabilityai/stablelm-zephyr-3b')
33
  # MODEL = AutoModelForCausalLM.from_pretrained('stabilityai/stablelm-zephyr-3b', device_map="auto")
34
+ # print("loaded model")
35
+
36
+