HakanKilic01 committed on
Commit
77582ad
1 Parent(s): 6a590e1
Files changed (1) hide show
  1. app.py +27 -17
app.py CHANGED
@@ -2,13 +2,7 @@ import re
2
  import gradio as gr
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
 
5
- model = AutoModelForCausalLM.from_pretrained(
6
- 'parsak/codegen-350M-mono-lora-instruction',
7
- )
8
- tokenizer = AutoTokenizer.from_pretrained('Salesforce/codegen-350M-mono')
9
 
10
- tokenizer.pad_token_id = 0 # different to <eos>
11
- tokenizer.padding_side = "left" # Allow batched inference
12
 
13
  def extract_code(input_text):
14
  pattern = r"'''py\n(.*?)'''"
@@ -19,28 +13,44 @@ def extract_code(input_text):
19
  else:
20
  return None # Return None if no match is found
21
 
22
- def generate_code(input_text):
23
- input_ids = tokenizer(input_text, return_tensors="pt").input_ids
24
- generated_ids = model.generate(input_ids, max_length=128)
25
- result = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
26
- return extract_code(result)
 
 
 
 
 
 
 
 
 
27
 
28
  def respond(message, chat_history, additional_inputs):
29
- return f"Here's an example code:\n\n```python\n{generate_code(message)}\n```"
30
 
31
 
 
 
 
 
 
 
32
 
33
 
 
34
 
35
-
36
- gr.ChatInterface(respond,
37
  retry_btn= gr.Button(value="Retry"),
38
  undo_btn=None, clear_btn=gr.Button(value="Clear"),
39
  additional_inputs=[
40
- gr.Dropdown(["annen", "baban"])
41
  ]
42
- ).launch()
43
-
44
 
45
 
 
 
46
 
 
2
  import gradio as gr
3
  from transformers import AutoModelForCausalLM, AutoTokenizer
4
 
 
 
 
 
5
 
 
 
6
 
7
  def extract_code(input_text):
8
  pattern = r"'''py\n(.*?)'''"
 
13
  else:
14
  return None # Return None if no match is found
15
 
16
def generate_code(input_text, modelName, max_length=128):
    """Generate code for *input_text* with the model selected by *modelName*.

    Parameters:
        input_text (str): Prompt/instruction to generate code from.
        modelName (str): "codegen-350M" or "mistral-7b"; any other value
            yields None (preserves the original silent-None contract).
        max_length (int): Maximum generated sequence length (default 128,
            same as the original hard-coded value).

    Returns:
        str | None: For codegen, the code extracted from the '''py ... '''
        fences by extract_code (may itself be None if no fence matched);
        for mistral, the raw decoded text; None for an unknown model name.
    """
    # Guard clause first: an unknown model must return None WITHOUT touching
    # the module-level model/tokenizer globals.
    if modelName == "codegen-350M":
        tokenizer, model, postprocess = codeGenTokenizer, codeGenModel, extract_code
    elif modelName == "mistral-7b":
        # NOTE(review): mistral output is returned verbatim, not fenced —
        # presumably it emits plain code; confirm against the model card.
        tokenizer, model, postprocess = mistralTokenizer, mistralModel, None
    else:
        return None

    # Shared pipeline (was duplicated in both branches): tokenize -> generate
    # -> decode, then apply the per-model post-processing step if any.
    input_ids = tokenizer(input_text, return_tensors="pt").input_ids
    generated_ids = model.generate(input_ids, max_length=max_length)
    result = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
    return postprocess(result) if postprocess is not None else result
30
 
31
def respond(message, chat_history, additional_inputs):
    """Chat handler for gr.ChatInterface.

    chat_history is accepted to satisfy the ChatInterface callback contract
    but is not used; additional_inputs carries the model name selected in
    the dropdown and is forwarded to generate_code.
    """
    generated = generate_code(message, additional_inputs)
    return f"Here's an example code:\n\n```python\n{generated}\n```"
33
 
34
 
35
# Load both candidate models and their tokenizers once at import time so
# generate_code() can dispatch between them per request.

# CodeGen 350M: LoRA instruction-tuned weights, paired with the base
# Salesforce tokenizer.
codeGenModel = AutoModelForCausalLM.from_pretrained('parsak/codegen-350M-mono-lora-instruction')
codeGenTokenizer = AutoTokenizer.from_pretrained('Salesforce/codegen-350M-mono')
codeGenTokenizer.pad_token_id = 0        # pad id distinct from EOS
codeGenTokenizer.padding_side = "left"   # left-pad to allow batched inference

# Mistral 7B code-instruct: model and tokenizer come from the same repo.
mistralModel = AutoModelForCausalLM.from_pretrained('parsak/mistral-code-7b-instruct')
mistralTokenizer = AutoTokenizer.from_pretrained('parsak/mistral-code-7b-instruct')
41
 
42
 
43
# UI wiring: the model-selector dropdown is passed to respond() as its
# additional input, so the user picks which backend generates the code.
dropdown = gr.Dropdown(
    label="Models",
    choices=["codegen-350M", "mistral-7b"],
    value="codegen-350M",
)

interface = gr.ChatInterface(
    respond,
    retry_btn=gr.Button(value="Retry"),
    undo_btn=None,
    clear_btn=gr.Button(value="Clear"),
    additional_inputs=[dropdown],
)

# Launch only when run as a script (not when imported, e.g. by tests).
if __name__ == "__main__":
    interface.launch()
56