nadiamaqbool81 committed on
Commit
552b390
1 Parent(s): 755b920

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +78 -0
app.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ from transformers import T5ForConditionalGeneration, AutoTokenizer, RobertaTokenizer,AutoModelForCausalLM,pipeline,TrainingArguments
4
+
5
+
6
+
7
# Hugging Face Hub repo ids for the six fine-tuned code-generation models
# (StarCoder, CodeT5 and LLaMA-2 variants, each in a Java and a Python flavor).
models = [
    "nadiamaqbool81/starcoderbase-1b-hf",
    "nadiamaqbool81/starcoderbase-1b-hf_python",
    "nadiamaqbool81/codet5-large-hf",
    "nadiamaqbool81/codet5-large-hf-python",
    "nadiamaqbool81/llama-2-7b-int4-java-code-1.178k",
    "nadiamaqbool81/llama-2-7b-int4-python-code-510",
]

# Human-readable labels shown in the dropdown; index-aligned with `models`.
names = [
    "nadiamaqbool81/starcoderbase-java",
    "nadiamaqbool81/starcoderbase-python",
    "nadiamaqbool81/codet5-java",
    "nadiamaqbool81/codet5-python",
    "nadiamaqbool81/llama-2-java",
    "nadiamaqbool81/llama-2-python",
]

# One gr.load-backed callable per repo, in the same order as `models`/`names`.
model_box = [gr.load(f"models/{repo}") for repo in models]
current_model = model_box[0]

# String flags (not booleans) used by the_process to gate StarCoder loading.
pythonFlag = "false"
javaFlag = "false"
34
+
35
+
36
+
37
def the_process(input_text, model_choice):
    """Generate code for ``input_text`` with the model selected in the UI.

    Parameters
    ----------
    input_text : str
        The natural-language prompt typed by the user.
    model_choice : int
        Index into ``names``/``models`` (0 = StarCoder Java,
        1 = StarCoder Python, 2+ = the gr.load-backed models).

    Returns
    -------
    str
        The generated code text.

    Notes
    -----
    Original bugs fixed here:
    - ``pythonFlag``/``javaFlag`` were checked but never set to "true",
      so the intended "load the StarCoder weights only once" caching
      never took effect; worse, had a flag been "true", ``output`` would
      have been a stale ``global`` value (or a NameError on first call).
    - Model/tokenizer pairs are now cached per repo on a function
      attribute, so each StarCoder checkpoint is downloaded exactly once.
    """
    global pythonFlag
    global javaFlag

    # StarCoder choices need an explicit tokenizer+model load; everything
    # else goes through the prebuilt gr.load callables in model_box.
    starcoder_repos = {
        0: "nadiamaqbool81/starcoderbase-1b-hf",
        1: "nadiamaqbool81/starcoderbase-1b-hf_python",
    }

    if model_choice in starcoder_repos:
        repo = starcoder_repos[model_choice]
        cache = getattr(the_process, "_starcoder_cache", {})
        if repo not in cache:
            print("Loading StarCoder checkpoint:", repo)
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = AutoModelForCausalLM.from_pretrained(repo)
            cache[repo] = (tokenizer, model)
            the_process._starcoder_cache = cache
            # Keep the legacy module-level flags in sync for any other
            # code that still inspects them.
            if model_choice == 1:
                pythonFlag = "true"
            else:
                javaFlag = "true"
        tokenizer, model = cache[repo]
        output = run_predict(input_text, model, tokenizer)
        print("output starcoder", output)
    else:
        # Delegate to the gr.load-backed model for this dropdown index.
        chosen_model = model_box[model_choice]
        output = chosen_model(input_text)
        print("output other", output)
    return output
62
+
63
+
64
def run_predict(text, model, tokenizer):
    """Run a text-generation pipeline on ``text`` wrapped in LLaMA-style
    ``[INST] ... [/INST]`` tags and return only the model's answer.

    Parameters
    ----------
    text : str
        The raw user prompt.
    model, tokenizer
        A transformers model/tokenizer pair compatible with the
        ``text-generation`` pipeline task.

    Returns
    -------
    str
        The text generated after the ``[/INST]`` marker, or the full
        generation if the marker does not appear in the output.

    Notes
    -----
    Bug fixed: the original did ``generated.split('[/INST]')[1]`` which
    raises ``IndexError`` whenever the model's output does not contain
    the marker; ``str.partition`` with a fallback is safe.
    """
    pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=400)
    result = pipe(f"<s>[INST] {text} [/INST]")
    generated = result[0]["generated_text"]
    _, marker, answer = generated.partition("[/INST]")
    # If the marker is missing, return everything rather than crashing.
    return answer if marker else generated
70
+
71
+
72
# Page heading. NOTE(review): called at module scope rather than inside a
# gr.Blocks context, matching the original script's behavior.
gr.HTML("""<h1 style="font-weight:600;font-size:50;margin-top:4px;margin-bottom:4px;text-align:center;">Text to Code Generation</h1></div>""")

# UI components: dropdown returns the selected *index* (type="index"),
# which the_process uses to pick a model.
model_choice = gr.Dropdown(label="Select Model", choices=list(names), type="index", interactive=True)
input_text = gr.Textbox(label="Input Prompt")
output_window = gr.Code(label="Generated Code")

# Wire prompt + model index into the_process; show the returned code as text.
interface = gr.Interface(fn=the_process, inputs=[input_text, model_choice], outputs="text")
interface.launch(debug=True)