# Text2Code / app.py
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
models = [
    "nadiamaqbool81/starcoderbase-1b-hf",
    "nadiamaqbool81/starcoderbase-1b-hf_python",
    "nadiamaqbool81/codet5-large-hf",
    "nadiamaqbool81/codet5-large-hf-python",
    "nadiamaqbool81/llama-2-7b-int4-java-code-1.178k",
    "nadiamaqbool81/llama-2-7b-int4-python-code-510",
]
names = [
    "nadiamaqbool81/starcoderbase-java",
    "nadiamaqbool81/starcoderbase-python",
    "nadiamaqbool81/codet5-java",
    "nadiamaqbool81/codet5-python",
    "nadiamaqbool81/llama-2-java",
    "nadiamaqbool81/llama-2-python",
]
# gr.load() wraps each checkpoint's hosted Inference API endpoint and
# returns a callable Gradio interface.
model_box = [gr.load(f"models/{name}") for name in models]
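# Each entry in model_box is directly callable, e.g. (illustrative prompt):
#   model_box[3]("Write a Python function that sums a list of numbers.")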
# Cache for the locally loaded StarCoder models, so each checkpoint is
# downloaded and instantiated only once per session.
loaded_models = {}
def the_process(input_text, model_choice):
    if model_choice in (0, 1):
        # The StarCoder choices (0 = java, 1 = python) run locally via transformers.
        if model_choice not in loaded_models:
            tokenizer = AutoTokenizer.from_pretrained(models[model_choice])
            model = AutoModelForCausalLM.from_pretrained(models[model_choice])
            loaded_models[model_choice] = (model, tokenizer)
        model, tokenizer = loaded_models[model_choice]
        output = run_predict(input_text, model, tokenizer)
    else:
        # The remaining models are queried through their hosted endpoints.
        output = model_box[model_choice](input_text)
    return output
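# Note: the first use of a StarCoder choice downloads a ~1B-parameter
# checkpoint, which can take a while and a few GB of memory; subsequent
# calls reuse the cached model.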
def run_predict(text, model, tokenizer):
    # Wrap the prompt in the [INST] instruction template (the Llama-2 chat
    # format) and generate, then strip the echoed prompt from the output.
    pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=400)
    result = pipe(f"<s>[INST] {text} [/INST]")
    return result[0]["generated_text"].split("[/INST]")[1]
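# Worked example of the post-processing above (illustrative): the pipeline
# echoes the prompt, so a generation such as
#   "<s>[INST] reverse a string [/INST] def rev(s): return s[::-1]"
# splits on "[/INST]" into a prompt half and a code half, and the function
# returns " def rev(s): return s[::-1]".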
input_text = gr.Textbox(label="Input Prompt")
model_choice = gr.Dropdown(label="Select Model", choices=names, type="index")
output_window = gr.Code(label="Generated Code")

interface = gr.Interface(
    fn=the_process,
    inputs=[input_text, model_choice],
    outputs=output_window,
    title="Text to Code Generation",
)
interface.launch(debug=True)
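# Programmatic access (sketch): with the app running, the same endpoint can
# be queried via gradio_client. The URL below assumes the default local port
# and the prompt is illustrative only; launch() blocks, so run this from a
# separate process:
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860")
#   code = client.predict(
#       "Write a Python function that reverses a string.",  # input_text
#       "nadiamaqbool81/llama-2-python",                     # model_choice
#       api_name="/predict",
#   )
#   print(code)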