# Alternative front end: gr.load() wraps the hosted Salesforce/codet5p-220m
# model in a Gradio demo. It is commented out here so it does not block the
# Streamlit app defined below when this script runs.
# import gradio as gr
# gr.load("models/Salesforce/codet5p-220m").launch()
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch

def main():
    st.title("Python Code Generation App")

    # Load the CodeT5+ tokenizer and model from the Hugging Face Hub and
    # move the model to the GPU when one is available.
    tokenizer = AutoTokenizer.from_pretrained("Salesforce/codet5p-220m")
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = AutoModelForSeq2SeqLM.from_pretrained("Salesforce/codet5p-220m").to(device)

    # Get user input
    st.subheader("Instructions")
    st.write("Use the following format to enter prompts: Write Python code for SBERT vector embedding of a sentence")
    st.write("")
    query = st.text_input("Enter a prompt here: ")
    if st.button("Generate Code"):
        if query.strip().lower() == 'exit':
            st.stop()
        else:
            # Tokenize the prompt, generate code, and decode the output,
            # dropping special tokens such as <pad> and </s>.
            inputs = tokenizer(f"summarize:{query}", return_tensors="pt")
            inputs = {k: v.to(device) for k, v in inputs.items()}
            output = model.generate(**inputs, max_length=750)
            generated_text = tokenizer.decode(output[0], skip_special_tokens=True).replace("summarize:", "")

            # Display the generated code
            st.subheader("Generated Code:")
            st.code(generated_text)

if __name__ == "__main__":
    main()
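
# A minimal way to try the app locally, assuming this file is saved as app.py
# (a sketch of one possible setup, not the project's documented workflow):
#
#   pip install streamlit transformers torch
#   streamlit run app.py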