File size: 966 Bytes
dfbcf7f
fde41fa
24e9940
de81baf
 
 
 
 
 
f3676ad
cabdd14
f3676ad
20fd975
b7096cd
f3676ad
 
 
 
fde41fa
20fd975
fde41fa
f3676ad
fde41fa
 
 
 
20fd975
 
fde41fa
20fd975
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
"""Serve code generation from a fine-tuned Falcon model via a Gradio text UI."""
from transformers import AutoModelForCausalLM, AutoTokenizer, FalconForCausalLM
import gradio as gr

# Hub repository holding the fine-tuned checkpoint.
fine_tuned_model = "ashioyajotham/results"

# Load the fine-tuned Falcon causal-LM weights.
model = FalconForCausalLM.from_pretrained(fine_tuned_model)

# Load the tokenizer for the Falcon 7B model with remote code trust
tokenizer = AutoTokenizer.from_pretrained(fine_tuned_model, trust_remote_code=True)

# Set the padding token to be the same as the end-of-sequence token
tokenizer.pad_token = tokenizer.eos_token


def generate_code(prompt: str) -> str:
    """Generate a model completion for *prompt* and return the decoded text.

    Runs sampled decoding (temperature 0.7, up to 100 tokens total) and is
    called once per Gradio request.
    """
    inputs = tokenizer.encode(prompt, return_tensors='pt')
    outputs = model.generate(
        inputs,
        max_length=100,
        temperature=0.7,
        do_sample=True,
        pad_token_id=tokenizer.eos_token_id,
    )
    return tokenizer.decode(outputs[0])


# BUG FIX: the original passed `fn=completion`, where `completion` was a
# decoded *string* produced by a single generation at import time — not a
# callable. Gradio requires `fn` to be a function, so the app could never
# respond to user input. Passing `generate_code` runs the model per request.
iface = gr.Interface(
    fn=generate_code,
    inputs="text",
    outputs="text",
    title="Code Generation App",
    description="Generate code from text input.",
)

iface.launch()