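"""Gradio demo: interactive text generation with bigscience/bloom-560m.

The checkpoint is loaded once at startup; the UI exposes a prompt box plus
sliders for generation length and sampling temperature.
"""
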
from transformers import AutoTokenizer, BloomForCausalLM
import gradio as gr

# Checkpoint to load from the Hugging Face Hub.
modelo = "bigscience/bloom-560m"
tokenizer = AutoTokenizer.from_pretrained(modelo)
model = BloomForCausalLM.from_pretrained(modelo)

def generator(prompt, max_length, temp):
    """Sample a continuation of `prompt` from the model."""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    gen_tokens = model.generate(
        input_ids,
        do_sample=True,         # sample instead of greedy decoding
        temperature=temp,
        max_length=max_length,  # total length, prompt tokens included
    )
    # batch_decode returns one string per sequence; we generate a single one.
    gen_text = tokenizer.batch_decode(gen_tokens, skip_special_tokens=True)[0]
    return gen_text
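
# Quick sanity check from a Python REPL; the prompt string below is just an
# illustrative example, not something the app depends on:
#
#   >>> generator("The weather today is", max_length=30, temp=0.7)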

def run(prompt, max_len, temp):
    """Gradio callback: generate text and return (output, log message)."""
    output = generator(prompt, max_len, temp)
    print(output)  # also echo the generation to the server console
    # The empty string clears the "Log information" Markdown on success.
    return (output, "")

if __name__ == "__main__":
    with gr.Blocks() as demo:
        gr.Markdown(modelo)
        with gr.Row():
            with gr.Column():
                text = gr.Textbox(label="Input")
                tokens = gr.Slider(1, 250, value=50, step=1, label="Tokens to generate")
                temp = gr.Slider(0.1, 1.0, value=0.7, step=0.1, label="Temperature")

                with gr.Row():
                    submit = gr.Button("Submit")
            with gr.Column():
                text_error = gr.Markdown(label="Log information")
                text_out = gr.Textbox(label="Output")
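        # Wire the Submit button: run(text, tokens, temp) -> (text_out, text_error).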
        submit.click(
            run,
            inputs=[text, tokens, temp],
            outputs=[text_out, text_error],
        )

    demo.launch()