import re

import gradio as gr
from transformers import pipeline, BloomTokenizerFast, BloomForCausalLM

description = """
When in legal doubt, you better call BLOOM! Ask BLOOM any legal question:
<img src="https://huggingface.co/spaces/tomrb/bettercallbloom/resolve/main/img.jpeg" width=200px>
"""
title = "Better Call Bloom!"



# Load the fine-tuned BLOOM-3B (8-bit) checkpoint and its tokenizer
tokenizer = BloomTokenizerFast.from_pretrained("tomrb/bettercallbloom-3b-8bit")
model = BloomForCausalLM.from_pretrained("tomrb/bettercallbloom-3b-8bit", low_cpu_mem_usage=True)

# Wrap the model and tokenizer in a text-generation pipeline
generator = pipeline('text-generation', model=model, tokenizer=tokenizer)


def preprocess(text):
    # Wrap the raw question in the prompt template used by the fine-tune:
    # prepend "Question: " and append "Answer #1:"
    return "Question: " + text + "Answer #1:"


def generate(text):
    preprocessed_text = preprocess(text)
    # max_length counts prompt plus generated tokens for the text-generation pipeline
    result = generator(preprocessed_text, max_length=128)
    # Split the full generation on the template delimiters and keep the segment
    # containing the model's answer
    output = re.split(r'\nQuestion:|Answer #|Title:', result[0]['generated_text'])[2]
    return output

examples = [
    ["I started a company with a friend. What types of legal documents should we fill in to clarify the ownership of the company?"],
    ["[CA] I got a parking ticket in Toronto. How can I contest it?"],
]

demo = gr.Interface(
    fn=generate,
    inputs=gr.Textbox(lines=5, label="Input Text", placeholder="Write your question here..."),
    outputs=gr.Textbox(label="Generated Text"),
    examples=examples,
    description=description,
    title=title,
)

demo.launch()