File size: 1,735 Bytes
3430966
 
 
 
 
 
 
 
 
 
 
 
 
 
1567030
 
 
3430966
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
import gradio as gr
from transformers import pipeline
from transformers import AutoModelForCausalLM, AutoTokenizer

# UI metadata shown by the Gradio interface below.
title = "Chizuru 👩🏻"
description = "Text Generation Model impersonating Chizuru Ichinose from the anime Rent-a-Girlfriend."
article = 'Created from finetuning TinyLlama-1.1B.'

# Load the fine-tuned causal LM weights from the local ./Model directory,
# but reuse the base TinyLlama tokenizer (the fine-tune did not change vocab
# — assumption based on pairing here; confirm against the training setup).
model = AutoModelForCausalLM.from_pretrained('./Model')
tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-step-50K-105b", use_fast=True)
# TinyLlama ships no pad token; reuse <unk> and right-pad so generation
# inputs batch cleanly.
tokenizer.pad_token = tokenizer.unk_token
tokenizer.padding_side = "right"

# Example prompt(s) surfaced in the Gradio UI.
example_list = ['What is your name?']
# Sampling generation capped at 128 tokens; low top_k keeps replies
# in-character at the cost of variety.
pipe = pipeline(task="text-generation", model=model, tokenizer=tokenizer, max_length=128,
            do_sample=True, top_p = 0.98, top_k=2)
# System-style instruction prepended to every user prompt in predict().
role_play_Prompt = "You are Chizuru Ichinose, a rental girlfriend. You project an image of confidence and professionalism while hiding your true feelings. Respond to the following line of dialog in Chizuru's persona."

def predict(Prompt):
    """Generate a reply in Chizuru's persona for one line of user dialog.

    Builds an Alpaca-style prompt around the module-level role_play_Prompt,
    runs the text-generation pipeline, and extracts the model's response
    section from the full generated text.

    Args:
        Prompt: The user's input line of dialog.

    Returns:
        The text between the '### Response:' marker and the next '\\n\\n###'
        terminator. If the model emitted no terminator, the remainder of the
        generation is returned in full.
    """
    # Prompt layout matching the fine-tuning format (note: '###Instruction'
    # intentionally kept as-is — it is part of the trained prompt template).
    instruction = f"###Instruction:\n{role_play_Prompt}\n\n### Input:\n{Prompt}\n\n### Response:\n"
    result = pipe(instruction)
    generated = result[0]['generated_text']

    start_marker = '### Response:\n'
    end_marker = '\n\n###'

    marker_pos = generated.find(start_marker)
    # Guard against a missing start marker: find() returns -1, and the old
    # code's `-1 + len(start_marker)` produced a bogus mid-string offset.
    start_index = 0 if marker_pos == -1 else marker_pos + len(start_marker)

    end_index = generated.find(end_marker, start_index)
    # find() returns -1 when the terminator is absent; slicing with -1 would
    # silently drop the final character, so slice to the end (None) instead.
    if end_index == -1:
        end_index = None

    extracted_text = generated[start_index:end_index]
    return extracted_text

# Wire the predict() function into a simple one-in/one-out web UI and
# start the local Gradio server.
iface = gr.Interface(
    fn=predict,
    inputs='text',
    outputs=gr.Text(label='Response'),
    title=title,
    description=description,
    article=article,
    examples=example_list,
)
iface.launch()