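"""Gradio app that generates a ready-to-run transformers inference snippet for a given Hub model ID."""
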
import textwrap

import gradio as gr
import httpx

def geninference(model):
    # Fetch the model's config.json and tokenizer_config.json from the Hugging Face Hub.
    config_resp = httpx.get(f"https://huggingface.co/{model}/raw/main/config.json")
    config_resp.raise_for_status()
    config = config_resp.json()

    tokenizer_resp = httpx.get(f"https://huggingface.co/{model}/raw/main/tokenizer_config.json")
    tokenizer_resp.raise_for_status()
    tokenizer_config = tokenizer_resp.json()

    # The first listed architecture and the tokenizer class are the classes to import.
    arch = config["architectures"][0]
    tokenizer_class = tokenizer_config["tokenizer_class"]

    # Build a ready-to-run snippet; dedent it so the returned code has no leading indentation.
    inference_code = textwrap.dedent(f"""\
        from transformers import {arch}, {tokenizer_class}

        model_id = "{model}"

        model = {arch}.from_pretrained(model_id)
        tokenizer = {tokenizer_class}.from_pretrained(model_id)

        prompt = "Hello"
        inputs = tokenizer(prompt, return_tensors="pt")
        gen_ids = model.generate(
            inputs.input_ids,
            max_length=50,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            do_sample=True,
            temperature=0.7,
        )
        output = tokenizer.decode(gen_ids[0], skip_special_tokens=True)
        print(output)
    """)
    return inference_code

with gr.Blocks() as demo:
    gr.Markdown("## Generate inference code for your model with a single click!")
    with gr.Row(equal_height=True):
        inputbox = gr.Textbox(placeholder="Enter your model id here", label="Model ID")
        generatebtn = gr.Button("Generate", variant="primary")
    outputbox = gr.Textbox(label="Output", placeholder="Output will be here")
    # Wire the button to the generator; the snippet appears in the output box.
    generatebtn.click(
        fn=geninference,
        inputs=inputbox,
        outputs=outputbox,
    )

demo.launch()