import gradio as gr
from llama_cpp import Llama
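
# Load the quantized Vicuna 7B GGML model with a 2048-token context window.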
llm = Llama(model_path="eachadea_ggml-vic7b-q4_0.bin", n_ctx=2048)


def generate_text(prompt):
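    # Run one completion; echo=False keeps the prompt out of the output, and
    # generation stops at "#" or after max_tokens tokens.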
    output = llm(prompt, max_tokens=468, temperature=0.1, top_p=0.5, echo=False, stop=["#"])
    output_text = output['choices'][0]['text']
    return output_text


description = "Vicuna-7B-GPTQ-4bit-128g.GGML, max_tokens=468, temperature=0.1, top_p=0.5"
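
# Example prompts with reference answers; only the prompts are wired into the
# interface below, since it has a single text input.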
examples = [
    ["What is the capital of France? ", "The capital of France is Paris."],
    ["Who wrote the novel 'Pride and Prejudice'?", "The novel 'Pride and Prejudice' was written by Jane Austen."],
    ["What is the square root of 64?", "The square root of 64 is 8."],
]

gradio_interface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="Vicuna API",
    description=description,
    # Only the prompt from each example pair is shown, matching the single text input.
    examples=[prompt for prompt, _ in examples],
)
gradio_interface.launch() |
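
# A rough sketch (assumption, not part of the original script): once launch() is
# serving on Gradio's default local address, the interface could be queried from
# a separate process with Gradio's client library, e.g.:
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7860")
#   print(client.predict("What is the capital of France?", api_name="/predict"))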