import os
import requests
from llama_cpp import Llama
import gradio as gr
url="https://huggingface.co/TheBloke/WizardLM-13B-V1.2-GGUF/resolve/main/wizardlm-13b-v1.2.Q4_0.gguf"
response = requests.get(url)
with open("./model.gguf", mode="wb") as file:
file.write(response.content)
llm = Llama(model_path="./model.gguf")
def generate_text(input_text):
    # Wrap the input in a Q/A prompt; stop at the next question or newline so the
    # model answers once. echo=True includes the prompt in the returned text.
    output = llm(f"Q: {input_text} A:", max_tokens=256, stop=["Q:", "\n"], echo=True)
    return output["choices"][0]["text"]
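# Quick sanity check without the UI (this call is illustrative, not part of the Space):
# print(generate_text("What is the capital of France?"))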
# gr.inputs/gr.outputs were removed in Gradio 4; use the top-level components instead.
input_text = gr.Textbox(lines=10, label="Enter your input text")
output_text = gr.Textbox(label="Output text")
description = "llama.cpp implementation in python [https://github.com/abetlen/llama-cpp-python]"
# Each example row supplies one value per input component; this interface has a
# single input, so each row holds just the question (expected answers noted inline).
examples = [
    ["What is the capital of France?"],               # Paris
    ["Who wrote the novel 'Pride and Prejudice'?"],   # Jane Austen
    ["What is the square root of 64?"]                # 8
]
gr.Interface(
    fn=generate_text,
    inputs=input_text,
    outputs=output_text,
    title="Llama Language Model",
    description=description,
    examples=examples,
).launch()
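# Alternative deployment: llama-cpp-python also bundles an OpenAI-compatible
# FastAPI server (llama_cpp.server.app exposes create_app and Settings).
# A minimal sketch, assuming a llama-cpp-python release whose Settings accepts
# a `model` path; the server settings API has changed between versions, so
# verify against the installed release:
#
#     import uvicorn
#     from llama_cpp.server.app import create_app, Settings
#
#     app = create_app(settings=Settings(model="./model.gguf"))
#     uvicorn.run(app, host="0.0.0.0", port=8000)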