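"""Gradio demo: generate text with KafkaLM-70B-German (GGUF) via llama-cpp-python."""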
import os
import gradio as gr
from huggingface_hub import hf_hub_download
from llama_cpp import Llama
model_id = "TheBloke/KafkaLM-70B-German-V0.1-GGUF"
model_filename = "kafkalm-70b-german-v0.1.Q5_K_M.gguf"
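# Download the quantized GGUF weights from the Hugging Face Hub
# (cached locally, so the download only happens on the first run)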
model_path = hf_hub_download(repo_id=model_id, filename=model_filename)
# Initialize the Llama model once at import time so the 70B weights are not
# reloaded on every request
llm = Llama(
    model_path=model_path,  # use the downloaded GGUF file
    n_ctx=4096,             # context window; adjust to the model's max sequence length
    n_threads=8,            # tune to the number of physical CPU cores on your system
    n_gpu_layers=35,        # layers to offload to the GPU; set based on available VRAM
)

def generate_text(user_input, system_prompt):
    # Build the KafkaLM prompt: the system and user turns each end with </s>,
    # and the assistant turn is left open for the model to complete
    prompt = (
        f"<|system|>\n{system_prompt.strip()}</s>\n"
        f"<|user|>\n{user_input.strip()}</s>\n"
        f"<|assistant|>\n"
    )
    # Generate text; echo=False returns only the completion, not the prompt
    output = llm(prompt, max_tokens=512, stop=["</s>"], echo=False)
    # llama-cpp-python returns the generated text under choices[0]["text"]
    generated_text = output["choices"][0]["text"]
    return generated_text
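
# A minimal sketch of an alternative handler, not wired into the app below:
# llama-cpp-python can stream tokens by passing stream=True, and gr.Interface
# renders output from a generator function incrementally. It reuses the
# module-level `llm` and the same prompt format as generate_text above.
def generate_text_stream(user_input, system_prompt):
    prompt = (
        f"<|system|>\n{system_prompt.strip()}</s>\n"
        f"<|user|>\n{user_input.strip()}</s>\n"
        f"<|assistant|>\n"
    )
    partial = ""
    for chunk in llm(prompt, max_tokens=512, stop=["</s>"], stream=True):
        # each streamed chunk carries an incremental piece of the completion
        partial += chunk["choices"][0]["text"]
        yield partial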
# Set up the Gradio interface
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(lines=2, label="User Prompt", value="Wer ist Kafka?"),
        # Default system prompt in German (the model's training language):
        # "You are a friendly and helpful AI assistant. You answer questions
        # factually and precisely, without omitting relevant facts."
        gr.Textbox(lines=5, label="System Prompt", value="Du bist ein freundlicher und hilfsbereiter KI-Assistent. Du beantwortest Fragen faktenorientiert und präzise, ohne dabei relevante Fakten auszulassen."),
    ],
    outputs=gr.Textbox(label="Generated Text"),
    title="Text Generation with KafkaLM",
    description="Enter a user prompt and a system prompt to generate text using the KafkaLM model.",
)
# Launch the Gradio app
if __name__ == "__main__":
    iface.launch()