Richard Neuschulz
committed on
Commit
•
4897216
1
Parent(s):
2160a0a
removed spaces
Browse files
app.py
CHANGED
@@ -3,12 +3,12 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
|
|
3 |
import spaces # Import the spaces module for ZeroGPU compatibility
|
4 |
|
5 |
# Define the text generation function
|
6 |
-
|
|
|
|
|
|
|
|
|
7 |
def generate_text(user_input, system_prompt):
|
8 |
-
# Load the model and tokenizer from Hugging Face within the function
|
9 |
-
model_id = "doubledsbv/KafkaLM-8x7B-German-V0.1-AWQ"
|
10 |
-
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
11 |
-
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True).to('cuda') # Move the model to GPU here
|
12 |
|
13 |
# Combine the system prompt and the user input to form the full prompt
|
14 |
full_prompt = f"{system_prompt.strip()}\n\n{user_input.strip()}"
|
|
|
3 |
import spaces # Import the spaces module for ZeroGPU compatibility
|
4 |
|
5 |
# Define the text generation function
|
6 |
+
|
7 |
+
model_id = "doubledsbv/KafkaLM-8x7B-German-V0.1-AWQ"
|
8 |
+
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
9 |
+
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
|
10 |
+
|
11 |
def generate_text(user_input, system_prompt):
|
|
|
|
|
|
|
|
|
12 |
|
13 |
# Combine the system prompt and the user input to form the full prompt
|
14 |
full_prompt = f"{system_prompt.strip()}\n\n{user_input.strip()}"
|