erhanmeydan committed on
Commit b94947a · verified · 1 Parent(s): bfaa7a3

Create app.py

Files changed (1)
  1. app.py +33 -0
app.py ADDED
@@ -0,0 +1,33 @@
+ import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import torch
+
+ # Load the model and tokenizer
+ model_name = "mistralai/Mistral-Small-3.1-24B-Base-2503"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto", torch_dtype=torch.float16)
+
+ # Response generation function
+ def generate_response(prompt):
+     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+     outputs = model.generate(
+         **inputs,
+         max_new_tokens=500,
+         temperature=0.7,
+         top_p=0.9,
+         do_sample=True
+     )
+     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+     return response
+
+ # Gradio interface
+ interface = gr.Interface(
+     fn=generate_response,
+     inputs=gr.Textbox(lines=2, placeholder="Type your question here..."),
+     outputs="text",
+     title="Chat with Mistral-Small-3.1",
+     description="Answers your questions using the Mistral-Small-3.1-24B-Base-2503 model."
+ )
+
+ # Launch the app
+ interface.launch()
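
Once the app is running, the interface can also be queried programmatically. The following is a minimal sketch using gradio_client; the URL (Gradio's default local address) and the example prompt are assumptions, and a deployed Space would instead pass its Space id (e.g. "erhanmeydan/<space-name>", hypothetical) to Client.

from gradio_client import Client

# Assumed local URL (Gradio's default port 7860); replace with the Space id if hosted on the Hub.
client = Client("http://127.0.0.1:7860/")

# gr.Interface exposes its single function under the default endpoint name "/predict".
result = client.predict("Hello, how are you?", api_name="/predict")
print(result)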