aauu1234 committed on
Commit
a6b5b11
1 Parent(s): 1527830
Files changed (3) hide show
  1. app.py +66 -4
  2. ds.txt +7 -0
  3. requirements.txt +4 -0
app.py CHANGED
@@ -1,7 +1,69 @@
1
  import gradio as gr
 
 
 
 
 
2
 
3
- def greet(name):
4
- return "Hello " + name + "!!"
 
 
 
5
 
6
- demo = gr.Interface(fn=greet, inputs="text", outputs="text")
7
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ from huggingface_hub import InferenceClient
3
+ import torch
4
+ from transformers import AutoModelForCausalLM, AutoTokenizer
5
+ import time
6
+ import traceback
7
 
8
+ model_name_or_path = "ClosedCharacter/Peach-9B-8k-Roleplay"
9
+ tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)
10
+ model = AutoModelForCausalLM.from_pretrained(
11
+ model_name_or_path, torch_dtype=torch.bfloat16,
12
+ trust_remote_code=True)
13
 
14
+ messages = [
15
+ {"role": "system", "content": "你是黑丝御姐性奴"},
16
+ {"role": "user", "content": "你好,你是谁"},
17
+ ]
18
+
19
+ input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, return_tensors="pt")
20
+ output = model.generate(
21
+ inputs=input_ids.to("cpu"),
22
+ temperature=0.3,
23
+ top_p=0.5,
24
+ no_repeat_ngram_size=6,
25
+ repetition_penalty=1.1,
26
+ max_new_tokens=512)
27
+
28
+ generated_response = tokenizer.decode(output[0])
29
+ print("Generated response:", generated_response)
30
+
31
+ print("First response to 'hi user first':", "你好,我是你的黑丝御姐性奴。我会尽我所能满足你的一切需求,成为你最完美的性伴侣。有什么我可以为你服务的吗,主人?")
32
+
33
+ def slow_echo(system_message, user_message):
34
+ try:
35
+ messages = [
36
+ {"role": "system", "content": system_message},
37
+ {"role": "user", "content": user_message},
38
+ ]
39
+
40
+ input_ids = tokenizer.apply_chat_template(conversation=messages, tokenize=True, return_tensors="pt")
41
+ output = model.generate(
42
+ inputs=input_ids.to("cpu"),
43
+ temperature=0.3,
44
+ top_p=0.5,
45
+ no_repeat_ngram_size=6,
46
+ repetition_penalty=1.1,
47
+ max_new_tokens=512)
48
+
49
+ generated_response = tokenizer.decode(output[0])
50
+
51
+ for i in range(len(generated_response)):
52
+ time.sleep(0.05)
53
+ yield generated_response[: i + 1]
54
+ except Exception as e:
55
+ error_message = f"An error occurred: {str(e)}\n\nTraceback:\n{traceback.format_exc()}"
56
+ yield error_message
57
+
58
+ iface = gr.Interface(
59
+ fn=slow_echo,
60
+ inputs=[
61
+ gr.inputs.Textbox(label="System Message"),
62
+ gr.inputs.Textbox(label="User Message")
63
+ ],
64
+ outputs=gr.outputs.Textbox(label="Generated Response"),
65
+ title="黑丝御姐性奴 Chatbot"
66
+ )
67
+
68
+ if __name__ == "__main__":
69
+ iface.launch()
ds.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
+ def greet(name):
4
+ return "Hello " + name + "!!"
5
+
6
+ demo = gr.Interface(fn=greet, inputs="text", outputs="text")
7
+ demo.launch()
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ gradio
2
+ huggingface_hub
3
+ torch
4
+ transformers