DarwinAnim8or committed · verified
Commit e942e28 · 1 Parent(s): e8751fa

Create app.py

Files changed (1)
  1. app.py +51 -0
app.py ADDED
@@ -0,0 +1,51 @@
+ import torch
+ import transformers
+ import gradio as gr
+
+ from transformers import AutoModelForCausalLM, GPT2Tokenizer
+
+ model_name = "DarwinAnim8or/GPT-NoSleep-v2"
+ tokenizer = GPT2Tokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+
+ # Handle padding: GPT-2 has no pad token, so fall back to the EOS token
+ if tokenizer.pad_token_id is None:
+     tokenizer.pad_token_id = tokenizer.eos_token_id
+
+ def generate_story(prompt, max_length=200):
+     """Generates a story continuation from a given prompt."""
+     input_ids = tokenizer.encode(prompt, return_tensors="pt")
+
+     # Set generation parameters (adjust for creativity)
+     output = model.generate(
+         input_ids,
+         max_length=int(max_length),  # Gradio sliders may pass floats
+         num_return_sequences=1,      # Generate a single story
+         no_repeat_ngram_size=2,      # Prevent repetitive phrases
+         do_sample=True,
+         top_k=50,
+         top_p=0.95,
+         temperature=0.8,             # Control randomness (higher = more creative)
+     )
+
+     # Decode the generated story
+     story = tokenizer.decode(output[0], skip_special_tokens=True)
+     return story
+
+ # Gradio Interface
+ with gr.Blocks() as demo:
+     gr.Markdown("## Storyteller: Generate a story from a prompt!")
+     prompt_input = gr.Textbox(label="Enter your story prompt:")
+     story_output = gr.Textbox(label="Generated story:")
+     max_length_slider = gr.Slider(minimum=50, maximum=500, value=200, step=10, label="Max Story Length")
+     generate_button = gr.Button("Generate Story")
+
+     # Event handling
+     generate_button.click(
+         fn=generate_story,
+         inputs=[prompt_input, max_length_slider],
+         outputs=story_output
+     )
+
+ # Launch the demo (customize the sharing options if desired)
+ demo.launch()
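
A minimal local smoke test one might run before pushing, as a sketch: it reuses the same model ID and sampling settings as app.py but skips the Gradio UI entirely. It assumes torch and transformers are installed locally and that the model weights can be downloaded from the Hub; the prompt string is only illustrative.

    # Standalone sanity check: load the model and sample one continuation,
    # mirroring the generation settings used in app.py (no Gradio needed).
    from transformers import AutoModelForCausalLM, GPT2Tokenizer

    model_name = "DarwinAnim8or/GPT-NoSleep-v2"
    tokenizer = GPT2Tokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    if tokenizer.pad_token_id is None:
        tokenizer.pad_token_id = tokenizer.eos_token_id

    prompt = "I woke up to a faint knocking sound under the floorboards."  # illustrative prompt
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    output = model.generate(
        input_ids,
        max_length=120,
        do_sample=True,
        top_k=50,
        top_p=0.95,
        temperature=0.8,
        no_repeat_ngram_size=2,
    )
    print(tokenizer.decode(output[0], skip_special_tokens=True))

If the printed continuation looks reasonable, the Space's generate_story function should behave the same way, since it wraps an identical model.generate call.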