# Streamlit app: Project Prompt Generator
# NOTE: removed non-Python scrape residue (Hugging Face Spaces page header,
# commit hashes, and a line-number gutter) that made this file unparseable.
import torch  # required by torch.load() below — was missing and caused a NameError
import streamlit as st
from transformers import AutoConfig, AutoModel, pipeline

# Build the model from its configuration, then load locally stored weights.
# NOTE(review): "TheBloke/WhiteRabbitNeo-13B-GGUF" is a GGUF (llama.cpp-format)
# repository; AutoConfig/AutoModel cannot load GGUF checkpoints directly, and
# from_config() creates an *untrained* skeleton — confirm the intended model
# source and weights path. The text-generation pipeline below is likely what
# the rest of the app actually expects:
# generator = pipeline("text-generation", model="google/gemma-7b")
config = AutoConfig.from_pretrained("TheBloke/WhiteRabbitNeo-13B-GGUF")
model = AutoModel.from_config(config)
model.load_state_dict(torch.load("path/to/model/weights.bin"))
# Function to generate project prompts.
# Defined BEFORE the UI code below: in the original file the button handler
# called generate_prompts() before this def had executed, raising a NameError.
def generate_prompts(topic, keywords):
    """
    Generates project prompts based on user input.
    Args:
        topic: The main theme or area of the project.
        keywords: A list of relevant keywords chosen by the user.
    Returns:
        A list of generated project prompts.
    """
    # Hoisted out of the loop: the request text is loop-invariant.
    request = f"Generate a project prompt related to {topic} using the keywords {', '.join(keywords)}."
    # NOTE(review): `model` (module-level) is a bare AutoModel; calling it with
    # pipeline-style kwargs (prompt/max_length/num_return_sequences) only works
    # if `model` is actually a text-generation pipeline — confirm upstream setup.
    prompts = []
    for _ in range(3):  # Generate 3 prompts
        result = model(
            prompt=request,
            max_length=150,
            num_return_sequences=1,
        )
        prompts.append(result[0]["generated_text"])
    return prompts


# --- UI (executed top-to-bottom on every Streamlit rerun) ---
st.title("Project Prompt Generator")

# User input fields
topic = st.text_input("Enter a project topic:")
keywords = st.multiselect("Choose relevant keywords:", ["sustainability", "data analysis", "education", "technology"], default=[])

# Generate prompts button
if st.button("Generate Prompts"):
    prompts = generate_prompts(topic, keywords)
    # Display generated prompts
    st.subheader("Generated Prompts:")
    for prompt in prompts:
        st.write(f"* {prompt}")

# NOTE: the original `if __name__ == "__main__": st.run()` was removed —
# Streamlit has no st.run() API (it raised AttributeError). The app is
# launched externally with `streamlit run <this_file>.py`.