# app2.py — AI blog generator (Streamlit + CrewAI).
# Renamed from app.py to app2.py (commit a9f8d58).
import streamlit as st
from crewai import Agent, Task, Crew, Process
from crewai_tools import WebsiteSearchTool
from dotenv import load_dotenv
import os
# Load variables from a local .env file (if present) so secrets such as
# OPENAI_API_KEY can be supplied without hard-coding them in the source.
load_dotenv()
# ---- Streamlit page layout -------------------------------------------------
st.title("Blog Generator with AI")

# All input widgets live in the sidebar; the context-manager form routes the
# plain st.* calls there.
with st.sidebar:
    st.header("Input")
    # Key is entered masked; it is only applied to the environment once the
    # "Generate Blog" button below is pressed.
    user_api_key = st.text_input("Enter your OpenAI API Key", type="password")
    news_link = st.text_input("Enter News Link", "")
    generate_blog = st.button("Generate Blog")
# ---- Blog generation flow --------------------------------------------------
# Runs when "Generate Blog" is pressed: builds a two-agent CrewAI pipeline
# (researcher -> writer), kicks it off with the news link as {topic}, and
# stores the resulting blog text in st.session_state.
#
# NOTE(review): the original nested the Save/Reject buttons inside
# `if generate_blog:`. Clicking any button reruns the whole script with
# `generate_blog == False`, so the save branch could never execute. The blog
# content is therefore persisted in session_state and the display/action
# section below runs on every rerun.
if generate_blog:
    if user_api_key:
        # crewai/langchain read the key from the environment.
        os.environ["OPENAI_API_KEY"] = user_api_key
        if news_link:
            st.info("Fetching and processing the news...")

            # Tool shared by both agents for reading the news website.
            web_tool = WebsiteSearchTool()

            # Agent that researches the linked article.
            blog_researcher = Agent(
                role='AI Blog Researcher from News Website',
                goal='Get the relevant latest AI related news from News Website',
                verbose=True,
                memory=True,
                backstory=("Expert in understanding videos in AI, Data Science, Machine Learning, and GEN AI."),
                tools=[web_tool],
                allow_delegation=True
            )

            # Agent that turns the research into a blog post.
            blog_writer = Agent(
                role='Blog Writer',
                goal='Narrate compelling tech stories about the News Article and add the reference links at the end of the blog.',
                verbose=True,
                memory=True,
                backstory=(
                    "With a flair for simplifying complex topics, you craft engaging narratives that captivate and educate,"
                    "bringing new discoveries to light in an accessible manner."
                ),
                tools=[web_tool],
                allow_delegation=False
            )

            research_task = Task(
                description=(
                    "Identify the News Article and get detailed information about the News from the website."
                ),
                expected_output='A comprehensive 3-paragraph-long report based on the {topic} of News.',
                tools=[web_tool],
                agent=blog_researcher,
            )

            # Original passed output_file="" here; an empty filename is not a
            # valid output path, so the parameter is omitted entirely.
            write_task = Task(
                description=(
                    "Get the info from the News Website on the topic {topic}."
                ),
                expected_output='Summarize the info from the News website on the topic {topic} and create the content for the blog.',
                tools=[web_tool],
                agent=blog_writer,
                async_execution=False,
            )

            crew = Crew(
                agents=[blog_researcher, blog_writer],
                tasks=[research_task, write_task],
                process=Process.sequential,
                memory=True,
                cache=True,
                max_rpm=100,
                share_crew=True
            )

            try:
                # The news link is injected as {topic} in the task templates.
                result = crew.kickoff(inputs={'topic': news_link})

                # Debug aid: list the attributes exposed by the result object.
                st.subheader("Result Attributes")
                st.write(dir(result))

                try:
                    task_outputs = result.tasks_output
                except AttributeError:
                    st.error("The result object does not have 'tasks_output'.")
                    task_outputs = []

                st.subheader("Task Outputs")
                for idx, task_output in enumerate(task_outputs):
                    st.write(f"Task {idx + 1}:")
                    # st.write handles arbitrary objects; the original used
                    # st.json, which fails on non-JSON-serializable TaskOutput.
                    st.write(task_output)

                # The writer's task is second in the sequential pipeline, so
                # its .raw text is the finished blog.
                try:
                    blog_content = task_outputs[1].raw
                except (IndexError, AttributeError):
                    blog_content = "Unable to fetch blog content from the task outputs."

                # Persist across reruns so the Save/Reject buttons below work.
                st.session_state["blog_content"] = blog_content
            except Exception as e:
                # Top-level boundary for the whole CrewAI run; surface the
                # error in the UI rather than crashing the app.
                st.error(f"An error occurred during the process: {e}")
        else:
            st.error("Please enter a valid news link.")
    else:
        st.error("Please provide your OpenAI API Key.")

# ---- Display & actions -----------------------------------------------------
# Rendered on every rerun (not only right after generation) so that pressing
# Save/Reject — which reruns the script — still finds the generated content.
if "blog_content" in st.session_state:
    st.subheader("Generated Blog")
    st.text_area("Blog Content", value=st.session_state["blog_content"], height=400)

    st.subheader("Actions")
    save_blog = st.button("Save Blog")
    reject_blog = st.button("Reject Blog")

    if save_blog:
        with open("saved_blog.txt", "w") as f:
            f.write(st.session_state["blog_content"])
        st.success("Blog file saved successfully!")
    if reject_blog:
        st.warning("Blog rejected. No file was saved.")