# agent_1/app.py
from smolagents import CodeAgent, HfApiModel, load_tool, tool
import datetime
import requests
import pytz
import yaml
from tools.final_answer import FinalAnswerTool
from tools.blog_tools import generate_blog_section, improve_writing_style, check_readability, generate_seo_metadata
from tools.search_tools import get_search_tool
from Gradio_UI import GradioUI
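
# tools/search_tools.py wraps smolagents' DuckDuckGoSearchTool (moved there to avoid a circular
# import). A minimal sketch of what get_search_tool might look like (an assumption; the actual
# module may differ):
#
#     from smolagents import DuckDuckGoSearchTool
#
#     def get_search_tool(max_results: int = 3):
#         return DuckDuckGoSearchTool(max_results=max_results)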


@tool
def generate_blog_outline(topic: str) -> str:
    """Creates a structured outline for a blog post with a direct, practical approach

    Args:
        topic: The main topic for the blog post
    """
    try:
        # Research the topic first
        search_tool = get_search_tool(max_results=2)
        research = search_tool.forward(f"{topic} key aspects challenges solutions")

        # Build the outline (the search result above is fetched for context; the structure itself is fixed)
        outline = f"# Blog Outline: {topic}\n\n"
        outline += "## 1. Introduction\n"
        outline += "- Context and why this matters\n"
        outline += "- My experience with this topic\n\n"
        outline += "## 2. Current State\n"
        outline += "- Key developments\n"
        outline += "- Market trends\n\n"
        outline += "## 3. Practical Insights\n"
        outline += "- Real-world applications\n"
        outline += "- Lessons from implementation\n\n"
        outline += "## 4. Looking Forward\n"
        outline += "- Next steps\n"
        outline += "- Recommendations\n\n"
        outline += "## 5. Key Takeaways\n"
        outline += "- Action items\n"
        outline += "- Discussion points\n"
        return outline
    except Exception as e:
        return f"Error generating outline: {str(e)}"


@tool
def suggest_blog_topics(main_theme: str) -> str:
    """Suggests related blog topics based on a theme, drawing from AI product expertise

    Args:
        main_theme: The primary theme or area of interest
    """
    try:
        # Research current trends around the theme
        search_tool = get_search_tool(max_results=2)
        research = search_tool.forward(f"{main_theme} latest developments challenges")

        # Generate topic suggestions around the theme
        suggestions = f"# Blog Topic Ideas: {main_theme}\n\n"
        suggestions += "Based on current trends and my product experience:\n\n"
        suggestions += "1. Practical Implementation:\n"
        suggestions += f"   - Building scalable {main_theme} solutions\n"
        suggestions += "   - Real-world challenges and solutions\n\n"
        suggestions += "2. Product Strategy:\n"
        suggestions += "   - Market fit and user needs\n"
        suggestions += "   - Integration approaches\n\n"
        suggestions += "3. Future Outlook:\n"
        suggestions += "   - Emerging trends\n"
        suggestions += "   - Potential impact\n\n"
        suggestions += "These topics combine current research with hands-on insights."
        return suggestions
    except Exception as e:
        return f"Error suggesting topics: {str(e)}"


@tool
def get_current_time_in_timezone(timezone: str) -> str:
    """Fetches current local time in specified timezone

    Args:
        timezone: Valid timezone (e.g., 'America/New_York')
    """
    try:
        tz = pytz.timezone(timezone)
        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
        return f"The current local time in {timezone} is: {local_time}"
    except Exception as e:
        return f"Error fetching time for timezone '{timezone}': {str(e)}"


@tool
def research_topic(query: str) -> str:
    """Searches the web for information about a specific topic and returns relevant sources

    Args:
        query: The topic to research
    """
    try:
        # Create a fresh search tool instance for each search
        search_tool = get_search_tool(max_results=3)
        # Bias the query toward tech and AI product development
        enhanced_query = f"{query} AI product development"
        results = search_tool.forward(enhanced_query)

        # Format the results in the blog's voice
        response = f"Quick research on {query}:\n\n"
        response += results
        response += "\n\nNote: Always verify current info."
        return response
    except Exception as e:
        return f"Error performing research: {str(e)}"


final_answer = FinalAnswerTool()

model = HfApiModel(
    max_tokens=1000,  # Reduced from 2096 to stay within limits
    temperature=0.7,  # Slightly increased for a more natural, conversational tone
    model_id='Qwen/Qwen2.5-Coder-32B-Instruct',
    custom_role_conversions=None,
)
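
# If the shared Inference API endpoint for this model is overloaded, HfApiModel can also point at a
# dedicated Inference Endpoint by passing its URL as model_id (the URL below is a hypothetical placeholder):
# model = HfApiModel(max_tokens=1000, temperature=0.7, model_id="https://<your-inference-endpoint>")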
with open("prompts.yaml", 'r') as stream:
prompt_templates = yaml.safe_load(stream)
with open("style_guide.yaml", 'r') as stream:
style_guide = yaml.safe_load(stream)

agent = CodeAgent(
    model=model,
    tools=[
        final_answer,
        generate_blog_outline,
        suggest_blog_topics,
        get_current_time_in_timezone,
        generate_blog_section,
        improve_writing_style,
        check_readability,
        generate_seo_metadata,
        research_topic,
    ],
    max_steps=15,
    verbosity_level=1,
    grammar=None,
    planning_interval=None,
    prompt_templates=prompt_templates,  # templates loaded from prompts.yaml above (assumed to be their intended use)
    name="Joséphine's Blog Assistant",
    description="""An AI writing assistant that matches Joséphine's style:
    - Direct and conversational tone
    - Product and AI expertise focus
    - Clear, practical insights
    - Natural, professional voice
    - Gets to the point quickly""",
)
GradioUI(agent).launch()
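
# For a quick headless check (instead of launching the UI), the agent can also be run directly,
# e.g. (hypothetical prompt; assumes an HF token with Inference API access is configured):
# print(agent.run("Suggest three blog topics about shipping AI products"))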