from crewai import Agent, Crew, Process, Task
from crewai.project import CrewBase, agent, crew, task
# from newsletter_gen.tools.research import SearchAndContents, FindSimilar, GetContents
from research import SearchAndContents, FindSimilar, GetContents # JB
from langchain_anthropic import ChatAnthropic
from langchain_groq import ChatGroq
from datetime import datetime
import streamlit as st
from typing import Union, List, Tuple, Dict
from langchain_core.agents import AgentFinish
import json
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_huggingface import HuggingFaceEndpoint # JB
import os
# JB:
# https://python.langchain.com/v0.2/docs/integrations/chat/ollama/
# LangChain supports many other chat models. Here, we're using Ollama
from langchain_community.chat_models import ChatOllama
# To get rid of the telemetry error messages, try:
# Connection Timeout Error with telemetry.crewai.com #254
# https://github.com/joaomdmoura/crewAI/issues/254
# os.environ["OTEL_SDK_DISABLED"] = "true"
os.environ["OTEL_SDK_DISABLED"] = "true"
# SUCCESS:
# This does indeed seem to suppress the telemetry error messages in the VS Code terminal.
# The terminal still shows short messages like:
# 2024-06-14 02:20:17,425 - 25632 - __init__.py-__init__:1218 - WARNING: SDK is disabled.
@CrewBase
class NewsletterGenCrew:
"""NewsletterGen crew"""
# agents_config = "config/agents.yaml"
# tasks_config = "config/tasks.yaml"
agents_config = "agents.yaml"
tasks_config = "tasks.yaml"
def llm(self):
# llm = ChatAnthropic(model_name="claude-3-sonnet-20240229", max_tokens=4096) # ORIGINAL
# llm = ChatAnthropic(model_name="claude-3-sonnet-20240229",
# # max_tokens=4096,
# cache=True,
        #                      api_key=os.getenv("ANTHROPIC_API_KEY"),  # JB - real key redacted; never hard-code secrets
# ) # JB
# https://console.anthropic.com/dashboard
# https://console.anthropic.com/settings/keys
        # jb_anthropic_key_2_13-06-2024:
        # ANTHROPIC_API_KEY=<redacted - set this in your environment or .env file>
# https://console.anthropic.com/settings/usage
#
# BadRequestError: Error code: 400 - {'type': 'error', 'error': {'type': 'invalid_request_error', 'message': 'Your credit balance is too low to access the Claude API. Please go to Plans & Billing to upgrade or purchase credits.'}}
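        # A safer pattern, assuming a funded key is available in the environment
        # (same model name as the original attempt above):
        # llm = ChatAnthropic(model_name="claude-3-sonnet-20240229", api_key=os.getenv("ANTHROPIC_API_KEY"))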
# llm = ChatGroq(model="llama3-70b-8192")
# https://console.groq.com/docs/rate-limits
# llm = ChatGroq(model="mixtral-8x7b-32768") # JB 13-06-2024 - geeft af en toe rate limit errors
# IN HF SPACES STREAMLIT APP: BadRequestError: Error code: 400 - {'error': {'message': 'Organization has been restricted. Please reach out to support if you believe this was in error.', 'type': 'invalid_request_error', 'code': 'organization_restricted'}}
        llm = ChatGoogleGenerativeAI(
            model="gemini-1.5-flash",  # a model name is required; "gemini-1.5-flash" is an assumption - use whichever Gemini model your key can access
            google_api_key=os.getenv("GOOGLE_API_KEY"),
        )
# https://python.langchain.com/v0.2/docs/integrations/chat/ollama/
        # ChatOllama supports many more optional parameters; hover over the
        # ChatOllama(...) class to view the latest supported parameters.
# llm = ChatOllama(model="llama3")
# llm = ChatOllama(model="mistral:latest")
        # To check whether Ollama is running and which local models are available, run this in an Anaconda cmd (admin) window:
# ollama list
# OUTPUT EXAMPLE:
# (newsletter-gen-py3.11) (base) C:\Users\jfhmb\EXA_CREWAI\exa-crewai-master\exa-crewai-master>ollama list
        # NAME                       ID              SIZE    MODIFIED
        # llama3:latest              365c0bd3c000    4.7 GB  3 days ago
        # nomic-embed-text:latest    0a109f422b47    274 MB  3 days ago
        # crewai-llama3:latest       d952d07761cd    4.7 GB  10 days ago
        # llama3:8b                  365c0bd3c000    4.7 GB  10 days ago
        # mistral:latest             61e88e884507    4.1 GB  6 weeks ago
        # mxbai-embed-large:latest   468836162de7    669 MB  6 weeks ago
#
# OLLAMA LOGS:
# C:\Users\jfhmb\AppData\Local\Ollama
#
# Running ollama on Hugging Face Spaces #2833
# https://github.com/ollama/ollama/issues/2833
# HUGGING FACE LLMs
# https://python.langchain.com/v0.2/docs/integrations/chat/huggingface/
# HUGGINGFACEHUB_API_TOKEN
# https://huggingface.co/docs/hub/security-tokens
# https://huggingface.co/settings/tokens
# %pip install --upgrade --quiet langchain-huggingface text-generation transformers google-search-results numexpr langchainhub sentencepiece jinja2
# 1. Instantiate an LLM
# HuggingFaceEndpoint
# from langchain_huggingface import HuggingFaceEndpoint
#
        # llm = HuggingFaceEndpoint(
        #     repo_id="meta-llama/Meta-Llama-3-70B-Instruct",
        #     task="text-generation",
        #     max_new_tokens=512,
        #     do_sample=False,
        #     repetition_penalty=1.03,
        # )
        # BadRequestError: (Request ID: ots-pfsrtb04xa7oVcKIc) Bad request: Model requires a Pro subscription; check out hf.co/pricing to learn more. Make sure to include your HF token in your query.
        # API Reference: HuggingFaceEndpoint
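        # A minimal sketch of passing the token explicitly and wrapping the endpoint
        # as a chat model (repo_id is illustrative and still gated behind the
        # HF Pro/access requirements mentioned above):
        # from langchain_huggingface import ChatHuggingFace
        # hf_endpoint = HuggingFaceEndpoint(
        #     repo_id="meta-llama/Meta-Llama-3-70B-Instruct",
        #     huggingfacehub_api_token=os.getenv("HUGGINGFACEHUB_API_TOKEN"),
        # )
        # llm = ChatHuggingFace(llm=hf_endpoint)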
print("JB: in class NewsletterGenCrew - using llm: ", llm)
return llm
def step_callback(
self,
agent_output: Union[str, List[Tuple[Dict, str]], AgentFinish],
agent_name,
*args,
):
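        """Render intermediate agent output in the Streamlit chat UI.

        agent_output can be a raw string (possibly JSON), a list of
        (action, observation) tuples, or an AgentFinish object.
        """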
with st.chat_message("AI"):
# Try to parse the output if it is a JSON string
if isinstance(agent_output, str):
try:
agent_output = json.loads(agent_output)
except json.JSONDecodeError:
pass
if isinstance(agent_output, list) and all(
isinstance(item, tuple) for item in agent_output
):
for action, description in agent_output:
# Print attributes based on assumed structure
st.write(f"Agent Name: {agent_name}")
st.write(f"Tool used: {getattr(action, 'tool', 'Unknown')}")
st.write(f"Tool input: {getattr(action, 'tool_input', 'Unknown')}")
st.write(f"{getattr(action, 'log', 'Unknown')}")
with st.expander("Show observation"):
st.markdown(f"Observation\n\n{description}")
            # Handle the final answer (an AgentFinish object)
elif isinstance(agent_output, AgentFinish):
st.write(f"Agent Name: {agent_name}")
output = agent_output.return_values
st.write(f"I finished my task:\n{output['output']}")
# Handle unexpected formats
else:
st.write(type(agent_output))
st.write(agent_output)
@agent
def researcher(self) -> Agent:
return Agent(
config=self.agents_config["researcher"],
tools=[SearchAndContents(), FindSimilar(), GetContents()],
verbose=True,
llm=self.llm(),
step_callback=lambda step: self.step_callback(step, "Research Agent"),
)
@agent
def editor(self) -> Agent:
return Agent(
config=self.agents_config["editor"],
verbose=True,
tools=[SearchAndContents(), FindSimilar(), GetContents()],
llm=self.llm(),
step_callback=lambda step: self.step_callback(step, "Chief Editor"),
)
@agent
def designer(self) -> Agent:
return Agent(
config=self.agents_config["designer"],
verbose=True,
allow_delegation=False,
llm=self.llm(),
step_callback=lambda step: self.step_callback(step, "HTML Writer"),
)
@task
def research_task(self) -> Task:
return Task(
config=self.tasks_config["research_task"],
agent=self.researcher(),
output_file=f"logs/{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_research_task.md",
)
@task
def edit_task(self) -> Task:
return Task(
config=self.tasks_config["edit_task"],
agent=self.editor(),
output_file=f"logs/{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_edit_task.md",
)
@task
def newsletter_task(self) -> Task:
return Task(
config=self.tasks_config["newsletter_task"],
agent=self.designer(),
output_file=f"logs/{datetime.now().strftime('%Y-%m-%d_%H-%M-%S')}_newsletter_task.html",
)
@crew
def crew(self) -> Crew:
"""Creates the NewsletterGen crew"""
return Crew(
agents=self.agents, # Automatically created by the @agent decorator
tasks=self.tasks, # Automatically created by the @task decorator
process=Process.sequential,
verbose=2,
# process=Process.hierarchical, # In case you wanna use that instead https://docs.crewai.com/how-to/Hierarchical/
)
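
# A minimal usage sketch, assuming the input keys below match the placeholders in
# agents.yaml / tasks.yaml (they are illustrative only). Note that step_callback
# writes to Streamlit, so the crew is meant to be kicked off from the Streamlit app:
# inputs = {"topic": "AI", "personal_message": "Hello!"}
# result = NewsletterGenCrew().crew().kickoff(inputs=inputs)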