JBHF committed on
Commit
620a6fe
·
verified ·
1 Parent(s): 24e5b26

Update crew.py

Browse files
Files changed (1) hide show
  1. crew.py +5 -3
crew.py CHANGED
@@ -41,8 +41,8 @@ class NewsletterGenCrew:
41
  tasks_config = "tasks.yaml"
42
 
43
  def llm(self):
44
- llm = ChatAnthropic(model_name="claude-3-sonnet-20240229", max_tokens=4096) # ORIGINAL
45
- #llm = ChatAnthropic(model_name="claude-3-sonnet-20240229",
46
  # # max_tokens=4096,
47
  # cache=True,
48
  # api_key="sk-ant-[REDACTED]"  # SECURITY(review): a live Anthropic API key was committed here — revoke it immediately and load it from the ANTHROPIC_API_KEY env var instead
@@ -52,6 +52,8 @@ class NewsletterGenCrew:
52
  # jb_anthropic_key_2_13-06-2024:
53
  # ANTHROPIC_API_KEY=sk-ant-[REDACTED]  # SECURITY(review): key exposed in commit history — rotate it; keys cannot be un-leaked by a later edit
54
  # https://console.anthropic.com/settings/usage
 
 
55
 
56
  # llm = ChatGroq(model="llama3-70b-8192")
57
  # https://console.groq.com/docs/rate-limits
@@ -64,7 +66,7 @@ class NewsletterGenCrew:
64
  # supports many more optional parameters. Hover on your `ChatOllama(...)`
65
  # class to view the latest available supported parameters
66
  # llm = ChatOllama(model="llama3")
67
- # llm = ChatOllama(model="mistral:latest")
68
  # check if ollama is running and which LLMs can then be used, run this in Anaconda cmd admin window:
69
  # ollama list
70
  # OUTPUT EXAMPLE:
 
41
  tasks_config = "tasks.yaml"
42
 
43
  def llm(self):
44
+ # llm = ChatAnthropic(model_name="claude-3-sonnet-20240229", max_tokens=4096) # ORIGINAL
45
+ # llm = ChatAnthropic(model_name="claude-3-sonnet-20240229",
46
  # # max_tokens=4096,
47
  # cache=True,
48
  # api_key="sk-ant-[REDACTED]"  # SECURITY(review): live Anthropic API key committed in plain text — revoke it and read it from the environment
 
52
  # jb_anthropic_key_2_13-06-2024:
53
  # ANTHROPIC_API_KEY=sk-ant-[REDACTED]  # SECURITY(review): exposed secret — rotate the key; scrub history with git-filter-repo if feasible
54
  # https://console.anthropic.com/settings/usage
55
+ #
56
+ # BadRequestError: Error code: 400 - {'type': 'error', 'error': {'type': 'invalid_request_error', 'message': 'Your credit balance is too low to access the Claude API. Please go to Plans & Billing to upgrade or purchase credits.'}}
57
 
58
  # llm = ChatGroq(model="llama3-70b-8192")
59
  # https://console.groq.com/docs/rate-limits
 
66
  # supports many more optional parameters. Hover on your `ChatOllama(...)`
67
  # class to view the latest available supported parameters
68
  # llm = ChatOllama(model="llama3")
69
+ llm = ChatOllama(model="mistral:latest")
70
  # check if ollama is running and which LLMs can then be used, run this in Anaconda cmd admin window:
71
  # ollama list
72
  # OUTPUT EXAMPLE: