soulwax committed
Commit 923696f · 1 Parent(s): ce903e1

Push stable to huggingface

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- .devcontainer/Dockerfile +23 -0
- .devcontainer/devcontainer.json +39 -0
- .env.template +123 -0
- .flake8 +12 -0
- .gitignore +154 -0
- .isort.cfg +10 -0
- .pre-commit-config.yaml +33 -0
- .sourcery.yaml +71 -0
- CONTRIBUTING.md +64 -0
- Dockerfile +23 -0
- LICENSE +21 -0
- README.md +16 -6
- autogpt/__init__.py +0 -0
- autogpt/__main__.py +572 -0
- autogpt/agent.py +304 -0
- autogpt/agent_manager.py +75 -0
- autogpt/ai_config.py +114 -0
- autogpt/ai_functions.py +77 -0
- autogpt/browse.py +198 -0
- autogpt/call_ai_function.py +26 -0
- autogpt/chat.py +175 -0
- autogpt/commands.py +274 -0
- autogpt/config.py +221 -0
- autogpt/data_ingestion.py +95 -0
- autogpt/execute_code.py +105 -0
- autogpt/file_operations.py +141 -0
- autogpt/image_gen.py +67 -0
- autogpt/js/overlay.js +29 -0
- autogpt/json_parser.py +113 -0
- autogpt/json_utils.py +128 -0
- autogpt/llm_utils.py +69 -0
- autogpt/logger.py +195 -0
- autogpt/memory/__init__.py +59 -0
- autogpt/memory/base.py +43 -0
- autogpt/memory/local.py +123 -0
- autogpt/memory/no_memory.py +66 -0
- autogpt/memory/pinecone.py +71 -0
- autogpt/memory/redismem.py +151 -0
- autogpt/prompt.py +108 -0
- autogpt/promptgenerator.py +134 -0
- autogpt/speak.py +120 -0
- autogpt/spinner.py +38 -0
- autogpt/summary.py +69 -0
- autogpt/token_counter.py +72 -0
- autogpt/utils.py +26 -0
- autogpt/web.py +85 -0
- azure.yaml.template +7 -0
- docker-compose.yml +16 -0
- main.py +1 -0
- pyproject.toml +11 -0
.devcontainer/Dockerfile
ADDED
@@ -0,0 +1,23 @@
# [Choice] Python version (use -bullseye variants on local arm64/Apple Silicon): 3, 3.10, 3.9, 3.8, 3.7, 3.6, 3-bullseye, 3.10-bullseye, 3.9-bullseye, 3.8-bullseye, 3.7-bullseye, 3.6-bullseye, 3-buster, 3.10-buster, 3.9-buster, 3.8-buster, 3.7-buster, 3.6-buster
ARG VARIANT=3-bullseye
FROM python:3.8

RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
    # Remove imagemagick due to https://security-tracker.debian.org/tracker/CVE-2019-10131
    && apt-get purge -y imagemagick imagemagick-6-common

# Temporary: Upgrade python packages due to https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-40897
# They are installed by the base image (python) which does not have the patch.
RUN python3 -m pip install --upgrade setuptools

# [Optional] If your pip requirements rarely change, uncomment this section to add them to the image.
# COPY requirements.txt /tmp/pip-tmp/
# RUN pip3 --disable-pip-version-check --no-cache-dir install -r /tmp/pip-tmp/requirements.txt \
#    && rm -rf /tmp/pip-tmp

# [Optional] Uncomment this section to install additional OS packages.
# RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \
#     && apt-get -y install --no-install-recommends <your-package-list-here>

# [Optional] Uncomment this line to install global node packages.
# RUN su vscode -c "source /usr/local/share/nvm/nvm.sh && npm install -g <your-package-here>" 2>&1
.devcontainer/devcontainer.json
ADDED
@@ -0,0 +1,39 @@
{
    "build": {
        "dockerfile": "./Dockerfile",
        "context": "."
    },
    "features": {
        "ghcr.io/devcontainers/features/common-utils:2": {
            "installZsh": "true",
            "username": "vscode",
            "userUid": "1000",
            "userGid": "1000",
            "upgradePackages": "true"
        },
        "ghcr.io/devcontainers/features/python:1": "none",
        "ghcr.io/devcontainers/features/node:1": "none",
        "ghcr.io/devcontainers/features/git:1": {
            "version": "latest",
            "ppa": "false"
        }
    },
    // Configure tool-specific properties.
    "customizations": {
        // Configure properties specific to VS Code.
        "vscode": {
            // Set *default* container specific settings.json values on container create.
            "settings": {
                "python.defaultInterpreterPath": "/usr/local/bin/python"
            }
        }
    },
    // Use 'forwardPorts' to make a list of ports inside the container available locally.
    // "forwardPorts": [],

    // Use 'postCreateCommand' to run commands after the container is created.
    // "postCreateCommand": "pip3 install --user -r requirements.txt",

    // Set `remoteUser` to `root` to connect as root instead. More info: https://aka.ms/vscode-remote/containers/non-root.
    "remoteUser": "vscode"
}
.env.template
ADDED
@@ -0,0 +1,123 @@
################################################################################
### AUTO-GPT - GENERAL SETTINGS
################################################################################
# EXECUTE_LOCAL_COMMANDS - Allow local command execution (Example: False)
EXECUTE_LOCAL_COMMANDS=False
# BROWSE_CHUNK_MAX_LENGTH - When browsing a website, defines the length of the chunks stored in memory
BROWSE_CHUNK_MAX_LENGTH=8192
# BROWSE_SUMMARY_MAX_TOKEN - Defines the maximum token length of the summary generated by the GPT agent when browsing a website
BROWSE_SUMMARY_MAX_TOKEN=300
# USER_AGENT - Defines the user agent used by the requests library to browse websites (string)
# USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
# AI_SETTINGS_FILE - Specifies which AI settings file to use (defaults to ai_settings.yaml)
AI_SETTINGS_FILE=ai_settings.yaml

################################################################################
### LLM PROVIDER
################################################################################

### OPENAI
# OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
# TEMPERATURE - Sets temperature in OpenAI (Default: 1)
# USE_AZURE - Use Azure OpenAI or not (Default: False)
OPENAI_API_KEY=your-openai-api-key
TEMPERATURE=1
USE_AZURE=False

### AZURE
# OPENAI_AZURE_API_BASE - OpenAI API base URL for Azure (Example: https://my-azure-openai-url.com)
# OPENAI_AZURE_API_VERSION - OpenAI API version for Azure (Example: v1)
# OPENAI_AZURE_DEPLOYMENT_ID - OpenAI deployment ID for Azure (Example: my-deployment-id)
# OPENAI_AZURE_CHAT_DEPLOYMENT_ID - OpenAI deployment ID for Azure Chat (Example: my-deployment-id-for-azure-chat)
# OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID - OpenAI deployment ID for Embeddings (Example: my-deployment-id-for-azure-embeddings)
OPENAI_AZURE_API_BASE=your-base-url-for-azure
OPENAI_AZURE_API_VERSION=api-version-for-azure
OPENAI_AZURE_DEPLOYMENT_ID=deployment-id-for-azure
OPENAI_AZURE_CHAT_DEPLOYMENT_ID=deployment-id-for-azure-chat
OPENAI_AZURE_EMBEDDINGS_DEPLOYMENT_ID=deployment-id-for-azure-embeddings

################################################################################
### LLM MODELS
################################################################################

# SMART_LLM_MODEL - Smart language model (Default: gpt-4)
# FAST_LLM_MODEL - Fast language model (Default: gpt-3.5-turbo)
SMART_LLM_MODEL=gpt-4
FAST_LLM_MODEL=gpt-3.5-turbo

### LLM MODEL SETTINGS
# FAST_TOKEN_LIMIT - Fast token limit for OpenAI (Default: 4000)
# SMART_TOKEN_LIMIT - Smart token limit for OpenAI (Default: 8000)
# When using --gpt3only this needs to be set to 4000.
FAST_TOKEN_LIMIT=4000
SMART_TOKEN_LIMIT=8000

################################################################################
### MEMORY
################################################################################

# MEMORY_BACKEND - Memory backend type (Default: local)
MEMORY_BACKEND=local

### PINECONE
# PINECONE_API_KEY - Pinecone API Key (Example: my-pinecone-api-key)
# PINECONE_ENV - Pinecone environment (region) (Example: us-west-2)
PINECONE_API_KEY=your-pinecone-api-key
PINECONE_ENV=your-pinecone-region

### REDIS
# REDIS_HOST - Redis host (Default: localhost)
# REDIS_PORT - Redis port (Default: 6379)
# REDIS_PASSWORD - Redis password (Default: "")
# WIPE_REDIS_ON_START - Wipes data / index on start (Default: False)
# MEMORY_INDEX - Name of the index created in the Redis database (Default: auto-gpt)
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=
WIPE_REDIS_ON_START=False
MEMORY_INDEX=auto-gpt

################################################################################
### IMAGE GENERATION PROVIDER
################################################################################

### OPEN AI
# IMAGE_PROVIDER - Image provider (Example: dalle)
IMAGE_PROVIDER=dalle

### HUGGINGFACE
# STABLE DIFFUSION
# (Default URL: https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4)
# (Set in image_gen.py)
# HUGGINGFACE_API_TOKEN - HuggingFace API token (Example: my-huggingface-api-token)
HUGGINGFACE_API_TOKEN=your-huggingface-api-token

################################################################################
### SEARCH PROVIDER
################################################################################

### GOOGLE
# GOOGLE_API_KEY - Google API key (Example: my-google-api-key)
# CUSTOM_SEARCH_ENGINE_ID - Custom search engine ID (Example: my-custom-search-engine-id)
GOOGLE_API_KEY=your-google-api-key
CUSTOM_SEARCH_ENGINE_ID=your-custom-search-engine-id

################################################################################
### TTS PROVIDER
################################################################################

### MAC OS
# USE_MAC_OS_TTS - Use Mac OS TTS or not (Default: False)
USE_MAC_OS_TTS=False

### STREAMELEMENTS
# USE_BRIAN_TTS - Use Brian TTS or not (Default: False)
USE_BRIAN_TTS=False

### ELEVENLABS
# ELEVENLABS_API_KEY - Eleven Labs API key (Example: my-elevenlabs-api-key)
# ELEVENLABS_VOICE_1_ID - Eleven Labs voice 1 ID (Example: my-voice-id-1)
# ELEVENLABS_VOICE_2_ID - Eleven Labs voice 2 ID (Example: my-voice-id-2)
ELEVENLABS_API_KEY=your-elevenlabs-api-key
ELEVENLABS_VOICE_1_ID=your-voice-id-1
ELEVENLABS_VOICE_2_ID=your-voice-id-2
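As a rough sketch of how a template like this is consumed: the values land in the process environment and are read at startup (the project's `autogpt/config.py` in the file list above is the actual consumer). The snippet below is illustrative only, assuming the `python-dotenv` package; the variable names come from the template, but the defaults and error handling are this sketch's assumptions, not the project's verified `Config` behaviour.

```python
# Illustrative sketch only: reading settings like the .env.template above.
# Assumes the python-dotenv package; defaults mirror the template's comments.
import os

from dotenv import load_dotenv

load_dotenv()  # copies key=value pairs from ./.env into os.environ

openai_api_key = os.getenv("OPENAI_API_KEY")
fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", "4000"))
use_azure = os.getenv("USE_AZURE", "False").lower() == "true"
memory_backend = os.getenv("MEMORY_BACKEND", "local")

# The template ships a placeholder key, so both missing and unedited values fail.
if not openai_api_key or openai_api_key == "your-openai-api-key":
    raise SystemExit("Set OPENAI_API_KEY in .env before running.")
```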
.flake8
ADDED
@@ -0,0 +1,12 @@
[flake8]
max-line-length = 88
extend-ignore = E203
exclude =
    .tox,
    __pycache__,
    *.pyc,
    .env
    venv/*
    .venv/*
    reports/*
    dist/*
.gitignore
ADDED
@@ -0,0 +1,154 @@
## Original ignores
autogpt/keys.py
autogpt/*json
autogpt/node_modules/
autogpt/__pycache__/keys.cpython-310.pyc
package-lock.json
*.pyc
auto_gpt_workspace/*
*.mpeg
.env
azure.yaml
*venv/*
outputs/*
ai_settings.yaml
last_run_ai_settings.yaml
.vscode
.idea/*
auto-gpt.json
log.txt
log-ingestion.txt
logs

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
plugins/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
llama-*
vicuna-*
.isort.cfg
ADDED
@@ -0,0 +1,10 @@
[settings]
profile = black
multi_line_output = 3
include_trailing_comma = True
force_grid_wrap = 0
use_parentheses = True
ensure_newline_before_comments = True
line_length = 88
skip = venv,env,node_modules,.env,.venv,dist
sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
.pre-commit-config.yaml
ADDED
@@ -0,0 +1,33 @@
repos:
  - repo: https://github.com/sourcery-ai/sourcery
    rev: v1.1.0  # Get the latest tag from https://github.com/sourcery-ai/sourcery/tags
    hooks:
      - id: sourcery

  - repo: https://github.com/pre-commit/pre-commit-hooks
    rev: v0.9.2
    hooks:
      - id: check-added-large-files
        args: [ '--maxkb=500' ]
      - id: check-byte-order-marker
      - id: check-case-conflict
      - id: check-merge-conflict
      - id: check-symlinks
      - id: debug-statements

  - repo: local
    hooks:
      - id: isort
        name: isort-local
        entry: isort
        language: python
        types: [ python ]
        exclude: .+/(dist|.venv|venv|build)/.+
        pass_filenames: true
      - id: black
        name: black-local
        entry: black
        language: python
        types: [ python ]
        exclude: .+/(dist|.venv|venv|build)/.+
        pass_filenames: true
.sourcery.yaml
ADDED
@@ -0,0 +1,71 @@
# 🪄 This is your project's Sourcery configuration file.

# You can use it to get Sourcery working in the way you want, such as
# ignoring specific refactorings, skipping directories in your project,
# or writing custom rules.

# 📚 For a complete reference to this file, see the documentation at
# https://docs.sourcery.ai/Configuration/Project-Settings/

# This file was auto-generated by Sourcery on 2023-02-25 at 21:07.

version: '1'  # The schema version of this config file

ignore:  # A list of paths or files which Sourcery will ignore.
  - .git
  - venv
  - .venv
  - build
  - dist
  - env
  - .env
  - .tox

rule_settings:
  enable:
    - default
    - gpsg
  disable: []  # A list of rule IDs Sourcery will never suggest.
  rule_types:
    - refactoring
    - suggestion
    - comment
  python_version: '3.9'  # A string specifying the lowest Python version your project supports. Sourcery will not suggest refactorings requiring a higher Python version.

# rules:  # A list of custom rules Sourcery will include in its analysis.
#   - id: no-print-statements
#     description: Do not use print statements in the test directory.
#     pattern: print(...)
#     language: python
#     replacement:
#     condition:
#     explanation:
#     paths:
#       include:
#         - test
#       exclude:
#         - conftest.py
#     tests: []
#     tags: []

# rule_tags: {}  # Additional rule tags.

# metrics:
#   quality_threshold: 25.0

# github:
#   labels: []
#   ignore_labels:
#     - sourcery-ignore
#   request_review: author
#   sourcery_branch: sourcery/{base_branch}

# clone_detection:
#   min_lines: 3
#   min_duplicates: 2
#   identical_clones_only: false

# proxy:
#   url:
#   ssl_certs_file:
#   no_ssl_verify: false
CONTRIBUTING.md
ADDED
@@ -0,0 +1,64 @@
To contribute to this GitHub project, you can follow these steps:

1. Fork the repository you want to contribute to by clicking the "Fork" button on the project page.

2. Clone the repository to your local machine using the following command:

```
git clone https://github.com/<YOUR-GITHUB-USERNAME>/Auto-GPT
```

3. Install the project requirements:

```
pip install -r requirements.txt
```

4. Install the pre-commit hooks:

```
pre-commit install
```

5. Create a new branch for your changes using the following command:

```
git checkout -b "branch-name"
```

6. Make your changes to the code or documentation.
   - Example: improve the user interface or add documentation.

7. Add the changes to the staging area using the following command:

```
git add .
```

8. Commit the changes with a meaningful commit message using the following command:

```
git commit -m "your commit message"
```

9. Push the changes to your forked repository using the following command:

```
git push origin branch-name
```

10. Go to the GitHub website and navigate to your forked repository.

11. Click the "New pull request" button.

12. Select the branch you just pushed to and the branch you want to merge into on the original repository.

13. Add a description of your changes and click the "Create pull request" button.

14. Wait for the project maintainer to review your changes and provide feedback.

15. Make any necessary changes based on feedback and repeat steps 5-12 until your changes are accepted and merged into the main project.

16. Once your changes are merged, you can update your forked repository and local copy of the repository with the following commands:

```
git fetch upstream
git checkout master
git merge upstream/master
```

Finally, delete the branch you created with the following command:

```
git branch -d branch-name
```

That's it, you made it 🐣⭐⭐
Dockerfile
ADDED
@@ -0,0 +1,23 @@
# Use an official Python base image from the Docker Hub
FROM python:3.11-slim

# Set environment variables
ENV PIP_NO_CACHE_DIR=yes \
    PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1

# Create a non-root user and set permissions
RUN useradd --create-home appuser
WORKDIR /home/appuser
RUN chown appuser:appuser /home/appuser
USER appuser

# Copy the requirements.txt file and install the requirements
COPY --chown=appuser:appuser requirements.txt .
RUN pip install --no-cache-dir --user -r requirements.txt

# Copy the application files
COPY --chown=appuser:appuser autogpt/ .

# Set the entrypoint
ENTRYPOINT ["python", "-m", "autogpt"]
LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2023 Toran Bruce Richards

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
README.md
CHANGED
@@ -1,12 +1,22 @@
 ---
-colorTo: purple
+emoji: 🤖
+colorFrom: blue
+colorTo: indigo
 sdk: streamlit
+python_version: 3.9
+sdk_version: 1.0.0
 app_file: app.py
+app_port: 7860
+fullWidth: false
+models:
+  - openai/gpt-3
+datasets:
+  - wikitext
+tags:
+  - gpt
+  - nlp
+  - text-generation
+pinned: true
 ---

 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
autogpt/__init__.py
ADDED
File without changes
autogpt/__main__.py
ADDED
@@ -0,0 +1,572 @@
import argparse
import json
import logging
import traceback

from colorama import Fore, Style

from autogpt import chat
from autogpt import commands as cmd
from autogpt import speak, utils
from autogpt.ai_config import AIConfig
from autogpt.config import Config
from autogpt.json_parser import fix_and_parse_json
from autogpt.logger import logger
from autogpt.memory import get_memory, get_supported_memory_backends
from autogpt.spinner import Spinner

cfg = Config()
config = None


def check_openai_api_key():
    """Check if the OpenAI API key is set in config.py or as an environment variable."""
    if not cfg.openai_api_key:
        print(
            Fore.RED
            + "Please set your OpenAI API key in .env or as an environment variable."
        )
        print("You can get your key from https://beta.openai.com/account/api-keys")
        exit(1)


def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
    if cfg.speak_mode and cfg.debug_mode:
        speak.say_text(
            "I have received an invalid JSON response from the OpenAI API. "
            "Trying to fix it now."
        )
    logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n")

    try:
        # Use regex to search for JSON objects
        import regex

        json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
        json_match = json_pattern.search(json_string)

        if json_match:
            # Extract the valid JSON object from the string
            json_string = json_match.group(0)
            logger.typewriter_log(
                title="Apparently json was fixed.", title_color=Fore.GREEN
            )
            if cfg.speak_mode and cfg.debug_mode:
                speak.say_text("Apparently json was fixed.")
        else:
            raise ValueError("No valid JSON object found")

    except (json.JSONDecodeError, ValueError):
        if cfg.debug_mode:
            logger.error("Error: Invalid JSON: %s\n", json_string)
        if cfg.speak_mode:
            speak.say_text("Didn't work. I will have to ignore this response then.")
        logger.error("Error: Invalid JSON, setting it to empty JSON now.\n")
        json_string = {}

    return json_string


def print_assistant_thoughts(assistant_reply):
    """Prints the assistant's thoughts to the console"""
    global ai_name
    global cfg
    try:
        try:
            # Parse and print Assistant response
            assistant_reply_json = fix_and_parse_json(assistant_reply)
        except json.JSONDecodeError:
            logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply)
            assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
                assistant_reply
            )
            assistant_reply_json = fix_and_parse_json(assistant_reply_json)

        # Check if assistant_reply_json is a string and attempt to parse it into a
        # JSON object
        if isinstance(assistant_reply_json, str):
            try:
                assistant_reply_json = json.loads(assistant_reply_json)
            except json.JSONDecodeError:
                logger.error("Error: Invalid JSON\n", assistant_reply)
                assistant_reply_json = (
                    attempt_to_fix_json_by_finding_outermost_brackets(
                        assistant_reply_json
                    )
                )

        assistant_thoughts_reasoning = None
        assistant_thoughts_plan = None
        assistant_thoughts_speak = None
        assistant_thoughts_criticism = None
        assistant_thoughts = assistant_reply_json.get("thoughts", {})
        assistant_thoughts_text = assistant_thoughts.get("text")

        if assistant_thoughts:
            assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
            assistant_thoughts_plan = assistant_thoughts.get("plan")
            assistant_thoughts_criticism = assistant_thoughts.get("criticism")
            assistant_thoughts_speak = assistant_thoughts.get("speak")

        logger.typewriter_log(
            f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
        )
        logger.typewriter_log(
            "REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}"
        )

        if assistant_thoughts_plan:
            logger.typewriter_log("PLAN:", Fore.YELLOW, "")
            # If it's a list, join it into a string
            if isinstance(assistant_thoughts_plan, list):
                assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
            elif isinstance(assistant_thoughts_plan, dict):
                assistant_thoughts_plan = str(assistant_thoughts_plan)

            # Split the input_string using the newline character and dashes
            lines = assistant_thoughts_plan.split("\n")
            for line in lines:
                line = line.lstrip("- ")
                logger.typewriter_log("- ", Fore.GREEN, line.strip())

        logger.typewriter_log(
            "CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}"
        )
        # Speak the assistant's thoughts
        if cfg.speak_mode and assistant_thoughts_speak:
            speak.say_text(assistant_thoughts_speak)

        return assistant_reply_json
    except json.decoder.JSONDecodeError:
        call_stack = traceback.format_exc()
        logger.error("Error: Invalid JSON\n", assistant_reply)
        logger.error("Traceback: \n", call_stack)
        if cfg.speak_mode:
            speak.say_text(
                "I have received an invalid JSON response from the OpenAI API."
                " I cannot ignore this response."
            )

    # All other errors, return "Error: + error message"
    except Exception:
        call_stack = traceback.format_exc()
        logger.error("Error: \n", call_stack)


def construct_prompt():
    """Construct the prompt for the AI to respond to"""
    config: AIConfig = AIConfig.load(cfg.ai_settings_file)
    if cfg.skip_reprompt and config.ai_name:
        logger.typewriter_log("Name :", Fore.GREEN, config.ai_name)
        logger.typewriter_log("Role :", Fore.GREEN, config.ai_role)
        logger.typewriter_log("Goals:", Fore.GREEN, f"{config.ai_goals}")
    elif config.ai_name:
        logger.typewriter_log(
            "Welcome back! ",
            Fore.GREEN,
            f"Would you like me to return to being {config.ai_name}?",
            speak_text=True,
        )
        should_continue = utils.clean_input(
            f"""Continue with the last settings?
Name: {config.ai_name}
Role: {config.ai_role}
Goals: {config.ai_goals}
Continue (y/n): """
        )
        if should_continue.lower() == "n":
            config = AIConfig()

    if not config.ai_name:
        config = prompt_user()
        config.save()

    # Get rid of this global:
    global ai_name
    ai_name = config.ai_name

    return config.construct_full_prompt()


def prompt_user():
    """Prompt the user for input"""
    ai_name = ""
    # Construct the prompt
    logger.typewriter_log(
        "Welcome to Auto-GPT! ",
        Fore.GREEN,
        "Enter the name of your AI and its role below. Entering nothing will load"
        " defaults.",
        speak_text=True,
    )

    # Get AI Name from User
    logger.typewriter_log(
        "Name your AI: ", Fore.GREEN, "For example, 'Entrepreneur-GPT'"
    )
    ai_name = utils.clean_input("AI Name: ")
    if ai_name == "":
        ai_name = "Entrepreneur-GPT"

    logger.typewriter_log(
        f"{ai_name} here!", Fore.LIGHTBLUE_EX, "I am at your service.", speak_text=True
    )

    # Get AI Role from User
    logger.typewriter_log(
        "Describe your AI's role: ",
        Fore.GREEN,
        "For example, 'an AI designed to autonomously develop and run businesses with"
        " the sole goal of increasing your net worth.'",
    )
    ai_role = utils.clean_input(f"{ai_name} is: ")
    if ai_role == "":
        ai_role = (
            "an AI designed to autonomously develop and run businesses with the"
            " sole goal of increasing your net worth."
        )

    # Enter up to 5 goals for the AI
    logger.typewriter_log(
        "Enter up to 5 goals for your AI: ",
        Fore.GREEN,
        "For example: \nIncrease net worth, Grow Twitter Account, Develop and manage"
        " multiple businesses autonomously'",
    )
    print("Enter nothing to load defaults, enter nothing when finished.", flush=True)
    ai_goals = []
    for i in range(5):
        ai_goal = utils.clean_input(f"{Fore.LIGHTBLUE_EX}Goal{Style.RESET_ALL} {i+1}: ")
        if ai_goal == "":
            break
        ai_goals.append(ai_goal)
    if len(ai_goals) == 0:
        ai_goals = [
            "Increase net worth",
            "Grow Twitter Account",
            "Develop and manage multiple businesses autonomously",
        ]

    config = AIConfig(ai_name, ai_role, ai_goals)
    return config


def parse_arguments():
    """Parses the arguments passed to the script"""
    global cfg
    cfg.set_debug_mode(False)
    cfg.set_continuous_mode(False)
    cfg.set_speak_mode(False)

    parser = argparse.ArgumentParser(description="Process arguments.")
    parser.add_argument(
        "--continuous", "-c", action="store_true", help="Enable Continuous Mode"
    )
    parser.add_argument(
        "--continuous-limit",
        "-l",
        type=int,
        dest="continuous_limit",
        help="Defines the number of times to run in continuous mode",
    )
    parser.add_argument("--speak", action="store_true", help="Enable Speak Mode")
    parser.add_argument("--debug", action="store_true", help="Enable Debug Mode")
    parser.add_argument(
        "--gpt3only", action="store_true", help="Enable GPT3.5 Only Mode"
    )
    parser.add_argument("--gpt4only", action="store_true", help="Enable GPT4 Only Mode")
    parser.add_argument(
        "--use-memory",
        "-m",
        dest="memory_type",
        help="Defines which Memory backend to use",
    )
    parser.add_argument(
        "--skip-reprompt",
        "-y",
        dest="skip_reprompt",
        action="store_true",
        help="Skips the re-prompting messages at the beginning of the script",
    )
    parser.add_argument(
        "--ai-settings",
        "-C",
        dest="ai_settings_file",
        help="Specifies which ai_settings.yaml file to use, will also automatically"
        " skip the re-prompt.",
    )
    args = parser.parse_args()

    if args.debug:
        logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
        cfg.set_debug_mode(True)

    if args.continuous:
        logger.typewriter_log("Continuous Mode: ", Fore.RED, "ENABLED")
        logger.typewriter_log(
            "WARNING: ",
            Fore.RED,
            "Continuous mode is not recommended. It is potentially dangerous and may"
            " cause your AI to run forever or carry out actions you would not usually"
            " authorise. Use at your own risk.",
        )
        cfg.set_continuous_mode(True)

        if args.continuous_limit:
            logger.typewriter_log(
                "Continuous Limit: ", Fore.GREEN, f"{args.continuous_limit}"
            )
            cfg.set_continuous_limit(args.continuous_limit)

    # Check if continuous limit is used without continuous mode
    if args.continuous_limit and not args.continuous:
        parser.error("--continuous-limit can only be used with --continuous")

    if args.speak:
        logger.typewriter_log("Speak Mode: ", Fore.GREEN, "ENABLED")
        cfg.set_speak_mode(True)

    if args.gpt3only:
        logger.typewriter_log("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
        cfg.set_smart_llm_model(cfg.fast_llm_model)

    if args.gpt4only:
        logger.typewriter_log("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
        cfg.set_fast_llm_model(cfg.smart_llm_model)

    if args.memory_type:
        supported_memory = get_supported_memory_backends()
        chosen = args.memory_type
        if chosen not in supported_memory:
            logger.typewriter_log(
                "ONLY THE FOLLOWING MEMORY BACKENDS ARE SUPPORTED: ",
                Fore.RED,
                f"{supported_memory}",
            )
            logger.typewriter_log("Defaulting to: ", Fore.YELLOW, cfg.memory_backend)
        else:
            cfg.memory_backend = chosen

    if args.skip_reprompt:
        logger.typewriter_log("Skip Re-prompt: ", Fore.GREEN, "ENABLED")
        cfg.skip_reprompt = True

    if args.ai_settings_file:
        file = args.ai_settings_file

        # Validate file
        (validated, message) = utils.validate_yaml_file(file)
        if not validated:
            logger.typewriter_log("FAILED FILE VALIDATION", Fore.RED, message)
            logger.double_check()
            exit(1)

        logger.typewriter_log("Using AI Settings File:", Fore.GREEN, file)
        cfg.ai_settings_file = file
        cfg.skip_reprompt = True


def main():
    global ai_name, memory
    # TODO: fill in llm values here
    check_openai_api_key()
    parse_arguments()
    logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO)
    ai_name = ""
    prompt = construct_prompt()
    # print(prompt)
    # Initialize variables
    full_message_history = []
    next_action_count = 0
    # Make a constant:
    user_input = (
        "Determine which next command to use, and respond using the"
        " format specified above:"
    )
    # Initialize memory and make sure it is empty.
    # this is particularly important for indexing and referencing pinecone memory
    memory = get_memory(cfg, init=True)
    print(f"Using memory of type: {memory.__class__.__name__}")
    agent = Agent(
        ai_name=ai_name,
        memory=memory,
        full_message_history=full_message_history,
        next_action_count=next_action_count,
        prompt=prompt,
        user_input=user_input,
    )
    agent.start_interaction_loop()


class Agent:
    """Agent class for interacting with Auto-GPT.

    Attributes:
        ai_name: The name of the agent.
        memory: The memory object to use.
        full_message_history: The full message history.
        next_action_count: The number of actions to execute.
        prompt: The prompt to use.
        user_input: The user input.

    """

    def __init__(
        self,
        ai_name,
        memory,
        full_message_history,
        next_action_count,
        prompt,
        user_input,
    ):
        self.ai_name = ai_name
        self.memory = memory
        self.full_message_history = full_message_history
        self.next_action_count = next_action_count
        self.prompt = prompt
        self.user_input = user_input

    def start_interaction_loop(self):
        # Interaction Loop
        loop_count = 0
        command_name = None
        arguments = None
        while True:
            # Discontinue if continuous limit is reached
            loop_count += 1
            if (
                cfg.continuous_mode
                and cfg.continuous_limit > 0
                and loop_count > cfg.continuous_limit
            ):
                logger.typewriter_log(
                    "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}"
                )
                break

            # Send message to AI, get response
            with Spinner("Thinking... "):
                assistant_reply = chat.chat_with_ai(
                    self.prompt,
                    self.user_input,
                    self.full_message_history,
                    self.memory,
                    cfg.fast_token_limit,
                )  # TODO: This hardcodes the model to use GPT3.5. Make this an argument

            # Print Assistant thoughts
            print_assistant_thoughts(assistant_reply)

            # Get command name and arguments
            try:
                command_name, arguments = cmd.get_command(
                    attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)
                )
                if cfg.speak_mode:
                    speak.say_text(f"I want to execute {command_name}")
            except Exception as e:
                logger.error("Error: \n", str(e))

            if not cfg.continuous_mode and self.next_action_count == 0:
                ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
                # Get key press: Prompt the user to press enter to continue or escape
                # to exit
                self.user_input = ""
                logger.typewriter_log(
                    "NEXT ACTION: ",
                    Fore.CYAN,
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}"
                    f" ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                )
                print(
                    "Enter 'y' to authorise command, 'y -N' to run N continuous"
                    " commands, 'n' to exit program, or enter feedback for"
                    f" {self.ai_name}...",
                    flush=True,
                )
                while True:
                    console_input = utils.clean_input(
                        Fore.MAGENTA + "Input:" + Style.RESET_ALL
                    )
                    if console_input.lower().rstrip() == "y":
                        self.user_input = "GENERATE NEXT COMMAND JSON"
                        break
                    elif console_input.lower().startswith("y -"):
                        try:
                            self.next_action_count = abs(
                                int(console_input.split(" ")[1])
                            )
                            self.user_input = "GENERATE NEXT COMMAND JSON"
                        except ValueError:
                            print(
                                "Invalid input format. Please enter 'y -n' where n"
                                " is the number of continuous tasks."
                            )
                            continue
                        break
                    elif console_input.lower() == "n":
                        self.user_input = "EXIT"
                        break
                    else:
                        self.user_input = console_input
                        command_name = "human_feedback"
                        break

                if self.user_input == "GENERATE NEXT COMMAND JSON":
                    logger.typewriter_log(
                        "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
                        Fore.MAGENTA,
                        "",
                    )
                elif self.user_input == "EXIT":
                    print("Exiting...", flush=True)
                    break
            else:
                # Print command
                logger.typewriter_log(
                    "NEXT ACTION: ",
                    Fore.CYAN,
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}"
                    f" ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                )

            # Execute command
            if command_name is not None and command_name.lower().startswith("error"):
                result = (
                    f"Command {command_name} threw the following error: {arguments}"
                )
            elif command_name == "human_feedback":
                result = f"Human feedback: {self.user_input}"
            else:
                result = (
                    f"Command {command_name} "
                    f"returned: {cmd.execute_command(command_name, arguments)}"
                )
                if self.next_action_count > 0:
                    self.next_action_count -= 1

            memory_to_add = (
                f"Assistant Reply: {assistant_reply} "
                f"\nResult: {result} "
                f"\nHuman Feedback: {self.user_input} "
            )

            self.memory.add(memory_to_add)

            # If there's a result from the command, append it to the message
            # history
            if result is not None:
                self.full_message_history.append(
                    chat.create_chat_message("system", result)
                )
                logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
            else:
                self.full_message_history.append(
                    chat.create_chat_message("system", "Unable to execute command")
                )
                logger.typewriter_log(
                    "SYSTEM: ", Fore.YELLOW, "Unable to execute command"
                )


if __name__ == "__main__":
    main()
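A note on `attempt_to_fix_json_by_finding_outermost_brackets` above: it relies on the third-party `regex` package's recursive construct `(?R)`, which the standard-library `re` module cannot express. Below is a minimal, self-contained sketch of the same trick; only the pattern is taken from the file above, and the sample reply string is invented for illustration.

```python
# Stand-alone demo of the recursive-regex JSON extraction used above.
# Requires the third-party `regex` package (`pip install regex`).
import json

import regex

# (?R) re-enters the whole pattern, so the expression matches a balanced
# {...} block even when nested objects appear inside it.
JSON_PATTERN = regex.compile(r"\{(?:[^{}]|(?R))*\}")

# Invented sample reply: prose wrapped around a JSON payload.
reply = 'Sure! {"thoughts": {"text": "ok", "plan": ["step 1", "step 2"]}} Hope that helps.'

match = JSON_PATTERN.search(reply)
if match:
    parsed = json.loads(match.group(0))
    print(parsed["thoughts"]["plan"])  # ['step 1', 'step 2']
```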
autogpt/agent.py
ADDED
@@ -0,0 +1,304 @@
import json
import regex
import traceback

from colorama import Fore, Style

from autogpt.chat import chat_with_ai, create_chat_message
import autogpt.commands as cmd
from autogpt.config import Config
from autogpt.json_parser import fix_and_parse_json
from autogpt.logger import logger
from autogpt.speak import say_text
from autogpt.spinner import Spinner
from autogpt.utils import clean_input


class Agent:
    """Agent class for interacting with Auto-GPT.

    Attributes:
        ai_name: The name of the agent.
        memory: The memory object to use.
        full_message_history: The full message history.
        next_action_count: The number of actions to execute.
        prompt: The prompt to use.
        user_input: The user input.

    """

    def __init__(
        self,
        ai_name,
        memory,
        full_message_history,
        next_action_count,
        prompt,
        user_input,
    ):
        self.ai_name = ai_name
        self.memory = memory
        self.full_message_history = full_message_history
        self.next_action_count = next_action_count
        self.prompt = prompt
        self.user_input = user_input

    def start_interaction_loop(self):
        # Interaction Loop
        cfg = Config()
        loop_count = 0
        command_name = None
        arguments = None
        while True:
            # Discontinue if continuous limit is reached
            loop_count += 1
            if (
                cfg.continuous_mode
                and cfg.continuous_limit > 0
                and loop_count > cfg.continuous_limit
            ):
                logger.typewriter_log(
                    "Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}"
                )
                break

            # Send message to AI, get response
            with Spinner("Thinking... "):
                assistant_reply = chat_with_ai(
                    self.prompt,
                    self.user_input,
                    self.full_message_history,
                    self.memory,
                    cfg.fast_token_limit,
                )  # TODO: This hardcodes the model to use GPT3.5. Make this an argument

            # Print Assistant thoughts
            print_assistant_thoughts(self.ai_name, assistant_reply)

            # Get command name and arguments
            try:
                command_name, arguments = cmd.get_command(
                    attempt_to_fix_json_by_finding_outermost_brackets(assistant_reply)
                )
                if cfg.speak_mode:
                    say_text(f"I want to execute {command_name}")
            except Exception as e:
                logger.error("Error: \n", str(e))

            if not cfg.continuous_mode and self.next_action_count == 0:
                ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
                # Get key press: Prompt the user to press enter to continue or escape
                # to exit
                self.user_input = ""
                logger.typewriter_log(
                    "NEXT ACTION: ",
                    Fore.CYAN,
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL} "
                    f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                )
                print(
                    "Enter 'y' to authorise command, 'y -N' to run N continuous "
                    "commands, 'n' to exit program, or enter feedback for "
                    f"{self.ai_name}...",
                    flush=True,
                )
                while True:
                    console_input = clean_input(
                        Fore.MAGENTA + "Input:" + Style.RESET_ALL
                    )
                    if console_input.lower().rstrip() == "y":
                        self.user_input = "GENERATE NEXT COMMAND JSON"
                        break
                    elif console_input.lower().startswith("y -"):
                        try:
                            self.next_action_count = abs(
                                int(console_input.split(" ")[1])
                            )
                            self.user_input = "GENERATE NEXT COMMAND JSON"
                        except ValueError:
                            print(
                                "Invalid input format. Please enter 'y -n' where n is"
                                " the number of continuous tasks."
                            )
                            continue
                        break
                    elif console_input.lower() == "n":
                        self.user_input = "EXIT"
                        break
                    else:
                        self.user_input = console_input
                        command_name = "human_feedback"
                        break

                if self.user_input == "GENERATE NEXT COMMAND JSON":
                    logger.typewriter_log(
                        "-=-=-=-=-=-=-= COMMAND AUTHORISED BY USER -=-=-=-=-=-=-=",
                        Fore.MAGENTA,
                        "",
                    )
                elif self.user_input == "EXIT":
                    print("Exiting...", flush=True)
                    break
            else:
                # Print command
                logger.typewriter_log(
                    "NEXT ACTION: ",
                    Fore.CYAN,
                    f"COMMAND = {Fore.CYAN}{command_name}{Style.RESET_ALL}"
                    f" ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
                )

            # Execute command
            if command_name is not None and command_name.lower().startswith("error"):
                result = (
                    f"Command {command_name} threw the following error: {arguments}"
                )
            elif command_name == "human_feedback":
                result = f"Human feedback: {self.user_input}"
            else:
                result = (
                    f"Command {command_name} returned: "
                    f"{cmd.execute_command(command_name, arguments)}"
                )
                if self.next_action_count > 0:
                    self.next_action_count -= 1

            memory_to_add = (
                f"Assistant Reply: {assistant_reply} "
                f"\nResult: {result} "
                f"\nHuman Feedback: {self.user_input} "
            )

            self.memory.add(memory_to_add)

            # If there's a result from the command, append it to the message
            # history
            if result is not None:
                self.full_message_history.append(create_chat_message("system", result))
                logger.typewriter_log("SYSTEM: ", Fore.YELLOW, result)
            else:
                self.full_message_history.append(
                    create_chat_message("system", "Unable to execute command")
                )
                logger.typewriter_log(
                    "SYSTEM: ", Fore.YELLOW, "Unable to execute command"
                )


def attempt_to_fix_json_by_finding_outermost_brackets(json_string):
    cfg = Config()
    if cfg.speak_mode and cfg.debug_mode:
        say_text(
            "I have received an invalid JSON response from the OpenAI API. "
            "Trying to fix it now."
        )
    logger.typewriter_log("Attempting to fix JSON by finding outermost brackets\n")

    try:
        json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
        json_match = json_pattern.search(json_string)

        if json_match:
            # Extract the valid JSON object from the string
            json_string = json_match.group(0)
            logger.typewriter_log(
                title="Apparently json was fixed.", title_color=Fore.GREEN
            )
            if cfg.speak_mode and cfg.debug_mode:
                say_text("Apparently json was fixed.")
        else:
            raise ValueError("No valid JSON object found")

    except (json.JSONDecodeError, ValueError):
        if cfg.speak_mode:
            say_text("Didn't work. I will have to ignore this response then.")
        logger.error("Error: Invalid JSON, setting it to empty JSON now.\n")
        json_string = {}

    return json_string


def print_assistant_thoughts(ai_name, assistant_reply):
    """Prints the assistant's thoughts to the console"""
    cfg = Config()
    try:
        try:
            # Parse and print Assistant response
            assistant_reply_json = fix_and_parse_json(assistant_reply)
        except json.JSONDecodeError:
            logger.error("Error: Invalid JSON in assistant thoughts\n", assistant_reply)
            assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
                assistant_reply
            )
            if isinstance(assistant_reply_json, str):
                assistant_reply_json = fix_and_parse_json(assistant_reply_json)

        # Check if assistant_reply_json is a string and attempt to parse
        # it into a JSON object
        if isinstance(assistant_reply_json, str):
            try:
                assistant_reply_json = json.loads(assistant_reply_json)
            except json.JSONDecodeError:
                logger.error("Error: Invalid JSON\n", assistant_reply)
                assistant_reply_json = (
                    attempt_to_fix_json_by_finding_outermost_brackets(
                        assistant_reply_json
                    )
                )

        assistant_thoughts_reasoning = None
        assistant_thoughts_plan = None
        assistant_thoughts_speak = None
        assistant_thoughts_criticism = None
        if not isinstance(assistant_reply_json, dict):
            assistant_reply_json = {}
        assistant_thoughts = assistant_reply_json.get("thoughts", {})
        assistant_thoughts_text = assistant_thoughts.get("text")
+
|
258 |
+
if assistant_thoughts:
|
259 |
+
assistant_thoughts_reasoning = assistant_thoughts.get("reasoning")
|
260 |
+
assistant_thoughts_plan = assistant_thoughts.get("plan")
|
261 |
+
assistant_thoughts_criticism = assistant_thoughts.get("criticism")
|
262 |
+
assistant_thoughts_speak = assistant_thoughts.get("speak")
|
263 |
+
|
264 |
+
logger.typewriter_log(
|
265 |
+
f"{ai_name.upper()} THOUGHTS:", Fore.YELLOW, f"{assistant_thoughts_text}"
|
266 |
+
)
|
267 |
+
logger.typewriter_log(
|
268 |
+
"REASONING:", Fore.YELLOW, f"{assistant_thoughts_reasoning}"
|
269 |
+
)
|
270 |
+
|
271 |
+
if assistant_thoughts_plan:
|
272 |
+
logger.typewriter_log("PLAN:", Fore.YELLOW, "")
|
273 |
+
# If it's a list, join it into a string
|
274 |
+
if isinstance(assistant_thoughts_plan, list):
|
275 |
+
assistant_thoughts_plan = "\n".join(assistant_thoughts_plan)
|
276 |
+
elif isinstance(assistant_thoughts_plan, dict):
|
277 |
+
assistant_thoughts_plan = str(assistant_thoughts_plan)
|
278 |
+
|
279 |
+
# Split the input_string using the newline character and dashes
|
280 |
+
lines = assistant_thoughts_plan.split("\n")
|
281 |
+
for line in lines:
|
282 |
+
line = line.lstrip("- ")
|
283 |
+
logger.typewriter_log("- ", Fore.GREEN, line.strip())
|
284 |
+
|
285 |
+
logger.typewriter_log(
|
286 |
+
"CRITICISM:", Fore.YELLOW, f"{assistant_thoughts_criticism}"
|
287 |
+
)
|
288 |
+
# Speak the assistant's thoughts
|
289 |
+
if cfg.speak_mode and assistant_thoughts_speak:
|
290 |
+
say_text(assistant_thoughts_speak)
|
291 |
+
|
292 |
+
return assistant_reply_json
|
293 |
+
except json.decoder.JSONDecodeError:
|
294 |
+
logger.error("Error: Invalid JSON\n", assistant_reply)
|
295 |
+
if cfg.speak_mode:
|
296 |
+
say_text(
|
297 |
+
"I have received an invalid JSON response from the OpenAI API."
|
298 |
+
" I cannot ignore this response."
|
299 |
+
)
|
300 |
+
|
301 |
+
# All other errors, return "Error: + error message"
|
302 |
+
except Exception:
|
303 |
+
call_stack = traceback.format_exc()
|
304 |
+
logger.error("Error: \n", call_stack)
|
autogpt/agent_manager.py
ADDED
@@ -0,0 +1,75 @@
from autogpt.llm_utils import create_chat_completion

next_key = 0
agents = {}  # key, (task, full_message_history, model)

# Create new GPT agent
# TODO: Centralise use of create_chat_completion() to globally enforce token limit


def create_agent(task, prompt, model):
    """Create a new agent and return its key"""
    global next_key
    global agents

    messages = [
        {"role": "user", "content": prompt},
    ]

    # Start GPT instance
    agent_reply = create_chat_completion(
        model=model,
        messages=messages,
    )

    # Update full message history
    messages.append({"role": "assistant", "content": agent_reply})

    key = next_key
    # This is done instead of len(agents) to make keys unique even if agents
    # are deleted
    next_key += 1

    agents[key] = (task, messages, model)

    return key, agent_reply


def message_agent(key, message):
    """Send a message to an agent and return its response"""
    global agents

    task, messages, model = agents[int(key)]

    # Add user message to message history before sending to agent
    messages.append({"role": "user", "content": message})

    # Start GPT instance
    agent_reply = create_chat_completion(
        model=model,
        messages=messages,
    )

    # Update full message history
    messages.append({"role": "assistant", "content": agent_reply})

    return agent_reply


def list_agents():
    """Return a list of all agents"""
    global agents

    # Return a list of agent keys and their tasks
    return [(key, task) for key, (task, _, _) in agents.items()]


def delete_agent(key):
    """Delete an agent and return True if successful, False otherwise"""
    global agents

    try:
        del agents[int(key)]
        return True
    except KeyError:
        return False
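Taken together, these four functions implement a small in-memory registry of sub-agents keyed by an ever-increasing integer. A hedged usage sketch; it assumes the autogpt package is importable and a valid OPENAI_API_KEY is configured, since create_chat_completion calls the OpenAI API:

from autogpt import agent_manager

# Spawn a sub-agent; its key stays valid even after other agents are deleted
key, first_reply = agent_manager.create_agent(
    task="summarise articles",
    prompt="You are a summarisation agent. Acknowledge this message.",
    model="gpt-3.5-turbo",
)
print(first_reply)

# Continue the same conversation; the message history is kept per key
print(agent_manager.message_agent(key, "Summarise: The quick brown fox..."))

print(agent_manager.list_agents())      # e.g. [(0, "summarise articles")]
print(agent_manager.delete_agent(key))  # True on success, False otherwise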
autogpt/ai_config.py
ADDED
@@ -0,0 +1,114 @@
import os
from typing import Type

import yaml

from autogpt.prompt import get_prompt


class AIConfig:
    """
    A class object that contains the configuration information for the AI

    Attributes:
        ai_name (str): The name of the AI.
        ai_role (str): The description of the AI's role.
        ai_goals (list): The list of objectives the AI is supposed to complete.
    """

    def __init__(
        self, ai_name: str = "", ai_role: str = "", ai_goals: list = []
    ) -> None:
        """
        Initialize a class instance

        Parameters:
            ai_name (str): The name of the AI.
            ai_role (str): The description of the AI's role.
            ai_goals (list): The list of objectives the AI is supposed to complete.
        Returns:
            None
        """

        self.ai_name = ai_name
        self.ai_role = ai_role
        self.ai_goals = ai_goals

    # Soon this will go in a folder where it remembers more stuff about the run(s)
    SAVE_FILE = os.path.join(os.path.dirname(__file__), "..", "ai_settings.yaml")

    @classmethod
    def load(cls: "Type[AIConfig]", config_file: str = SAVE_FILE) -> "Type[AIConfig]":
        """
        Returns class object with parameters (ai_name, ai_role, ai_goals) loaded from
        the yaml file if it exists, else returns the class with no parameters.

        Parameters:
            cls (class object): An AIConfig Class object.
            config_file (str): The path to the config yaml file.
                DEFAULT: "../ai_settings.yaml"

        Returns:
            cls (object): An instance of given cls object
        """

        try:
            with open(config_file, encoding="utf-8") as file:
                config_params = yaml.load(file, Loader=yaml.FullLoader)
        except FileNotFoundError:
            config_params = {}

        ai_name = config_params.get("ai_name", "")
        ai_role = config_params.get("ai_role", "")
        ai_goals = config_params.get("ai_goals", [])
        # type: Type[AIConfig]
        return cls(ai_name, ai_role, ai_goals)

    def save(self, config_file: str = SAVE_FILE) -> None:
        """
        Saves the class parameters to the specified yaml file path.

        Parameters:
            config_file(str): The path to the config yaml file.
                DEFAULT: "../ai_settings.yaml"

        Returns:
            None
        """

        config = {
            "ai_name": self.ai_name,
            "ai_role": self.ai_role,
            "ai_goals": self.ai_goals,
        }
        with open(config_file, "w", encoding="utf-8") as file:
            yaml.dump(config, file, allow_unicode=True)

    def construct_full_prompt(self) -> str:
        """
        Returns a prompt to the user with the class information in an organized fashion.

        Parameters:
            None

        Returns:
            full_prompt (str): A string containing the initial prompt for the user
              including the ai_name, ai_role and ai_goals.
        """

        prompt_start = (
            "Your decisions must always be made independently without"
            " seeking user assistance. Play to your strengths as an LLM and pursue"
            " simple strategies with no legal complications."
            ""
        )

        # Construct full prompt
        full_prompt = (
            f"You are {self.ai_name}, {self.ai_role}\n{prompt_start}\n\nGOALS:\n\n"
        )
        for i, goal in enumerate(self.ai_goals):
            full_prompt += f"{i+1}. {goal}\n"

        full_prompt += f"\n\n{get_prompt()}"
        return full_prompt
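A short sketch of the save/load round trip; it writes a throwaway yaml file in the working directory and assumes only that PyYAML is installed and the autogpt package is importable (the prompt module is pulled in at import time). The file name and goal strings are illustrative:

from autogpt.ai_config import AIConfig

config = AIConfig(
    ai_name="ResearchGPT",
    ai_role="an AI that gathers and summarises research papers",
    ai_goals=["Find relevant papers", "Summarise key findings"],
)
config.save("ai_settings_demo.yaml")

# load() returns a fresh instance populated from the yaml file,
# or an empty config when the file is missing
loaded = AIConfig.load("ai_settings_demo.yaml")
print(loaded.ai_name)   # ResearchGPT
print(loaded.ai_goals)  # ['Find relevant papers', 'Summarise key findings']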
autogpt/ai_functions.py
ADDED
@@ -0,0 +1,77 @@
import json
from typing import List

from autogpt.call_ai_function import call_ai_function
from autogpt.config import Config

cfg = Config()


def evaluate_code(code: str) -> List[str]:
    """
    A function that takes in a string and returns a response from create chat
    completion api call.

    Parameters:
        code (str): Code to be evaluated.
    Returns:
        A result string from create chat completion. A list of suggestions to
        improve the code.
    """

    function_string = "def analyze_code(code: str) -> List[str]:"
    args = [code]
    description_string = (
        "Analyzes the given code and returns a list of suggestions for improvements."
    )

    return call_ai_function(function_string, args, description_string)


def improve_code(suggestions: List[str], code: str) -> str:
    """
    A function that takes in code and suggestions and returns a response from create
    chat completion api call.

    Parameters:
        suggestions (List): A list of suggestions around what needs to be improved.
        code (str): Code to be improved.
    Returns:
        A result string from create chat completion. Improved code in response.
    """

    function_string = (
        "def generate_improved_code(suggestions: List[str], code: str) -> str:"
    )
    args = [json.dumps(suggestions), code]
    description_string = (
        "Improves the provided code based on the suggestions"
        " provided, making no other changes."
    )

    return call_ai_function(function_string, args, description_string)


def write_tests(code: str, focus: List[str]) -> str:
    """
    A function that takes in code and focus topics and returns a response from create
    chat completion api call.

    Parameters:
        focus (List): A list of suggestions around what needs to be improved.
        code (str): Code for test cases to be generated against.
    Returns:
        A result string from create chat completion. Test cases for the submitted code
        in response.
    """

    function_string = (
        "def create_test_cases(code: str, focus: Optional[str] = None) -> str:"
    )
    args = [code, json.dumps(focus)]
    description_string = (
        "Generates test cases for the existing code, focusing on"
        " specific areas if required."
    )

    return call_ai_function(function_string, args, description_string)
autogpt/browse.py
ADDED
@@ -0,0 +1,198 @@
from urllib.parse import urljoin, urlparse

import requests
from bs4 import BeautifulSoup

from autogpt.config import Config
from autogpt.llm_utils import create_chat_completion
from autogpt.memory import get_memory

cfg = Config()
memory = get_memory(cfg)

session = requests.Session()
session.headers.update({"User-Agent": cfg.user_agent})


# Function to check if the URL is valid
def is_valid_url(url):
    try:
        result = urlparse(url)
        return all([result.scheme, result.netloc])
    except ValueError:
        return False


# Function to sanitize the URL
def sanitize_url(url):
    return urljoin(url, urlparse(url).path)


# Define and check for local file address prefixes
def check_local_file_access(url):
    local_prefixes = [
        "file:///",
        "file://localhost",
        "http://localhost",
        "https://localhost",
    ]
    return any(url.startswith(prefix) for prefix in local_prefixes)


def get_response(url, timeout=10):
    try:
        # Restrict access to local files
        if check_local_file_access(url):
            raise ValueError("Access to local files is restricted")

        # Most basic check if the URL is valid:
        if not url.startswith("http://") and not url.startswith("https://"):
            raise ValueError("Invalid URL format")

        sanitized_url = sanitize_url(url)

        response = session.get(sanitized_url, timeout=timeout)

        # Check if the response contains an HTTP error
        if response.status_code >= 400:
            return None, "Error: HTTP " + str(response.status_code) + " error"

        return response, None
    except ValueError as ve:
        # Handle invalid URL format
        return None, "Error: " + str(ve)

    except requests.exceptions.RequestException as re:
        # Handle exceptions related to the HTTP request
        # (e.g., connection errors, timeouts, etc.)
        return None, "Error: " + str(re)


def scrape_text(url):
    """Scrape text from a webpage"""
    response, error_message = get_response(url)
    if error_message:
        return error_message
    if not response:
        return "Error: Could not get response"

    soup = BeautifulSoup(response.text, "html.parser")

    for script in soup(["script", "style"]):
        script.extract()

    text = soup.get_text()
    lines = (line.strip() for line in text.splitlines())
    chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
    text = "\n".join(chunk for chunk in chunks if chunk)

    return text


def extract_hyperlinks(soup):
    """Extract hyperlinks from a BeautifulSoup object"""
    hyperlinks = []
    for link in soup.find_all("a", href=True):
        hyperlinks.append((link.text, link["href"]))
    return hyperlinks


def format_hyperlinks(hyperlinks):
    """Format hyperlinks into a list of strings"""
    formatted_links = []
    for link_text, link_url in hyperlinks:
        formatted_links.append(f"{link_text} ({link_url})")
    return formatted_links


def scrape_links(url):
    """Scrape links from a webpage"""
    response, error_message = get_response(url)
    if error_message:
        return error_message
    if not response:
        return "Error: Could not get response"
    soup = BeautifulSoup(response.text, "html.parser")

    for script in soup(["script", "style"]):
        script.extract()

    hyperlinks = extract_hyperlinks(soup)

    return format_hyperlinks(hyperlinks)


def split_text(text, max_length=cfg.browse_chunk_max_length):
    """Split text into chunks of a maximum length"""
    paragraphs = text.split("\n")
    current_length = 0
    current_chunk = []

    for paragraph in paragraphs:
        if current_length + len(paragraph) + 1 <= max_length:
            current_chunk.append(paragraph)
            current_length += len(paragraph) + 1
        else:
            yield "\n".join(current_chunk)
            current_chunk = [paragraph]
            current_length = len(paragraph) + 1

    if current_chunk:
        yield "\n".join(current_chunk)


def create_message(chunk, question):
    """Create a message for the user to summarize a chunk of text"""
    return {
        "role": "user",
        "content": f'"""{chunk}""" Using the above text, please answer the following'
        f' question: "{question}" -- if the question cannot be answered using the'
        " text, please summarize the text.",
    }


def summarize_text(url, text, question):
    """Summarize text using the LLM model"""
    if not text:
        return "Error: No text to summarize"

    text_length = len(text)
    print(f"Text length: {text_length} characters")

    summaries = []
    chunks = list(split_text(text))

    for i, chunk in enumerate(chunks):
        print(f"Adding chunk {i + 1} / {len(chunks)} to memory")

        memory_to_add = f"Source: {url}\n" f"Raw content part#{i + 1}: {chunk}"

        memory.add(memory_to_add)

        print(f"Summarizing chunk {i + 1} / {len(chunks)}")
        messages = [create_message(chunk, question)]

        summary = create_chat_completion(
            model=cfg.fast_llm_model,
            messages=messages,
            max_tokens=cfg.browse_summary_max_token,
        )
        summaries.append(summary)
        print(f"Added chunk {i + 1} summary to memory")

        memory_to_add = f"Source: {url}\n" f"Content summary part#{i + 1}: {summary}"

        memory.add(memory_to_add)

    print(f"Summarized {len(chunks)} chunks.")

    combined_summary = "\n".join(summaries)
    messages = [create_message(combined_summary, question)]

    final_summary = create_chat_completion(
        model=cfg.fast_llm_model,
        messages=messages,
        max_tokens=cfg.browse_summary_max_token,
    )

    return final_summary
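The two URL helpers at the top of this file are plain urllib logic. A self-contained illustration of what they accept and how sanitize_url drops query strings and fragments, runnable with the stdlib alone:

from urllib.parse import urljoin, urlparse

def is_valid_url(url):
    # A URL is considered valid when it has both a scheme and a host
    try:
        result = urlparse(url)
        return all([result.scheme, result.netloc])
    except ValueError:
        return False

def sanitize_url(url):
    # Rejoin the URL against its own path, discarding query string and fragment
    return urljoin(url, urlparse(url).path)

print(is_valid_url("https://example.com/page"))          # True
print(is_valid_url("example.com/page"))                  # False (no scheme)
print(sanitize_url("https://example.com/page?q=1#top"))  # https://example.com/page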
autogpt/call_ai_function.py
ADDED
@@ -0,0 +1,26 @@
from autogpt.config import Config
from autogpt.llm_utils import create_chat_completion

cfg = Config()


# This is a magic function that can do anything with no-code. See
# https://github.com/Torantulino/AI-Functions for more info.
def call_ai_function(function, args, description, model=None) -> str:
    """Call an AI function"""
    if model is None:
        model = cfg.smart_llm_model
    # For each arg, if any are None, convert to "None":
    args = [str(arg) if arg is not None else "None" for arg in args]
    # parse args to comma separated string
    args = ", ".join(args)
    messages = [
        {
            "role": "system",
            "content": f"You are now the following python function: ```# {description}"
            f"\n{function}```\n\nOnly respond with your `return` value.",
        },
        {"role": "user", "content": args},
    ]

    return create_chat_completion(model=model, messages=messages, temperature=0)
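The trick here is purely prompt construction: the model is told to be the named Python function and to reply only with its return value, at temperature 0 for determinism. A standalone sketch that just prints the composed messages without calling any API (the sample function and argument are illustrative):

# Mirror call_ai_function's argument packing: stringify and comma-join
raw_args = ["def add(a, b): return a + b"]
args = ", ".join(str(arg) if arg is not None else "None" for arg in raw_args)

function = "def analyze_code(code: str) -> List[str]:"
description = "Analyzes the given code and returns a list of suggestions for improvements."

messages = [
    {
        "role": "system",
        "content": f"You are now the following python function: ```# {description}"
        f"\n{function}```\n\nOnly respond with your `return` value.",
    },
    {"role": "user", "content": args},
]
for message in messages:
    print(message["role"].upper())
    print(message["content"])
    print()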
autogpt/chat.py
ADDED
@@ -0,0 +1,175 @@
import time

from openai.error import RateLimitError

from autogpt import token_counter
from autogpt.config import Config
from autogpt.llm_utils import create_chat_completion
from autogpt.logger import logger

cfg = Config()


def create_chat_message(role, content):
    """
    Create a chat message with the given role and content.

    Args:
        role (str): The role of the message sender, e.g., "system", "user", or "assistant".
        content (str): The content of the message.

    Returns:
        dict: A dictionary containing the role and content of the message.
    """
    return {"role": role, "content": content}


def generate_context(prompt, relevant_memory, full_message_history, model):
    current_context = [
        create_chat_message("system", prompt),
        create_chat_message(
            "system", f"The current time and date is {time.strftime('%c')}"
        ),
        create_chat_message(
            "system",
            f"This reminds you of these events from your past:\n{relevant_memory}\n\n",
        ),
    ]

    # Add messages from the full message history until we reach the token limit
    next_message_to_add_index = len(full_message_history) - 1
    insertion_index = len(current_context)
    # Count the currently used tokens
    current_tokens_used = token_counter.count_message_tokens(current_context, model)
    return (
        next_message_to_add_index,
        current_tokens_used,
        insertion_index,
        current_context,
    )


# TODO: Change debug from hardcode to argument
def chat_with_ai(
    prompt, user_input, full_message_history, permanent_memory, token_limit
):
    """Interact with the OpenAI API, sending the prompt, user input, message history,
    and permanent memory."""
    while True:
        try:
            """
            Interact with the OpenAI API, sending the prompt, user input,
            message history, and permanent memory.

            Args:
                prompt (str): The prompt explaining the rules to the AI.
                user_input (str): The input from the user.
                full_message_history (list): The list of all messages sent between the
                    user and the AI.
                permanent_memory (Obj): The memory object containing the permanent
                    memory.
                token_limit (int): The maximum number of tokens allowed in the API call.

            Returns:
                str: The AI's response.
            """
            model = cfg.fast_llm_model  # TODO: Change model from hardcode to argument
            # Reserve 1000 tokens for the response

            logger.debug(f"Token limit: {token_limit}")
            send_token_limit = token_limit - 1000

            relevant_memory = (
                ""
                if len(full_message_history) == 0
                else permanent_memory.get_relevant(str(full_message_history[-9:]), 10)
            )

            logger.debug(f"Memory Stats: {permanent_memory.get_stats()}")

            (
                next_message_to_add_index,
                current_tokens_used,
                insertion_index,
                current_context,
            ) = generate_context(prompt, relevant_memory, full_message_history, model)

            while current_tokens_used > 2500:
                # remove memories until we are under 2500 tokens
                relevant_memory = relevant_memory[1:]
                (
                    next_message_to_add_index,
                    current_tokens_used,
                    insertion_index,
                    current_context,
                ) = generate_context(
                    prompt, relevant_memory, full_message_history, model
                )

            current_tokens_used += token_counter.count_message_tokens(
                [create_chat_message("user", user_input)], model
            )  # Account for user input (appended later)

            while next_message_to_add_index >= 0:
                # print (f"CURRENT TOKENS USED: {current_tokens_used}")
                message_to_add = full_message_history[next_message_to_add_index]

                tokens_to_add = token_counter.count_message_tokens(
                    [message_to_add], model
                )
                if current_tokens_used + tokens_to_add > send_token_limit:
                    break

                # Add the most recent message to the start of the current context,
                # after the two system prompts.
                current_context.insert(
                    insertion_index, full_message_history[next_message_to_add_index]
                )

                # Count the currently used tokens
                current_tokens_used += tokens_to_add

                # Move to the next most recent message in the full message history
                next_message_to_add_index -= 1

            # Append user input, the length of this is accounted for above
            current_context.extend([create_chat_message("user", user_input)])

            # Calculate remaining tokens
            tokens_remaining = token_limit - current_tokens_used
            # assert tokens_remaining >= 0, "Tokens remaining is negative.
            # This should never happen, please submit a bug report at
            # https://www.github.com/Torantulino/Auto-GPT"

            # Debug print the current context
            logger.debug(f"Token limit: {token_limit}")
            logger.debug(f"Send Token Count: {current_tokens_used}")
            logger.debug(f"Tokens remaining for response: {tokens_remaining}")
            logger.debug("------------ CONTEXT SENT TO AI ---------------")
            for message in current_context:
                # Skip printing the prompt
                if message["role"] == "system" and message["content"] == prompt:
                    continue
                logger.debug(f"{message['role'].capitalize()}: {message['content']}")
                logger.debug("")
            logger.debug("----------- END OF CONTEXT ----------------")

            # TODO: use a model defined elsewhere, so that model can contain
            # temperature and other settings we care about
            assistant_reply = create_chat_completion(
                model=model,
                messages=current_context,
                max_tokens=tokens_remaining,
            )

            # Update full message history
            full_message_history.append(create_chat_message("user", user_input))
            full_message_history.append(
                create_chat_message("assistant", assistant_reply)
            )

            return assistant_reply
        except RateLimitError:
            # TODO: When we switch to langchain, this is built in
            print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
            time.sleep(10)
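The loop that walks full_message_history backwards is the heart of the context-window management: the newest messages are inserted right after the system prompts until the token budget is spent. A self-contained sketch of that packing strategy, using a crude whitespace token counter in place of token_counter (an assumption for illustration only):

def count_tokens(message):
    # Stand-in for token_counter.count_message_tokens; illustration only
    return len(message["content"].split())

def pack_context(system_prompts, history, budget):
    context = list(system_prompts)
    insertion_index = len(context)  # newest messages go here, after the prompts
    used = sum(count_tokens(m) for m in context)
    # Walk history from newest to oldest, stopping when the budget is exhausted
    for message in reversed(history):
        cost = count_tokens(message)
        if used + cost > budget:
            break
        context.insert(insertion_index, message)
        used += cost
    return context, used

history = [
    {"role": "user", "content": f"message number {i} with some words"}
    for i in range(20)
]
context, used = pack_context(
    [{"role": "system", "content": "You are Auto-GPT"}], history, budget=40
)
print(len(context), "messages packed,", used, "tokens used")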
autogpt/commands.py
ADDED
@@ -0,0 +1,274 @@
import json
import datetime
import autogpt.agent_manager as agents
from autogpt.config import Config
from autogpt.json_parser import fix_and_parse_json
from autogpt.image_gen import generate_image
from duckduckgo_search import ddg
from autogpt.ai_functions import evaluate_code, improve_code, write_tests
from autogpt.browse import scrape_links, scrape_text, summarize_text
from autogpt.execute_code import execute_python_file, execute_shell
from autogpt.file_operations import (
    append_to_file,
    delete_file,
    read_file,
    search_files,
    write_to_file,
)
from autogpt.memory import get_memory
from autogpt.speak import say_text
from autogpt.web import browse_website


cfg = Config()


def is_valid_int(value) -> bool:
    try:
        int(value)
        return True
    except ValueError:
        return False


def get_command(response):
    """Parse the response and return the command name and arguments"""
    try:
        response_json = fix_and_parse_json(response)

        if "command" not in response_json:
            return "Error:", "Missing 'command' object in JSON"

        if not isinstance(response_json, dict):
            return "Error:", f"'response_json' object is not dictionary {response_json}"

        command = response_json["command"]
        if not isinstance(command, dict):
            return "Error:", "'command' object is not a dictionary"

        if "name" not in command:
            return "Error:", "Missing 'name' field in 'command' object"

        command_name = command["name"]

        # Use an empty dictionary if 'args' field is not present in 'command' object
        arguments = command.get("args", {})

        return command_name, arguments
    except json.decoder.JSONDecodeError:
        return "Error:", "Invalid JSON"
    # All other errors, return "Error: + error message"
    except Exception as e:
        return "Error:", str(e)


def execute_command(command_name, arguments):
    """Execute the command and return the result"""
    memory = get_memory(cfg)

    try:
        if command_name == "google":
            # Check if the Google API key is set and use the official search method
            # If the API key is not set or has only whitespaces, use the unofficial
            # search method
            key = cfg.google_api_key
            if key and key.strip() and key != "your-google-api-key":
                return google_official_search(arguments["input"])
            else:
                return google_search(arguments["input"])
        elif command_name == "memory_add":
            return memory.add(arguments["string"])
        elif command_name == "start_agent":
            return start_agent(
                arguments["name"], arguments["task"], arguments["prompt"]
            )
        elif command_name == "message_agent":
            return message_agent(arguments["key"], arguments["message"])
        elif command_name == "list_agents":
            return list_agents()
        elif command_name == "delete_agent":
            return delete_agent(arguments["key"])
        elif command_name == "get_text_summary":
            return get_text_summary(arguments["url"], arguments["question"])
        elif command_name == "get_hyperlinks":
            return get_hyperlinks(arguments["url"])
        elif command_name == "read_file":
            return read_file(arguments["file"])
        elif command_name == "write_to_file":
            return write_to_file(arguments["file"], arguments["text"])
        elif command_name == "append_to_file":
            return append_to_file(arguments["file"], arguments["text"])
        elif command_name == "delete_file":
            return delete_file(arguments["file"])
        elif command_name == "search_files":
            return search_files(arguments["directory"])
        elif command_name == "browse_website":
            return browse_website(arguments["url"], arguments["question"])
        # TODO: Change these to take in a file rather than pasted code, if
        # non-file is given, return instructions "Input should be a python
        # filepath, write your code to file and try again"
        elif command_name == "evaluate_code":
            return evaluate_code(arguments["code"])
        elif command_name == "improve_code":
            return improve_code(arguments["suggestions"], arguments["code"])
        elif command_name == "write_tests":
            return write_tests(arguments["code"], arguments.get("focus"))
        elif command_name == "execute_python_file":  # Add this command
            return execute_python_file(arguments["file"])
        elif command_name == "execute_shell":
            if cfg.execute_local_commands:
                return execute_shell(arguments["command_line"])
            else:
                return (
                    "You are not allowed to run local shell commands. To execute"
                    " shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
                    "in your config. Do not attempt to bypass the restriction."
                )
        elif command_name == "generate_image":
            return generate_image(arguments["prompt"])
        elif command_name == "do_nothing":
            return "No action performed."
        elif command_name == "task_complete":
            shutdown()
        else:
            return (
                f"Unknown command '{command_name}'. Please refer to the 'COMMANDS'"
                " list for available commands and only respond in the specified JSON"
                " format."
            )
    # All errors, return "Error: + error message"
    except Exception as e:
        return "Error: " + str(e)


def get_datetime():
    """Return the current date and time"""
    return "Current date and time: " + datetime.datetime.now().strftime(
        "%Y-%m-%d %H:%M:%S"
    )


def google_search(query, num_results=8):
    """Return the results of a google search"""
    search_results = []
    if not query:
        return json.dumps(search_results)

    for j in ddg(query, max_results=num_results):
        search_results.append(j)

    return json.dumps(search_results, ensure_ascii=False, indent=4)


def google_official_search(query, num_results=8):
    """Return the results of a google search using the official Google API"""
    import json

    from googleapiclient.discovery import build
    from googleapiclient.errors import HttpError

    try:
        # Get the Google API key and Custom Search Engine ID from the config file
        api_key = cfg.google_api_key
        custom_search_engine_id = cfg.custom_search_engine_id

        # Initialize the Custom Search API service
        service = build("customsearch", "v1", developerKey=api_key)

        # Send the search query and retrieve the results
        result = (
            service.cse()
            .list(q=query, cx=custom_search_engine_id, num=num_results)
            .execute()
        )

        # Extract the search result items from the response
        search_results = result.get("items", [])

        # Create a list of only the URLs from the search results
        search_results_links = [item["link"] for item in search_results]

    except HttpError as e:
        # Handle errors in the API call
        error_details = json.loads(e.content.decode())

        # Check if the error is related to an invalid or missing API key
        if error_details.get("error", {}).get(
            "code"
        ) == 403 and "invalid API key" in error_details.get("error", {}).get(
            "message", ""
        ):
            return "Error: The provided Google API key is invalid or missing."
        else:
            return f"Error: {e}"

    # Return the list of search result URLs
    return search_results_links


def get_text_summary(url, question):
    """Return a summary of the text at the given url, focused on the question"""
    text = scrape_text(url)
    summary = summarize_text(url, text, question)
    return """ "Result" : """ + summary


def get_hyperlinks(url):
    """Return the hyperlinks found on the given webpage"""
    return scrape_links(url)


def shutdown():
    """Shut down the program"""
    print("Shutting down...")
    quit()


def start_agent(name, task, prompt, model=cfg.fast_llm_model):
    """Start an agent with a given name, task, and prompt"""
    # Remove underscores from name
    voice_name = name.replace("_", " ")

    first_message = f"""You are {name}. Respond with: "Acknowledged"."""
    agent_intro = f"{voice_name} here, Reporting for duty!"

    # Create agent
    if cfg.speak_mode:
        say_text(agent_intro, 1)
    key, ack = agents.create_agent(task, first_message, model)

    if cfg.speak_mode:
        say_text(f"Hello {voice_name}. Your task is as follows. {task}.")

    # Assign task (prompt), get response
    agent_response = agents.message_agent(key, prompt)

    return f"Agent {name} created with key {key}. First response: {agent_response}"


def message_agent(key, message):
    """Message an agent with a given key and message"""
    # Check if the key is a valid integer
    if is_valid_int(key):
        agent_response = agents.message_agent(int(key), message)
    # Check if the key is a valid string
    elif isinstance(key, str):
        agent_response = agents.message_agent(key, message)
    else:
        return "Invalid key, must be an integer or a string."

    # Speak response
    if cfg.speak_mode:
        say_text(agent_response, 1)
    return agent_response


def list_agents():
    """List all agents"""
    # Delegate to the agent manager's registry
    return agents.list_agents()


def delete_agent(key):
    """Delete an agent with a given key"""
    result = agents.delete_agent(key)
    return f"Agent {key} deleted." if result else f"Agent {key} does not exist."
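For reference, the response shape that get_command expects is a top-level object with a "command" key holding "name" and optional "args". A minimal parse of a well-formed reply using only the stdlib (fix_and_parse_json layers repair heuristics on top of this; the sample payload is illustrative):

import json

response = (
    '{"thoughts": {"text": "search first"},'
    ' "command": {"name": "google", "args": {"input": "Auto-GPT"}}}'
)
parsed = json.loads(response)

command = parsed["command"]
command_name = command["name"]
arguments = command.get("args", {})  # empty dict when 'args' is absent
print(command_name, arguments)  # google {'input': 'Auto-GPT'}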
autogpt/config.py
ADDED
@@ -0,0 +1,221 @@
import abc
import os

import openai
import yaml
from dotenv import load_dotenv

# Load environment variables from .env file
load_dotenv()


class Singleton(abc.ABCMeta, type):
    """
    Singleton metaclass for ensuring only one instance of a class.
    """

    _instances = {}

    def __call__(cls, *args, **kwargs):
        """Call method for the singleton metaclass."""
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]


class AbstractSingleton(abc.ABC, metaclass=Singleton):
    pass


class Config(metaclass=Singleton):
    """
    Configuration class to store the state of bools for different scripts access.
    """

    def __init__(self):
        """Initialize the Config class"""
        self.debug_mode = False
        self.continuous_mode = False
        self.continuous_limit = 0
        self.speak_mode = False
        self.skip_reprompt = False

        self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
        self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
        self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
        self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
        self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
        self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 8192))
        self.browse_summary_max_token = int(os.getenv("BROWSE_SUMMARY_MAX_TOKEN", 300))

        self.openai_api_key = os.getenv("OPENAI_API_KEY")
        self.temperature = float(os.getenv("TEMPERATURE", "1"))
        self.use_azure = os.getenv("USE_AZURE") == "True"
        self.execute_local_commands = (
            os.getenv("EXECUTE_LOCAL_COMMANDS", "False") == "True"
        )

        if self.use_azure:
            self.load_azure_config()
            openai.api_type = self.openai_api_type
            openai.api_base = self.openai_api_base
            openai.api_version = self.openai_api_version

        self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
        self.elevenlabs_voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID")
        self.elevenlabs_voice_2_id = os.getenv("ELEVENLABS_VOICE_2_ID")

        self.use_mac_os_tts = False
        self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")

        self.use_brian_tts = False
        self.use_brian_tts = os.getenv("USE_BRIAN_TTS")

        self.google_api_key = os.getenv("GOOGLE_API_KEY")
        self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")

        self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
        self.pinecone_region = os.getenv("PINECONE_ENV")

        self.image_provider = os.getenv("IMAGE_PROVIDER")
        self.huggingface_api_token = os.getenv("HUGGINGFACE_API_TOKEN")

        # User agent headers to use when browsing web
        # Some websites might completely deny the request with an error code if no user agent is sent.
        self.user_agent = os.getenv(
            "USER_AGENT",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36",
        )
        self.redis_host = os.getenv("REDIS_HOST", "localhost")
        self.redis_port = os.getenv("REDIS_PORT", "6379")
        self.redis_password = os.getenv("REDIS_PASSWORD", "")
        self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == "True"
        self.memory_index = os.getenv("MEMORY_INDEX", "auto-gpt")
        # Note that indexes must be created on db 0 in redis, this is not configurable.

        self.memory_backend = os.getenv("MEMORY_BACKEND", "local")
        # Initialize the OpenAI API client
        openai.api_key = self.openai_api_key

    def get_azure_deployment_id_for_model(self, model: str) -> str:
        """
        Returns the relevant deployment id for the model specified.

        Parameters:
            model(str): The model to map to the deployment id.

        Returns:
            The matching deployment id if found, otherwise an empty string.
        """
        if model == self.fast_llm_model:
            return self.azure_model_to_deployment_id_map["fast_llm_model_deployment_id"]
        elif model == self.smart_llm_model:
            return self.azure_model_to_deployment_id_map[
                "smart_llm_model_deployment_id"
            ]
        elif model == "text-embedding-ada-002":
            return self.azure_model_to_deployment_id_map[
                "embedding_model_deployment_id"
            ]
        else:
            return ""

    AZURE_CONFIG_FILE = os.path.join(os.path.dirname(__file__), "..", "azure.yaml")

    def load_azure_config(self, config_file: str = AZURE_CONFIG_FILE) -> None:
        """
        Loads the configuration parameters for Azure hosting from the specified yaml file path.

        Parameters:
            config_file(str): The path to the config yaml file. DEFAULT: "../azure.yaml"

        Returns:
            None
        """
        try:
            with open(config_file) as file:
                config_params = yaml.load(file, Loader=yaml.FullLoader)
        except FileNotFoundError:
            config_params = {}
        self.openai_api_type = os.getenv(
            "OPENAI_API_TYPE", config_params.get("azure_api_type", "azure")
        )
        self.openai_api_base = os.getenv(
            "OPENAI_AZURE_API_BASE", config_params.get("azure_api_base", "")
        )
        self.openai_api_version = os.getenv(
            "OPENAI_AZURE_API_VERSION", config_params.get("azure_api_version", "")
        )
        self.azure_model_to_deployment_id_map = config_params.get("azure_model_map", [])

    def set_continuous_mode(self, value: bool):
        """Set the continuous mode value."""
        self.continuous_mode = value

    def set_continuous_limit(self, value: int):
        """Set the continuous limit value."""
        self.continuous_limit = value

    def set_speak_mode(self, value: bool):
        """Set the speak mode value."""
        self.speak_mode = value

    def set_fast_llm_model(self, value: str):
        """Set the fast LLM model value."""
        self.fast_llm_model = value

    def set_smart_llm_model(self, value: str):
        """Set the smart LLM model value."""
        self.smart_llm_model = value

    def set_fast_token_limit(self, value: int):
        """Set the fast token limit value."""
        self.fast_token_limit = value

    def set_smart_token_limit(self, value: int):
        """Set the smart token limit value."""
        self.smart_token_limit = value

    def set_browse_chunk_max_length(self, value: int):
        """Set the browse_website command chunk max length value."""
        self.browse_chunk_max_length = value

    def set_browse_summary_max_token(self, value: int):
        """Set the browse_website command summary max token value."""
        self.browse_summary_max_token = value

    def set_openai_api_key(self, value: str):
        """Set the OpenAI API key value."""
        self.openai_api_key = value

    def set_elevenlabs_api_key(self, value: str):
        """Set the ElevenLabs API key value."""
        self.elevenlabs_api_key = value

    def set_elevenlabs_voice_1_id(self, value: str):
        """Set the ElevenLabs Voice 1 ID value."""
        self.elevenlabs_voice_1_id = value

    def set_elevenlabs_voice_2_id(self, value: str):
        """Set the ElevenLabs Voice 2 ID value."""
        self.elevenlabs_voice_2_id = value

    def set_google_api_key(self, value: str):
        """Set the Google API key value."""
        self.google_api_key = value

    def set_custom_search_engine_id(self, value: str):
        """Set the custom search engine id value."""
        self.custom_search_engine_id = value

    def set_pinecone_api_key(self, value: str):
        """Set the Pinecone API key value."""
        self.pinecone_api_key = value

    def set_pinecone_region(self, value: str):
        """Set the Pinecone region value."""
        self.pinecone_region = value

    def set_debug_mode(self, value: bool):
        """Set the debug mode value."""
        self.debug_mode = value
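The Singleton metaclass is why every module-level `cfg = Config()` in this codebase refers to the same object: `__call__` caches the first instance per class and returns it thereafter. A self-contained demonstration using a stand-in Settings class:

import abc

class Singleton(abc.ABCMeta, type):
    """Metaclass that caches and reuses the first instance of each class."""
    _instances = {}

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]

class Settings(metaclass=Singleton):
    def __init__(self):
        self.debug_mode = False

a = Settings()
b = Settings()
a.debug_mode = True
print(a is b, b.debug_mode)  # True True: both names refer to one instance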
autogpt/data_ingestion.py
ADDED
@@ -0,0 +1,95 @@
import argparse
import logging

from autogpt.config import Config
from autogpt.file_operations import ingest_file, search_files
from autogpt.memory import get_memory

cfg = Config()


def configure_logging():
    logging.basicConfig(
        filename="log-ingestion.txt",
        filemode="a",
        format="%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s",
        datefmt="%H:%M:%S",
        level=logging.DEBUG,
    )
    return logging.getLogger("AutoGPT-Ingestion")


def ingest_directory(directory, memory, args):
    """
    Ingest all files in a directory by calling the ingest_file function for each file.

    :param directory: The directory containing the files to ingest
    :param memory: An object with an add() method to store the chunks in memory
    """
    try:
        files = search_files(directory)
        for file in files:
            ingest_file(file, memory, args.max_length, args.overlap)
    except Exception as e:
        print(f"Error while ingesting directory '{directory}': {str(e)}")


def main() -> None:
    logger = configure_logging()

    parser = argparse.ArgumentParser(
        description="Ingest a file or a directory with multiple files into memory. "
        "Make sure to set your .env before running this script."
    )
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--file", type=str, help="The file to ingest.")
    group.add_argument(
        "--dir", type=str, help="The directory containing the files to ingest."
    )
    parser.add_argument(
        "--init",
        action="store_true",
        help="Init the memory and wipe its content (default: False)",
        default=False,
    )
    parser.add_argument(
        "--overlap",
        type=int,
        help="The overlap size between chunks when ingesting files (default: 200)",
        default=200,
    )
    parser.add_argument(
        "--max_length",
        type=int,
        help="The max_length of each chunk when ingesting files (default: 4000)",
        default=4000,
    )

    args = parser.parse_args()

    # Initialize memory
    memory = get_memory(cfg, init=args.init)
    print("Using memory of type: " + memory.__class__.__name__)

    if args.file:
        try:
            ingest_file(args.file, memory, args.max_length, args.overlap)
            print(f"File '{args.file}' ingested successfully.")
        except Exception as e:
            logger.error(f"Error while ingesting file '{args.file}': {str(e)}")
            print(f"Error while ingesting file '{args.file}': {str(e)}")
    elif args.dir:
        try:
            ingest_directory(args.dir, memory, args)
            print(f"Directory '{args.dir}' ingested successfully.")
        except Exception as e:
            logger.error(f"Error while ingesting directory '{args.dir}': {str(e)}")
            print(f"Error while ingesting directory '{args.dir}': {str(e)}")
    else:
        print(
            "Please provide either a file path (--file) or a directory name (--dir)"
            " inside the auto_gpt_workspace directory as input."
        )


if __name__ == "__main__":
    main()
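
A minimal usage sketch (not part of this diff; notes.txt is a hypothetical workspace file) showing the same ingestion flow driven directly from Python:

    from autogpt.config import Config
    from autogpt.file_operations import ingest_file
    from autogpt.memory import get_memory

    cfg = Config()
    memory = get_memory(cfg, init=True)  # wipe and reinitialize the memory index
    ingest_file("notes.txt", memory, max_length=4000, overlap=200)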
autogpt/execute_code.py
ADDED
@@ -0,0 +1,105 @@
import os
import subprocess

import docker
from docker.errors import ImageNotFound

WORKSPACE_FOLDER = "auto_gpt_workspace"


def execute_python_file(file):
    """Execute a Python file in a Docker container and return the output"""

    print(f"Executing file '{file}' in workspace '{WORKSPACE_FOLDER}'")

    if not file.endswith(".py"):
        return "Error: Invalid file type. Only .py files are allowed."

    file_path = os.path.join(WORKSPACE_FOLDER, file)

    if not os.path.isfile(file_path):
        return f"Error: File '{file}' does not exist."

    if we_are_running_in_a_docker_container():
        result = subprocess.run(
            f"python {file_path}", capture_output=True, encoding="utf8", shell=True
        )
        if result.returncode == 0:
            return result.stdout
        else:
            return f"Error: {result.stderr}"
    else:
        try:
            client = docker.from_env()

            image_name = "python:3.10"
            try:
                client.images.get(image_name)
                print(f"Image '{image_name}' found locally")
            except ImageNotFound:
                print(
                    f"Image '{image_name}' not found locally, pulling from Docker Hub"
                )
                # Use the low-level API to stream the pull response
                low_level_client = docker.APIClient()
                for line in low_level_client.pull(image_name, stream=True, decode=True):
                    # Print the status and progress, if available
                    status = line.get("status")
                    progress = line.get("progress")
                    if status and progress:
                        print(f"{status}: {progress}")
                    elif status:
                        print(status)

            # You can replace 'python:3.10' with the desired Python image/version
            # You can find available Python images on Docker Hub:
            # https://hub.docker.com/_/python
            container = client.containers.run(
                image_name,
                f"python {file}",
                volumes={
                    os.path.abspath(WORKSPACE_FOLDER): {
                        "bind": "/workspace",
                        "mode": "ro",
                    }
                },
                working_dir="/workspace",
                stderr=True,
                stdout=True,
                detach=True,
            )

            container.wait()
            logs = container.logs().decode("utf-8")
            container.remove()

            # print(f"Execution complete. Output: {output}")
            # print(f"Logs: {logs}")

            return logs

        except Exception as e:
            return f"Error: {str(e)}"


def execute_shell(command_line):
    current_dir = os.getcwd()

    if WORKSPACE_FOLDER not in current_dir:  # Change dir into workspace if necessary
        work_dir = os.path.join(os.getcwd(), WORKSPACE_FOLDER)
        os.chdir(work_dir)

    print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'")

    result = subprocess.run(command_line, capture_output=True, shell=True)
    output = f"STDOUT:\n{result.stdout}\nSTDERR:\n{result.stderr}"

    # Change back to whatever the prior working dir was
    os.chdir(current_dir)

    return output


def we_are_running_in_a_docker_container():
    # Bug fix: the original omitted the `return`, so this always returned None
    return os.path.exists("/.dockerenv")
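
A brief usage sketch (not part of this diff; hello.py is hypothetical, and Docker must be available on the host):

    from autogpt.execute_code import execute_python_file, execute_shell

    # Runs auto_gpt_workspace/hello.py inside a python:3.10 container
    print(execute_python_file("hello.py"))

    # Runs a shell command from within the workspace folder
    print(execute_shell("ls -la"))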
autogpt/file_operations.py
ADDED
@@ -0,0 +1,141 @@
import os
import os.path

# Set a dedicated folder for file I/O
working_directory = "auto_gpt_workspace"

# Create the directory if it doesn't exist
if not os.path.exists(working_directory):
    os.makedirs(working_directory)


def safe_join(base, *paths):
    """Join one or more path components intelligently."""
    new_path = os.path.join(base, *paths)
    norm_new_path = os.path.normpath(new_path)

    if os.path.commonprefix([base, norm_new_path]) != base:
        raise ValueError("Attempted to access outside of working directory.")

    return norm_new_path


def split_file(content, max_length=4000, overlap=0):
    """
    Split text into chunks of a specified maximum length with a specified overlap
    between chunks.

    :param content: The input text to be split into chunks
    :param max_length: The maximum length of each chunk,
        default is 4000 (about 1k tokens)
    :param overlap: The number of overlapping characters between chunks,
        default is no overlap
    :return: A generator yielding chunks of text
    """
    start = 0
    content_length = len(content)

    while start < content_length:
        end = start + max_length
        if end + overlap < content_length:
            chunk = content[start : end + overlap]
        else:
            chunk = content[start:content_length]
        yield chunk
        start += max_length - overlap


def read_file(filename) -> str:
    """Read a file and return the contents"""
    try:
        filepath = safe_join(working_directory, filename)
        with open(filepath, "r", encoding="utf-8") as f:
            content = f.read()
        return content
    except Exception as e:
        return f"Error: {str(e)}"


def ingest_file(filename, memory, max_length=4000, overlap=200):
    """
    Ingest a file by reading its content, splitting it into chunks with a specified
    maximum length and overlap, and adding the chunks to the memory storage.

    :param filename: The name of the file to ingest
    :param memory: An object with an add() method to store the chunks in memory
    :param max_length: The maximum length of each chunk, default is 4000
    :param overlap: The number of overlapping characters between chunks, default is 200
    """
    try:
        print(f"Working with file {filename}")
        content = read_file(filename)
        content_length = len(content)
        print(f"File length: {content_length} characters")

        chunks = list(split_file(content, max_length=max_length, overlap=overlap))

        num_chunks = len(chunks)
        for i, chunk in enumerate(chunks):
            print(f"Ingesting chunk {i + 1} / {num_chunks} into memory")
            memory_to_add = (
                f"Filename: {filename}\n" f"Content part#{i + 1}/{num_chunks}: {chunk}"
            )

            memory.add(memory_to_add)

        print(f"Done ingesting {num_chunks} chunks from {filename}.")
    except Exception as e:
        print(f"Error while ingesting file '{filename}': {str(e)}")


def write_to_file(filename, text):
    """Write text to a file"""
    try:
        filepath = safe_join(working_directory, filename)
        directory = os.path.dirname(filepath)
        if not os.path.exists(directory):
            os.makedirs(directory)
        with open(filepath, "w", encoding="utf-8") as f:
            f.write(text)
        return "File written to successfully."
    except Exception as e:
        return "Error: " + str(e)


def append_to_file(filename, text):
    """Append text to a file"""
    try:
        filepath = safe_join(working_directory, filename)
        with open(filepath, "a") as f:
            f.write(text)
        return "Text appended successfully."
    except Exception as e:
        return "Error: " + str(e)


def delete_file(filename):
    """Delete a file"""
    try:
        filepath = safe_join(working_directory, filename)
        os.remove(filepath)
        return "File deleted successfully."
    except Exception as e:
        return "Error: " + str(e)


def search_files(directory):
    found_files = []

    if directory == "" or directory == "/":
        search_directory = working_directory
    else:
        search_directory = safe_join(working_directory, directory)

    for root, _, files in os.walk(search_directory):
        for file in files:
            if file.startswith("."):
                continue
            relative_path = os.path.relpath(os.path.join(root, file), working_directory)
            found_files.append(relative_path)

    return found_files
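
The chunking arithmetic is easiest to see on a small, illustrative input: each interior chunk spans max_length + overlap characters, while the start index advances by max_length - overlap, so consecutive chunks share text.

    text = "abcdefghijklmnopqrstuvwxyz"
    chunks = list(split_file(text, max_length=10, overlap=3))
    # chunks[0] == "abcdefghijklm"   (indices 0..12: 10 chars + 3 overlap)
    # chunks[1] == "hijklmnopqrst"   (starts at index 7, i.e. 10 - 3)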
autogpt/image_gen.py
ADDED
@@ -0,0 +1,67 @@
import io
import os.path
import uuid
from base64 import b64decode

import openai
import requests
from PIL import Image

from autogpt.config import Config

cfg = Config()

working_directory = "auto_gpt_workspace"


def generate_image(prompt):
    filename = str(uuid.uuid4()) + ".jpg"

    # DALL-E
    if cfg.image_provider == "dalle":
        openai.api_key = cfg.openai_api_key

        response = openai.Image.create(
            prompt=prompt,
            n=1,
            size="256x256",
            response_format="b64_json",
        )

        print("Image Generated for prompt: " + prompt)

        image_data = b64decode(response["data"][0]["b64_json"])

        with open(working_directory + "/" + filename, mode="wb") as png:
            png.write(image_data)

        return "Saved to disk: " + filename

    # STABLE DIFFUSION
    elif cfg.image_provider == "sd":
        API_URL = (
            "https://api-inference.huggingface.co/models/CompVis/stable-diffusion-v1-4"
        )
        if cfg.huggingface_api_token is None:
            raise ValueError(
                "You need to set your Hugging Face API token in the config file."
            )
        headers = {"Authorization": "Bearer " + cfg.huggingface_api_token}

        response = requests.post(
            API_URL,
            headers=headers,
            json={
                "inputs": prompt,
            },
        )

        image = Image.open(io.BytesIO(response.content))
        print("Image Generated for prompt: " + prompt)

        image.save(os.path.join(working_directory, filename))

        return "Saved to disk: " + filename

    else:
        return "No Image Provider Set"
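
Hypothetical usage (not part of this diff), assuming the image provider is configured as dalle in .env along with a valid OpenAI key:

    from autogpt.image_gen import generate_image

    print(generate_image("a watercolor fox"))  # e.g. "Saved to disk: <uuid>.jpg"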
autogpt/js/overlay.js
ADDED
@@ -0,0 +1,29 @@
const overlay = document.createElement('div');
Object.assign(overlay.style, {
    position: 'fixed',
    zIndex: 999999,
    top: 0,
    left: 0,
    width: '100%',
    height: '100%',
    background: 'rgba(0, 0, 0, 0.7)',
    color: '#fff',
    fontSize: '24px',
    fontWeight: 'bold',
    display: 'flex',
    justifyContent: 'center',
    alignItems: 'center',
});
const textContent = document.createElement('div');
Object.assign(textContent.style, {
    textAlign: 'center',
});
textContent.textContent = 'AutoGPT Analyzing Page';
overlay.appendChild(textContent);
document.body.append(overlay);
document.body.style.overflow = 'hidden';
let dotCount = 0;
setInterval(() => {
    textContent.textContent = 'AutoGPT Analyzing Page' + '.'.repeat(dotCount);
    dotCount = (dotCount + 1) % 4;
}, 1000);
autogpt/json_parser.py
ADDED
@@ -0,0 +1,113 @@
import json
from typing import Any, Dict, Union

from autogpt.call_ai_function import call_ai_function
from autogpt.config import Config
from autogpt.json_utils import correct_json
from autogpt.logger import logger

cfg = Config()

JSON_SCHEMA = """
{
    "command": {
        "name": "command name",
        "args": {
            "arg name": "value"
        }
    },
    "thoughts":
    {
        "text": "thought",
        "reasoning": "reasoning",
        "plan": "- short bulleted\n- list that conveys\n- long-term plan",
        "criticism": "constructive self-criticism",
        "speak": "thoughts summary to say to user"
    }
}
"""


def fix_and_parse_json(
    json_str: str, try_to_fix_with_gpt: bool = True
) -> Union[str, Dict[Any, Any]]:
    """Fix and parse JSON string"""
    try:
        json_str = json_str.replace("\t", "")
        return json.loads(json_str)
    except json.JSONDecodeError as _:  # noqa: F841
        try:
            json_str = correct_json(json_str)
            return json.loads(json_str)
        except json.JSONDecodeError as _:  # noqa: F841
            pass
        # Let's do something manually:
        # sometimes GPT responds with something BEFORE the braces:
        # "I'm sorry, I don't understand. Please try again."
        # {"text": "I'm sorry, I don't understand. Please try again.",
        #  "confidence": 0.0}
        # So let's try to find the first brace and then parse the rest
        # of the string
        try:
            brace_index = json_str.index("{")
            json_str = json_str[brace_index:]
            last_brace_index = json_str.rindex("}")
            json_str = json_str[: last_brace_index + 1]
            return json.loads(json_str)
        # Can throw a ValueError if there is no "{" or "}" in the json_str
        except (json.JSONDecodeError, ValueError) as e:  # noqa: F841
            if try_to_fix_with_gpt:
                logger.warn(
                    "Warning: Failed to parse AI output, attempting to fix."
                    "\n If you see this warning frequently, it's likely that"
                    " your prompt is confusing the AI. Try changing it up"
                    " slightly."
                )
                # Now try to fix this up using the ai_functions
                ai_fixed_json = fix_json(json_str, JSON_SCHEMA)

                if ai_fixed_json != "failed":
                    return json.loads(ai_fixed_json)
                else:
                    # This allows the AI to react to the error message,
                    # which usually results in it correcting its ways.
                    logger.error("Failed to fix AI output, telling the AI.")
                    return json_str
            else:
                raise e


def fix_json(json_str: str, schema: str) -> str:
    """Fix the given JSON string to make it parseable and fully compliant with the provided schema."""
    # Try to fix the JSON using GPT:
    function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
    args = [f"'''{json_str}'''", f"'''{schema}'''"]
    description_string = (
        "Fixes the provided JSON string to make it parseable"
        " and fully compliant with the provided schema.\n If an object or"
        " field specified in the schema isn't contained within the correct"
        " JSON, it is omitted.\n This function is brilliant at guessing"
        " when the format is incorrect."
    )

    # If it doesn't already start with a "`", add one:
    if not json_str.startswith("`"):
        json_str = "```json\n" + json_str + "\n```"
    result_string = call_ai_function(
        function_string, args, description_string, model=cfg.fast_llm_model
    )
    logger.debug("------------ JSON FIX ATTEMPT ---------------")
    logger.debug(f"Original JSON: {json_str}")
    logger.debug("-----------")
    logger.debug(f"Fixed JSON: {result_string}")
    logger.debug("----------- END OF FIX ATTEMPT ----------------")

    try:
        json.loads(result_string)  # just check the validity
        return result_string
    except:  # noqa: E722
        # Get the call stack:
        # import traceback
        # call_stack = traceback.format_exc()
        # print(f"Failed to fix JSON: '{json_str}' "+call_stack)
        return "failed"
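
To illustrate the manual fallback on an illustrative input (GPT-based fixing disabled): text before the first brace is stripped, so leading chatter from the model does not break parsing.

    raw = 'Sure! {"command": {"name": "google", "args": {"input": "autogpt"}}, "thoughts": {}}'
    parsed = fix_and_parse_json(raw, try_to_fix_with_gpt=False)
    assert parsed["command"]["name"] == "google"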
autogpt/json_utils.py
ADDED
@@ -0,0 +1,128 @@
import json
import re
from typing import Optional

from autogpt.config import Config

cfg = Config()


def extract_char_position(error_message: str) -> int:
    """Extract the character position from the JSONDecodeError message.

    Args:
        error_message (str): The error message from the JSONDecodeError
          exception.

    Returns:
        int: The character position.
    """
    # `re` is already imported at module level; the duplicate local import
    # in the original has been removed.
    char_pattern = re.compile(r"\(char (\d+)\)")
    if match := char_pattern.search(error_message):
        return int(match[1])
    else:
        raise ValueError("Character position not found in the error message.")


def add_quotes_to_property_names(json_string: str) -> str:
    """
    Add quotes to property names in a JSON string.

    Args:
        json_string (str): The JSON string.

    Returns:
        str: The JSON string with quotes added to property names.
    """

    def replace_func(match):
        return f'"{match.group(1)}":'

    property_name_pattern = re.compile(r"(\w+):")
    corrected_json_string = property_name_pattern.sub(replace_func, json_string)

    try:
        json.loads(corrected_json_string)
        return corrected_json_string
    except json.JSONDecodeError as e:
        raise e


def balance_braces(json_string: str) -> Optional[str]:
    """
    Balance the braces in a JSON string.

    Args:
        json_string (str): The JSON string.

    Returns:
        str: The JSON string with braces balanced.
    """
    open_braces_count = json_string.count("{")
    close_braces_count = json_string.count("}")

    while open_braces_count > close_braces_count:
        json_string += "}"
        close_braces_count += 1

    while close_braces_count > open_braces_count:
        json_string = json_string.rstrip("}")
        close_braces_count -= 1

    try:
        json.loads(json_string)
        return json_string
    except json.JSONDecodeError:
        pass


def fix_invalid_escape(json_str: str, error_message: str) -> str:
    while error_message.startswith("Invalid \\escape"):
        bad_escape_location = extract_char_position(error_message)
        json_str = json_str[:bad_escape_location] + json_str[bad_escape_location + 1 :]
        try:
            json.loads(json_str)
            return json_str
        except json.JSONDecodeError as e:
            if cfg.debug_mode:
                print("json loads error - fix invalid escape", e)
            error_message = str(e)
    return json_str


def correct_json(json_str: str) -> str:
    """
    Correct common JSON errors.

    Args:
        json_str (str): The JSON string.
    """
    try:
        if cfg.debug_mode:
            print("json", json_str)
        json.loads(json_str)
        return json_str
    except json.JSONDecodeError as e:
        if cfg.debug_mode:
            print("json loads error", e)
        error_message = str(e)
        if error_message.startswith("Invalid \\escape"):
            json_str = fix_invalid_escape(json_str, error_message)
        if error_message.startswith(
            "Expecting property name enclosed in double quotes"
        ):
            json_str = add_quotes_to_property_names(json_str)
            try:
                json.loads(json_str)
                return json_str
            except json.JSONDecodeError as e:
                if cfg.debug_mode:
                    print("json loads error - add quotes", e)
                error_message = str(e)
        if balanced_str := balance_braces(json_str):
            return balanced_str
    return json_str
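
For example (illustrative input), unquoted property names are the classic model slip this module repairs:

    broken = '{name: "google", args: {input: "autogpt"}}'
    print(correct_json(broken))
    # -> '{"name": "google", "args": {"input": "autogpt"}}'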
autogpt/llm_utils.py
ADDED
@@ -0,0 +1,69 @@
import time

import openai
from openai.error import APIError, RateLimitError
from colorama import Fore

from autogpt.config import Config

cfg = Config()

openai.api_key = cfg.openai_api_key


# Overly simple abstraction until we create something better
# simple retry mechanism when getting a rate error or a bad gateway
def create_chat_completion(
    messages, model=None, temperature=cfg.temperature, max_tokens=None
) -> str:
    """Create a chat completion using the OpenAI API"""
    response = None
    num_retries = 5
    if cfg.debug_mode:
        print(
            Fore.GREEN
            + f"Creating chat completion with model {model}, temperature {temperature},"
            f" max_tokens {max_tokens}" + Fore.RESET
        )
    for attempt in range(num_retries):
        try:
            if cfg.use_azure:
                response = openai.ChatCompletion.create(
                    deployment_id=cfg.get_azure_deployment_id_for_model(model),
                    model=model,
                    messages=messages,
                    temperature=temperature,
                    max_tokens=max_tokens,
                )
            else:
                response = openai.ChatCompletion.create(
                    model=model,
                    messages=messages,
                    temperature=temperature,
                    max_tokens=max_tokens,
                )
            break
        except RateLimitError:
            if cfg.debug_mode:
                print(
                    Fore.RED + "Error: ",
                    "API Rate Limit Reached. Waiting 20 seconds..." + Fore.RESET,
                )
            time.sleep(20)
        except APIError as e:
            if e.http_status == 502:
                if cfg.debug_mode:
                    print(
                        Fore.RED + "Error: ",
                        "API Bad gateway. Waiting 20 seconds..." + Fore.RESET,
                    )
                time.sleep(20)
            else:
                raise
            if attempt == num_retries - 1:
                raise

    if response is None:
        raise RuntimeError("Failed to get response after 5 retries")

    return response.choices[0].message["content"]
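
A hypothetical call (not part of this diff), assuming a valid OpenAI key is configured via .env:

    reply = create_chat_completion(
        messages=[{"role": "user", "content": "Reply with one word."}],
        model=cfg.fast_llm_model,
        max_tokens=8,
    )
    print(reply)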
autogpt/logger.py
ADDED
@@ -0,0 +1,195 @@
import logging
import os
import random
import re
import time
from logging import LogRecord

from colorama import Fore, Style

from autogpt import speak
from autogpt.config import Config, Singleton

cfg = Config()

"""
Logger that handles titles in different colors.
Outputs logs to console, activity.log, and errors.log
For console handler: simulates typing
"""


class Logger(metaclass=Singleton):
    def __init__(self):
        # create log directory if it doesn't exist
        this_files_dir_path = os.path.dirname(__file__)
        log_dir = os.path.join(this_files_dir_path, "../logs")
        if not os.path.exists(log_dir):
            os.makedirs(log_dir)

        log_file = "activity.log"
        error_file = "error.log"

        console_formatter = AutoGptFormatter("%(title_color)s %(message)s")

        # Create a handler for console which simulates typing
        self.typing_console_handler = TypingConsoleHandler()
        self.typing_console_handler.setLevel(logging.INFO)
        self.typing_console_handler.setFormatter(console_formatter)

        # Create a handler for console without typing simulation
        self.console_handler = ConsoleHandler()
        self.console_handler.setLevel(logging.DEBUG)
        self.console_handler.setFormatter(console_formatter)

        # Info handler in activity.log
        self.file_handler = logging.FileHandler(os.path.join(log_dir, log_file))
        self.file_handler.setLevel(logging.DEBUG)
        info_formatter = AutoGptFormatter(
            "%(asctime)s %(levelname)s %(title)s %(message_no_color)s"
        )
        self.file_handler.setFormatter(info_formatter)

        # Error handler error.log
        error_handler = logging.FileHandler(os.path.join(log_dir, error_file))
        error_handler.setLevel(logging.ERROR)
        error_formatter = AutoGptFormatter(
            "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s"
            " %(message_no_color)s"
        )
        error_handler.setFormatter(error_formatter)

        self.typing_logger = logging.getLogger("TYPER")
        self.typing_logger.addHandler(self.typing_console_handler)
        self.typing_logger.addHandler(self.file_handler)
        self.typing_logger.addHandler(error_handler)
        self.typing_logger.setLevel(logging.DEBUG)

        self.logger = logging.getLogger("LOGGER")
        self.logger.addHandler(self.console_handler)
        self.logger.addHandler(self.file_handler)
        self.logger.addHandler(error_handler)
        self.logger.setLevel(logging.DEBUG)

    def typewriter_log(
        self, title="", title_color="", content="", speak_text=False, level=logging.INFO
    ):
        if speak_text and cfg.speak_mode:
            speak.say_text(f"{title}. {content}")

        if content:
            if isinstance(content, list):
                content = " ".join(content)
        else:
            content = ""

        self.typing_logger.log(
            level, content, extra={"title": title, "color": title_color}
        )

    def debug(
        self,
        message,
        title="",
        title_color="",
    ):
        self._log(title, title_color, message, logging.DEBUG)

    def warn(
        self,
        message,
        title="",
        title_color="",
    ):
        self._log(title, title_color, message, logging.WARN)

    def error(self, title, message=""):
        self._log(title, Fore.RED, message, logging.ERROR)

    def _log(self, title="", title_color="", message="", level=logging.INFO):
        if message:
            if isinstance(message, list):
                message = " ".join(message)
        self.logger.log(level, message, extra={"title": title, "color": title_color})

    def set_level(self, level):
        self.logger.setLevel(level)
        self.typing_logger.setLevel(level)

    def double_check(self, additionalText=None):
        if not additionalText:
            additionalText = (
                "Please ensure you've setup and configured everything"
                " correctly. Read https://github.com/Torantulino/Auto-GPT#readme to "
                "double check. You can also create a github issue or join the discord"
                " and ask there!"
            )

        self.typewriter_log("DOUBLE CHECK CONFIGURATION", Fore.YELLOW, additionalText)


"""
Output stream to console using simulated typing
"""


class TypingConsoleHandler(logging.StreamHandler):
    def emit(self, record):
        # Note: the bounds are named in reverse (min > max), but
        # random.uniform accepts its arguments in either order.
        min_typing_speed = 0.05
        max_typing_speed = 0.01

        msg = self.format(record)
        try:
            words = msg.split()
            for i, word in enumerate(words):
                print(word, end="", flush=True)
                if i < len(words) - 1:
                    print(" ", end="", flush=True)
                typing_speed = random.uniform(min_typing_speed, max_typing_speed)
                time.sleep(typing_speed)
                # type faster after each word
                min_typing_speed = min_typing_speed * 0.95
                max_typing_speed = max_typing_speed * 0.95
            print()
        except Exception:
            self.handleError(record)


class ConsoleHandler(logging.StreamHandler):
    def emit(self, record) -> None:
        msg = self.format(record)
        try:
            print(msg)
        except Exception:
            self.handleError(record)


class AutoGptFormatter(logging.Formatter):
    """
    Handles the custom placeholders 'title_color' and 'message_no_color'.
    To use this formatter, make sure to pass 'color', 'title' as log extras.
    """

    def format(self, record: LogRecord) -> str:
        if hasattr(record, "color"):
            record.title_color = (
                getattr(record, "color")
                + getattr(record, "title")
                + " "
                + Style.RESET_ALL
            )
        else:
            record.title_color = getattr(record, "title")
        if hasattr(record, "msg"):
            record.message_no_color = remove_color_codes(getattr(record, "msg"))
        else:
            record.message_no_color = ""
        return super().format(record)


def remove_color_codes(s: str) -> str:
    ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
    return ansi_escape.sub("", s)


logger = Logger()
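
Typical calls (illustrative) route through the singleton created above:

    from colorama import Fore
    from autogpt.logger import logger

    logger.typewriter_log("THOUGHTS", Fore.YELLOW, "Planning the next step...")
    logger.warn("Rate limited, backing off")
    logger.error("JSON PARSE", "Could not decode the model reply")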
autogpt/memory/__init__.py
ADDED
@@ -0,0 +1,59 @@
from autogpt.memory.local import LocalCache
from autogpt.memory.no_memory import NoMemory

# List of supported memory backends
# Add a backend to this list if the import attempt is successful
supported_memory = ["local", "no_memory"]

try:
    from autogpt.memory.redismem import RedisMemory

    supported_memory.append("redis")
except ImportError:
    print("Redis not installed. Skipping import.")
    RedisMemory = None

try:
    from autogpt.memory.pinecone import PineconeMemory

    supported_memory.append("pinecone")
except ImportError:
    print("Pinecone not installed. Skipping import.")
    PineconeMemory = None


def get_memory(cfg, init=False):
    memory = None
    if cfg.memory_backend == "pinecone":
        if not PineconeMemory:
            print(
                "Error: Pinecone is not installed. Please install pinecone"
                " to use Pinecone as a memory backend."
            )
        else:
            memory = PineconeMemory(cfg)
            if init:
                memory.clear()
    elif cfg.memory_backend == "redis":
        if not RedisMemory:
            print(
                "Error: Redis is not installed. Please install redis-py to"
                " use Redis as a memory backend."
            )
        else:
            memory = RedisMemory(cfg)
    elif cfg.memory_backend == "no_memory":
        memory = NoMemory(cfg)

    if memory is None:
        memory = LocalCache(cfg)
        if init:
            memory.clear()
    return memory


def get_supported_memory_backends():
    return supported_memory


__all__ = ["get_memory", "LocalCache", "RedisMemory", "PineconeMemory", "NoMemory"]
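
An illustrative selection sketch: the backend named by cfg.memory_backend is used when its import succeeded, with LocalCache as the fallback.

    from autogpt.config import Config
    from autogpt.memory import get_memory, get_supported_memory_backends

    cfg = Config()
    print(get_supported_memory_backends())  # e.g. ['local', 'no_memory', 'redis']
    memory = get_memory(cfg, init=True)     # init=True wipes pinecone/local backends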
autogpt/memory/base.py
ADDED
@@ -0,0 +1,43 @@
"""Base class for memory providers."""
import abc

import openai

from autogpt.config import AbstractSingleton, Config

cfg = Config()


def get_ada_embedding(text):
    text = text.replace("\n", " ")
    if cfg.use_azure:
        return openai.Embedding.create(
            input=[text],
            engine=cfg.get_azure_deployment_id_for_model("text-embedding-ada-002"),
        )["data"][0]["embedding"]
    else:
        return openai.Embedding.create(input=[text], model="text-embedding-ada-002")[
            "data"
        ][0]["embedding"]


class MemoryProviderSingleton(AbstractSingleton):
    @abc.abstractmethod
    def add(self, data):
        pass

    @abc.abstractmethod
    def get(self, data):
        pass

    @abc.abstractmethod
    def clear(self):
        pass

    @abc.abstractmethod
    def get_relevant(self, data, num_relevant=5):
        pass

    @abc.abstractmethod
    def get_stats(self):
        pass
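
A sanity-check sketch (requires a live OpenAI key): text-embedding-ada-002 returns 1536-dimensional vectors, which is why the providers below hard-code a dimension of 1536.

    vec = get_ada_embedding("hello world")
    assert len(vec) == 1536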
autogpt/memory/local.py
ADDED
@@ -0,0 +1,123 @@
import dataclasses
import os
from typing import Any, List, Optional

import numpy as np
import orjson

from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding

EMBED_DIM = 1536
SAVE_OPTIONS = orjson.OPT_SERIALIZE_NUMPY | orjson.OPT_SERIALIZE_DATACLASS


def create_default_embeddings():
    return np.zeros((0, EMBED_DIM)).astype(np.float32)


@dataclasses.dataclass
class CacheContent:
    texts: List[str] = dataclasses.field(default_factory=list)
    embeddings: np.ndarray = dataclasses.field(
        default_factory=create_default_embeddings
    )


class LocalCache(MemoryProviderSingleton):
    # on load, load our database
    def __init__(self, cfg) -> None:
        self.filename = f"{cfg.memory_index}.json"
        if os.path.exists(self.filename):
            try:
                # Bug fix: the original opened with "w+b", which truncates the
                # file before reading and therefore wiped the cache on every load.
                with open(self.filename, "r+b") as f:
                    file_content = f.read()
                    if not file_content.strip():
                        file_content = b"{}"
                        f.write(file_content)

                loaded = orjson.loads(file_content)
                self.data = CacheContent(**loaded)
            except orjson.JSONDecodeError:
                print(f"Error: The file '{self.filename}' is not in JSON format.")
                self.data = CacheContent()
        else:
            print(
                f"Warning: The file '{self.filename}' does not exist."
                " Local memory will not be saved to a file."
            )
            self.data = CacheContent()

    def add(self, text: str):
        """
        Add text to our list of texts, add embedding as row to our
        embeddings-matrix

        Args:
            text: str

        Returns: None
        """
        if "Command Error:" in text:
            return ""
        self.data.texts.append(text)

        embedding = get_ada_embedding(text)

        vector = np.array(embedding).astype(np.float32)
        vector = vector[np.newaxis, :]
        self.data.embeddings = np.concatenate(
            [
                self.data.embeddings,
                vector,
            ],
            axis=0,
        )

        with open(self.filename, "wb") as f:
            out = orjson.dumps(self.data, option=SAVE_OPTIONS)
            f.write(out)
        return text

    def clear(self) -> str:
        """
        Clears the local cache.

        Returns: A message indicating that the memory has been cleared.
        """
        self.data = CacheContent()
        return "Obliviated"

    def get(self, data: str) -> Optional[List[Any]]:
        """
        Gets the data from the memory that is most relevant to the given data.

        Args:
            data: The data to compare to.

        Returns: The most relevant data.
        """
        return self.get_relevant(data, 1)

    def get_relevant(self, text: str, k: int) -> List[Any]:
        """
        matrix-vector mult to find score-for-each-row-of-matrix
        get indices for top-k winning scores
        return texts for those indices
        Args:
            text: str
            k: int

        Returns: List[str]
        """
        embedding = get_ada_embedding(text)

        scores = np.dot(self.data.embeddings, embedding)

        top_k_indices = np.argsort(scores)[-k:][::-1]

        return [self.data.texts[i] for i in top_k_indices]

    def get_stats(self):
        """
        Returns: The stats of the local cache.
        """
        return len(self.data.texts), self.data.embeddings.shape
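
The relevance scoring above is a single matrix-vector product followed by a top-k argsort; a standalone sketch of the same math (random data, illustrative only):

    import numpy as np

    embeddings = np.random.rand(10, 1536).astype(np.float32)  # 10 stored texts
    query = np.random.rand(1536).astype(np.float32)
    scores = np.dot(embeddings, query)
    top_3 = np.argsort(scores)[-3:][::-1]  # indices of the 3 best matches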
autogpt/memory/no_memory.py
ADDED
@@ -0,0 +1,66 @@
from typing import Optional, List, Any

from autogpt.memory.base import MemoryProviderSingleton


class NoMemory(MemoryProviderSingleton):
    def __init__(self, cfg):
        """
        Initializes the NoMemory provider.

        Args:
            cfg: The config object.

        Returns: None
        """
        pass

    def add(self, data: str) -> str:
        """
        Adds a data point to the memory. No action is taken in NoMemory.

        Args:
            data: The data to add.

        Returns: An empty string.
        """
        return ""

    def get(self, data: str) -> Optional[List[Any]]:
        """
        Gets the data from the memory that is most relevant to the given data.
        NoMemory always returns None.

        Args:
            data: The data to compare to.

        Returns: None
        """
        return None

    def clear(self) -> str:
        """
        Clears the memory. No action is taken in NoMemory.

        Returns: An empty string.
        """
        return ""

    def get_relevant(self, data: str, num_relevant: int = 5) -> Optional[List[Any]]:
        """
        Returns all the data in the memory that is relevant to the given data.
        NoMemory always returns None.

        Args:
            data: The data to compare to.
            num_relevant: The number of relevant data to return.

        Returns: None
        """
        return None

    def get_stats(self):
        """
        Returns: An empty dictionary as there are no stats in NoMemory.
        """
        return {}
autogpt/memory/pinecone.py
ADDED
@@ -0,0 +1,71 @@
import pinecone
from colorama import Fore, Style

from autogpt.logger import logger
from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding


class PineconeMemory(MemoryProviderSingleton):
    def __init__(self, cfg):
        pinecone_api_key = cfg.pinecone_api_key
        pinecone_region = cfg.pinecone_region
        pinecone.init(api_key=pinecone_api_key, environment=pinecone_region)
        dimension = 1536
        metric = "cosine"
        pod_type = "p1"
        table_name = "auto-gpt"
        # this assumes we don't start with memory.
        # for now this works.
        # we'll need a more complicated and robust system if we want to start with memory.
        self.vec_num = 0

        try:
            pinecone.whoami()
        except Exception as e:
            logger.typewriter_log(
                "FAILED TO CONNECT TO PINECONE",
                Fore.RED,
                Style.BRIGHT + str(e) + Style.RESET_ALL,
            )
            logger.double_check(
                "Please ensure you have setup and configured Pinecone properly for use. "
                + f"You can check out {Fore.CYAN + Style.BRIGHT}https://github.com/Torantulino/Auto-GPT#-pinecone-api-key-setup{Style.RESET_ALL} to ensure you've set up everything correctly."
            )
            exit(1)

        if table_name not in pinecone.list_indexes():
            pinecone.create_index(
                table_name, dimension=dimension, metric=metric, pod_type=pod_type
            )
        self.index = pinecone.Index(table_name)

    def add(self, data):
        vector = get_ada_embedding(data)
        # no metadata here. We may wish to change that long term.
        resp = self.index.upsert([(str(self.vec_num), vector, {"raw_text": data})])
        _text = f"Inserting data into memory at index: {self.vec_num}:\n data: {data}"
        self.vec_num += 1
        return _text

    def get(self, data):
        return self.get_relevant(data, 1)

    def clear(self):
        self.index.delete(deleteAll=True)
        return "Obliviated"

    def get_relevant(self, data, num_relevant=5):
        """
        Returns all the data in the memory that is relevant to the given data.
        :param data: The data to compare to.
        :param num_relevant: The number of relevant data to return. Defaults to 5
        """
        query_embedding = get_ada_embedding(data)
        results = self.index.query(
            query_embedding, top_k=num_relevant, include_metadata=True
        )
        sorted_results = sorted(results.matches, key=lambda x: x.score)
        return [str(item["metadata"]["raw_text"]) for item in sorted_results]

    def get_stats(self):
        return self.index.describe_index_stats()
autogpt/memory/redismem.py
ADDED
@@ -0,0 +1,151 @@
"""Redis memory provider."""
from typing import Any, List, Optional

import numpy as np
import redis
from colorama import Fore, Style
from redis.commands.search.field import TextField, VectorField
from redis.commands.search.indexDefinition import IndexDefinition, IndexType
from redis.commands.search.query import Query

from autogpt.logger import logger
from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding

SCHEMA = [
    TextField("data"),
    VectorField(
        "embedding",
        "HNSW",
        {"TYPE": "FLOAT32", "DIM": 1536, "DISTANCE_METRIC": "COSINE"},
    ),
]


class RedisMemory(MemoryProviderSingleton):
    def __init__(self, cfg):
        """
        Initializes the Redis memory provider.

        Args:
            cfg: The config object.

        Returns: None
        """
        redis_host = cfg.redis_host
        redis_port = cfg.redis_port
        redis_password = cfg.redis_password
        self.dimension = 1536
        self.redis = redis.Redis(
            host=redis_host,
            port=redis_port,
            password=redis_password,
            db=0,  # Cannot be changed
        )
        self.cfg = cfg

        # Check redis connection
        try:
            self.redis.ping()
        except redis.ConnectionError as e:
            logger.typewriter_log(
                "FAILED TO CONNECT TO REDIS",
                Fore.RED,
                Style.BRIGHT + str(e) + Style.RESET_ALL,
            )
            logger.double_check(
                "Please ensure you have setup and configured Redis properly for use. "
                + f"You can check out {Fore.CYAN + Style.BRIGHT}https://github.com/Torantulino/Auto-GPT#redis-setup{Style.RESET_ALL} to ensure you've set up everything correctly."
            )
            exit(1)

        if cfg.wipe_redis_on_start:
            self.redis.flushall()
        try:
            self.redis.ft(f"{cfg.memory_index}").create_index(
                fields=SCHEMA,
                definition=IndexDefinition(
                    prefix=[f"{cfg.memory_index}:"], index_type=IndexType.HASH
                ),
            )
        except Exception as e:
            print("Error creating Redis search index: ", e)
        existing_vec_num = self.redis.get(f"{cfg.memory_index}-vec_num")
        self.vec_num = int(existing_vec_num.decode("utf-8")) if existing_vec_num else 0

    def add(self, data: str) -> str:
        """
        Adds a data point to the memory.

        Args:
            data: The data to add.

        Returns: Message indicating that the data has been added.
        """
        if "Command Error:" in data:
            return ""
        vector = get_ada_embedding(data)
        vector = np.array(vector).astype(np.float32).tobytes()
        data_dict = {b"data": data, "embedding": vector}
        pipe = self.redis.pipeline()
        pipe.hset(f"{self.cfg.memory_index}:{self.vec_num}", mapping=data_dict)
        _text = (
            f"Inserting data into memory at index: {self.vec_num}:\n" f"data: {data}"
        )
        self.vec_num += 1
        pipe.set(f"{self.cfg.memory_index}-vec_num", self.vec_num)
        pipe.execute()
        return _text

    def get(self, data: str) -> Optional[List[Any]]:
        """
        Gets the data from the memory that is most relevant to the given data.

        Args:
            data: The data to compare to.

        Returns: The most relevant data.
        """
        return self.get_relevant(data, 1)

    def clear(self) -> str:
        """
        Clears the redis server.

        Returns: A message indicating that the memory has been cleared.
        """
        self.redis.flushall()
        return "Obliviated"

    def get_relevant(self, data: str, num_relevant: int = 5) -> Optional[List[Any]]:
        """
        Returns all the data in the memory that is relevant to the given data.
        Args:
            data: The data to compare to.
            num_relevant: The number of relevant data to return.

        Returns: A list of the most relevant data.
        """
        query_embedding = get_ada_embedding(data)
        base_query = f"*=>[KNN {num_relevant} @embedding $vector AS vector_score]"
        query = (
            Query(base_query)
            .return_fields("data", "vector_score")
            .sort_by("vector_score")
            .dialect(2)
        )
        query_vector = np.array(query_embedding).astype(np.float32).tobytes()

        try:
            results = self.redis.ft(f"{self.cfg.memory_index}").search(
                query, query_params={"vector": query_vector}
            )
        except Exception as e:
            print("Error calling Redis search: ", e)
            return None
        return [result.data for result in results.docs]

    def get_stats(self):
        """
        Returns: The stats of the memory index.
        """
        return self.redis.ft(f"{self.cfg.memory_index}").info()
autogpt/prompt.py
ADDED
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from autogpt.promptgenerator import PromptGenerator
|
2 |
+
|
3 |
+
|
4 |
+
def get_prompt() -> str:
|
5 |
+
"""
|
6 |
+
    This function generates a prompt string that includes various constraints,
    commands, resources, and performance evaluations.

    Returns:
        str: The generated prompt string.
    """

    # Initialize the PromptGenerator object
    prompt_generator = PromptGenerator()

    # Add constraints to the PromptGenerator object
    prompt_generator.add_constraint(
        "~4000 word limit for short term memory. Your short term memory is short, so"
        " immediately save important information to files."
    )
    prompt_generator.add_constraint(
        "If you are unsure how you previously did something or want to recall past"
        " events, thinking about similar events will help you remember."
    )
    prompt_generator.add_constraint("No user assistance")
    prompt_generator.add_constraint(
        'Exclusively use the commands listed in double quotes e.g. "command name"'
    )

    # Define the command list
    commands = [
        ("Google Search", "google", {"input": "<search>"}),
        (
            "Browse Website",
            "browse_website",
            {"url": "<url>", "question": "<what_you_want_to_find_on_website>"},
        ),
        (
            "Start GPT Agent",
            "start_agent",
            {"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"},
        ),
        (
            "Message GPT Agent",
            "message_agent",
            {"key": "<key>", "message": "<message>"},
        ),
        ("List GPT Agents", "list_agents", {}),
        ("Delete GPT Agent", "delete_agent", {"key": "<key>"}),
        ("Write to file", "write_to_file", {"file": "<file>", "text": "<text>"}),
        ("Read file", "read_file", {"file": "<file>"}),
        ("Append to file", "append_to_file", {"file": "<file>", "text": "<text>"}),
        ("Delete file", "delete_file", {"file": "<file>"}),
        ("Search Files", "search_files", {"directory": "<directory>"}),
        ("Evaluate Code", "evaluate_code", {"code": "<full_code_string>"}),
        (
            "Get Improved Code",
            "improve_code",
            {"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"},
        ),
        (
            "Write Tests",
            "write_tests",
            {"code": "<full_code_string>", "focus": "<list_of_focus_areas>"},
        ),
        ("Execute Python File", "execute_python_file", {"file": "<file>"}),
        (
            "Execute Shell Command, non-interactive commands only",
            "execute_shell",
            {"command_line": "<command_line>"},
        ),
        ("Task Complete (Shutdown)", "task_complete", {"reason": "<reason>"}),
        ("Generate Image", "generate_image", {"prompt": "<prompt>"}),
        ("Do Nothing", "do_nothing", {}),
    ]

    # Add commands to the PromptGenerator object
    for command_label, command_name, args in commands:
        prompt_generator.add_command(command_label, command_name, args)

    # Add resources to the PromptGenerator object
    prompt_generator.add_resource(
        "Internet access for searches and information gathering."
    )
    prompt_generator.add_resource("Long Term memory management.")
    prompt_generator.add_resource(
        "GPT-3.5 powered Agents for delegation of simple tasks."
    )
    prompt_generator.add_resource("File output.")

    # Add performance evaluations to the PromptGenerator object
    prompt_generator.add_performance_evaluation(
        "Continuously review and analyze your actions to ensure you are performing to"
        " the best of your abilities."
    )
    prompt_generator.add_performance_evaluation(
        "Constructively self-criticize your big-picture behavior constantly."
    )
    prompt_generator.add_performance_evaluation(
        "Reflect on past decisions and strategies to refine your approach."
    )
    prompt_generator.add_performance_evaluation(
        "Every command has a cost, so be smart and efficient. Aim to complete tasks in"
        " the least number of steps."
    )

    # Generate the prompt string
    return prompt_generator.generate_prompt_string()
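For orientation (an editorial sketch, not part of the commit): once the constraints, commands, resources, and evaluations above are registered, generate_prompt_string (defined in autogpt/promptgenerator.py below) renders plain text of roughly this shape, heavily abbreviated here:

Constraints:
1. ~4000 word limit for short term memory. ...
...
Commands:
1. Google Search: "google", args: "input": "<search>"
...
Resources:
...
Performance Evaluation:
...
You should only respond in JSON format as described below
Response Format:
{the response_format dict, JSON-formatted with indent=4}
Ensure the response can be parsed by Python json.loads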
autogpt/promptgenerator.py
ADDED
@@ -0,0 +1,134 @@
import json


class PromptGenerator:
    """
    A class for generating custom prompt strings based on constraints, commands,
    resources, and performance evaluations.
    """

    def __init__(self):
        """
        Initialize the PromptGenerator object with empty lists of constraints,
        commands, resources, and performance evaluations.
        """
        self.constraints = []
        self.commands = []
        self.resources = []
        self.performance_evaluation = []
        self.response_format = {
            "thoughts": {
                "text": "thought",
                "reasoning": "reasoning",
                "plan": "- short bulleted\n- list that conveys\n- long-term plan",
                "criticism": "constructive self-criticism",
                "speak": "thoughts summary to say to user",
            },
            "command": {"name": "command name", "args": {"arg name": "value"}},
        }

    def add_constraint(self, constraint):
        """
        Add a constraint to the constraints list.

        Args:
            constraint (str): The constraint to be added.
        """
        self.constraints.append(constraint)

    def add_command(self, command_label, command_name, args=None):
        """
        Add a command to the commands list with a label, name, and optional arguments.

        Args:
            command_label (str): The label of the command.
            command_name (str): The name of the command.
            args (dict, optional): A dictionary containing argument names and their
                values. Defaults to None.
        """
        if args is None:
            args = {}

        command_args = {arg_key: arg_value for arg_key, arg_value in args.items()}

        command = {
            "label": command_label,
            "name": command_name,
            "args": command_args,
        }

        self.commands.append(command)

    def _generate_command_string(self, command):
        """
        Generate a formatted string representation of a command.

        Args:
            command (dict): A dictionary containing command information.

        Returns:
            str: The formatted command string.
        """
        args_string = ", ".join(
            f'"{key}": "{value}"' for key, value in command["args"].items()
        )
        return f'{command["label"]}: "{command["name"]}", args: {args_string}'

    def add_resource(self, resource: str) -> None:
        """
        Add a resource to the resources list.

        Args:
            resource (str): The resource to be added.
        """
        self.resources.append(resource)

    def add_performance_evaluation(self, evaluation: str) -> None:
        """
        Add a performance evaluation item to the performance_evaluation list.

        Args:
            evaluation (str): The evaluation item to be added.
        """
        self.performance_evaluation.append(evaluation)

    def _generate_numbered_list(self, items, item_type="list") -> str:
        """
        Generate a numbered list from given items based on the item_type.

        Args:
            items (list): A list of items to be numbered.
            item_type (str, optional): The type of items in the list.
                Defaults to 'list'.

        Returns:
            str: The formatted numbered list.
        """
        if item_type == "command":
            return "\n".join(
                f"{i+1}. {self._generate_command_string(item)}"
                for i, item in enumerate(items)
            )
        else:
            return "\n".join(f"{i+1}. {item}" for i, item in enumerate(items))

    def generate_prompt_string(self) -> str:
        """
        Generate a prompt string based on the constraints, commands, resources,
        and performance evaluations.

        Returns:
            str: The generated prompt string.
        """
        formatted_response_format = json.dumps(self.response_format, indent=4)
        return (
            f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n"
            "Commands:\n"
            f"{self._generate_numbered_list(self.commands, item_type='command')}\n\n"
            f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n"
            "Performance Evaluation:\n"
            f"{self._generate_numbered_list(self.performance_evaluation)}\n\n"
            "You should only respond in JSON format as described below \nResponse"
            f" Format: \n{formatted_response_format} \nEnsure the response can be"
            " parsed by Python json.loads"
        )
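A minimal usage sketch of the class above (editorial, not part of the commit); it exercises only methods defined in this file:

from autogpt.promptgenerator import PromptGenerator

generator = PromptGenerator()
generator.add_constraint("No user assistance")
generator.add_command("Read file", "read_file", {"file": "<file>"})
generator.add_resource("File output.")
generator.add_performance_evaluation("Reflect on past decisions.")

# Prints the four numbered sections followed by the JSON response format.
print(generator.generate_prompt_string())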
autogpt/speak.py
ADDED
@@ -0,0 +1,120 @@
import os

import requests
from playsound import playsound

from autogpt.config import Config

import threading
from threading import Lock, Semaphore

import gtts

cfg = Config()

# Default voice IDs
default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]

# Retrieve custom voice IDs from the Config class
custom_voice_1 = cfg.elevenlabs_voice_1_id
custom_voice_2 = cfg.elevenlabs_voice_2_id

# Placeholder values that should be treated as empty
placeholders = {"your-voice-id"}

# Use custom voice IDs if provided and not placeholders, otherwise use default voice IDs
voices = [
    custom_voice_1
    if custom_voice_1 and custom_voice_1 not in placeholders
    else default_voices[0],
    custom_voice_2
    if custom_voice_2 and custom_voice_2 not in placeholders
    else default_voices[1],
]

tts_headers = {"Content-Type": "application/json", "xi-api-key": cfg.elevenlabs_api_key}

mutex_lock = Lock()  # Ensure only one sound is played at a time
queue_semaphore = Semaphore(
    1
)  # The amount of sounds to queue before blocking the main thread


def eleven_labs_speech(text, voice_index=0):
    """Speak text using elevenlabs.io's API"""
    tts_url = "https://api.elevenlabs.io/v1/text-to-speech/{voice_id}".format(
        voice_id=voices[voice_index]
    )
    formatted_message = {"text": text}
    response = requests.post(tts_url, headers=tts_headers, json=formatted_message)

    if response.status_code == 200:
        with mutex_lock:
            with open("speech.mpeg", "wb") as f:
                f.write(response.content)
            playsound("speech.mpeg", True)
            os.remove("speech.mpeg")
        return True
    else:
        print("Request failed with status code:", response.status_code)
        print("Response content:", response.content)
        return False


def brian_speech(text):
    """Speak text using Brian with the streamelements API"""
    tts_url = f"https://api.streamelements.com/kappa/v2/speech?voice=Brian&text={text}"
    response = requests.get(tts_url)

    if response.status_code == 200:
        with mutex_lock:
            with open("speech.mp3", "wb") as f:
                f.write(response.content)
            playsound("speech.mp3")
            os.remove("speech.mp3")
        return True
    else:
        print("Request failed with status code:", response.status_code)
        print("Response content:", response.content)
        return False


def gtts_speech(text):
    tts = gtts.gTTS(text)
    with mutex_lock:
        tts.save("speech.mp3")
        playsound("speech.mp3", True)
        os.remove("speech.mp3")


def macos_tts_speech(text, voice_index=0):
    if voice_index == 0:
        os.system(f'say "{text}"')
    else:
        if voice_index == 1:
            os.system(f'say -v "Ava (Premium)" "{text}"')
        else:
            os.system(f'say -v Samantha "{text}"')


def say_text(text, voice_index=0):
    def speak():
        if not cfg.elevenlabs_api_key:
            if cfg.use_mac_os_tts == "True":
                macos_tts_speech(text)
            elif cfg.use_brian_tts == "True":
                success = brian_speech(text)
                if not success:
                    gtts_speech(text)
            else:
                gtts_speech(text)
        else:
            success = eleven_labs_speech(text, voice_index)
            if not success:
                gtts_speech(text)

        queue_semaphore.release()

    queue_semaphore.acquire(True)
    thread = threading.Thread(target=speak)
    thread.start()
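A minimal usage sketch (editorial, not part of the commit). say_text is fire-and-forget: it blocks until the queue semaphore is free, speaks on a background thread, and releases the semaphore when playback finishes:

from autogpt.speak import say_text

say_text("Task complete.")  # backend chosen from Config: ElevenLabs, macOS say, Brian, or gTTS
say_text("Using the second voice.", voice_index=1)  # second entry in the voices list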
autogpt/spinner.py
ADDED
@@ -0,0 +1,38 @@
import itertools
import sys
import threading
import time


class Spinner:
    """A simple spinner class"""

    def __init__(self, message="Loading...", delay=0.1):
        """Initialize the spinner class"""
        self.spinner = itertools.cycle(["-", "/", "|", "\\"])
        self.delay = delay
        self.message = message
        self.running = False
        self.spinner_thread = None

    def spin(self):
        """Spin the spinner"""
        while self.running:
            sys.stdout.write(f"{next(self.spinner)} {self.message}\r")
            sys.stdout.flush()
            time.sleep(self.delay)
            sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")

    def __enter__(self):
        """Start the spinner"""
        self.running = True
        self.spinner_thread = threading.Thread(target=self.spin)
        self.spinner_thread.start()

    def __exit__(self, exc_type, exc_value, exc_traceback):
        """Stop the spinner"""
        self.running = False
        if self.spinner_thread is not None:
            self.spinner_thread.join()
        sys.stdout.write(f"\r{' ' * (len(self.message) + 2)}\r")
        sys.stdout.flush()
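A minimal usage sketch (editorial, not part of the commit). Spinner is a context manager: __enter__ starts the spinner thread and __exit__ stops it, joins it, and clears the line:

import time

from autogpt.spinner import Spinner

with Spinner("Thinking... "):
    time.sleep(2)  # stand-in for a slow API call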
autogpt/summary.py
ADDED
@@ -0,0 +1,69 @@
from autogpt.llm_utils import create_chat_completion


def summarize_text(driver, text, question):
    if not text:
        return "Error: No text to summarize"

    text_length = len(text)
    print(f"Text length: {text_length} characters")

    summaries = []
    chunks = list(split_text(text))

    scroll_ratio = 1 / len(chunks)
    for i, chunk in enumerate(chunks):
        scroll_to_percentage(driver, scroll_ratio * i)
        print(f"Summarizing chunk {i + 1} / {len(chunks)}")
        messages = [create_message(chunk, question)]

        summary = create_chat_completion(
            model="gpt-3.5-turbo",
            messages=messages,
            max_tokens=300,
        )
        summaries.append(summary)

    print(f"Summarized {len(chunks)} chunks.")

    combined_summary = "\n".join(summaries)
    messages = [create_message(combined_summary, question)]

    return create_chat_completion(
        model="gpt-3.5-turbo",
        messages=messages,
        max_tokens=300,
    )


def split_text(text, max_length=8192):
    paragraphs = text.split("\n")
    current_length = 0
    current_chunk = []

    for paragraph in paragraphs:
        if current_length + len(paragraph) + 1 <= max_length:
            current_chunk.append(paragraph)
            current_length += len(paragraph) + 1
        else:
            yield "\n".join(current_chunk)
            current_chunk = [paragraph]
            current_length = len(paragraph) + 1

    if current_chunk:
        yield "\n".join(current_chunk)


def create_message(chunk, question):
    return {
        "role": "user",
        "content": f'"""{chunk}""" Using the above text, please answer the following'
        f' question: "{question}" -- if the question cannot be answered using the text,'
        " please summarize the text.",
    }


def scroll_to_percentage(driver, ratio):
    if ratio < 0 or ratio > 1:
        raise ValueError("Percentage should be between 0 and 1")
    driver.execute_script(f"window.scrollTo(0, document.body.scrollHeight * {ratio});")
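A minimal sketch of the chunking behavior (editorial, not part of the commit). split_text is a generator that greedily packs newline-separated paragraphs into chunks of up to max_length characters; note a single paragraph longer than max_length is still yielded whole:

from autogpt.summary import split_text

text = "\n".join(f"paragraph {i}" for i in range(50))
chunks = list(split_text(text, max_length=100))
assert all(len(chunk) <= 100 for chunk in chunks)  # holds because every paragraph here is short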
autogpt/token_counter.py
ADDED
@@ -0,0 +1,72 @@
from typing import Dict, List

import tiktoken

from autogpt.logger import logger


def count_message_tokens(
    messages: List[Dict[str, str]], model: str = "gpt-3.5-turbo-0301"
) -> int:
    """
    Returns the number of tokens used by a list of messages.

    Args:
        messages (list): A list of messages, each of which is a dictionary
            containing the role and content of the message.
        model (str): The name of the model to use for tokenization.
            Defaults to "gpt-3.5-turbo-0301".

    Returns:
        int: The number of tokens used by the list of messages.
    """
    try:
        encoding = tiktoken.encoding_for_model(model)
    except KeyError:
        logger.warn("Warning: model not found. Using cl100k_base encoding.")
        encoding = tiktoken.get_encoding("cl100k_base")
    if model == "gpt-3.5-turbo":
        # !Note: gpt-3.5-turbo may change over time.
        # Returning num tokens assuming gpt-3.5-turbo-0301.
        return count_message_tokens(messages, model="gpt-3.5-turbo-0301")
    elif model == "gpt-4":
        # !Note: gpt-4 may change over time. Returning num tokens assuming gpt-4-0314.
        return count_message_tokens(messages, model="gpt-4-0314")
    elif model == "gpt-3.5-turbo-0301":
        tokens_per_message = (
            4  # every message follows <|start|>{role/name}\n{content}<|end|>\n
        )
        tokens_per_name = -1  # if there's a name, the role is omitted
    elif model == "gpt-4-0314":
        tokens_per_message = 3
        tokens_per_name = 1
    else:
        raise NotImplementedError(
            f"num_tokens_from_messages() is not implemented for model {model}.\n"
            " See https://github.com/openai/openai-python/blob/main/chatml.md for"
            " information on how messages are converted to tokens."
        )
    num_tokens = 0
    for message in messages:
        num_tokens += tokens_per_message
        for key, value in message.items():
            num_tokens += len(encoding.encode(value))
            if key == "name":
                num_tokens += tokens_per_name
    num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
    return num_tokens


def count_string_tokens(string: str, model_name: str) -> int:
    """
    Returns the number of tokens in a text string.

    Args:
        string (str): The text string.
        model_name (str): The name of the encoding to use. (e.g., "gpt-3.5-turbo")

    Returns:
        int: The number of tokens in the text string.
    """
    encoding = tiktoken.encoding_for_model(model_name)
    return len(encoding.encode(string))
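A minimal usage sketch (editorial, not part of the commit), assuming tiktoken can resolve the named encodings:

from autogpt.token_counter import count_message_tokens, count_string_tokens

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
print(count_message_tokens(messages))  # content tokens plus per-message and reply-priming overhead
print(count_string_tokens("Hello world", "gpt-3.5-turbo"))  # raw token count, no chat framing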
autogpt/utils.py
ADDED
@@ -0,0 +1,26 @@
import yaml
from colorama import Fore


def clean_input(prompt: str = ""):
    try:
        return input(prompt)
    except KeyboardInterrupt:
        print("You interrupted Auto-GPT")
        print("Quitting...")
        exit(0)


def validate_yaml_file(file: str):
    try:
        with open(file, encoding="utf-8") as fp:
            yaml.load(fp.read(), Loader=yaml.FullLoader)
    except FileNotFoundError:
        return (False, f"The file {Fore.CYAN}`{file}`{Fore.RESET} wasn't found")
    except yaml.YAMLError as e:
        return (
            False,
            f"There was an issue while trying to read your AI Settings file: {e}",
        )

    return (True, f"Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!")
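A minimal usage sketch (editorial, not part of the commit; the file name is illustrative). validate_yaml_file reports problems through its return value rather than raising:

from autogpt.utils import validate_yaml_file

valid, message = validate_yaml_file("ai_settings.yaml")
print(message)  # colorized success or failure description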
autogpt/web.py
ADDED
@@ -0,0 +1,85 @@
from selenium import webdriver
import autogpt.summary as summary
from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.options import Options
import logging
from pathlib import Path
from autogpt.config import Config

file_dir = Path(__file__).parent
cfg = Config()


def browse_website(url, question):
    driver, text = scrape_text_with_selenium(url)
    add_header(driver)
    summary_text = summary.summarize_text(driver, text, question)
    links = scrape_links_with_selenium(driver)

    # Limit links to 5
    if len(links) > 5:
        links = links[:5]
    close_browser(driver)
    return f"Answer gathered from website: {summary_text} \n \n Links: {links}", driver


def scrape_text_with_selenium(url):
    logging.getLogger("selenium").setLevel(logging.CRITICAL)

    options = Options()
    options.add_argument(
        "user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36"
    )
    driver = webdriver.Chrome(
        executable_path=ChromeDriverManager().install(), options=options
    )
    driver.get(url)

    WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.TAG_NAME, "body"))
    )

    # Get the HTML content directly from the browser's DOM
    page_source = driver.execute_script("return document.body.outerHTML;")
    soup = BeautifulSoup(page_source, "html.parser")

    for script in soup(["script", "style"]):
        script.extract()

    text = soup.get_text()
    lines = (line.strip() for line in text.splitlines())
    chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
    text = "\n".join(chunk for chunk in chunks if chunk)
    return driver, text


def scrape_links_with_selenium(driver):
    page_source = driver.page_source
    soup = BeautifulSoup(page_source, "html.parser")

    for script in soup(["script", "style"]):
        script.extract()

    hyperlinks = extract_hyperlinks(soup)

    return format_hyperlinks(hyperlinks)


def close_browser(driver):
    driver.quit()


def extract_hyperlinks(soup):
    return [(link.text, link["href"]) for link in soup.find_all("a", href=True)]


def format_hyperlinks(hyperlinks):
    return [f"{link_text} ({link_url})" for link_text, link_url in hyperlinks]


def add_header(driver):
    driver.execute_script(open(f"{file_dir}/js/overlay.js", "r").read())
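A minimal usage sketch (editorial, not part of the commit). browse_website launches a real Chrome instance via webdriver-manager, so it assumes Chrome, a chromedriver download, and network access; note it returns the already-quit driver alongside the answer text:

from autogpt.web import browse_website

answer, driver = browse_website("https://example.com", "What is this page about?")
print(answer)  # LLM summary plus up to five formatted links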
azure.yaml.template
ADDED
@@ -0,0 +1,7 @@
azure_api_type: azure_ad
azure_api_base: your-base-url-for-azure
azure_api_version: api-version-for-azure
azure_model_map:
  fast_llm_model_deployment_id: gpt35-deployment-id-for-azure
  smart_llm_model_deployment_id: gpt4-deployment-id-for-azure
  embedding_model_deployment_id: embedding-deployment-id-for-azure
docker-compose.yml
ADDED
@@ -0,0 +1,16 @@
# To boot the app run the following:
# docker-compose run auto-gpt
version: "3.9"

services:
  auto-gpt:
    depends_on:
      - redis
    build: ./
    volumes:
      - "./autogpt:/app"
      - ".env:/app/.env"
    profiles: ["exclude-from-up"]

  redis:
    image: "redis/redis-stack-server:latest"
main.py
ADDED
@@ -0,0 +1 @@
from autogpt import main
pyproject.toml
ADDED
@@ -0,0 +1,11 @@
[project]
name = "auto-gpt"
version = "0.1.0"
description = "A GPT-based AI agent"
readme = "README.md"

[tool.black]
line-length = 88
target-version = ['py310']
include = '\.pyi?$'
extend-exclude = ""