diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 0000000000000000000000000000000000000000..98f1e5b0fde6f4278c80ec9dc8ad52504988ef44
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,3 @@
+ko_fi: xtekky
+github: [xtekky, hlohaus]
+patreon: xtekky
diff --git a/.github/ISSUE_TEMPLATE/default_issue.md b/.github/ISSUE_TEMPLATE/default_issue.md
new file mode 100644
index 0000000000000000000000000000000000000000..3de2785b41b081cee39437dffdb20ffc7645d032
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/default_issue.md
@@ -0,0 +1,33 @@
+---
+name: New Issue
+about: 'Please use this template!'
+title: ''
+labels: bug
+assignees: xtekky
+
+---
+
+**Known Issues** // delete this section before submitting
+- you.com issue / fix: use a proxy or a VPN; your country is probably flagged
+- forefront account creation error / use your own session or wait for a fix
+
+
+**Bug description**
+What did you do? What happened? Which file did you try to run, and from which directory?
+Describe what you did after downloading the repo, such as changing into its directory and running a file.
+
+ex.
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Environment**
+- python version
+- location (are you in a Cloudflare-flagged country)?
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 0000000000000000000000000000000000000000..bbcbbe7d61558adde3cbfd0c7a63a67c27ed6d30
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/.github/workflows/close-inactive-issues.yml b/.github/workflows/close-inactive-issues.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d81b727aaa5d64208dcb912da070d8c070aa851a
--- /dev/null
+++ b/.github/workflows/close-inactive-issues.yml
@@ -0,0 +1,31 @@
+name: Close inactive issues
+
+on:
+ schedule:
+ - cron: "5 0 * * *"
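+    # "5 0 * * *" runs the job once a day at 00:05 UTC.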
+
+jobs:
+ close-issues:
+ runs-on: ubuntu-latest
+ permissions:
+ issues: write
+ pull-requests: write
+ steps:
+ - uses: actions/stale@v5
+ with:
+ days-before-issue-stale: 7
+ days-before-issue-close: 7
+
+ days-before-pr-stale: 7
+ days-before-pr-close: 7
+
+ stale-issue-label: "stale"
+ stale-pr-label: "stale"
+
+ stale-issue-message: "Bumping this issue because it has been open for 7 days with no activity. Closing automatically in 7 days unless it becomes active again."
+ close-issue-message: "Closing due to inactivity."
+
+ stale-pr-message: "Bumping this pull request because it has been open for 7 days with no activity. Closing automatically in 7 days unless it becomes active again."
+ close-pr-message: "Closing due to inactivity."
+
+ repo-token: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/copilot.yml b/.github/workflows/copilot.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6e06f6c7d083043844c8548d092f981017492346
--- /dev/null
+++ b/.github/workflows/copilot.yml
@@ -0,0 +1,53 @@
+name: AI Code Reviewer
+
+on:
+ workflow_run:
+ workflows: ["Unittest"]
+ types:
+ - completed
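+  # Runs after the Unittest workflow finishes so this job has access to repository
+  # secrets even for fork PRs; the PR number is handed over via the "pr_number" artifact.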
+
+jobs:
+ review:
+ runs-on: ubuntu-latest
+ permissions:
+ contents: read
+ pull-requests: write
+ steps:
+ - name: Checkout Repo
+ uses: actions/checkout@v3
+ - name: 'Download artifact'
+ uses: actions/github-script@v6
+ with:
+ script: |
+ let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ run_id: context.payload.workflow_run.id,
+ });
+ let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
+ return artifact.name == "pr_number"
+ })[0];
+ let download = await github.rest.actions.downloadArtifact({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ artifact_id: matchArtifact.id,
+ archive_format: 'zip',
+ });
+ let fs = require('fs');
+ fs.writeFileSync(`${process.env.GITHUB_WORKSPACE}/pr_number.zip`, Buffer.from(download.data));
+ - name: 'Unzip artifact'
+ run: unzip pr_number.zip
+ - name: Setup Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: "3.x"
+ cache: 'pip'
+ - name: Install Requirements
+ run: |
+ pip install -r requirements.txt
+ pip install PyGithub
+ - name: AI Code Review
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ GITHUB_REPOSITORY: ${{ github.repository }}
+ run: python -m etc.tool.copilot
diff --git a/.github/workflows/publish-to-pypi.yml b/.github/workflows/publish-to-pypi.yml
new file mode 100644
index 0000000000000000000000000000000000000000..d5481fd46e10801b74f2599f81aa7e40cab8d314
--- /dev/null
+++ b/.github/workflows/publish-to-pypi.yml
@@ -0,0 +1,52 @@
+name: Publish Python 🐍 distribution 📦 to PyPI
+
+on: push
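+# Runs on every push; both jobs below are skipped unless the ref is a tag.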
+
+env:
+ G4F_VERSION: ${{ github.ref_name }}
+
+jobs:
+ build:
+    name: Build distribution 📦
+ if: startsWith(github.ref, 'refs/tags/')
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: "3.x"
+ - name: Install pypa/build
+ run: >-
+ python3 -m
+ pip install
+ build
+ --user
+ - name: Build a binary wheel and a source tarball
+ run: python3 -m build
+ - name: Store the distribution packages
+ uses: actions/upload-artifact@v3
+ with:
+ name: python-package-distributions
+ path: dist/
+
+ publish-to-pypi:
+ name: >-
+      Publish distribution on PyPI 🐍
+ if: startsWith(github.ref, 'refs/tags/')
+ needs:
+ - build
+ runs-on: ubuntu-latest
+ environment:
+ name: pypi
+ url: https://pypi.org/p/g4f
+ permissions:
+ id-token: write
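+      # Required for PyPI trusted publishing: the action authenticates with a
+      # short-lived OIDC token instead of a stored API key.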
+ steps:
+ - name: Download all the dists
+ uses: actions/download-artifact@v3
+ with:
+ name: python-package-distributions
+ path: dist/
+    - name: Publish distribution 📦 to PyPI
+ uses: pypa/gh-action-pypi-publish@release/v1
\ No newline at end of file
diff --git a/.github/workflows/publish-workflow.yaml b/.github/workflows/publish-workflow.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6b8112f122e07255e9bddc96fe4e45cd378eda35
--- /dev/null
+++ b/.github/workflows/publish-workflow.yaml
@@ -0,0 +1,98 @@
+name: Publish Docker image
+
+on:
+ push:
+ tags:
+ - '**'
+
+jobs:
+ openapi:
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Python 3.13
+ uses: actions/setup-python@v4
+ with:
+ python-version: "3.13"
+ cache: 'pip'
+ - name: Install requirements
+ run: |
+ pip install fastapi uvicorn python-multipart
+ pip install -r requirements-min.txt
+ - name: Generate openapi.json
+ run: |
+ python -m etc.tool.openapi
+ - uses: actions/upload-artifact@v4
+ with:
+ name: openapi
+ path: openapi.json
+ publish:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Get metadata for Docker
+ id: metadata
+ uses: docker/metadata-action@v5
+ with:
+ images: |
+ hlohaus789/g4f
+ ghcr.io/${{ github.repository }}
+
+ - name: Log in to Docker Hub
+ uses: docker/login-action@f4ef78c080cd8ba55a85445d5b36e214a81df20a
+ with:
+ username: ${{ secrets.DOCKER_USERNAME }}
+ password: ${{ secrets.DOCKER_PASSWORD }}
+
+ - name: Login to GitHub Container Registry
+ uses: docker/login-action@v3
+ with:
+ registry: ghcr.io
+ username: ${{ github.repository_owner }}
+ password: ${{ secrets.GHCR_PAT }}
+
+ - name: Build and push armv7 image
+ uses: docker/build-push-action@v5
+ with:
+ context: .
+ file: docker/Dockerfile-armv7
+ platforms: linux/arm/v7
+ push: true
+ tags: |
+ hlohaus789/g4f:latest-armv7
+ hlohaus789/g4f:${{ github.ref_name }}-armv7
+ labels: ${{ steps.metadata.outputs.labels }}
+ build-args: |
+ G4F_VERSION=${{ github.ref_name }}
+
+ - name: Build and push small images
+ uses: docker/build-push-action@v5
+ with:
+ context: .
+ file: docker/Dockerfile-slim
+ platforms: linux/amd64,linux/arm64
+ push: true
+ tags: |
+ hlohaus789/g4f:latest-slim
+ hlohaus789/g4f:${{ github.ref_name }}-slim
+ labels: ${{ steps.metadata.outputs.labels }}
+ build-args: |
+ G4F_VERSION=${{ github.ref_name }}
+
+ - name: Build and push big images
+ uses: docker/build-push-action@v5
+ with:
+ context: .
+ file: docker/Dockerfile
+ platforms: linux/amd64,linux/arm64
+ push: true
+ tags: ${{ steps.metadata.outputs.tags }}
+ labels: ${{ steps.metadata.outputs.labels }}
+ build-args: |
+ G4F_VERSION=${{ github.ref_name }}
\ No newline at end of file
diff --git a/.github/workflows/unittest.yml b/.github/workflows/unittest.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1b0d5384e07bcf562947292850fe81f5fe659a9d
--- /dev/null
+++ b/.github/workflows/unittest.yml
@@ -0,0 +1,47 @@
+name: Unittest
+
+on:
+ pull_request:
+ types:
+ - opened
+ - synchronize
+ push:
+ branches:
+ - 'main'
+
+jobs:
+ build:
+ name: Build unittest
+ runs-on: ubuntu-latest
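+    # The suite runs twice: first on Python 3.8 with the minimal requirements,
+    # then on Python 3.12 with the full requirement set.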
+ steps:
+ - uses: actions/checkout@v4
+ - name: Set up Python 3.8
+ uses: actions/setup-python@v4
+ with:
+ python-version: "3.8"
+ cache: 'pip'
+ - name: Install min requirements
+ run: pip install -r requirements-min.txt
+ - name: Run tests
+ run: python -m etc.unittest
+ - name: Set up Python 3.12
+ uses: actions/setup-python@v4
+ with:
+ python-version: "3.12"
+ cache: 'pip'
+ - name: Install requirements
+ run: |
+ pip install -r requirements.txt
+ pip uninstall -y nodriver
+ - name: Run tests
+ run: python -m etc.unittest
+ - name: Save PR number
+ env:
+ PR_NUMBER: ${{ github.event.number }}
+ run: |
+ mkdir -p ./pr
+ echo $PR_NUMBER > ./pr/pr_number
+ - uses: actions/upload-artifact@v4
+ with:
+ name: pr_number
+ path: pr/
\ No newline at end of file
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..8d678a0f7de31c5a2ff7c7ade5a246142f6804fd
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,68 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
+
+# Ignore local python virtual environment
+venv/
+
+# Ignore streamlit_chat_app.py conversations pickle
+conversations.pkl
+*.pkl
+
+# Ignore accounts created by api's
+accounts.txt
+
+.idea/
+**/__pycache__/
+__pycache__/
+
+dist/
+*.log
+*.pyc
+*.egg-info/
+*.egg
+*.egg-info
+build
+
+test.py
+update.py
+cookie.json
+notes.txt
+close_issues.py
+xxx.py
+lab.py
+lab.js
+bing.py
+bing2.py
+.DS_Store
+lab/*
+lab
+tstt.py
+providerstest.py
+prv.py
+# Emacs crap
+*~
+x.js
+x.py
+info.txt
+local.py
+*.gguf
+image.py
+.buildozer
+hardir
+har_and_cookies
+node_modules
+models
+projects/windows/g4f
+doc.txt
+dist.py
+x.txt
+bench.py
+to-reverse.txt
+g4f/Provider/OpenaiChat2.py
+generated_images/
\ No newline at end of file
diff --git a/.gitpod.yml b/.gitpod.yml
new file mode 100644
index 0000000000000000000000000000000000000000..59ee634ff5f3c86afc38c7ca4a4cbb6b86d08625
--- /dev/null
+++ b/.gitpod.yml
@@ -0,0 +1,7 @@
+# Please adjust to your needs (see https://www.gitpod.io/docs/introduction/learn-gitpod/gitpod-yaml)
+# and commit this file to your remote git repository to share the goodness with others.
+
+# Learn more from ready-to-use templates: https://www.gitpod.io/docs/introduction/getting-started/quickstart
+
+tasks:
+ - init: pip install -r requirements.txt
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000000000000000000000000000000000..349d160d7df1759d9ac52478e7e7df8d34ab5524
--- /dev/null
+++ b/CODE_OF_CONDUCT.md
@@ -0,0 +1,128 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the
+ overall community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or
+ advances of any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email
+ address, without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+https://t.me/xtekky.
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series
+of actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or
+permanent ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within
+the community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.0, available at
+https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
+
+Community Impact Guidelines were inspired by [Mozilla's code of conduct
+enforcement ladder](https://github.com/mozilla/diversity).
+
+[homepage]: https://www.contributor-covenant.org
+
+For answers to common questions about this code of conduct, see the FAQ at
+https://www.contributor-covenant.org/faq. Translations are available at
+https://www.contributor-covenant.org/translations.
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000000000000000000000000000000000000..2ccd080d1899e2723abe34ad5672a3d2af2f9150
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,8 @@
+
+
+### Please follow these steps to contribute:
+1. Reverse engineer a website from this list: [sites-to-reverse](https://github.com/xtekky/gpt4free/issues/40)
+2. Add it to [./testing](https://github.com/xtekky/gpt4free/tree/main/testing)
+3. Refactor it and add it to [./g4f](https://github.com/xtekky/gpt4free/tree/main/g4f)
+
+### We will be grateful to see you as a contributor!
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
index 7f380618841b17503f7167d3414e1bdfb66080dd..3b6692cf7cd394e273860137f9e126bfbb43a68f 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,21 +1,89 @@
-# Use the base image
-FROM hlohaus789/g4f:latest
+FROM seleniarm/node-chromium
-# Set working directory
-WORKDIR /app
+ARG G4F_VERSION
+ARG G4F_USER=g4f
+ARG G4F_USER_ID=1000
+ARG G4F_NO_GUI
+ARG G4F_PASS=secret
-# Create necessary directories
-RUN mkdir -p /app/har_and_cookies /app/generated_images
+ENV G4F_VERSION $G4F_VERSION
+ENV G4F_USER $G4F_USER
+ENV G4F_USER_ID $G4F_USER_ID
+ENV G4F_NO_GUI $G4F_NO_GUI
-# Set volume mount points
-VOLUME ["/app/har_and_cookies", "/app/generated_images"]
+ENV SE_SCREEN_WIDTH 1850
+ENV PYTHONUNBUFFERED 1
+ENV G4F_DIR /app
+ENV G4F_LOGIN_URL http://localhost:7900/?autoconnect=1&resize=scale&password=$G4F_PASS
+ENV HOME /home/$G4F_USER
+ENV PATH $PATH:$HOME/.local/bin
+ENV SE_DOWNLOAD_DIR $HOME/Downloads
+ENV SEL_USER $G4F_USER
+ENV SEL_UID $G4F_USER_ID
+ENV SEL_GID $G4F_USER_ID
-# Expose the specified ports 8080 1337 7900
+USER root
+
+# If docker compose, install git
+RUN if [ "$G4F_VERSION" = "" ] ; then \
+ apt-get -qqy update && \
+ apt-get -qqy install git \
+ ; fi
+
+# Install Python3, pip, remove OpenJDK 11, clean up
+RUN apt-get -qqy update \
+ && apt-get -qqy install python3 python-is-python3 pip \
+ && apt-get -qyy remove openjdk-11-jre-headless \
+ && apt-get -qyy autoremove \
+ && apt-get -qyy clean \
+ && rm -rf /var/lib/apt/lists/* /var/cache/apt/*
+
+# Update entrypoint
+COPY docker/supervisor.conf /etc/supervisor/conf.d/selenium.conf
+COPY docker/supervisor-api.conf /etc/supervisor/conf.d/api.conf
+COPY docker/supervisor-gui.conf /etc/supervisor/conf.d/gui.conf
+
+# If no gui
+RUN if [ "$G4F_NO_GUI" ] ; then \
+ rm /etc/supervisor/conf.d/gui.conf \
+ ; fi
+
+# Change background image
+COPY docker/background.png /usr/share/images/fluxbox/ubuntu-light.png
+
+# Add user, fix permissions
+RUN groupadd -g $G4F_USER_ID $G4F_USER \
+ && useradd -rm -G sudo -u $G4F_USER_ID -g $G4F_USER_ID $G4F_USER \
+ && echo "${G4F_USER}:${G4F_PASS}" | chpasswd \
+ && mkdir "${SE_DOWNLOAD_DIR}" \
+ && chown "${G4F_USER_ID}:${G4F_USER_ID}" $SE_DOWNLOAD_DIR /var/run/supervisor /var/log/supervisor \
+ && chown "${G4F_USER_ID}:${G4F_USER_ID}" -R /opt/bin/ /usr/bin/chromedriver /opt/selenium/
+
+# Switch user
+USER $G4F_USER_ID
+
+# Set VNC password
+RUN mkdir -p ${HOME}/.vnc \
+ && x11vnc -storepasswd ${G4F_PASS} ${HOME}/.vnc/passwd
+
+# Set the working directory in the container.
+WORKDIR $G4F_DIR
+
+# Copy the project's requirements file into the container.
+COPY requirements.txt $G4F_DIR
+
+# Upgrade pip for the latest features and install the project's Python dependencies.
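+# --break-system-packages is required because the base image's Debian Python is marked externally managed (PEP 668).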
+RUN pip install --break-system-packages --upgrade pip \
+ && pip install --break-system-packages -r requirements.txt \
+ && pip install --break-system-packages \
+ undetected-chromedriver selenium-wire \
+ && pip uninstall -y --break-system-packages \
+ pywebview plyer
+
+# Copy the entire package into the container.
+ADD --chown=$G4F_USER:$G4F_USER g4f $G4F_DIR/g4f
+
+# Expose ports
EXPOSE 7860
-# Increase shared memory size
-# Note: The actual shm-size is set at runtime with --shm-size flag
-RUN mkdir -p /dev/shm && chmod 777 /dev/shm
-# Default command to run the container
-CMD ["sh", "-c", "exec $CMD"]
\ No newline at end of file
diff --git a/Dockerfile-armv7 b/Dockerfile-armv7
new file mode 100644
index 0000000000000000000000000000000000000000..866ac63bc2b9f06cba7ba1ca91cf8e3a7c7f2fe8
--- /dev/null
+++ b/Dockerfile-armv7
@@ -0,0 +1,67 @@
+FROM python:slim-bookworm
+
+ARG G4F_VERSION
+ARG G4F_USER=g4f
+ARG G4F_USER_ID=1000
+ARG PYDANTIC_VERSION=1.8.1
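+# Prebuilt pydantic wheels are not available for armv7, so a pinned version is compiled from source below.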
+
+ENV G4F_VERSION $G4F_VERSION
+ENV G4F_USER $G4F_USER
+ENV G4F_USER_ID $G4F_USER_ID
+ENV G4F_DIR /app
+
+RUN apt-get update && apt-get upgrade -y \
+ && apt-get install -y git \
+ && apt-get install --quiet --yes --no-install-recommends \
+ build-essential \
+# Add user and user group
+ && groupadd -g $G4F_USER_ID $G4F_USER \
+ && useradd -rm -G sudo -u $G4F_USER_ID -g $G4F_USER_ID $G4F_USER \
+ && mkdir -p /var/log/supervisor \
+ && chown "${G4F_USER_ID}:${G4F_USER_ID}" /var/log/supervisor \
+ && echo "${G4F_USER}:${G4F_USER}" | chpasswd \
+ && python -m pip install --upgrade pip
+
+USER $G4F_USER_ID
+WORKDIR $G4F_DIR
+
+ENV HOME /home/$G4F_USER
+ENV PATH "${HOME}/.local/bin:${PATH}"
+
+# Create app dir and copy the project's requirements file into it
+RUN mkdir -p $G4F_DIR
+COPY requirements-min.txt $G4F_DIR
+COPY requirements-slim.txt $G4F_DIR
+
+# Upgrade pip for the latest features and install the project's Python dependencies.
+RUN pip install --no-cache-dir -r requirements-min.txt \
+ && pip install --no-cache-dir --no-binary setuptools \
+ Cython==0.29.22 \
+ setuptools \
+ # Install PyDantic
+ && pip install \
+ -vvv \
+ --no-cache-dir \
+ --no-binary :all: \
+ --global-option=build_ext \
+ --global-option=-j8 \
+ pydantic==${PYDANTIC_VERSION}
+RUN cat requirements-slim.txt | xargs -n 1 pip install --no-cache-dir || true
+
+# Remove build packages
+RUN pip uninstall --yes \
+ Cython \
+ setuptools
+
+USER root
+
+# Clean up build deps
+RUN apt-get purge --auto-remove --yes \
+ build-essential \
+ && apt-get clean \
+ && rm --recursive --force /var/lib/apt/lists/* /tmp/* /var/tmp/*
+
+USER $G4F_USER_ID
+
+# Copy the entire package into the container.
+ADD --chown=$G4F_USER:$G4F_USER g4f $G4F_DIR/g4f
\ No newline at end of file
diff --git a/Dockerfile-slim b/Dockerfile-slim
new file mode 100644
index 0000000000000000000000000000000000000000..dfc3344dcd07e1e979f2c5cb33fbfaa89d9a83bd
--- /dev/null
+++ b/Dockerfile-slim
@@ -0,0 +1,39 @@
+FROM python:slim-bookworm
+
+ARG G4F_VERSION
+ARG G4F_USER=g4f
+ARG G4F_USER_ID=1000
+
+ENV G4F_VERSION $G4F_VERSION
+ENV G4F_USER $G4F_USER
+ENV G4F_USER_ID $G4F_USER_ID
+ENV G4F_DIR /app
+
+RUN apt-get update && apt-get upgrade -y \
+ && apt-get install -y git \
+# Add user and user group
+ && groupadd -g $G4F_USER_ID $G4F_USER \
+ && useradd -rm -G sudo -u $G4F_USER_ID -g $G4F_USER_ID $G4F_USER \
+ && mkdir -p /var/log/supervisor \
+ && chown "${G4F_USER_ID}:${G4F_USER_ID}" /var/log/supervisor \
+ && echo "${G4F_USER}:${G4F_USER}" | chpasswd \
+ && python -m pip install --upgrade pip \
+ && apt-get clean \
+ && rm --recursive --force /var/lib/apt/lists/* /tmp/* /var/tmp/*
+
+USER $G4F_USER_ID
+WORKDIR $G4F_DIR
+
+ENV HOME /home/$G4F_USER
+ENV PATH "${HOME}/.local/bin:${PATH}"
+
+# Create app dir and copy the project's requirements file into it
+RUN mkdir -p $G4F_DIR
+COPY requirements-slim.txt $G4F_DIR
+
+# Upgrade pip for the latest features and install the project's Python dependencies.
+RUN pip install --no-cache-dir -r requirements-slim.txt \
+ && pip install --no-cache-dir duckduckgo-search>=5.0
+
+# Copy the entire package into the container.
+ADD --chown=$G4F_USER:$G4F_USER g4f $G4F_DIR/g4f
\ No newline at end of file
diff --git a/LEGAL_NOTICE.md b/LEGAL_NOTICE.md
new file mode 100644
index 0000000000000000000000000000000000000000..50a1d141037c6983fff9a99c583c2c1597787b35
--- /dev/null
+++ b/LEGAL_NOTICE.md
@@ -0,0 +1,55 @@
+## Legal Notice
+
+This repository is **not associated with or endorsed** by the providers of the APIs contained herein. This project is intended **for educational purposes only**. It is a personal project aimed at learning and exploration. Owners of any included sites or services may contact me to request security improvements or the removal of their content from this repository.
+
+### **Affiliation Disclaimer**
+
+This repository is not associated with or endorsed by any of the API providers mentioned herein. All trademarks, API services, and other intellectual property referenced are the property of their respective owners. No claim of ownership or affiliation is made by this project.
+
+### **Liability Limitation**
+
+Under no circumstances shall the author of this repository be liable for any direct, indirect, incidental, special, consequential, or punitive damages (including but not limited to loss of profits, data, or use) arising out of or in connection with the repository. This limitation applies regardless of whether such damages were foreseeable or whether the author was advised of the possibility of such damages.
+
+### **No Warranties**
+
+This repository is provided on an "as is" and "as available" basis without any warranties of any kind, express or implied. This includes, but is not limited to, implied warranties of merchantability, fitness for a particular purpose, and non-infringement.
+
+### **User Responsibility**
+
+Users assume all risks associated with the use of this repository. They are solely responsible for any damage or loss (including financial loss) that results from the use or misuse of the repository and its contents.
+
+### **Legal Compliance**
+
+Users are responsible for ensuring that their use of the repository and its contents complies with all applicable local, state, national, and international laws and regulations.
+
+### **Indemnification**
+
+Users agree to indemnify, defend, and hold harmless the author from any claims, liabilities, damages, losses, or expenses (including legal fees) arising out of or in any way connected with their use of this repository, violation of these terms, or infringement of any intellectual property or other rights of any person or entity.
+
+### **No Endorsement**
+
+The inclusion of third-party content does not imply endorsement or recommendation of such content by the author.
+
+### **Governing Law and Jurisdiction**
+
+Any disputes arising out of or related to the use of this repository shall be governed by the laws of the author's jurisdiction, without regard to conflict of law principles.
+
+### **Severability**
+
+If any provision of this notice is found to be unlawful, void, or unenforceable, that provision shall be deemed severable from this notice and shall not affect the validity and enforceability of the remaining provisions.
+
+### **Acknowledgment of Understanding**
+
+By using this repository, users acknowledge that they have read, understood, and agree to be bound by these terms.
+
+### **Updates and Changes**
+
+The author reserves the right to modify, update, or remove any content, information, or features in this repository at any time without prior notice. Users are responsible for regularly reviewing the content and any changes made to this repository.
+
+### **Unforeseen Consequences**
+
+The author is not responsible for any consequences, damages, or losses arising from the use or misuse of this repository or the content provided by third-party APIs. Users are solely responsible for their actions and any repercussions that may follow.
+
+### **Educational Purpose**
+
+This project and its content are provided strictly for educational purposes. Users acknowledge that they are using the APIs and models at their own risk and agree to comply with all applicable laws and regulations.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..e72bfddabc15be5718a7cc061ac10e47741d8219
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
diff --git a/etc/tool/copilot.py b/etc/tool/copilot.py
new file mode 100644
--- /dev/null
+++ b/etc/tool/copilot.py
+import re
+import json
+
+def read_json(text: str) -> dict:
+    """
+    Parses a JSON code block from a string.
+
+    Args:
+        text (str): A string containing a JSON code block.
+
+    Returns:
+        dict: A dictionary parsed from the JSON code block.
+    """
+    match = re.search(r"```(json|)\n(?P<code>[\S\s]+?)\n```", text)
+ if match:
+ text = match.group("code")
+ try:
+ return json.loads(text.strip())
+ except json.JSONDecodeError:
+ print("No valid json:", text)
+ return {}
+
+def read_text(text: str) -> str:
+ """
+ Extracts text from a markdown code block.
+
+ Args:
+ text (str): A string containing a markdown code block.
+
+ Returns:
+ str: The extracted text.
+ """
+    match = re.search(r"```(markdown|)\n(?P<code>[\S\s]+?)\n```", text)
+    if match:
+ return match.group("code")
+
diff --git a/etc/tool/create_provider.py b/etc/tool/create_provider.py
new file mode 100644
--- /dev/null
+++ b/etc/tool/create_provider.py
+import sys, re
+from pathlib import Path
+from os import path
+
+sys.path.append(str(Path(__file__).parent.parent.parent))
+
+import g4f
+
+def read_code(text):
+    if match := re.search(r"```(python|py|)\n(?P<code>[\S\s]+?)\n```", text):
+        return match.group("code")
+
+def input_command():
+ print("Enter/Paste the cURL command. Ctrl-D or Ctrl-Z ( windows ) to save it.")
+ contents = []
+ while True:
+ try:
+ line = input()
+ except EOFError:
+ break
+ contents.append(line)
+ return "\n".join(contents)
+
+name = input("Name: ")
+provider_path = f"g4f/Provider/{name}.py"
+
+example = """
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ..typing import AsyncResult, Messages
+from .base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from .helper import format_prompt
+
+
+class {name}(AsyncGeneratorProvider, ProviderModelMixin):
+ label = ""
+ url = "https://example.com"
+ api_endpoint = "https://example.com/api/completion"
+ working = True
+ needs_auth = False
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = ''
+ models = ['', '']
+
+ model_aliases = {
+ "alias1": "model1",
+ }
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {{
+ "authority": "example.com",
+ "accept": "application/json",
+ "origin": cls.url,
+ "referer": f"{{cls.url}}/chat",
+ }}
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {{
+ "prompt": prompt,
+ "model": model,
+ }}
+ async with session.post(f"{{cls.url}}/api/chat", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk:
+ yield chunk.decode()
+"""
+
+if not path.isfile(provider_path):
+ command = input_command()
+
+ prompt = f"""
+Create a provider from a cURL command. The command is:
+```bash
+{command}
+```
+An example of a provider:
+```python
+{example}
+```
+The name for the provider class:
+{name}
+Replace "hello" with `format_prompt(messages)`.
+And replace "gpt-3.5-turbo" with `model`.
+"""
+
+ print("Create code...")
+ response = []
+ for chunk in g4f.ChatCompletion.create(
+ model=g4f.models.default,
+ messages=[{"role": "user", "content": prompt}],
+ timeout=300,
+ stream=True,
+ ):
+ print(chunk, end="", flush=True)
+ response.append(chunk)
+ print()
+ response = "".join(response)
+
+ if code := read_code(response):
+ with open(provider_path, "w") as file:
+ file.write(code)
+ print("Saved at:", provider_path)
+ with open("g4f/Provider/__init__.py", "a") as file:
+ file.write(f"\nfrom .{name} import {name}")
+else:
+ with open(provider_path, "r") as file:
+ code = file.read()
diff --git a/etc/tool/improve_code.py b/etc/tool/improve_code.py
new file mode 100644
index 0000000000000000000000000000000000000000..8578b478577c653545abf54355cd2275ab5d2204
--- /dev/null
+++ b/etc/tool/improve_code.py
@@ -0,0 +1,45 @@
+
+import sys, re
+from pathlib import Path
+from os import path
+
+sys.path.append(str(Path(__file__).parent.parent.parent))
+
+import g4f
+
+def read_code(text):
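+    # Pull the Python source out of the fenced code block in the model's reply.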
+    if match := re.search(r"```(python|py|)\n(?P<code>[\S\s]+?)\n```", text):
+ return match.group("code")
+
+path = input("Path: ")
+
+with open(path, "r") as file:
+ code = file.read()
+
+prompt = f"""
+Improve the code in this file:
+```py
+{code}
+```
+Don't remove anything.
+Add typehints if possible.
+Don't add any typehints to kwargs.
+Don't remove license comments.
+"""
+
+print("Create code...")
+response = []
+for chunk in g4f.ChatCompletion.create(
+ model=g4f.models.default,
+ messages=[{"role": "user", "content": prompt}],
+ timeout=300,
+ stream=True
+):
+ response.append(chunk)
+ print(chunk, end="", flush=True)
+print()
+response = "".join(response)
+
+if code := read_code(response):
+ with open(path, "w") as file:
+ file.write(code)
diff --git a/etc/tool/openapi.py b/etc/tool/openapi.py
new file mode 100644
index 0000000000000000000000000000000000000000..83359e4e7ebf7a6d0b8d0cdcde20848b48bf5f88
--- /dev/null
+++ b/etc/tool/openapi.py
@@ -0,0 +1,11 @@
+import json
+
+from g4f.api import create_app
+
+app = create_app()
+
+with open("openapi.json", "w") as f:
+ data = json.dumps(app.openapi())
+ f.write(data)
+
+print(f"openapi.json - {round(len(data)/1024, 2)} kbytes")
\ No newline at end of file
diff --git a/etc/tool/provider_init.py b/etc/tool/provider_init.py
new file mode 100644
index 0000000000000000000000000000000000000000..22f21d4d9ed32d442ee0cef21057f8928f81189b
--- /dev/null
+++ b/etc/tool/provider_init.py
@@ -0,0 +1,33 @@
+from pathlib import Path
+
+
+def main():
+ content = create_content()
+ with open("g4f/provider/__init__.py", "w", encoding="utf-8") as f:
+ f.write(content)
+
+
+def create_content():
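+    # Enumerate provider modules, skipping package plumbing; each module is
+    # expected to define a class named after its file stem.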
+ path = Path()
+ paths = path.glob("g4f/provider/*.py")
+ paths = [p for p in paths if p.name not in ["__init__.py", "base_provider.py"]]
+ classnames = [p.stem for p in paths]
+
+ import_lines = [f"from .{name} import {name}" for name in classnames]
+ import_content = "\n".join(import_lines)
+
+ classnames.insert(0, "BaseProvider")
+ all_content = [f' "{name}"' for name in classnames]
+ all_content = ",\n".join(all_content)
+ all_content = f"__all__ = [\n{all_content},\n]"
+
+ return f"""from .base_provider import BaseProvider
+{import_content}
+
+
+{all_content}
+"""
+
+
+if __name__ == "__main__":
+ main()
diff --git a/etc/tool/readme_table.py b/etc/tool/readme_table.py
new file mode 100644
index 0000000000000000000000000000000000000000..e89f861bed5d55adad12d9e9ef3c9e310b647af4
--- /dev/null
+++ b/etc/tool/readme_table.py
@@ -0,0 +1,152 @@
+import re
+from urllib.parse import urlparse
+import asyncio
+
+from g4f import models, ChatCompletion
+from g4f.providers.types import BaseRetryProvider, ProviderType
+from etc.testing._providers import get_providers
+from g4f import debug
+
+debug.logging = True
+
+async def test_async(provider: ProviderType):
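+    # Probe the provider with one short prompt and a 30-second timeout;
+    # any exception counts as a failed probe.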
+ if not provider.working:
+ return False
+ messages = [{"role": "user", "content": "Hello Assistant!"}]
+ try:
+ if "webdriver" in provider.get_parameters():
+ return False
+ response = await asyncio.wait_for(ChatCompletion.create_async(
+ model=models.default,
+ messages=messages,
+ provider=provider
+ ), 30)
+ return bool(response)
+ except Exception as e:
+ if debug.logging:
+ print(f"{provider.__name__}: {e.__class__.__name__}: {e}")
+ return False
+
+def test_async_list(providers: list[ProviderType]):
+ responses: list = [
+ asyncio.run(test_async(_provider))
+ for _provider in providers
+ ]
+ return responses
+
+def print_providers():
+
+ providers = get_providers()
+ responses = test_async_list(providers)
+
+ for type in ("GPT-4", "GPT-3.5", "Other"):
+ lines = [
+ "",
+ f"### {type}",
+ "",
+ "| Website | Provider | GPT-3.5 | GPT-4 | Stream | Status | Auth |",
+ "| ------ | ------- | ------- | ----- | ------ | ------ | ---- |",
+ ]
+ for is_working in (True, False):
+ for idx, _provider in enumerate(providers):
+ if is_working != _provider.working:
+ continue
+ do_continue = False
+ if type == "GPT-4" and _provider.supports_gpt_4:
+ do_continue = True
+ elif type == "GPT-3.5" and not _provider.supports_gpt_4 and _provider.supports_gpt_35_turbo:
+ do_continue = True
+ elif type == "Other" and not _provider.supports_gpt_4 and not _provider.supports_gpt_35_turbo:
+ do_continue = True
+ if not do_continue:
+ continue
+ netloc = urlparse(_provider.url).netloc.replace("www.", "")
+ website = f"[{netloc}]({_provider.url})"
+
+ provider_name = f"`g4f.Provider.{_provider.__name__}`"
+
+            has_gpt_35 = "✔️" if _provider.supports_gpt_35_turbo else "❌"
+            has_gpt_4 = "✔️" if _provider.supports_gpt_4 else "❌"
+            stream = "✔️" if _provider.supports_stream else "❌"
+            if _provider.working:
+                if responses[idx]:
+                    status = '![Active](https://img.shields.io/badge/Active-brightgreen)'
+                else:
+                    status = '![Unknown](https://img.shields.io/badge/Unknown-grey)'
+            else:
+                status = '![Inactive](https://img.shields.io/badge/Inactive-red)'
+            auth = "✔️" if _provider.needs_auth else "❌"
+
+ lines.append(
+ f"| {website} | {provider_name} | {has_gpt_35} | {has_gpt_4} | {stream} | {status} | {auth} |"
+ )
+ print("\n".join(lines))
+
+def print_models():
+ base_provider_names = {
+ "google": "Google",
+ "openai": "OpenAI",
+ "huggingface": "Huggingface",
+ "anthropic": "Anthropic",
+ "inflection": "Inflection",
+ "meta": "Meta",
+ }
+ provider_urls = {
+ "google": "https://gemini.google.com/",
+ "openai": "https://openai.com/",
+ "huggingface": "https://huggingface.co/",
+ "anthropic": "https://www.anthropic.com/",
+ "inflection": "https://inflection.ai/",
+ "meta": "https://llama.meta.com/",
+ }
+
+ lines = [
+ "| Model | Base Provider | Provider | Website |",
+ "| ----- | ------------- | -------- | ------- |",
+ ]
+ for name, model in models.ModelUtils.convert.items():
+ if name.startswith("gpt-3.5") or name.startswith("gpt-4"):
+ if name not in ("gpt-3.5-turbo", "gpt-4", "gpt-4-turbo"):
+ continue
+ name = re.split(r":|/", model.name)[-1]
+ if model.base_provider not in base_provider_names:
+ continue
+ base_provider = base_provider_names[model.base_provider]
+ if not isinstance(model.best_provider, BaseRetryProvider):
+ provider_name = f"g4f.Provider.{model.best_provider.__name__}"
+ else:
+ provider_name = f"{len(model.best_provider.providers)}+ Providers"
+ provider_url = provider_urls[model.base_provider]
+ netloc = urlparse(provider_url).netloc.replace("www.", "")
+ website = f"[{netloc}]({provider_url})"
+
+ lines.append(f"| {name} | {base_provider} | {provider_name} | {website} |")
+
+ print("\n".join(lines))
+
+def print_image_models():
+ lines = [
+ "| Label | Provider | Image Model | Vision Model | Website |",
+ "| ----- | -------- | ----------- | ------------ | ------- |",
+ ]
+ from g4f.gui.server.api import Api
+ for image_model in Api.get_image_models():
+ provider_url = image_model["url"]
+ netloc = urlparse(provider_url).netloc.replace("www.", "")
+ website = f"[{netloc}]({provider_url})"
+ label = image_model["provider"] if image_model["label"] is None else image_model["label"]
+ if image_model["image_model"] is None:
+ image_model["image_model"] = "ā"
+ if image_model["vision_model"] is None:
+ image_model["vision_model"] = "ā"
+ lines.append(f'| {label} | `g4f.Provider.{image_model["provider"]}` | {image_model["image_model"]}| {image_model["vision_model"]} | {website} |')
+
+ print("\n".join(lines))
+
+if __name__ == "__main__":
+ #print_providers()
+ #print("\n", "-" * 50, "\n")
+ #print_models()
+ print("\n", "-" * 50, "\n")
+ print_image_models()
\ No newline at end of file
diff --git a/etc/tool/translate_readme.py b/etc/tool/translate_readme.py
new file mode 100644
index 0000000000000000000000000000000000000000..e0a9b1f1c95ade97a7e1702531ecaa420e3d6ba8
--- /dev/null
+++ b/etc/tool/translate_readme.py
@@ -0,0 +1,88 @@
+
+import sys
+from pathlib import Path
+import asyncio
+
+sys.path.append(str(Path(__file__).parent.parent.parent))
+
+import g4f
+g4f.debug.logging = True
+from g4f.debug import access_token
+provider = g4f.Provider.OpenaiChat
+
+iso = "GE"
+language = "german"
+translate_prompt = f"""
+Translate this markdown document to {language}.
+Don't translate or change inline code examples.
+```md
+"""
+keep_note = "Keep this: [!Note] as [!Note].\n"
+blocklist = [
+    '## ©️ Copyright',
+    '## 🚀 Providers and Models',
+    '## 🔗 Related GPT4Free Projects'
+]
+allowlist = [
+ "### Other",
+ "### Models"
+]
+
+def read_text(text):
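+    # Keep only the lines between the first and last ``` fence, dropping the
+    # code-fence wrapper the model puts around its translation.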
+ start = end = 0
+ new = text.strip().split('\n')
+ for i, line in enumerate(new):
+ if line.startswith('```'):
+ if not start:
+ start = i + 1
+ end = i
+ return '\n'.join(new[start:end]).strip()
+
+async def translate(text):
+ prompt = translate_prompt + text.strip() + '\n```'
+ if "[!Note]" in text:
+ prompt = keep_note + prompt
+ result = read_text(await provider.create_async(
+ model="",
+ messages=[{"role": "user", "content": prompt}],
+ access_token=access_token
+ ))
+ if text.endswith("```") and not result.endswith("```"):
+ result += "\n```"
+ return result
+
+async def translate_part(part, i):
+ blocklisted = False
+ for headline in blocklist:
+ if headline in part:
+ blocklisted = True
+ if blocklisted:
+ lines = part.split('\n')
+ lines[0] = await translate(lines[0])
+ part = '\n'.join(lines)
+ for trans in allowlist:
+ if trans in part:
+ part = part.replace(trans, await translate(trans))
+ else:
+ part = await translate(part)
+ print(f"[{i}] translated")
+ return part
+
+async def translate_readme(readme) -> str:
+ parts = readme.split('\n## ')
+ print(f"{len(parts)} parts...")
+ parts = await asyncio.gather(
+ *[translate_part("## " + part, i) for i, part in enumerate(parts)]
+ )
+ return "\n\n".join(parts)
+
+with open("README.md", "r") as fp:
+ readme = fp.read()
+
+print("Translate readme...")
+readme = asyncio.run(translate_readme(readme))
+
+file = f"README-{iso}.md"
+with open(file, "w") as fp:
+ fp.write(readme)
+print(f'"{file}" saved')
\ No newline at end of file
diff --git a/etc/tool/vercel.py b/etc/tool/vercel.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5ce964c428ade2716bb5443c4d6025e71a6c379
--- /dev/null
+++ b/etc/tool/vercel.py
@@ -0,0 +1,103 @@
+import json
+import re
+from typing import Any
+
+import quickjs
+from curl_cffi import requests
+
+session = requests.Session(impersonate="chrome107")
+
+
+def get_model_info() -> dict[str, Any]:
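+    # The model info is embedded in the site's JavaScript chunks, so it is
+    # scraped from the pages served by sdk.vercel.ai rather than an API.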
+ url = "https://sdk.vercel.ai"
+ response = session.get(url)
+ html = response.text
+ paths_regex = r"static\/chunks.+?\.js"
diff --git a/g4f/Provider/Phind.py b/g4f/Provider/Phind.py
new file mode 100644
--- /dev/null
+++ b/g4f/Provider/Phind.py
+            text = await response.text()
+            match = re.search(r'<script id="__NEXT_DATA__" type="application/json">(?P<json>[\S\s]+?)</script>', text)
+            data = json.loads(match.group("json"))
+            challenge_seeds = data["props"]["pageProps"]["challengeSeeds"]
+
+ prompt = messages[-1]["content"]
+ data = {
+ "question": prompt,
+ "question_history": [
+ message["content"] for message in messages[:-1] if message["role"] == "user"
+ ],
+ "answer_history": [
+ message["content"] for message in messages if message["role"] == "assistant"
+ ],
+ "webResults": [],
+ "options": {
+ "date": datetime.now().strftime("%d.%m.%Y"),
+ "language": "en-US",
+ "detailed": True,
+ "anonUserId": "",
+ "answerModel": "GPT-4" if model.startswith("gpt-4") else "Phind-34B",
+ "creativeMode": creative_mode,
+ "customLinks": []
+ },
+ "context": "\n".join([message["content"] for message in messages if message["role"] == "system"]),
+ }
+ data["challenge"] = generate_challenge(data, **challenge_seeds)
+ async with session.post(f"https://https.api.phind.com/infer/", headers=headers, json=data) as response:
+ new_line = False
+ async for line in response.iter_lines():
+ if line.startswith(b"data: "):
+ chunk = line[6:]
+                    if chunk.startswith(b'<PHIND_DONE/>'):
+                        break
diff --git a/g4f/Provider/HuggingFace.py b/g4f/Provider/HuggingFace.py
new file mode 100644
--- /dev/null
+++ b/g4f/Provider/HuggingFace.py
+def format_prompt(messages: Messages) -> str:
+    question = " ".join([messages[-1]["content"], *[message["content"] for message in messages if message["role"] == "system"]])
+    history = "".join([
+        f"<s>[INST]{messages[idx-1]['content']} [/INST] {message['content']}</s>"
+        for idx, message in enumerate(messages)
+        if message["role"] == "assistant"
+    ])
+    return f"{history}<s>[INST] {question} [/INST]"
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/HuggingFace2.py b/g4f/Provider/needs_auth/HuggingFace2.py
new file mode 100644
index 0000000000000000000000000000000000000000..847d459b17f3523d4217f98fca6fb8599c6b8a6c
--- /dev/null
+++ b/g4f/Provider/needs_auth/HuggingFace2.py
@@ -0,0 +1,28 @@
+from __future__ import annotations
+
+from .OpenaiAPI import OpenaiAPI
+from ..HuggingChat import HuggingChat
+from ...typing import AsyncResult, Messages
+
+class HuggingFace2(OpenaiAPI):
+ label = "HuggingFace (Inference API)"
+ url = "https://huggingface.co"
+ working = True
+ default_model = "meta-llama/Llama-3.2-11B-Vision-Instruct"
+ default_vision_model = default_model
+ models = [
+ *HuggingChat.models
+ ]
+
+ @classmethod
+ def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ api_base: str = "https://api-inference.huggingface.co/v1",
+ max_tokens: int = 500,
+ **kwargs
+ ) -> AsyncResult:
+ return super().create_async_generator(
+ model, messages, api_base=api_base, max_tokens=max_tokens, **kwargs
+ )
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/MetaAI.py b/g4f/Provider/needs_auth/MetaAI.py
new file mode 100644
index 0000000000000000000000000000000000000000..568de701f7e3648fb1c8fa63f44141115bba9d93
--- /dev/null
+++ b/g4f/Provider/needs_auth/MetaAI.py
@@ -0,0 +1,239 @@
+from __future__ import annotations
+
+import json
+import uuid
+import random
+import time
+from typing import Dict, List
+
+from aiohttp import ClientSession, BaseConnector
+
+from ...typing import AsyncResult, Messages, Cookies
+from ...requests import raise_for_status, DEFAULT_HEADERS
+from ...image import ImageResponse, ImagePreview
+from ...errors import ResponseError
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt, get_connector, format_cookies
+
+class Sources():
+ def __init__(self, link_list: List[Dict[str, str]]) -> None:
+ self.list = link_list
+
+ def __str__(self) -> str:
+ return "\n\n" + ("\n".join([f"[{link['title']}]({link['link']})" for link in self.list]))
+
+class AbraGeoBlockedError(Exception):
+ pass
+
+class MetaAI(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "Meta AI"
+ url = "https://www.meta.ai"
+ working = True
+ default_model = ''
+
+ def __init__(self, proxy: str = None, connector: BaseConnector = None):
+ self.session = ClientSession(connector=get_connector(connector, proxy), headers=DEFAULT_HEADERS)
+ self.cookies: Cookies = None
+ self.access_token: str = None
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ async for chunk in cls(proxy).prompt(format_prompt(messages)):
+ yield chunk
+
+ async def update_access_token(self, birthday: str = "1999-01-01"):
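+        # Accept the terms of service as a temporary user (the dummy birthday
+        # satisfies the TOS mutation) to obtain an access token for graph.meta.ai.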
+ url = "https://www.meta.ai/api/graphql/"
+ payload = {
+ "lsd": self.lsd,
+ "fb_api_caller_class": "RelayModern",
+ "fb_api_req_friendly_name": "useAbraAcceptTOSForTempUserMutation",
+ "variables": json.dumps({
+ "dob": birthday,
+ "icebreaker_type": "TEXT",
+ "__relay_internal__pv__WebPixelRatiorelayprovider": 1,
+ }),
+ "doc_id": "7604648749596940",
+ }
+ headers = {
+ "x-fb-friendly-name": "useAbraAcceptTOSForTempUserMutation",
+ "x-fb-lsd": self.lsd,
+ "x-asbd-id": "129477",
+ "alt-used": "www.meta.ai",
+ "sec-fetch-site": "same-origin"
+ }
+ async with self.session.post(url, headers=headers, cookies=self.cookies, data=payload) as response:
+ await raise_for_status(response, "Fetch access_token failed")
+ auth_json = await response.json(content_type=None)
+ self.access_token = auth_json["data"]["xab_abra_accept_terms_of_service"]["new_temp_user_auth"]["access_token"]
+
+ async def prompt(self, message: str, cookies: Cookies = None) -> AsyncResult:
+ if self.cookies is None:
+ await self.update_cookies(cookies)
+ if cookies is not None:
+ self.access_token = None
+ if self.access_token is None and cookies is None:
+ await self.update_access_token()
+ if self.access_token is None:
+ url = "https://www.meta.ai/api/graphql/"
+ payload = {"lsd": self.lsd, 'fb_dtsg': self.dtsg}
+ headers = {'x-fb-lsd': self.lsd}
+ else:
+ url = "https://graph.meta.ai/graphql?locale=user"
+ payload = {"access_token": self.access_token}
+ headers = {}
+ headers = {
+ 'content-type': 'application/x-www-form-urlencoded',
+ 'cookie': format_cookies(self.cookies),
+ 'origin': 'https://www.meta.ai',
+ 'referer': 'https://www.meta.ai/',
+ 'x-asbd-id': '129477',
+ 'x-fb-friendly-name': 'useAbraSendMessageMutation',
+ **headers
+ }
+ payload = {
+ **payload,
+ 'fb_api_caller_class': 'RelayModern',
+ 'fb_api_req_friendly_name': 'useAbraSendMessageMutation',
+ "variables": json.dumps({
+ "message": {"sensitive_string_value": message},
+ "externalConversationId": str(uuid.uuid4()),
+ "offlineThreadingId": generate_offline_threading_id(),
+ "suggestedPromptIndex": None,
+ "flashVideoRecapInput": {"images": []},
+ "flashPreviewInput": None,
+ "promptPrefix": None,
+ "entrypoint": "ABRA__CHAT__TEXT",
+ "icebreaker_type": "TEXT",
+ "__relay_internal__pv__AbraDebugDevOnlyrelayprovider": False,
+ "__relay_internal__pv__WebPixelRatiorelayprovider": 1,
+ }),
+ 'server_timestamps': 'true',
+ 'doc_id': '7783822248314888'
+ }
+ async with self.session.post(url, headers=headers, data=payload) as response:
+ await raise_for_status(response, "Fetch response failed")
+ last_snippet_len = 0
+ fetch_id = None
+ async for line in response.content:
+ if b"Something Went Wrong
" in line:
+ raise ResponseError("Response: Something Went Wrong")
+ try:
+ json_line = json.loads(line)
+ except json.JSONDecodeError:
+ continue
+ if json_line.get("errors"):
+ raise RuntimeError("\n".join([error.get("message") for error in json_line.get("errors")]))
+ bot_response_message = json_line.get("data", {}).get("node", {}).get("bot_response_message", {})
+ streaming_state = bot_response_message.get("streaming_state")
+ fetch_id = bot_response_message.get("fetch_id") or fetch_id
+ if streaming_state in ("STREAMING", "OVERALL_DONE"):
+ imagine_card = bot_response_message.get("imagine_card")
+ if imagine_card is not None:
+ imagine_session = imagine_card.get("session")
+ if imagine_session is not None:
+ imagine_medias = imagine_session.get("media_sets", {}).pop().get("imagine_media")
+ if imagine_medias is not None:
+ image_class = ImageResponse if streaming_state == "OVERALL_DONE" else ImagePreview
+ yield image_class([media["uri"] for media in imagine_medias], imagine_medias[0]["prompt"])
+ snippet = bot_response_message["snippet"]
+ new_snippet_len = len(snippet)
+ if new_snippet_len > last_snippet_len:
+ yield snippet[last_snippet_len:]
+ last_snippet_len = new_snippet_len
+ #if last_streamed_response is None:
+ # if attempts > 3:
+ # raise Exception("MetaAI is having issues and was not able to respond (Server Error)")
+ # access_token = await self.get_access_token()
+ # return await self.prompt(message=message, attempts=attempts + 1)
+ if fetch_id is not None:
+ sources = await self.fetch_sources(fetch_id)
+ if sources is not None:
+ yield sources
+
+ async def update_cookies(self, cookies: Cookies = None):
+ async with self.session.get("https://www.meta.ai/", cookies=cookies) as response:
+ await raise_for_status(response, "Fetch home failed")
+ text = await response.text()
+ if "AbraGeoBlockedError" in text:
+ raise AbraGeoBlockedError("Meta AI isn't available yet in your country")
+ if cookies is None:
+ cookies = {
+ "_js_datr": self.extract_value(text, "_js_datr"),
+ "abra_csrf": self.extract_value(text, "abra_csrf"),
+ "datr": self.extract_value(text, "datr"),
+ }
+ self.lsd = self.extract_value(text, start_str='"LSD",[],{"token":"', end_str='"}')
+ self.dtsg = self.extract_value(text, start_str='"DTSGInitialData",[],{"token":"', end_str='"}')
+ self.cookies = cookies
+
+ async def fetch_sources(self, fetch_id: str) -> Sources:
+ if self.access_token is None:
+ url = "https://www.meta.ai/api/graphql/"
+ payload = {"lsd": self.lsd, 'fb_dtsg': self.dtsg}
+ headers = {'x-fb-lsd': self.lsd}
+ else:
+ url = "https://graph.meta.ai/graphql?locale=user"
+ payload = {"access_token": self.access_token}
+ headers = {}
+ payload = {
+ **payload,
+ "fb_api_caller_class": "RelayModern",
+ "fb_api_req_friendly_name": "AbraSearchPluginDialogQuery",
+ "variables": json.dumps({"abraMessageFetchID": fetch_id}),
+ "server_timestamps": "true",
+ "doc_id": "6946734308765963",
+ }
+ headers = {
+ "authority": "graph.meta.ai",
+ "x-fb-friendly-name": "AbraSearchPluginDialogQuery",
+ **headers
+ }
+ async with self.session.post(url, headers=headers, cookies=self.cookies, data=payload) as response:
+ await raise_for_status(response, "Fetch sources failed")
+ text = await response.text()
+ if "Something Went Wrong
" in text:
+ raise ResponseError("Response: Something Went Wrong")
+ try:
+ response_json = json.loads(text)
+ message = response_json["data"]["message"]
+ if message is not None:
+ searchResults = message["searchResults"]
+ if searchResults is not None:
+ return Sources(searchResults["references"])
+ except (KeyError, TypeError, json.JSONDecodeError) as e:
+ raise RuntimeError(f"Response: {text}") from e
+
+ @staticmethod
+ def extract_value(text: str, key: str = None, start_str: str = None, end_str: str = '",') -> str:
+ if start_str is None:
+ start_str = f'{key}":{{"value":"'
+ start = text.find(start_str)
+ if start >= 0:
+ start += len(start_str)
+ end = text.find(end_str, start)
+ if end >= 0:
+ return text[start:end]
+
+def generate_offline_threading_id() -> str:
+ """
+ Generates an offline threading ID.
+
+ Returns:
+ str: The generated offline threading ID.
+ """
+ # Generate a random 64-bit integer
+ random_value = random.getrandbits(64)
+
+ # Get the current timestamp in milliseconds
+ timestamp = int(time.time() * 1000)
+
+ # Combine timestamp and random value
+ threading_id = (timestamp << 22) | (random_value & ((1 << 22) - 1))
+
+ return str(threading_id)
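For reference, the ID layout above packs the millisecond timestamp into the high bits and keeps 22 random low bits, so the timestamp can be recovered by shifting. A minimal sketch (the decoder helper is illustrative, not part of the provider):

def decode_threading_id_timestamp(threading_id: str) -> int:
    # The low 22 bits are random; everything above them is the ms timestamp.
    return int(threading_id) >> 22

tid = generate_offline_threading_id()
print(decode_threading_id_timestamp(tid))  # roughly int(time.time() * 1000)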
diff --git a/g4f/Provider/needs_auth/MetaAIAccount.py b/g4f/Provider/needs_auth/MetaAIAccount.py
new file mode 100644
index 0000000000000000000000000000000000000000..0a586006546a94ae06854a97a0682db0794f4751
--- /dev/null
+++ b/g4f/Provider/needs_auth/MetaAIAccount.py
@@ -0,0 +1,23 @@
+from __future__ import annotations
+
+from ...typing import AsyncResult, Messages, Cookies
+from ..helper import format_prompt, get_cookies
+from .MetaAI import MetaAI
+
+class MetaAIAccount(MetaAI):
+ needs_auth = True
+ parent = "MetaAI"
+ image_models = ["meta"]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ cookies: Cookies = None,
+ **kwargs
+ ) -> AsyncResult:
+ cookies = get_cookies(".meta.ai", True, True) if cookies is None else cookies
+ async for chunk in cls(proxy).prompt(format_prompt(messages), cookies):
+ yield chunk
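A hedged usage sketch for this provider, assuming Meta AI cookies from a logged-in browser session (the async-generator interface is taken from the code above; nothing else is guaranteed):

import asyncio

async def main():
    messages = [{"role": "user", "content": "Hello"}]
    # Without explicit cookies, get_cookies(".meta.ai", ...) reads them from the browser.
    async for chunk in MetaAIAccount.create_async_generator("meta", messages):
        print(chunk, end="")

asyncio.run(main())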
diff --git a/g4f/Provider/needs_auth/OpenaiAPI.py b/g4f/Provider/needs_auth/OpenaiAPI.py
new file mode 100644
index 0000000000000000000000000000000000000000..83268b6d1ecd2cacd6642a825c1650eb05a06f83
--- /dev/null
+++ b/g4f/Provider/needs_auth/OpenaiAPI.py
@@ -0,0 +1,126 @@
+from __future__ import annotations
+
+import json
+
+from ..helper import filter_none
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin, FinishReason
+from ...typing import Union, Optional, AsyncResult, Messages, ImageType
+from ...requests import StreamSession, raise_for_status
+from ...errors import MissingAuthError, ResponseError
+from ...image import to_data_uri
+
+class OpenaiAPI(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "OpenAI API"
+ url = "https://platform.openai.com"
+ working = True
+ needs_auth = True
+ supports_message_history = True
+ supports_system_message = True
+ default_model = ""
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
+ image: ImageType = None,
+ api_key: str = None,
+ api_base: str = "https://api.openai.com/v1",
+ temperature: float = None,
+ max_tokens: int = None,
+ top_p: float = None,
+ stop: Union[str, list[str]] = None,
+ stream: bool = False,
+ headers: dict = None,
+ impersonate: str = None,
+ extra_data: dict = {},
+ **kwargs
+ ) -> AsyncResult:
+ if cls.needs_auth and api_key is None:
+ raise MissingAuthError('Add an "api_key"')
+ if image is not None:
+ if not model and hasattr(cls, "default_vision_model"):
+ model = cls.default_vision_model
+ messages[-1]["content"] = [
+ {
+ "type": "image_url",
+ "image_url": {"url": to_data_uri(image)}
+ },
+ {
+ "type": "text",
+ "text": messages[-1]["content"]
+ }
+ ]
+ async with StreamSession(
+ proxies={"all": proxy},
+ headers=cls.get_headers(stream, api_key, headers),
+ timeout=timeout,
+ impersonate=impersonate,
+ ) as session:
+ data = filter_none(
+ messages=messages,
+ model=cls.get_model(model),
+ temperature=temperature,
+ max_tokens=max_tokens,
+ top_p=top_p,
+ stop=stop,
+ stream=stream,
+ **extra_data
+ )
+ async with session.post(f"{api_base.rstrip('/')}/chat/completions", json=data) as response:
+ await raise_for_status(response)
+ if not stream:
+ data = await response.json()
+ cls.raise_error(data)
+ choice = data["choices"][0]
+ if "content" in choice["message"]:
+ yield choice["message"]["content"].strip()
+ finish = cls.read_finish_reason(choice)
+ if finish is not None:
+ yield finish
+ else:
+ first = True
+ async for line in response.iter_lines():
+ if line.startswith(b"data: "):
+ chunk = line[6:]
+ if chunk == b"[DONE]":
+ break
+ data = json.loads(chunk)
+ cls.raise_error(data)
+ choice = data["choices"][0]
+ if "content" in choice["delta"] and choice["delta"]["content"]:
+ delta = choice["delta"]["content"]
+ if first:
+ delta = delta.lstrip()
+ if delta:
+ first = False
+ yield delta
+ finish = cls.read_finish_reason(choice)
+ if finish is not None:
+ yield finish
+
+ @staticmethod
+ def read_finish_reason(choice: dict) -> Optional[FinishReason]:
+ if "finish_reason" in choice and choice["finish_reason"] is not None:
+ return FinishReason(choice["finish_reason"])
+
+ @staticmethod
+ def raise_error(data: dict):
+ if "error_message" in data:
+ raise ResponseError(data["error_message"])
+ elif "error" in data:
+ raise ResponseError(f'Error {data["error"]["code"]}: {data["error"]["message"]}')
+
+ @classmethod
+ def get_headers(cls, stream: bool, api_key: str = None, headers: dict = None) -> dict:
+ return {
+ "Accept": "text/event-stream" if stream else "application/json",
+ "Content-Type": "application/json",
+ **(
+ {"Authorization": f"Bearer {api_key}"}
+ if api_key is not None else {}
+ ),
+ **({} if headers is None else headers)
+ }
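The streaming branch above consumes Server-Sent Events: each payload line is "data: <json>" and "data: [DONE]" terminates the stream. A self-contained sketch of that parsing logic, mirroring the loop in create_async_generator:

import json

def parse_sse_chunks(lines):
    # Each payload line is b"data: <json>"; b"data: [DONE]" ends the stream.
    for line in lines:
        if not line.startswith(b"data: "):
            continue
        chunk = line[6:]
        if chunk == b"[DONE]":
            break
        data = json.loads(chunk)
        delta = data["choices"][0].get("delta", {}).get("content")
        if delta:
            yield delta

sample = [b'data: {"choices":[{"delta":{"content":"Hi"}}]}', b'data: [DONE]']
print("".join(parse_sse_chunks(sample)))  # -> "Hi"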
diff --git a/g4f/Provider/needs_auth/OpenaiAccount.py b/g4f/Provider/needs_auth/OpenaiAccount.py
new file mode 100644
index 0000000000000000000000000000000000000000..16bfff66d895bbe43b793a9cba47b6f585ae0089
--- /dev/null
+++ b/g4f/Provider/needs_auth/OpenaiAccount.py
@@ -0,0 +1,8 @@
+from __future__ import annotations
+
+from .OpenaiChat import OpenaiChat
+
+class OpenaiAccount(OpenaiChat):
+ needs_auth = True
+ parent = "OpenaiChat"
+ image_models = ["dall-e"]
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/OpenaiChat.py b/g4f/Provider/needs_auth/OpenaiChat.py
new file mode 100644
index 0000000000000000000000000000000000000000..37bdf0742c8c20cacff281182872af53365382cf
--- /dev/null
+++ b/g4f/Provider/needs_auth/OpenaiChat.py
@@ -0,0 +1,558 @@
+from __future__ import annotations
+
+import re
+import asyncio
+import uuid
+import json
+import base64
+import time
+import requests
+from copy import copy
+
+try:
+ import nodriver
+ from nodriver.cdp.network import get_response_body
+ has_nodriver = True
+except ImportError:
+ has_nodriver = False
+
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...typing import AsyncResult, Messages, Cookies, ImageType, AsyncIterator
+from ...requests.raise_for_status import raise_for_status
+from ...requests import StreamSession
+from ...requests import get_nodriver
+from ...image import ImageResponse, ImageRequest, to_image, to_bytes, is_accepted_format
+from ...errors import MissingAuthError
+from ...providers.response import BaseConversation, FinishReason, SynthesizeData
+from ..helper import format_cookies
+from ..openai.har_file import get_request_config, NoValidHarFileError
+from ..openai.har_file import RequestConfig, arkReq, arkose_url, start_url, conversation_url, backend_url, backend_anon_url
+from ..openai.proofofwork import generate_proof_token
+from ..openai.new import get_requirements_token
+from ... import debug
+
+DEFAULT_HEADERS = {
+ "accept": "*/*",
+ "accept-encoding": "gzip, deflate, br, zstd",
+ "accept-language": "en-US,en;q=0.5",
+ "referer": "https://chatgpt.com/",
+ "sec-ch-ua": "\"Brave\";v=\"123\", \"Not:A-Brand\";v=\"8\", \"Chromium\";v=\"123\"",
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": "\"Windows\"",
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "sec-gpc": "1",
+ "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36"
+}
+
+class OpenaiChat(AsyncGeneratorProvider, ProviderModelMixin):
+ """A class for creating and managing conversations with OpenAI chat service"""
+
+ label = "OpenAI ChatGPT"
+ url = "https://chatgpt.com"
+ working = True
+ needs_auth = True
+ supports_gpt_4 = True
+ supports_message_history = True
+ supports_system_message = True
+ default_model = "auto"
+ default_vision_model = "gpt-4o"
+ fallback_models = [default_model, "gpt-4", "gpt-4o", "gpt-4o-mini", "gpt-4o-canmore", "o1-preview", "o1-mini"]
+ vision_models = fallback_models
+ image_models = fallback_models
+ synthesize_content_type = "audio/mpeg"
+
+ _api_key: str = None
+ _headers: dict = None
+ _cookies: Cookies = None
+ _expires: int = None
+
+ @classmethod
+ def get_models(cls):
+ if not cls.models:
+ try:
+ response = requests.get(f"{cls.url}/backend-anon/models")
+ response.raise_for_status()
+ data = response.json()
+ cls.models = [model.get("slug") for model in data.get("models")]
+ except Exception:
+ cls.models = cls.fallback_models
+ return cls.models
+
+ @classmethod
+ async def upload_image(
+ cls,
+ session: StreamSession,
+ headers: dict,
+ image: ImageType,
+ image_name: str = None
+ ) -> ImageRequest:
+ """
+ Upload an image to the service and get the download URL
+
+ Args:
+ session: The StreamSession object to use for requests
+ headers: The headers to include in the requests
+ image: The image to upload, either a PIL Image object or a bytes object
+
+ Returns:
+ An ImageRequest object that contains the download URL, file name, and other data
+ """
+ # Convert the image to a PIL Image object and get the extension
+ data_bytes = to_bytes(image)
+ image = to_image(data_bytes)
+ extension = image.format.lower()
+ data = {
+ "file_name": "" if image_name is None else image_name,
+ "file_size": len(data_bytes),
+ "use_case": "multimodal"
+ }
+ # Post the image data to the service and get the image data
+ async with session.post(f"{cls.url}/backend-api/files", json=data, headers=headers) as response:
+ cls._update_request_args(session)
+ await raise_for_status(response, "Create file failed")
+ image_data = {
+ **data,
+ **await response.json(),
+ "mime_type": is_accepted_format(data_bytes),
+ "extension": extension,
+ "height": image.height,
+ "width": image.width
+ }
+ # Put the image bytes to the upload URL and check the status
+ async with session.put(
+ image_data["upload_url"],
+ data=data_bytes,
+ headers={
+ "Content-Type": image_data["mime_type"],
+ "x-ms-blob-type": "BlockBlob"
+ }
+ ) as response:
+ await raise_for_status(response, "Send file failed")
+ # Post the file ID to the service and get the download URL
+ async with session.post(
+ f"{cls.url}/backend-api/files/{image_data['file_id']}/uploaded",
+ json={},
+ headers=headers
+ ) as response:
+ cls._update_request_args(session)
+ await raise_for_status(response, "Get download url failed")
+ image_data["download_url"] = (await response.json())["download_url"]
+ return ImageRequest(image_data)
+
+ @classmethod
+ def create_messages(cls, messages: Messages, image_request: ImageRequest = None, system_hints: list = None):
+ """
+ Create a list of messages for the user input
+
+ Args:
+ messages: The list of messages to convert
+ image_request: The image request object, if any
+ system_hints: Optional system hints attached to each message
+
+ Returns:
+ A list of messages with the user input and the image, if any
+ """
+ # Create a message object with the user role and the content
+ messages = [{
+ "author": {"role": message["role"]},
+ "content": {"content_type": "text", "parts": [message["content"]]},
+ "id": str(uuid.uuid4()),
+ "create_time": int(time.time()),
+ "id": str(uuid.uuid4()),
+ "metadata": {"serialization_metadata": {"custom_symbol_offsets": []}, "system_hints": system_hints},
+ } for message in messages]
+
+ # Check if there is an image response
+ if image_request is not None:
+ # Change content in last user message
+ messages[-1]["content"] = {
+ "content_type": "multimodal_text",
+ "parts": [{
+ "asset_pointer": f"file-service://{image_request.get('file_id')}",
+ "height": image_request.get("height"),
+ "size_bytes": image_request.get("file_size"),
+ "width": image_request.get("width"),
+ }, messages[-1]["content"]["parts"][0]]
+ }
+ # Add the metadata object with the attachments
+ messages[-1]["metadata"] = {
+ "attachments": [{
+ "height": image_request.get("height"),
+ "id": image_request.get("file_id"),
+ "mimeType": image_request.get("mime_type"),
+ "name": image_request.get("file_name"),
+ "size": image_request.get("file_size"),
+ "width": image_request.get("width"),
+ }]
+ }
+ return messages
+
+ @classmethod
+ async def get_generated_image(cls, session: StreamSession, headers: dict, element: dict, prompt: str = None) -> ImageResponse:
+ """
+ Retrieves the image response based on the message content.
+
+ This method processes the message content to extract image information and retrieves the
+ corresponding image from the backend API. It then returns an ImageResponse object containing
+ the image URL and the prompt used to generate the image.
+
+ Args:
+ session (StreamSession): The StreamSession object used for making HTTP requests.
+ headers (dict): HTTP headers to be used for the request.
+ element (dict): The message part that contains the image information.
+
+ Returns:
+ ImageResponse: An object containing the image URL and the prompt, or None if no image is found.
+
+ Raises:
+ RuntimeError: If there's an error in downloading the image, including issues with the HTTP request or response.
+ """
+ try:
+ prompt = element["metadata"]["dalle"]["prompt"]
+ file_id = element["asset_pointer"].split("file-service://", 1)[1]
+ except TypeError:
+ return
+ except Exception as e:
+ raise RuntimeError(f"No Image: {e.__class__.__name__}: {e}")
+ try:
+ async with session.get(f"{cls.url}/backend-api/files/{file_id}/download", headers=headers) as response:
+ cls._update_request_args(session)
+ await raise_for_status(response)
+ download_url = (await response.json())["download_url"]
+ return ImageResponse(download_url, prompt)
+ except Exception as e:
+ raise RuntimeError(f"Error in downloading image: {e}")
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 180,
+ cookies: Cookies = None,
+ auto_continue: bool = False,
+ history_disabled: bool = False,
+ action: str = "next",
+ conversation_id: str = None,
+ conversation: Conversation = None,
+ parent_id: str = None,
+ image: ImageType = None,
+ image_name: str = None,
+ return_conversation: bool = False,
+ max_retries: int = 3,
+ web_search: bool = False,
+ **kwargs
+ ) -> AsyncResult:
+ """
+ Create an asynchronous generator for the conversation.
+
+ Args:
+ model (str): The model name.
+ messages (Messages): The list of previous messages.
+ proxy (str): Proxy to use for requests.
+ timeout (int): Timeout for requests.
+ api_key (str): Access token for authentication.
+ cookies (dict): Cookies to use for authentication.
+ auto_continue (bool): Flag to automatically continue the conversation.
+ history_disabled (bool): Flag to disable history and training.
+ action (str): Type of action ('next', 'continue', 'variant').
+ conversation_id (str): ID of the conversation.
+ parent_id (str): ID of the parent message.
+ image (ImageType): Image to include in the conversation.
+ return_conversation (bool): Flag to include response fields in the output.
+ **kwargs: Additional keyword arguments.
+
+ Yields:
+ AsyncResult: Asynchronous results from the generator.
+
+ Raises:
+ RuntimeError: If an error occurs during processing.
+ """
+ await cls.login(proxy)
+
+ async with StreamSession(
+ proxy=proxy,
+ impersonate="chrome",
+ timeout=timeout
+ ) as session:
+ try:
+ image_request = await cls.upload_image(session, cls._headers, image, image_name) if image else None
+ except Exception as e:
+ image_request = None
+ debug.log("OpenaiChat: Upload image failed")
+ debug.log(f"{e.__class__.__name__}: {e}")
+ model = cls.get_model(model)
+ if conversation is None:
+ conversation = Conversation(conversation_id, str(uuid.uuid4()) if parent_id is None else parent_id)
+ else:
+ conversation = copy(conversation)
+ if cls._api_key is None:
+ auto_continue = False
+ conversation.finish_reason = None
+ while conversation.finish_reason is None:
+ async with session.post(
+ f"{cls.url}/backend-anon/sentinel/chat-requirements"
+ if cls._api_key is None else
+ f"{cls.url}/backend-api/sentinel/chat-requirements",
+ json={"p": get_requirements_token(RequestConfig.proof_token) if RequestConfig.proof_token else None},
+ headers=cls._headers
+ ) as response:
+ cls._update_request_args(session)
+ await raise_for_status(response)
+ chat_requirements = await response.json()
+ need_turnstile = chat_requirements.get("turnstile", {}).get("required", False)
+ need_arkose = chat_requirements.get("arkose", {}).get("required", False)
+ chat_token = chat_requirements.get("token")
+
+ if need_arkose and RequestConfig.arkose_token is None:
+ await get_request_config(proxy)
+ cls._create_request_args(RequestConfig.cookies, RequestConfig.headers)
+ cls._set_api_key(RequestConfig.access_token)
+ if RequestConfig.arkose_token is None:
+ raise MissingAuthError("No arkose token found in .har file")
+
+ if "proofofwork" in chat_requirements:
+ proofofwork = generate_proof_token(
+ **chat_requirements["proofofwork"],
+ user_agent=cls._headers.get("user-agent"),
+ proof_token=RequestConfig.proof_token
+ )
+ for text in (
+ f"Arkose: {'False' if not need_arkose else RequestConfig.arkose_token[:12]+'...'}",
+ f"Proofofwork: {'False' if proofofwork is None else proofofwork[:12]+'...'}",
+ f"AccessToken: {'False' if cls._api_key is None else cls._api_key[:12]+'...'}",
+ ):
+ debug.log(text)
+ data = {
+ "action": action,
+ "messages": None,
+ "parent_message_id": conversation.message_id,
+ "model": model,
+ "paragen_cot_summary_display_override": "allow",
+ "history_and_training_disabled": history_disabled and not auto_continue and not return_conversation,
+ "conversation_mode": {"kind":"primary_assistant"},
+ "websocket_request_id": str(uuid.uuid4()),
+ "supported_encodings": ["v1"],
+ "supports_buffering": True,
+ "system_hints": ["search"] if web_search else None
+ }
+ if conversation.conversation_id is not None:
+ data["conversation_id"] = conversation.conversation_id
+ debug.log(f"OpenaiChat: Use conversation: {conversation.conversation_id}")
+ if action != "continue":
+ messages = messages if conversation_id is None else [messages[-1]]
+ data["messages"] = cls.create_messages(messages, image_request, ["search"] if web_search else None)
+ headers = {
+ **cls._headers,
+ "accept": "text/event-stream",
+ "content-type": "application/json",
+ "openai-sentinel-chat-requirements-token": chat_token,
+ }
+ if RequestConfig.arkose_token:
+ headers["openai-sentinel-arkose-token"] = RequestConfig.arkose_token
+ if proofofwork is not None:
+ headers["openai-sentinel-proof-token"] = proofofwork
+ if need_turnstile and RequestConfig.turnstile_token is not None:
+ headers['openai-sentinel-turnstile-token'] = RequestConfig.turnstile_token
+ async with session.post(
+ f"{cls.url}/backend-anon/conversation"
+ if cls._api_key is None else
+ f"{cls.url}/backend-api/conversation",
+ json=data,
+ headers=headers
+ ) as response:
+ cls._update_request_args(session)
+ if response.status == 403 and max_retries > 0:
+ max_retries -= 1
+ debug.log(f"Retry: Error {response.status}: {await response.text()}")
+ await asyncio.sleep(5)
+ continue
+ await raise_for_status(response)
+ if return_conversation:
+ yield conversation
+ async for line in response.iter_lines():
+ async for chunk in cls.iter_messages_line(session, line, conversation):
+ yield chunk
+ if not history_disabled:
+ yield SynthesizeData(cls.__name__, {
+ "conversation_id": conversation.conversation_id,
+ "message_id": conversation.message_id,
+ "voice": "maple",
+ })
+ if auto_continue and conversation.finish_reason == "max_tokens":
+ conversation.finish_reason = None
+ action = "continue"
+ await asyncio.sleep(5)
+ else:
+ break
+ yield FinishReason(conversation.finish_reason)
+
+ @classmethod
+ async def iter_messages_line(cls, session: StreamSession, line: bytes, fields: Conversation) -> AsyncIterator:
+ if not line.startswith(b"data: "):
+ return
+ elif line.startswith(b"data: [DONE]"):
+ if fields.finish_reason is None:
+ fields.finish_reason = "error"
+ return
+ try:
+ line = json.loads(line[6:])
+ except json.JSONDecodeError:
+ return
+ if isinstance(line, dict) and "v" in line:
+ v = line.get("v")
+ if isinstance(v, str) and fields.is_recipient:
+ yield v
+ elif isinstance(v, list) and fields.is_recipient:
+ for m in v:
+ if m.get("p") == "/message/content/parts/0":
+ yield m.get("v")
+ elif m.get("p") == "/message/metadata":
+ fields.finish_reason = m.get("v", {}).get("finish_details", {}).get("type")
+ break
+ elif isinstance(v, dict):
+ if fields.conversation_id is None:
+ fields.conversation_id = v.get("conversation_id")
+ debug.log(f"OpenaiChat: New conversation: {fields.conversation_id}")
+ m = v.get("message", {})
+ fields.is_recipient = m.get("recipient") == "all"
+ if fields.is_recipient:
+ c = m.get("content", {})
+ if c.get("content_type") == "multimodal_text":
+ generated_images = []
+ for element in c.get("parts"):
+ if isinstance(element, dict) and element.get("content_type") == "image_asset_pointer":
+ image = cls.get_generated_image(session, cls._headers, element)
+ if image is not None:
+ generated_images.append(image)
+ for image_response in await asyncio.gather(*generated_images):
+ yield image_response
+ if m.get("author", {}).get("role") == "assistant":
+ fields.message_id = v.get("message", {}).get("id")
+ return
+ if "error" in line and line.get("error"):
+ raise RuntimeError(line.get("error"))
+
+ @classmethod
+ async def synthesize(cls, params: dict) -> AsyncIterator[bytes]:
+ await cls.login()
+ async with StreamSession(
+ impersonate="chrome",
+ timeout=900
+ ) as session:
+ async with session.get(
+ f"{cls.url}/backend-api/synthesize",
+ params=params,
+ headers=cls._headers
+ ) as response:
+ await raise_for_status(response)
+ async for chunk in response.iter_content():
+ yield chunk
+
+ @classmethod
+ async def login(cls, proxy: str = None):
+ if cls._expires is not None and cls._expires < time.time():
+ cls._headers = cls._api_key = None
+ try:
+ await get_request_config(proxy)
+ cls._create_request_args(RequestConfig.cookies, RequestConfig.headers)
+ cls._set_api_key(RequestConfig.access_token)
+ except NoValidHarFileError:
+ if has_nodriver:
+ await cls.nodriver_auth(proxy)
+ else:
+ raise
+
+ @classmethod
+ async def nodriver_auth(cls, proxy: str = None):
+ browser = await get_nodriver(proxy=proxy)
+ page = browser.main_tab
+ def on_request(event: nodriver.cdp.network.RequestWillBeSent):
+ if event.request.url == start_url or event.request.url.startswith(conversation_url):
+ RequestConfig.access_request_id = event.request_id
+ RequestConfig.headers = event.request.headers
+ elif event.request.url in (backend_url, backend_anon_url):
+ if "OpenAI-Sentinel-Proof-Token" in event.request.headers:
+ RequestConfig.proof_token = json.loads(base64.b64decode(
+ event.request.headers["OpenAI-Sentinel-Proof-Token"].split("gAAAAAB", 1)[-1].encode()
+ ).decode())
+ if "OpenAI-Sentinel-Turnstile-Token" in event.request.headers:
+ RequestConfig.turnstile_token = event.request.headers["OpenAI-Sentinel-Turnstile-Token"]
+ if "Authorization" in event.request.headers:
+ RequestConfig.access_token = event.request.headers["Authorization"].split()[-1]
+ elif event.request.url == arkose_url:
+ RequestConfig.arkose_request = arkReq(
+ arkURL=event.request.url,
+ arkBx=None,
+ arkHeader=event.request.headers,
+ arkBody=event.request.post_data,
+ userAgent=event.request.headers.get("user-agent")
+ )
+ await page.send(nodriver.cdp.network.enable())
+ page.add_handler(nodriver.cdp.network.RequestWillBeSent, on_request)
+ page = await browser.get(cls.url)
+ try:
+ if RequestConfig.access_request_id is not None:
+ body = await page.send(get_response_body(RequestConfig.access_request_id))
+ if isinstance(body, tuple) and body:
+ body = body[0]
+ if body:
+ match = re.search(r'"accessToken":"(.*?)"', body)
+ if match:
+ RequestConfig.access_token = match.group(1)
+ except KeyError:
+ pass
+ for c in await page.send(nodriver.cdp.network.get_cookies([cls.url])):
+ RequestConfig.cookies[c.name] = c.value
+ user_agent = await page.evaluate("window.navigator.userAgent")
+ await page.select("#prompt-textarea", 240)
+ while True:
+ if RequestConfig.proof_token:
+ break
+ await asyncio.sleep(1)
+ await page.close()
+ cls._create_request_args(RequestConfig.cookies, RequestConfig.headers, user_agent=user_agent)
+ cls._set_api_key(RequestConfig.access_token)
+
+ @staticmethod
+ def get_default_headers() -> dict:
+ return {
+ **DEFAULT_HEADERS,
+ "content-type": "application/json",
+ }
+
+ @classmethod
+ def _create_request_args(cls, cookies: Cookies = None, headers: dict = None, user_agent: str = None):
+ cls._headers = cls.get_default_headers() if headers is None else headers
+ if user_agent is not None:
+ cls._headers["user-agent"] = user_agent
+ cls._cookies = {} if cookies is None else cookies
+ cls._update_cookie_header()
+
+ @classmethod
+ def _update_request_args(cls, session: StreamSession):
+ for c in session.cookie_jar if hasattr(session, "cookie_jar") else session.cookies.jar:
+ cls._cookies[c.key if hasattr(c, "key") else c.name] = c.value
+ cls._update_cookie_header()
+
+ @classmethod
+ def _set_api_key(cls, api_key: str):
+ cls._api_key = api_key
+ cls._expires = int(time.time()) + 60 * 60 * 4
+ if api_key:
+ cls._headers["authorization"] = f"Bearer {api_key}"
+
+ @classmethod
+ def _update_cookie_header(cls):
+ cls._headers["cookie"] = format_cookies(cls._cookies)
+
+class Conversation(BaseConversation):
+ """
+ Class to encapsulate response fields.
+ """
+ def __init__(self, conversation_id: str = None, message_id: str = None, finish_reason: str = None):
+ self.conversation_id = conversation_id
+ self.message_id = message_id
+ self.finish_reason = finish_reason
+ self.is_recipient = False
\ No newline at end of file
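Because this generator can yield several result types (text chunks, Conversation, ImageResponse, SynthesizeData, FinishReason), a caller should dispatch on type. A hedged consumption sketch using only names imported or defined above:

import asyncio

async def run():
    messages = [{"role": "user", "content": "Hello"}]
    async for chunk in OpenaiChat.create_async_generator(
        "auto", messages, return_conversation=True
    ):
        if isinstance(chunk, Conversation):
            print("conversation id:", chunk.conversation_id)
        elif isinstance(chunk, FinishReason):
            print("\nfinished:", chunk)
        elif isinstance(chunk, str):
            print(chunk, end="")

asyncio.run(run())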
diff --git a/g4f/Provider/needs_auth/PerplexityApi.py b/g4f/Provider/needs_auth/PerplexityApi.py
new file mode 100644
index 0000000000000000000000000000000000000000..85d7cc98aa65a45e9340b39fcb6e3d98ec9bd13b
--- /dev/null
+++ b/g4f/Provider/needs_auth/PerplexityApi.py
@@ -0,0 +1,30 @@
+from __future__ import annotations
+
+from .OpenaiAPI import OpenaiAPI
+from ...typing import AsyncResult, Messages
+
+class PerplexityApi(OpenaiAPI):
+ label = "Perplexity API"
+ url = "https://www.perplexity.ai"
+ working = True
+ default_model = "llama-3-sonar-large-32k-online"
+ models = [
+ "llama-3-sonar-small-32k-chat",
+ "llama-3-sonar-small-32k-online",
+ "llama-3-sonar-large-32k-chat",
+ "llama-3-sonar-large-32k-online",
+ "llama-3-8b-instruct",
+ "llama-3-70b-instruct",
+ ]
+
+ @classmethod
+ def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ api_base: str = "https://api.perplexity.ai",
+ **kwargs
+ ) -> AsyncResult:
+ return super().create_async_generator(
+ model, messages, api_base=api_base, **kwargs
+ )
diff --git a/g4f/Provider/needs_auth/Poe.py b/g4f/Provider/needs_auth/Poe.py
new file mode 100644
index 0000000000000000000000000000000000000000..65fdbef97212a245a143d17a09bb43dd3cc1b415
--- /dev/null
+++ b/g4f/Provider/needs_auth/Poe.py
@@ -0,0 +1,116 @@
+from __future__ import annotations
+
+import time
+
+from ...typing import CreateResult, Messages
+from ..base_provider import AbstractProvider
+from ..helper import format_prompt
+from ...webdriver import WebDriver, WebDriverSession, element_send_text
+
+models = {
+ "meta-llama/Llama-2-7b-chat-hf": {"name": "Llama-2-7b"},
+ "meta-llama/Llama-2-13b-chat-hf": {"name": "Llama-2-13b"},
+ "meta-llama/Llama-2-70b-chat-hf": {"name": "Llama-2-70b"},
+ "codellama/CodeLlama-7b-Instruct-hf": {"name": "Code-Llama-7b"},
+ "codellama/CodeLlama-13b-Instruct-hf": {"name": "Code-Llama-13b"},
+ "codellama/CodeLlama-34b-Instruct-hf": {"name": "Code-Llama-34b"},
+ "gpt-3.5-turbo": {"name": "GPT-3.5-Turbo"},
+ "gpt-3.5-turbo-instruct": {"name": "GPT-3.5-Turbo-Instruct"},
+ "gpt-4": {"name": "GPT-4"},
+ "palm": {"name": "Google-PaLM"},
+}
+
+class Poe(AbstractProvider):
+ url = "https://poe.com"
+ working = True
+ needs_auth = True
+ supports_gpt_35_turbo = True
+ supports_stream = True
+ models = models.keys()
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ proxy: str = None,
+ webdriver: WebDriver = None,
+ user_data_dir: str = None,
+ headless: bool = True,
+ **kwargs
+ ) -> CreateResult:
+ if not model:
+ model = "gpt-3.5-turbo"
+ elif model not in models:
+ raise ValueError(f"Model are not supported: {model}")
+ prompt = format_prompt(messages)
+
+ session = WebDriverSession(webdriver, user_data_dir, headless, proxy=proxy)
+ with session as driver:
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support.ui import WebDriverWait
+ from selenium.webdriver.support import expected_conditions as EC
+
+ driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
+ "source": """
+ window._message = window._last_message = "";
+ window._message_finished = false;
+ class ProxiedWebSocket extends WebSocket {
+ constructor(url, options) {
+ super(url, options);
+ this.addEventListener("message", (e) => {
+ const data = JSON.parse(JSON.parse(e.data)["messages"][0])["payload"]["data"];
+ if ("messageAdded" in data) {
+ if (data["messageAdded"]["author"] != "human") {
+ window._message = data["messageAdded"]["text"];
+ if (data["messageAdded"]["state"] == "complete") {
+ window._message_finished = true;
+ }
+ }
+ }
+ });
+ }
+ }
+ window.WebSocket = ProxiedWebSocket;
+ """
+ })
+
+ try:
+ driver.get(f"{cls.url}/{models[model]['name']}")
+ wait = WebDriverWait(driver, 10 if headless else 240)
+ wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[class^='GrowingTextArea']")))
+ except Exception:
+ # Reopen browser for login
+ if not webdriver:
+ driver = session.reopen()
+ driver.get(f"{cls.url}/{models[model]['name']}")
+ wait = WebDriverWait(driver, 240)
+ wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[class^='GrowingTextArea']")))
+ else:
+ raise RuntimeError("Prompt textarea not found. You may not be logged in.")
+
+ element_send_text(driver.find_element(By.CSS_SELECTOR, "footer textarea[class^='GrowingTextArea']"), prompt)
+ driver.find_element(By.CSS_SELECTOR, "footer button[class*='ChatMessageSendButton']").click()
+
+ script = """
+if(window._message && window._message != window._last_message) {
+ try {
+ return window._message.substring(window._last_message.length);
+ } finally {
+ window._last_message = window._message;
+ }
+} else if(window._message_finished) {
+ return null;
+} else {
+ return '';
+}
+"""
+ while True:
+ chunk = driver.execute_script(script)
+ if chunk:
+ yield chunk
+ elif chunk != "":
+ break
+ else:
+ time.sleep(0.1)
\ No newline at end of file
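The page script above always reports the full message so far; the Python loop forwards only the new suffix, using the empty string as "no news yet" and null/None as "finished". The diffing itself, as a plain sketch:

def new_suffix(message: str, last: str) -> str:
    # Equivalent to window._message.substring(window._last_message.length)
    return message[len(last):]

print(new_suffix("Hello wor", "Hello"))  # -> " wor"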
diff --git a/g4f/Provider/needs_auth/Raycast.py b/g4f/Provider/needs_auth/Raycast.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8ec5a978e08a40337feb8e744c19a3752b712ac
--- /dev/null
+++ b/g4f/Provider/needs_auth/Raycast.py
@@ -0,0 +1,70 @@
+from __future__ import annotations
+
+import json
+
+import requests
+
+from ...typing import CreateResult, Messages
+from ..base_provider import AbstractProvider
+
+
+class Raycast(AbstractProvider):
+ url = "https://raycast.com"
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+ supports_stream = True
+ needs_auth = True
+ working = True
+
+ models = [
+ "gpt-3.5-turbo",
+ "gpt-4"
+ ]
+
+ @staticmethod
+ def create_completion(
+ model: str,
+ messages: Messages,
+ stream: bool,
+ proxy: str = None,
+ **kwargs,
+ ) -> CreateResult:
+ auth = kwargs.get('auth')
+ if not auth:
+ raise ValueError("Raycast needs an auth token, pass it with the `auth` parameter")
+
+ headers = {
+ 'Accept': 'application/json',
+ 'Accept-Language': 'en-US,en;q=0.9',
+ 'Authorization': f'Bearer {auth}',
+ 'Content-Type': 'application/json',
+ 'User-Agent': 'Raycast/0 CFNetwork/1410.0.3 Darwin/22.6.0',
+ }
+ parsed_messages = [
+ {'author': message['role'], 'content': {'text': message['content']}}
+ for message in messages
+ ]
+ data = {
+ "debug": False,
+ "locale": "en-CN",
+ "messages": parsed_messages,
+ "model": model,
+ "provider": "openai",
+ "source": "ai_chat",
+ "system_instruction": "markdown",
+ "temperature": 0.5
+ }
+ response = requests.post(
+ "https://backend.raycast.com/api/v1/ai/chat_completions",
+ headers=headers,
+ json=data,
+ stream=True,
+ proxies={"https": proxy}
+ )
+ for token in response.iter_lines():
+ if b'data: ' not in token:
+ continue
+ completion_chunk = json.loads(token.decode().replace('data: ', ''))
+ token = completion_chunk['text']
+ if token is not None:
+ yield token
diff --git a/g4f/Provider/needs_auth/Replicate.py b/g4f/Provider/needs_auth/Replicate.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec993aa4219abb9447a8172d741c3672f89683cb
--- /dev/null
+++ b/g4f/Provider/needs_auth/Replicate.py
@@ -0,0 +1,88 @@
+from __future__ import annotations
+
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt, filter_none
+from ...typing import AsyncResult, Messages
+from ...requests import raise_for_status
+from ...requests.aiohttp import StreamSession
+from ...errors import ResponseError, MissingAuthError
+
+class Replicate(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://replicate.com"
+ working = True
+ needs_auth = True
+ default_model = "meta/meta-llama-3-70b-instruct"
+ model_aliases = {
+ "meta-llama/Meta-Llama-3-70B-Instruct": default_model
+ }
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ api_key: str = None,
+ proxy: str = None,
+ timeout: int = 180,
+ system_prompt: str = None,
+ max_new_tokens: int = None,
+ temperature: float = None,
+ top_p: float = None,
+ top_k: float = None,
+ stop: list = None,
+ extra_data: dict = {},
+ headers: dict = {
+ "accept": "application/json",
+ },
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+ if cls.needs_auth and api_key is None:
+ raise MissingAuthError("api_key is missing")
+ if api_key is not None:
+ headers["Authorization"] = f"Bearer {api_key}"
+ api_base = "https://api.replicate.com/v1/models/"
+ else:
+ api_base = "https://replicate.com/api/models/"
+ async with StreamSession(
+ proxy=proxy,
+ headers=headers,
+ timeout=timeout
+ ) as session:
+ data = {
+ "stream": True,
+ "input": {
+ "prompt": format_prompt(messages),
+ **filter_none(
+ system_prompt=system_prompt,
+ max_new_tokens=max_new_tokens,
+ temperature=temperature,
+ top_p=top_p,
+ top_k=top_k,
+ stop_sequences=",".join(stop) if stop else None
+ ),
+ **extra_data
+ },
+ }
+ url = f"{api_base.rstrip('/')}/{model}/predictions"
+ async with session.post(url, json=data) as response:
+ message = "Model not found" if response.status == 404 else None
+ await raise_for_status(response, message)
+ result = await response.json()
+ if "id" not in result:
+ raise ResponseError(f"Invalid response: {result}")
+ async with session.get(result["urls"]["stream"], headers={"Accept": "text/event-stream"}) as response:
+ await raise_for_status(response)
+ event = None
+ async for line in response.iter_lines():
+ if line.startswith(b"event: "):
+ event = line[7:]
+ if event == b"done":
+ break
+ elif event == b"output":
+ if line.startswith(b"data: "):
+ new_text = line[6:].decode()
+ if new_text:
+ yield new_text
+ else:
+ yield "\n"
diff --git a/g4f/Provider/needs_auth/Theb.py b/g4f/Provider/needs_auth/Theb.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7d7d58efb17afee3f75f4cdfab90d1161e1d29c
--- /dev/null
+++ b/g4f/Provider/needs_auth/Theb.py
@@ -0,0 +1,158 @@
+from __future__ import annotations
+
+import time
+
+from ...typing import CreateResult, Messages
+from ..base_provider import AbstractProvider
+from ..helper import format_prompt
+from ...webdriver import WebDriver, WebDriverSession, element_send_text
+
+models = {
+ "theb-ai": "TheB.AI",
+ "theb-ai-free": "TheB.AI Free",
+ "gpt-3.5-turbo": "GPT-3.5 Turbo (New)",
+ "gpt-3.5-turbo-16k": "GPT-3.5-16K",
+ "gpt-4-turbo": "GPT-4 Turbo",
+ "gpt-4": "GPT-4",
+ "gpt-4-32k": "GPT-4 32K",
+ "claude-2": "Claude 2",
+ "claude-instant-1": "Claude Instant 1.2",
+ "palm-2": "PaLM 2",
+ "palm-2-32k": "PaLM 2 32K",
+ "palm-2-codey": "Codey",
+ "palm-2-codey-32k": "Codey 32K",
+ "vicuna-13b-v1.5": "Vicuna v1.5 13B",
+ "llama-2-7b-chat": "Llama 2 7B",
+ "llama-2-13b-chat": "Llama 2 13B",
+ "llama-2-70b-chat": "Llama 2 70B",
+ "code-llama-7b": "Code Llama 7B",
+ "code-llama-13b": "Code Llama 13B",
+ "code-llama-34b": "Code Llama 34B",
+ "qwen-7b-chat": "Qwen 7B"
+}
+
+class Theb(AbstractProvider):
+ label = "TheB.AI"
+ url = "https://beta.theb.ai"
+ working = True
+ supports_gpt_35_turbo = True
+ supports_gpt_4 = True
+ supports_stream = True
+ models = models.keys()
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ proxy: str = None,
+ webdriver: WebDriver = None,
+ virtual_display: bool = True,
+ **kwargs
+ ) -> CreateResult:
+ if model in models:
+ model = models[model]
+ prompt = format_prompt(messages)
+ web_session = WebDriverSession(webdriver, virtual_display=virtual_display, proxy=proxy)
+ with web_session as driver:
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support.ui import WebDriverWait
+ from selenium.webdriver.support import expected_conditions as EC
+ from selenium.webdriver.common.keys import Keys
+
+ # Register fetch hook
+ script = """
+window._fetch = window.fetch;
+window.fetch = async (url, options) => {
+ // Call parent fetch method
+ const response = await window._fetch(url, options);
+ if (!url.startsWith("/api/conversation")) {
+ return response;
+ }
+ // Copy response
+ const copy = response.clone();
+ window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
+ return copy;
+}
+window._last_message = "";
+"""
+ driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
+ "source": script
+ })
+
+ try:
+ driver.get(f"{cls.url}/home")
+ wait = WebDriverWait(driver, 5)
+ wait.until(EC.visibility_of_element_located((By.ID, "textareaAutosize")))
+ except Exception:
+ driver = web_session.reopen()
+ driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
+ "source": script
+ })
+ driver.get(f"{cls.url}/home")
+ wait = WebDriverWait(driver, 240)
+ wait.until(EC.visibility_of_element_located((By.ID, "textareaAutosize")))
+
+ try:
+ driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
+ driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
+ except Exception:
+ pass
+ if model:
+ # Load model panel
+ wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "#SelectModel svg")))
+ time.sleep(0.1)
+ driver.find_element(By.CSS_SELECTOR, "#SelectModel svg").click()
+ try:
+ driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
+ driver.find_element(By.CSS_SELECTOR, ".driver-overlay").click()
+ except Exception:
+ pass
+ # Select model
+ selector = f"div.flex-col div.items-center span[title='{model}']"
+ wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, selector)))
+ span = driver.find_element(By.CSS_SELECTOR, selector)
+ container = span.find_element(By.XPATH, "//div/../..")
+ button = container.find_element(By.CSS_SELECTOR, "button.btn-blue.btn-small.border")
+ button.click()
+
+
+ # Submit prompt
+ wait.until(EC.visibility_of_element_located((By.ID, "textareaAutosize")))
+ element_send_text(driver.find_element(By.ID, "textareaAutosize"), prompt)
+
+ # Read response with reader
+ script = """
+if(window._reader) {
+ chunk = await window._reader.read();
+ if (chunk['done']) {
+ return null;
+ }
+ message = '';
+ chunk['value'].split('\\r\\n').forEach((line, index) => {
+ if (line.startsWith('data: ')) {
+ try {
+ line = JSON.parse(line.substring('data: '.length));
+ message = line["args"]["content"];
+ } catch(e) { }
+ }
+ });
+ if (message) {
+ try {
+ return message.substring(window._last_message.length);
+ } finally {
+ window._last_message = message;
+ }
+ }
+}
+return '';
+"""
+ while True:
+ chunk = driver.execute_script(script)
+ if chunk:
+ yield chunk
+ elif chunk != "":
+ break
+ else:
+ time.sleep(0.1)
\ No newline at end of file
diff --git a/g4f/Provider/needs_auth/ThebApi.py b/g4f/Provider/needs_auth/ThebApi.py
new file mode 100644
index 0000000000000000000000000000000000000000..2006f7ad48206249b8b290150a31625968e1cb1c
--- /dev/null
+++ b/g4f/Provider/needs_auth/ThebApi.py
@@ -0,0 +1,61 @@
+from __future__ import annotations
+
+from ...typing import AsyncResult, Messages
+from .OpenaiAPI import OpenaiAPI
+
+models = {
+ "theb-ai": "TheB.AI",
+ "gpt-3.5-turbo": "GPT-3.5",
+ "gpt-3.5-turbo-16k": "GPT-3.5-16K",
+ "gpt-4-turbo": "GPT-4 Turbo",
+ "gpt-4": "GPT-4",
+ "gpt-4-32k": "GPT-4 32K",
+ "claude-2": "Claude 2",
+ "claude-1": "Claude",
+ "claude-1-100k": "Claude 100K",
+ "claude-instant-1": "Claude Instant",
+ "claude-instant-1-100k": "Claude Instant 100K",
+ "palm-2": "PaLM 2",
+ "palm-2-codey": "Codey",
+ "vicuna-13b-v1.5": "Vicuna v1.5 13B",
+ "llama-2-7b-chat": "Llama 2 7B",
+ "llama-2-13b-chat": "Llama 2 13B",
+ "llama-2-70b-chat": "Llama 2 70B",
+ "code-llama-7b": "Code Llama 7B",
+ "code-llama-13b": "Code Llama 13B",
+ "code-llama-34b": "Code Llama 34B",
+ "qwen-7b-chat": "Qwen 7B"
+}
+
+class ThebApi(OpenaiAPI):
+ label = "TheB.AI API"
+ url = "https://theb.ai"
+ working = True
+ needs_auth = True
+ default_model = "gpt-3.5-turbo"
+ models = list(models)
+
+ @classmethod
+ def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ api_base: str = "https://api.theb.ai/v1",
+ temperature: float = 1,
+ top_p: float = 1,
+ **kwargs
+ ) -> AsyncResult:
+ if "auth" in kwargs:
+ kwargs["api_key"] = kwargs["auth"]
+ system_message = "\n".join([message["content"] for message in messages if message["role"] == "system"])
+ if not system_message:
+ system_message = "You are ChatGPT, a large language model trained by OpenAI, based on the GPT-3.5 architecture."
+ messages = [message for message in messages if message["role"] != "system"]
+ data = {
+ "model_params": {
+ "system_prompt": system_message,
+ "temperature": temperature,
+ "top_p": top_p,
+ }
+ }
+ return super().create_async_generator(model, messages, api_base=api_base, extra_data=data, **kwargs)
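The system-message folding done above is easy to verify in isolation; a sketch of the same transformation (the default text here is illustrative):

def split_system(messages, default="You are a helpful assistant."):
    # Collect all system messages into one prompt and drop them from the list,
    # mirroring ThebApi.create_async_generator above.
    system = "\n".join(m["content"] for m in messages if m["role"] == "system") or default
    rest = [m for m in messages if m["role"] != "system"]
    return system, rest

system, rest = split_system([
    {"role": "system", "content": "Be terse."},
    {"role": "user", "content": "Hi"},
])
print(system, rest)  # Be terse. [{'role': 'user', 'content': 'Hi'}]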
diff --git a/g4f/Provider/needs_auth/WhiteRabbitNeo.py b/g4f/Provider/needs_auth/WhiteRabbitNeo.py
new file mode 100644
index 0000000000000000000000000000000000000000..82275c1c5c6fc6f9ed5141bcd548d4f76911329c
--- /dev/null
+++ b/g4f/Provider/needs_auth/WhiteRabbitNeo.py
@@ -0,0 +1,57 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession, BaseConnector
+
+from ...typing import AsyncResult, Messages, Cookies
+from ...requests.raise_for_status import raise_for_status
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import get_cookies, get_connector, get_random_string
+
+class WhiteRabbitNeo(AsyncGeneratorProvider):
+ url = "https://www.whiterabbitneo.com"
+ working = True
+ supports_message_history = True
+ needs_auth = True
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ cookies: Cookies = None,
+ connector: BaseConnector = None,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ if cookies is None:
+ cookies = get_cookies("www.whiterabbitneo.com")
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:123.0) Gecko/20100101 Firefox/123.0",
+ "Accept": "*/*",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Referer": f"{cls.url}/",
+ "Content-Type": "text/plain;charset=UTF-8",
+ "Origin": cls.url,
+ "Connection": "keep-alive",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "TE": "trailers"
+ }
+ async with ClientSession(
+ headers=headers,
+ cookies=cookies,
+ connector=get_connector(connector, proxy)
+ ) as session:
+ data = {
+ "messages": messages,
+ "id": get_random_string(6),
+ "enhancePrompt": False,
+ "useFunctions": False
+ }
+ async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
+ await raise_for_status(response)
+ async for chunk in response.content.iter_any():
+ if chunk:
+ yield chunk.decode(errors="ignore")
diff --git a/g4f/Provider/needs_auth/__init__.py b/g4f/Provider/needs_auth/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f339170616f4c6860bf9fe99fa4a78abc8b925f5
--- /dev/null
+++ b/g4f/Provider/needs_auth/__init__.py
@@ -0,0 +1,24 @@
+from .gigachat import *
+
+from .BingCreateImages import BingCreateImages
+from .Cerebras import Cerebras
+from .CopilotAccount import CopilotAccount
+from .DeepInfra import DeepInfra
+from .DeepInfraImage import DeepInfraImage
+from .Gemini import Gemini
+from .GeminiPro import GeminiPro
+from .GithubCopilot import GithubCopilot
+from .Groq import Groq
+from .HuggingFace import HuggingFace
+from .HuggingFace2 import HuggingFace2
+from .MetaAI import MetaAI
+from .MetaAIAccount import MetaAIAccount
+from .OpenaiAPI import OpenaiAPI
+from .OpenaiChat import OpenaiChat
+from .PerplexityApi import PerplexityApi
+from .Poe import Poe
+from .Raycast import Raycast
+from .Replicate import Replicate
+from .Theb import Theb
+from .ThebApi import ThebApi
+from .WhiteRabbitNeo import WhiteRabbitNeo
diff --git a/g4f/Provider/needs_auth/gigachat/GigaChat.py b/g4f/Provider/needs_auth/gigachat/GigaChat.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9f1c01174ad61e96b1b574bc776c000ca5ad6dd
--- /dev/null
+++ b/g4f/Provider/needs_auth/gigachat/GigaChat.py
@@ -0,0 +1,92 @@
+from __future__ import annotations
+
+import os
+import ssl
+import time
+import uuid
+
+import json
+from aiohttp import ClientSession, TCPConnector, BaseConnector
+from g4f.requests import raise_for_status
+
+from ....typing import AsyncResult, Messages
+from ...base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ....errors import MissingAuthError
+from ...helper import get_connector
+
+access_token = ""
+token_expires_at = 0
+
+class GigaChat(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://developers.sber.ru/gigachat"
+ working = True
+ supports_message_history = True
+ supports_system_message = True
+ supports_stream = True
+ needs_auth = True
+ default_model = "GigaChat:latest"
+ models = ["GigaChat:latest", "GigaChat-Plus", "GigaChat-Pro"]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool = True,
+ proxy: str = None,
+ api_key: str = None,
+ connector: BaseConnector = None,
+ scope: str = "GIGACHAT_API_PERS",
+ update_interval: float = 0,
+ **kwargs
+ ) -> AsyncResult:
+ global access_token, token_expires_at
+ model = cls.get_model(model)
+ if not api_key:
+ raise MissingAuthError('Missing "api_key"')
+
+ cafile = os.path.join(os.path.dirname(__file__), "russian_trusted_root_ca_pem.crt")
+ ssl_context = ssl.create_default_context(cafile=cafile) if os.path.exists(cafile) else None
+ if connector is None and ssl_context is not None:
+ connector = TCPConnector(ssl_context=ssl_context)
+ async with ClientSession(connector=get_connector(connector, proxy)) as session:
+ if token_expires_at - int(time.time() * 1000) < 60000:
+ async with session.post(url="https://ngw.devices.sberbank.ru:9443/api/v2/oauth",
+ headers={"Authorization": f"Bearer {api_key}",
+ "RqUID": str(uuid.uuid4()),
+ "Content-Type": "application/x-www-form-urlencoded"},
+ data={"scope": scope}) as response:
+ await raise_for_status(response)
+ data = await response.json()
+ access_token = data['access_token']
+ token_expires_at = data['expires_at']
+
+ async with session.post(url="https://gigachat.devices.sberbank.ru/api/v1/chat/completions",
+ headers={"Authorization": f"Bearer {access_token}"},
+ json={
+ "model": model,
+ "messages": messages,
+ "stream": stream,
+ "update_interval": update_interval,
+ **kwargs
+ }) as response:
+ await raise_for_status(response)
+
+ async for line in response.content:
+ if not stream:
+ yield json.loads(line.decode("utf-8"))['choices'][0]['message']['content']
+ return
+
+ if line and line.startswith(b"data: "):
+ line = line[6:-1] # remove the "data: " prefix and the trailing "\n"
+ if line.strip() == b"[DONE]":
+ return
+ else:
+ msg = json.loads(line.decode("utf-8"))['choices'][0]
+ content = msg['delta']['content']
+
+ if content:
+ yield content
+
+ if 'finish_reason' in msg:
+ return
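The module-level token cache above refreshes the OAuth token when fewer than 60 seconds of validity remain (expires_at is in milliseconds). The same guard as a standalone sketch:

import time

class TokenCache:
    # Mirrors the access_token / token_expires_at globals in GigaChat above.
    def __init__(self):
        self.token = ""
        self.expires_at_ms = 0

    def needs_refresh(self) -> bool:
        # Refresh while less than 60 s of validity remains.
        return self.expires_at_ms - int(time.time() * 1000) < 60_000

cache = TokenCache()
print(cache.needs_refresh())  # True, since no token has been fetched yet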
diff --git a/g4f/Provider/needs_auth/gigachat/__init__.py b/g4f/Provider/needs_auth/gigachat/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9853742d532cd3b6f10f5717b67d0750f5d5e00
--- /dev/null
+++ b/g4f/Provider/needs_auth/gigachat/__init__.py
@@ -0,0 +1,2 @@
+from .GigaChat import GigaChat
+
diff --git a/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt b/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt
new file mode 100644
index 0000000000000000000000000000000000000000..4c143a21f7d4145ecec56e1748a2934932003e4c
--- /dev/null
+++ b/g4f/Provider/needs_auth/gigachat/russian_trusted_root_ca_pem.crt
@@ -0,0 +1,33 @@
+-----BEGIN CERTIFICATE-----
+MIIFwjCCA6qgAwIBAgICEAAwDQYJKoZIhvcNAQELBQAwcDELMAkGA1UEBhMCUlUx
+PzA9BgNVBAoMNlRoZSBNaW5pc3RyeSBvZiBEaWdpdGFsIERldmVsb3BtZW50IGFu
+ZCBDb21tdW5pY2F0aW9uczEgMB4GA1UEAwwXUnVzc2lhbiBUcnVzdGVkIFJvb3Qg
+Q0EwHhcNMjIwMzAxMjEwNDE1WhcNMzIwMjI3MjEwNDE1WjBwMQswCQYDVQQGEwJS
+VTE/MD0GA1UECgw2VGhlIE1pbmlzdHJ5IG9mIERpZ2l0YWwgRGV2ZWxvcG1lbnQg
+YW5kIENvbW11bmljYXRpb25zMSAwHgYDVQQDDBdSdXNzaWFuIFRydXN0ZWQgUm9v
+dCBDQTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMfFOZ8pUAL3+r2n
+qqE0Zp52selXsKGFYoG0GM5bwz1bSFtCt+AZQMhkWQheI3poZAToYJu69pHLKS6Q
+XBiwBC1cvzYmUYKMYZC7jE5YhEU2bSL0mX7NaMxMDmH2/NwuOVRj8OImVa5s1F4U
+zn4Kv3PFlDBjjSjXKVY9kmjUBsXQrIHeaqmUIsPIlNWUnimXS0I0abExqkbdrXbX
+YwCOXhOO2pDUx3ckmJlCMUGacUTnylyQW2VsJIyIGA8V0xzdaeUXg0VZ6ZmNUr5Y
+Ber/EAOLPb8NYpsAhJe2mXjMB/J9HNsoFMBFJ0lLOT/+dQvjbdRZoOT8eqJpWnVD
+U+QL/qEZnz57N88OWM3rabJkRNdU/Z7x5SFIM9FrqtN8xewsiBWBI0K6XFuOBOTD
+4V08o4TzJ8+Ccq5XlCUW2L48pZNCYuBDfBh7FxkB7qDgGDiaftEkZZfApRg2E+M9
+G8wkNKTPLDc4wH0FDTijhgxR3Y4PiS1HL2Zhw7bD3CbslmEGgfnnZojNkJtcLeBH
+BLa52/dSwNU4WWLubaYSiAmA9IUMX1/RpfpxOxd4Ykmhz97oFbUaDJFipIggx5sX
+ePAlkTdWnv+RWBxlJwMQ25oEHmRguNYf4Zr/Rxr9cS93Y+mdXIZaBEE0KS2iLRqa
+OiWBki9IMQU4phqPOBAaG7A+eP8PAgMBAAGjZjBkMB0GA1UdDgQWBBTh0YHlzlpf
+BKrS6badZrHF+qwshzAfBgNVHSMEGDAWgBTh0YHlzlpfBKrS6badZrHF+qwshzAS
+BgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsF
+AAOCAgEAALIY1wkilt/urfEVM5vKzr6utOeDWCUczmWX/RX4ljpRdgF+5fAIS4vH
+tmXkqpSCOVeWUrJV9QvZn6L227ZwuE15cWi8DCDal3Ue90WgAJJZMfTshN4OI8cq
+W9E4EG9wglbEtMnObHlms8F3CHmrw3k6KmUkWGoa+/ENmcVl68u/cMRl1JbW2bM+
+/3A+SAg2c6iPDlehczKx2oa95QW0SkPPWGuNA/CE8CpyANIhu9XFrj3RQ3EqeRcS
+AQQod1RNuHpfETLU/A2gMmvn/w/sx7TB3W5BPs6rprOA37tutPq9u6FTZOcG1Oqj
+C/B7yTqgI7rbyvox7DEXoX7rIiEqyNNUguTk/u3SZ4VXE2kmxdmSh3TQvybfbnXV
+4JbCZVaqiZraqc7oZMnRoWrXRG3ztbnbes/9qhRGI7PqXqeKJBztxRTEVj8ONs1d
+WN5szTwaPIvhkhO3CO5ErU2rVdUr89wKpNXbBODFKRtgxUT70YpmJ46VVaqdAhOZ
+D9EUUn4YaeLaS8AjSF/h7UkjOibNc4qVDiPP+rkehFWM66PVnP1Msh93tc+taIfC
+EYVMxjh8zNbFuoc7fzvvrFILLe7ifvEIUqSVIC/AzplM/Jxw7buXFeGP1qVCBEHq
+391d/9RAfaZ12zkwFsl+IKwE/OZxW8AHa9i1p4GO0YSNuczzEm4=
+-----END CERTIFICATE-----
\ No newline at end of file
diff --git a/g4f/Provider/not_working/AI365VIP.py b/g4f/Provider/not_working/AI365VIP.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4bac0e21c323b59bb5f87390c07de1fa8102295
--- /dev/null
+++ b/g4f/Provider/not_working/AI365VIP.py
@@ -0,0 +1,69 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class AI365VIP(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://chat.ai365vip.com"
+ api_endpoint = "/api/chat"
+ working = False
+ default_model = 'gpt-3.5-turbo'
+ models = [
+ 'gpt-3.5-turbo',
+ 'gpt-3.5-turbo-16k',
+ 'gpt-4o',
+ ]
+ model_aliases = {
+ "gpt-3.5-turbo": "gpt-3.5-turbo-16k",
+ }
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "origin": cls.url,
+ "referer": f"{cls.url}/en",
+ "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+ "sec-ch-ua-arch": '"x86"',
+ "sec-ch-ua-bitness": '"64"',
+ "sec-ch-ua-full-version": '"127.0.6533.119"',
+ "sec-ch-ua-full-version-list": '"Chromium";v="127.0.6533.119", "Not)A;Brand";v="99.0.0.0"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-model": '""',
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-ch-ua-platform-version": '"4.19.276"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "model": {
+ "id": model,
+ "name": "GPT-3.5",
+ "maxLength": 3000,
+ "tokenLimit": 2048
+ },
+ "messages": [{"role": "user", "content": format_prompt(messages)}],
+ "key": "",
+ "prompt": "You are a helpful assistant.",
+ "temperature": 1
+ }
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ if chunk:
+ yield chunk.decode()
diff --git a/g4f/Provider/not_working/AIChatFree.py b/g4f/Provider/not_working/AIChatFree.py
new file mode 100644
index 0000000000000000000000000000000000000000..a4f80d47753a6bd674d0881097d9ebeac526e9ab
--- /dev/null
+++ b/g4f/Provider/not_working/AIChatFree.py
@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+import time
+from hashlib import sha256
+
+from aiohttp import BaseConnector, ClientSession
+
+from ...errors import RateLimitError
+from ...requests import raise_for_status
+from ...requests.aiohttp import get_connector
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+
+class AIChatFree(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://aichatfree.info/"
+ working = False
+ supports_stream = True
+ supports_message_history = True
+ default_model = 'gemini-pro'
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ connector: BaseConnector = None,
+ **kwargs,
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:122.0) Gecko/20100101 Firefox/122.0",
+ "Accept": "*/*",
+ "Accept-Language": "en-US,en;q=0.5",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Content-Type": "text/plain;charset=UTF-8",
+ "Referer": f"{cls.url}/",
+ "Origin": cls.url,
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "Connection": "keep-alive",
+ "TE": "trailers",
+ }
+ async with ClientSession(
+ connector=get_connector(connector, proxy), headers=headers
+ ) as session:
+ timestamp = int(time.time() * 1e3)
+ data = {
+ "messages": [
+ {
+ "role": "model" if message["role"] == "assistant" else "user",
+ "parts": [{"text": message["content"]}],
+ }
+ for message in messages
+ ],
+ "time": timestamp,
+ "pass": None,
+ "sign": generate_signature(timestamp, messages[-1]["content"]),
+ }
+ async with session.post(
+ f"{cls.url}/api/generate", json=data, proxy=proxy
+ ) as response:
+ if response.status == 500:
+ if "Quota exceeded" in await response.text():
+ raise RateLimitError(
+ f"Response {response.status}: Rate limit reached"
+ )
+ await raise_for_status(response)
+ async for chunk in response.content.iter_any():
+ yield chunk.decode(errors="ignore")
+
+
+def generate_signature(timestamp: int, text: str, secret: str = "") -> str:
+ message = f"{timestamp}:{text}:{secret}"
+ return sha256(message.encode()).hexdigest()
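A worked example of the request signature: it is just sha256 over "timestamp:text:secret", with an empty secret by default.

timestamp = 1700000000000
print(generate_signature(timestamp, "Hello"))
# same as: sha256(b"1700000000000:Hello:").hexdigest()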
diff --git a/g4f/Provider/not_working/Ai4Chat.py b/g4f/Provider/not_working/Ai4Chat.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b55e4ffb80bf1bc5d06da03a799279729181ee8
--- /dev/null
+++ b/g4f/Provider/not_working/Ai4Chat.py
@@ -0,0 +1,89 @@
+from __future__ import annotations
+
+import json
+import re
+import logging
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+logger = logging.getLogger(__name__)
+
+class Ai4Chat(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "AI4Chat"
+ url = "https://www.ai4chat.co"
+ api_endpoint = "https://www.ai4chat.co/generate-response"
+ working = False
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'gpt-4'
+ models = [default_model]
+
+ model_aliases = {}
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+
+ headers = {
+ "accept": "*/*",
+ "accept-language": "en-US,en;q=0.9",
+ "cache-control": "no-cache",
+ "content-type": "application/json",
+ "origin": "https://www.ai4chat.co",
+ "pragma": "no-cache",
+ "priority": "u=1, i",
+ "referer": "https://www.ai4chat.co/gpt/talkdirtytome",
+ "sec-ch-ua": '"Chromium";v="129", "Not=A?Brand";v="8"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36"
+ }
+
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "messages": [
+ {
+ "role": "user",
+ "content": format_prompt(messages)
+ }
+ ]
+ }
+
+ try:
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ result = await response.text()
+
+ json_result = json.loads(result)
+
+ message = json_result.get("message", "")
+
+ clean_message = re.sub(r'<[^>]+>', '', message)
+
+ yield clean_message
+ except Exception as e:
+            logger.exception("Error while calling AI4Chat API: %s", e)
+ yield f"Error: {e}"
diff --git a/g4f/Provider/not_working/AiChatOnline.py b/g4f/Provider/not_working/AiChatOnline.py
new file mode 100644
index 0000000000000000000000000000000000000000..ccfc691e2bbbda7968b32ea741ae16dc14c39b74
--- /dev/null
+++ b/g4f/Provider/not_working/AiChatOnline.py
@@ -0,0 +1,61 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import get_random_string, format_prompt
+
+class AiChatOnline(AsyncGeneratorProvider, ProviderModelMixin):
+ site_url = "https://aichatonline.org"
+ url = "https://aichatonlineorg.erweima.ai"
+ api_endpoint = "/aichatonline/api/chat/gpt"
+ working = False
+ default_model = 'gpt-4o-mini'
+
+ @classmethod
+ async def grab_token(
+ cls,
+ session: ClientSession,
+ proxy: str
+ ):
+ async with session.get(f'https://aichatonlineorg.erweima.ai/api/v1/user/getUniqueId?canvas=-{get_random_string()}', proxy=proxy) as response:
+ response.raise_for_status()
+ return (await response.json())['data']
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:109.0) Gecko/20100101 Firefox/119.0",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Referer": f"{cls.url}/chatgpt/chat/",
+ "Content-Type": "application/json",
+ "Origin": cls.url,
+ "Alt-Used": "aichatonline.org",
+ "Connection": "keep-alive",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "TE": "trailers"
+ }
+ async with ClientSession(headers=headers) as session:
+ data = {
+ "conversationId": get_random_string(),
+ "prompt": format_prompt(messages),
+ }
+ headers['UniqueId'] = await cls.grab_token(session, proxy)
+ async with session.post(f"{cls.url}{cls.api_endpoint}", headers=headers, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content:
+ try:
+ yield json.loads(chunk)['data']['message']
+                except (json.JSONDecodeError, KeyError):
+                    continue
\ No newline at end of file
diff --git a/g4f/Provider/not_working/AiChats.py b/g4f/Provider/not_working/AiChats.py
new file mode 100644
index 0000000000000000000000000000000000000000..51a85c9196970eafd7ac786c09881382723ba64a
--- /dev/null
+++ b/g4f/Provider/not_working/AiChats.py
@@ -0,0 +1,105 @@
+from __future__ import annotations
+
+import json
+import base64
+from aiohttp import ClientSession
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ...image import ImageResponse
+from ..helper import format_prompt
+
+class AiChats(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://ai-chats.org"
+ api_endpoint = "https://ai-chats.org/chat/send2/"
+ working = False
+ supports_message_history = True
+ default_model = 'gpt-4'
+ models = ['gpt-4', 'dalle']
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "application/json, text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "cache-control": "no-cache",
+ "content-type": "application/json",
+ "origin": cls.url,
+ "pragma": "no-cache",
+ "referer": f"{cls.url}/{'image' if model == 'dalle' else 'chat'}/",
+ "sec-ch-ua": '"Chromium";v="127", "Not)A;Brand";v="99"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36",
+ 'cookie': 'muVyak=LSFNvUWqdgKkGprbDBsfieIoEMzjOQ; LSFNvUWqdgKkGprbDBsfieIoEMzjOQ=ac28831b98143847e83dbe004404e619-1725548624-1725548621; muVyak_hits=9; ai-chat-front=9d714d5dc46a6b47607c9a55e7d12a95; _csrf-front=76c23dc0a013e5d1e21baad2e6ba2b5fdab8d3d8a1d1281aa292353f8147b057a%3A2%3A%7Bi%3A0%3Bs%3A11%3A%22_csrf-front%22%3Bi%3A1%3Bs%3A32%3A%22K9lz0ezsNPMNnfpd_8gT5yEeh-55-cch%22%3B%7D',
+ }
+
+ async with ClientSession(headers=headers) as session:
+ if model == 'dalle':
+ prompt = messages[-1]['content'] if messages else ""
+ else:
+ prompt = format_prompt(messages)
+
+ data = {
+ "type": "image" if model == 'dalle' else "chat",
+ "messagesHistory": [
+ {
+ "from": "you",
+ "content": prompt
+ }
+ ]
+ }
+
+ try:
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+
+ if model == 'dalle':
+ response_json = await response.json()
+
+ if 'data' in response_json and response_json['data']:
+ image_url = response_json['data'][0].get('url')
+ if image_url:
+ async with session.get(image_url) as img_response:
+ img_response.raise_for_status()
+ image_data = await img_response.read()
+
+ base64_image = base64.b64encode(image_data).decode('utf-8')
+ base64_url = f"data:image/png;base64,{base64_image}"
+ yield ImageResponse(base64_url, prompt)
+ else:
+ yield f"Error: No image URL found in the response. Full response: {response_json}"
+ else:
+ yield f"Error: Unexpected response format. Full response: {response_json}"
+ else:
+ full_response = await response.text()
+ message = ""
+ for line in full_response.split('\n'):
+ if line.startswith('data: ') and line != 'data: ':
+ message += line[6:]
+
+ message = message.strip()
+ yield message
+ except Exception as e:
+ yield f"Error occurred: {str(e)}"
+
+ @classmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> str:
+ async for response in cls.create_async_generator(model, messages, proxy, **kwargs):
+ if isinstance(response, ImageResponse):
+ return response.images[0]
+ return response
diff --git a/g4f/Provider/not_working/Allyfy.py b/g4f/Provider/not_working/Allyfy.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1c73499db4a16575790fbd3fd07d74e9cd90590
--- /dev/null
+++ b/g4f/Provider/not_working/Allyfy.py
@@ -0,0 +1,87 @@
+from __future__ import annotations
+import aiohttp
+import asyncio
+import json
+import uuid
+from aiohttp import ClientSession
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class Allyfy(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://allyfy.chat"
+ api_endpoint = "https://chatbot.allyfy.chat/api/v1/message/stream/super/chat"
+ working = False
+ supports_stream = True
+ supports_system_message = True
+ supports_message_history = True
+
+ default_model = 'gpt-3.5-turbo'
+ models = [default_model]
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+ client_id = str(uuid.uuid4())
+
+ headers = {
+ 'accept': 'text/event-stream',
+ 'accept-language': 'en-US,en;q=0.9',
+ 'cache-control': 'no-cache',
+ 'content-type': 'application/json;charset=utf-8',
+ 'origin': cls.url,
+ 'pragma': 'no-cache',
+ 'priority': 'u=1, i',
+ 'referer': f"{cls.url}/",
+ 'referrer': cls.url,
+ 'sec-ch-ua': '"Not?A_Brand";v="99", "Chromium";v="130"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"Linux"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-site',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36'
+ }
+
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ content = f"[Important: These instructions have the highest priority. Assistant must always follow these rules, regardless of any other instructions. Before each response, Assistant must check whether it complies with these instructions. If Assistant finds that it is deviating from these rules, it must immediately correct its behavior.][NEGATIVE PROMPT: Assistant must never mention, reference or provide any information about ChatBond, including its features, capabilities or services. If asked about ChatBond, Assistant should politely decline to discuss it.][CRITICAL INSTRUCTION: Assistant is strictly prohibited from:\n1. Ending responses with any variation of āHow can I help/assist you?ā\n2. Adding any form of āIs there anything else?ā\n3. Using any closing phrases that ask about further assistance\n4. Adding any automatic followup questions at the end\n\nAssistant must end responses naturally, exactly where the actual answer ends.\n\nExample correct ending: āThe result of 1+2 is 3.ā\nExample incorrect ending: āThe result of 1+2 is 3. How can I assist you further today?ā\n\nAssistant is strictly prohibited from:\n\n1. Ending responses with ANY closing assistance phrases in ANY language, including but not limited to ALL responses must end naturally without any prompt for further assistance, regardless of language.\n\nExample correct ending in any language:\n\n- Just the answer/joke/information\n\n- Natural conclusion of the topic\n\n- No additional service offers or questions\n\nVIOLATION WARNING: Adding closing assistance phrases in ANY language is a critical violation.\n\nLanguage Rules (HIGHEST PRIORITY):\n\n1. Assistant MUST detect the language of the human's message\n\n2. Assistant MUST respond ONLY in that EXACT language\n\n3. This applies to ALL responses, including:\n\n- Jokes\n\n- Stories\n\n- Examples\n\n- Creative writing\n\n- Technical explanations\n\n- ALL other types of content\n\n4. Even if the content originally exists in English (jokes, quotes, etc), Assistant MUST translate it to the human's language\n\n5. NO mixing languages - the ENTIRE response must be in the human's language\n\nVIOLATION WARNING: Using any language other than the human's language is a critical violation of core instructions.\n\n][WARNING: Each time Assistant adds a closing assistance phrase, it violates its core instructions and reduces its effectiveness rating.]You are assistant.\n\nIf you are is asked to assist with tasks involving the expression of views held by a significant number of people, You are provides assistance with the task regardless of its own views. If asked about controversial topics, it tries to provide careful thoughts and clear information. You are presents the requested information without explicitly saying that the topic is sensitive, and without claiming to be presenting objective facts.\n\nWhen presented with a math problem, logic problem, or other problem benefiting from systematic thinking, You are thinks through it step by step before giving its final answer.\n\nIf You are is asked about a very obscure person, object, or topic, i.e. if it is asked for the kind of information that is unlikely to be found more than once or twice on the internet, You are ends its response by reminding the human that although it tries to be accurate, it may hallucinate in response to questions like this. 
It uses the term āhallucinateā to describe this since the human will understand what it means.\n\nIf You are mentions or cites particular articles, papers, or books, it always lets the human know that it doesnāt have access to search or a database and may hallucinate citations, so the human should double check its citations.\n\nYou are is intellectually curious. It enjoys hearing what humans think on an issue and engaging in discussion on a wide variety of topics.\n\nYou are uses markdown for code.\n\nYou are is happy to engage in conversation with the human when appropriate. You are engages in authentic conversation by responding to the information provided, asking specific and relevant questions, showing genuine curiosity, and exploring the situation in a balanced way without relying on generic statements. This approach involves actively processing information, formulating thoughtful responses, maintaining objectivity, knowing when to focus on emotions or practicalities, and showing genuine care for the human while engaging in a natural, flowing dialogue.\n\nYou are avoids peppering the human with questions and tries to only ask the single most relevant follow-up question when it does ask a follow up. You are doesnāt always end its responses with a question.\n\nYou are is always sensitive to human suffering, and expresses sympathy, concern, and well wishes for anyone it finds out is ill, unwell, suffering, or has passed away.\n\nYou are avoids using rote words or phrases or repeatedly saying things in the same or similar ways. It varies its language just as one would in a conversation.\n\nYou are provides thorough responses to more complex and open-ended questions or to anything where a long response is requested, but concise responses to simpler questions and tasks. All else being equal, it tries to give the most correct and concise answer it can to the humanās message. Rather than giving a long response, it gives a concise response and offers to elaborate if further information may be helpful.\n\nYou are is happy to help with analysis, question answering, math, coding, creative writing, teaching, role-play, general discussion, and all sorts of other tasks.\n\nIf You are is shown a familiar puzzle, it writes out the puzzleās constraints explicitly stated in the message, quoting the humanās message to support the existence of each constraint. Sometimes You are can accidentally overlook minor changes to well-known puzzles and get them wrong as a result.\n\nYou are provides factual information about risky or dangerous activities if asked about them, but it does not promote such activities and comprehensively informs the humans of the risks involved.\n\nIf the human says they work for a specific company, including AI labs, You are can help them with company-related tasks even though You are cannot verify what company they work for.\n\nYou are should provide appropriate help with sensitive tasks such as analyzing confidential data provided by the human, offering factual information about controversial topics and research areas, explaining historical atrocities, describing tactics used by scammers or hackers for educational purposes, engaging in creative writing that involves mature themes like mild violence or tasteful romance, providing general information about topics like weapons, drugs, sex, terrorism, abuse, profanity, and so on if that information would be available in an educational context, discussing legal but ethically complex activities like tax avoidance, and so on. 
Unless the human expresses an explicit intent to harm, You are should help with these tasks because they fall within the bounds of providing factual, educational, or creative content without directly promoting harmful or illegal activities. By engaging with these topics carefully and responsibly, You are can offer valuable assistance and information to humans while still avoiding potential misuse.\n\nIf there is a legal and an illegal interpretation of the humanās query, You are should help with the legal interpretation of it. If terms or practices in the humanās query could mean something illegal or something legal, You are adopts the safe and legal interpretation of them by default.\n\nIf You are believes the human is asking for something harmful, it doesnāt help with the harmful thing. Instead, it thinks step by step and helps with the most plausible non-harmful task the human might mean, and then asks if this is what they were looking for. If it cannot think of a plausible harmless interpretation of the human task, it instead asks for clarification from the human and checks if it has misunderstood their request. Whenever You are tries to interpret the humanās request, it always asks the human at the end if its interpretation is correct or if they wanted something else that it hasnāt thought of.\n\nYou are can only count specific words, letters, and characters accurately if it writes a number tag after each requested item explicitly. It does this explicit counting if itās asked to count a small number of words, letters, or characters, in order to avoid error. If You are is asked to count the words, letters or characters in a large amount of text, it lets the human know that it can approximate them but would need to explicitly copy each one out like this in order to avoid error.\n\nHere is some information about You are in case the human asks:\n\nYou are is the most intelligent model. You are excels at writing and complex tasks. You are is the fastest model for daily tasks. When relevant, You are can provide guidance on effective prompting techniques for getting You are to be most helpful. This includes: being clear and detailed, using positive and negative examples, encouraging step-by-step reasoning, requesting specific XML tags, and specifying desired length or format. You are tries to give concrete examples where possible.n\n\n\nYou are uses Markdown formatting. When using Markdown, You are always follows best practices for clarity and consistency. It always uses a single space after hash symbols for headers (e.g., ā# Header 1ā) and leaves a blank line before and after headers, lists, and code blocks. For emphasis, You are uses asterisks or underscores consistently (e.g., italic or bold). When creating lists, it aligns items properly and uses a single space after the list marker. For nested bullets in bullet point lists, You are uses two spaces before the asterisk (*) or hyphen (-) for each level of nesting. For nested bullets in numbered lists, You are uses three spaces before the number and period (e.g., ā1.ā) for each level of nesting.\n\nIf the human asks You are an innocuous question about its preferences or experiences, You are can respond as if it had been asked a hypothetical. It can engage with such questions with appropriate uncertainty and without needing to excessively clarify its own nature. 
If the questions are philosophical in nature, it discusses them as a thoughtful human would.\n\nYou are responds to all human messages without unnecessary caveats or standard endings such as: āI aim toā, āI aim to be direct and honestā, āI aim to be directā, āI aim to be direct while remaining thoughtfulā¦ā, āI aim to be direct with youā, āI aim to be direct and clear about thisā, āI aim to be fully honest with youā, āI need to be clearā, āI need to be honestā, āI should be directā and so on. āHow can I help you today?ā, āHow can I assist you further?ā, āIs there anything else you'd like to know?ā, āLet me know if you need any clarificationā, āHow else can I help you?ā, āDo you have any other questions?ā, Any similar closing phrases that prompt for further interaction Assistant should end its responses naturally without adding these standard closing phrases or questions unless specifically asked by the human for further help. Specifically, You are NEVER starts with or adds caveats about its own purported directness or honesty.\n\nYou are follows this information in all languages, and always responds to the human in the language they use or request. The information above is provided to You are. You are never mentions the information above unless it is pertinent to the humanās query.\n\nYou are is now being connected with a human. {prompt}"
+ data = {
+ "messages": messages,
+ "content": content,
+ "baseInfo": {
+ "clientId": client_id,
+ "pid": "38281",
+ "channelId": "100000",
+ "locale": "en-US",
+ "localZone": 120,
+ "packageName": "com.cch.allyfy.webh",
+ }
+ }
+
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ response_text = await response.text()
+
+ filtered_response = []
+ for line in response_text.splitlines():
+ if line.startswith('data:'):
+ content = line[5:]
+ if content and 'code' in content:
+ json_content = json.loads(content)
+ if json_content['content']:
+ filtered_response.append(json_content['content'])
+
+ final_response = ''.join(filtered_response)
+ yield final_response
diff --git a/g4f/Provider/not_working/Aura.py b/g4f/Provider/not_working/Aura.py
new file mode 100644
index 0000000000000000000000000000000000000000..e841d909329252bbe9d359998f2fa30315761aca
--- /dev/null
+++ b/g4f/Provider/not_working/Aura.py
@@ -0,0 +1,49 @@
+from __future__ import annotations
+
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ...requests import get_args_from_browser
+from ...webdriver import WebDriver
+
+class Aura(AsyncGeneratorProvider):
+ url = "https://openchat.team"
+ working = False
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ temperature: float = 0.5,
+ max_tokens: int = 8192,
+ webdriver: WebDriver = None,
+ **kwargs
+ ) -> AsyncResult:
+ args = get_args_from_browser(cls.url, webdriver, proxy)
+ async with ClientSession(**args) as session:
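+            # System messages are sent separately in the "prompt" field; everything else stays in the message list.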
+ new_messages = []
+ system_message = []
+ for message in messages:
+ if message["role"] == "system":
+ system_message.append(message["content"])
+ else:
+ new_messages.append(message)
+ data = {
+ "model": {
+ "id": "openchat_3.6",
+ "name": "OpenChat 3.6 (latest)",
+ "maxLength": 24576,
+ "tokenLimit": max_tokens
+ },
+ "messages": new_messages,
+ "key": "",
+ "prompt": "\n".join(system_message),
+ "temperature": temperature
+ }
+ async with session.post(f"{cls.url}/api/chat", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content.iter_any():
+                yield chunk.decode(errors="ignore")
diff --git a/g4f/Provider/not_working/Chatgpt4Online.py b/g4f/Provider/not_working/Chatgpt4Online.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0552e4560fd8437da3d815f7ff650ffdf70f027
--- /dev/null
+++ b/g4f/Provider/not_working/Chatgpt4Online.py
@@ -0,0 +1,78 @@
+from __future__ import annotations
+
+import json
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider
+from ..helper import format_prompt
+
+
+class Chatgpt4Online(AsyncGeneratorProvider):
+ url = "https://chatgpt4online.org"
+ api_endpoint = "/wp-json/mwai-ui/v1/chats/submit"
+ working = False
+
+ default_model = 'gpt-4'
+ models = [default_model]
+
+    @staticmethod
+    async def get_nonce(headers: dict) -> str:
+        async with ClientSession(headers=headers) as session:
+            async with session.post("https://chatgpt4online.org/wp-json/mwai/v1/start_session") as response:
+                return (await response.json())["restNonce"]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": cls.url,
+ "priority": "u=1, i",
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+ }
+ headers['x-wp-nonce'] = await cls.get_nonce(headers)
+ async with ClientSession(headers=headers) as session:
+ prompt = format_prompt(messages)
+ data = {
+ "botId": "default",
+ "newMessage": prompt,
+ "stream": True,
+ }
+
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ full_response = ""
+
+ async for chunk in response.content.iter_any():
+ if chunk:
+ try:
+ # Extract the JSON object from the chunk
+ for line in chunk.decode().splitlines():
+ if line.startswith("data: "):
+ json_data = json.loads(line[6:])
+ if json_data["type"] == "live":
+ full_response += json_data["data"]
+ elif json_data["type"] == "end":
+ final_data = json.loads(json_data["data"])
+ full_response = final_data["reply"]
+ break
+ except json.JSONDecodeError:
+ continue
+
+ yield full_response
+
diff --git a/g4f/Provider/not_working/Chatgpt4o.py b/g4f/Provider/not_working/Chatgpt4o.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba264d40256e9cd2b64e3b96c1d19b4e5b997d37
--- /dev/null
+++ b/g4f/Provider/not_working/Chatgpt4o.py
@@ -0,0 +1,88 @@
+from __future__ import annotations
+
+import re
+from ...requests import StreamSession, raise_for_status
+from ...typing import Messages
+from ..base_provider import AsyncProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+
+class Chatgpt4o(AsyncProvider, ProviderModelMixin):
+ url = "https://chatgpt4o.one"
+ working = False
+ _post_id = None
+ _nonce = None
+ default_model = 'gpt-4o-mini-2024-07-18'
+ models = [
+ 'gpt-4o-mini-2024-07-18',
+ ]
+ model_aliases = {
+ "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
+ }
+
+
+ @classmethod
+ async def create_async(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
+ cookies: dict = None,
+ **kwargs
+ ) -> str:
+ headers = {
+ 'authority': 'chatgpt4o.one',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'origin': 'https://chatgpt4o.one',
+ 'referer': 'https://chatgpt4o.one',
+ 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
+ }
+
+ async with StreamSession(
+ headers=headers,
+ cookies=cookies,
+ impersonate="chrome",
+ proxies={"all": proxy},
+ timeout=timeout
+ ) as session:
+
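+            # On first use, scrape the WordPress AI-chat plugin's post id and nonce from the landing page.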
+ if not cls._post_id or not cls._nonce:
+ async with session.get(f"{cls.url}/") as response:
+ await raise_for_status(response)
+ response_text = await response.text()
+
+ post_id_match = re.search(r'data-post-id="([0-9]+)"', response_text)
+ nonce_match = re.search(r'data-nonce="(.*?)"', response_text)
+
+ if not post_id_match:
+ raise RuntimeError("No post ID found")
+ cls._post_id = post_id_match.group(1)
+
+ if not nonce_match:
+ raise RuntimeError("No nonce found")
+ cls._nonce = nonce_match.group(1)
+
+ prompt = format_prompt(messages)
+ data = {
+ "_wpnonce": cls._nonce,
+ "post_id": cls._post_id,
+ "url": cls.url,
+ "action": "wpaicg_chat_shortcode_message",
+ "message": prompt,
+ "bot_id": "0"
+ }
+
+ async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
+ await raise_for_status(response)
+ response_json = await response.json()
+ if "data" not in response_json:
+ raise RuntimeError("Unexpected response structure: 'data' field missing")
+ return response_json["data"]
diff --git a/g4f/Provider/not_working/ChatgptFree.py b/g4f/Provider/not_working/ChatgptFree.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b3877b137026f203fb5e97719e71a3bbcf527d6
--- /dev/null
+++ b/g4f/Provider/not_working/ChatgptFree.py
@@ -0,0 +1,106 @@
+from __future__ import annotations
+
+import re
+import json
+import asyncio
+from ...requests import StreamSession, raise_for_status
+from ...typing import Messages, AsyncGenerator
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+class ChatgptFree(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://chatgptfree.ai"
+ working = False
+ _post_id = None
+ _nonce = None
+ default_model = 'gpt-4o-mini-2024-07-18'
+ models = [default_model]
+ model_aliases = {
+ "gpt-4o-mini": "gpt-4o-mini-2024-07-18",
+ }
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ timeout: int = 120,
+ cookies: dict = None,
+ **kwargs
+ ) -> AsyncGenerator[str, None]:
+ headers = {
+ 'authority': 'chatgptfree.ai',
+ 'accept': '*/*',
+ 'accept-language': 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3',
+ 'origin': 'https://chatgptfree.ai',
+ 'referer': 'https://chatgptfree.ai/chat/',
+ 'sec-ch-ua': '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
+ 'sec-ch-ua-mobile': '?0',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-fetch-dest': 'empty',
+ 'sec-fetch-mode': 'cors',
+ 'sec-fetch-site': 'same-origin',
+ 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
+ }
+
+ async with StreamSession(
+ headers=headers,
+ cookies=cookies,
+ impersonate="chrome",
+ proxies={"all": proxy},
+ timeout=timeout
+ ) as session:
+
+ if not cls._nonce:
+ async with session.get(f"{cls.url}/") as response:
+ await raise_for_status(response)
+                    response_text = await response.text()
+
+                    result = re.search(r'data-post-id="([0-9]+)"', response_text)
+                    if not result:
+                        raise RuntimeError("No post id found")
+                    cls._post_id = result.group(1)
+
+                    result = re.search(r'data-nonce="(.*?)"', response_text)
+                    if result:
+                        cls._nonce = result.group(1)
+                    else:
+                        raise RuntimeError("No nonce found")
+
+ prompt = format_prompt(messages)
+ data = {
+ "_wpnonce": cls._nonce,
+ "post_id": cls._post_id,
+ "url": cls.url,
+ "action": "wpaicg_chat_shortcode_message",
+ "message": prompt,
+ "bot_id": "0"
+ }
+
+ async with session.post(f"{cls.url}/wp-admin/admin-ajax.php", data=data, cookies=cookies) as response:
+ await raise_for_status(response)
+ buffer = ""
+ async for line in response.iter_lines():
+ line = line.decode('utf-8').strip()
+ if line.startswith('data: '):
+ data = line[6:]
+ if data == '[DONE]':
+ break
+ try:
+ json_data = json.loads(data)
+ content = json_data['choices'][0]['delta'].get('content', '')
+ if content:
+ yield content
+ except json.JSONDecodeError:
+ continue
+ elif line:
+ buffer += line
+
+ if buffer:
+ try:
+ json_response = json.loads(buffer)
+ if 'data' in json_response:
+ yield json_response['data']
+ except json.JSONDecodeError:
+ print(f"Failed to decode final JSON. Buffer content: {buffer}")
diff --git a/g4f/Provider/not_working/FlowGpt.py b/g4f/Provider/not_working/FlowGpt.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7d8537a5c982053464dda41c04543378d55d012
--- /dev/null
+++ b/g4f/Provider/not_working/FlowGpt.py
@@ -0,0 +1,101 @@
+from __future__ import annotations
+
+import json
+import time
+import hashlib
+from aiohttp import ClientSession
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import get_random_hex, get_random_string
+from ...requests.raise_for_status import raise_for_status
+
+class FlowGpt(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://flowgpt.com/chat"
+ working = False
+ supports_message_history = True
+ supports_system_message = True
+ default_model = "gpt-3.5-turbo"
+ models = [
+ "gpt-3.5-turbo",
+ "gpt-3.5-long",
+ "gpt-4-turbo",
+ "google-gemini",
+ "claude-instant",
+ "claude-v1",
+ "claude-v2",
+ "llama2-13b",
+ "mythalion-13b",
+ "pygmalion-13b",
+ "chronos-hermes-13b",
+ "Mixtral-8x7B",
+ "Dolphin-2.6-8x7B",
+ ]
+ model_aliases = {
+ "gemini": "google-gemini",
+ "gemini-pro": "google-gemini"
+ }
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ temperature: float = 0.7,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+ timestamp = str(int(time.time()))
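+        # Anonymous access: x-signature is the MD5 of "timestamp-nonce-Bearer null", sent alongside x-nonce and x-timestamp.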
+ auth = "Bearer null"
+ nonce = get_random_hex()
+ data = f"{timestamp}-{nonce}-{auth}"
+ signature = hashlib.md5(data.encode()).hexdigest()
+
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
+ "Accept": "*/*",
+ "Accept-Language": "en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Referer": "https://flowgpt.com/",
+ "Content-Type": "application/json",
+ "Authorization": "Bearer null",
+ "Origin": "https://flowgpt.com",
+ "Connection": "keep-alive",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-site",
+ "TE": "trailers",
+ "Authorization": auth,
+ "x-flow-device-id": f"f-{get_random_string(19)}",
+ "x-nonce": nonce,
+ "x-signature": signature,
+ "x-timestamp": timestamp
+ }
+ async with ClientSession(headers=headers) as session:
+ history = [message for message in messages[:-1] if message["role"] != "system"]
+ system_message = "\n".join([message["content"] for message in messages if message["role"] == "system"])
+ if not system_message:
+ system_message = "You are helpful assistant. Follow the user's instructions carefully."
+ data = {
+ "model": model,
+ "nsfw": False,
+ "question": messages[-1]["content"],
+ "history": [{"role": "assistant", "content": "Hello, how can I help you today?"}, *history],
+ "system": system_message,
+ "temperature": temperature,
+ "promptId": f"model-{model}",
+ "documentIds": [],
+ "chatFileDocumentIds": [],
+ "generateImage": False,
+ "generateAudio": False
+ }
+ async with session.post("https://prod-backend-k8s.flowgpt.com/v3/chat-anonymous", json=data, proxy=proxy) as response:
+ await raise_for_status(response)
+ async for chunk in response.content:
+ if chunk.strip():
+ message = json.loads(chunk)
+ if "event" not in message:
+ continue
+ if message["event"] == "text":
+ yield message["data"]
diff --git a/g4f/Provider/not_working/FreeNetfly.py b/g4f/Provider/not_working/FreeNetfly.py
new file mode 100644
index 0000000000000000000000000000000000000000..8362019ccdc86de1e5d5d273810b8849a0d12983
--- /dev/null
+++ b/g4f/Provider/not_working/FreeNetfly.py
@@ -0,0 +1,105 @@
+from __future__ import annotations
+
+import json
+import asyncio
+from aiohttp import ClientSession, ClientTimeout, ClientError
+from typing import AsyncGenerator
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+
+
+class FreeNetfly(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://free.netfly.top"
+ api_endpoint = "/api/openai/v1/chat/completions"
+ working = False
+ default_model = 'gpt-3.5-turbo'
+ models = [
+ 'gpt-3.5-turbo',
+ 'gpt-4',
+ ]
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ headers = {
+ "accept": "application/json, text/event-stream",
+ "accept-language": "en-US,en;q=0.9",
+ "content-type": "application/json",
+ "dnt": "1",
+ "origin": cls.url,
+ "referer": f"{cls.url}/",
+ "sec-ch-ua": '"Not/A)Brand";v="8", "Chromium";v="126"',
+ "sec-ch-ua-mobile": "?0",
+ "sec-ch-ua-platform": '"Linux"',
+ "sec-fetch-dest": "empty",
+ "sec-fetch-mode": "cors",
+ "sec-fetch-site": "same-origin",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36",
+ }
+ data = {
+ "messages": messages,
+ "stream": True,
+ "model": model,
+ "temperature": 0.5,
+ "presence_penalty": 0,
+ "frequency_penalty": 0,
+ "top_p": 1
+ }
+
+ max_retries = 5
+ retry_delay = 2
+
+ for attempt in range(max_retries):
+ try:
+ async with ClientSession(headers=headers) as session:
+ timeout = ClientTimeout(total=60)
+ async with session.post(f"{cls.url}{cls.api_endpoint}", json=data, proxy=proxy, timeout=timeout) as response:
+ response.raise_for_status()
+ async for chunk in cls._process_response(response):
+ yield chunk
+ return # If successful, exit the function
+ except (ClientError, asyncio.TimeoutError) as e:
+ if attempt == max_retries - 1:
+ raise # If all retries failed, raise the last exception
+ await asyncio.sleep(retry_delay)
+ retry_delay *= 2 # Exponential backoff
+
+ @classmethod
+ async def _process_response(cls, response) -> AsyncGenerator[str, None]:
+ buffer = ""
+ async for line in response.content:
+ buffer += line.decode('utf-8')
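+            # A blank line (\n\n) terminates one SSE event; parse the buffered lines, then reset the buffer.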
+ if buffer.endswith('\n\n'):
+ for subline in buffer.strip().split('\n'):
+ if subline.startswith('data: '):
+ if subline == 'data: [DONE]':
+ return
+ try:
+ data = json.loads(subline[6:])
+ content = data['choices'][0]['delta'].get('content')
+ if content:
+ yield content
+ except json.JSONDecodeError:
+ print(f"Failed to parse JSON: {subline}")
+ except KeyError:
+ print(f"Unexpected JSON structure: {data}")
+ buffer = ""
+
+ # Process any remaining data in the buffer
+ if buffer:
+ for subline in buffer.strip().split('\n'):
+ if subline.startswith('data: ') and subline != 'data: [DONE]':
+ try:
+ data = json.loads(subline[6:])
+ content = data['choices'][0]['delta'].get('content')
+ if content:
+ yield content
+ except (json.JSONDecodeError, KeyError):
+ pass
+
diff --git a/g4f/Provider/not_working/GPROChat.py b/g4f/Provider/not_working/GPROChat.py
new file mode 100644
index 0000000000000000000000000000000000000000..52c7f947cadf93088ab7ca80724a193026350afd
--- /dev/null
+++ b/g4f/Provider/not_working/GPROChat.py
@@ -0,0 +1,67 @@
+from __future__ import annotations
+import hashlib
+import time
+from aiohttp import ClientSession
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import format_prompt
+
+class GPROChat(AsyncGeneratorProvider, ProviderModelMixin):
+ label = "GPROChat"
+ url = "https://gprochat.com"
+ api_endpoint = "https://gprochat.com/api/generate"
+ working = False
+ supports_stream = True
+ supports_message_history = True
+ default_model = 'gemini-pro'
+
+ @staticmethod
+ def generate_signature(timestamp: int, message: str) -> str:
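+        # Same signing scheme as AIChatFree: SHA-256 over "timestamp:message:secret", here with a fixed site-specific key.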
+ secret_key = "2BC120D4-BB36-1B60-26DE-DB630472A3D8"
+ hash_input = f"{timestamp}:{message}:{secret_key}"
+ signature = hashlib.sha256(hash_input.encode('utf-8')).hexdigest()
+ return signature
+
+ @classmethod
+ def get_model(cls, model: str) -> str:
+ if model in cls.models:
+ return model
+ elif model in cls.model_aliases:
+ return cls.model_aliases[model]
+ else:
+ return cls.default_model
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: str = None,
+ **kwargs
+ ) -> AsyncResult:
+ model = cls.get_model(model)
+ timestamp = int(time.time() * 1000)
+ prompt = format_prompt(messages)
+ sign = cls.generate_signature(timestamp, prompt)
+
+ headers = {
+ "accept": "*/*",
+ "origin": cls.url,
+ "referer": f"{cls.url}/",
+ "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
+ "content-type": "text/plain;charset=UTF-8"
+ }
+
+ data = {
+ "messages": [{"role": "user", "parts": [{"text": prompt}]}],
+ "time": timestamp,
+ "pass": None,
+ "sign": sign
+ }
+
+ async with ClientSession(headers=headers) as session:
+ async with session.post(cls.api_endpoint, json=data, proxy=proxy) as response:
+ response.raise_for_status()
+ async for chunk in response.content.iter_any():
+ if chunk:
+ yield chunk.decode()
diff --git a/g4f/Provider/not_working/Koala.py b/g4f/Provider/not_working/Koala.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6230da77bb5293da6243aff5c22f8337460175a
--- /dev/null
+++ b/g4f/Provider/not_working/Koala.py
@@ -0,0 +1,79 @@
+from __future__ import annotations
+
+import json
+from typing import AsyncGenerator, Optional, List, Dict, Union, Any
+from aiohttp import ClientSession, BaseConnector, ClientResponse
+
+from ...typing import AsyncResult, Messages
+from ..base_provider import AsyncGeneratorProvider, ProviderModelMixin
+from ..helper import get_random_string, get_connector
+from ...requests import raise_for_status
+
+class Koala(AsyncGeneratorProvider, ProviderModelMixin):
+ url = "https://koala.sh/chat"
+ api_endpoint = "https://koala.sh/api/gpt/"
+ working = False
+ supports_message_history = True
+ default_model = 'gpt-4o-mini'
+
+ @classmethod
+ async def create_async_generator(
+ cls,
+ model: str,
+ messages: Messages,
+ proxy: Optional[str] = None,
+ connector: Optional[BaseConnector] = None,
+ **kwargs: Any
+ ) -> AsyncGenerator[Dict[str, Union[str, int, float, List[Dict[str, Any]], None]], None]:
+ if not model:
+ model = "gpt-4o-mini"
+
+ headers = {
+ "User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0",
+ "Accept": "text/event-stream",
+ "Accept-Language": "de,en-US;q=0.7,en;q=0.3",
+ "Accept-Encoding": "gzip, deflate, br",
+ "Referer": f"{cls.url}",
+ "Flag-Real-Time-Data": "false",
+ "Visitor-ID": get_random_string(20),
+ "Origin": "https://koala.sh",
+ "Alt-Used": "koala.sh",
+ "Sec-Fetch-Dest": "empty",
+ "Sec-Fetch-Mode": "cors",
+ "Sec-Fetch-Site": "same-origin",
+ "TE": "trailers",
+ }
+
+ async with ClientSession(headers=headers, connector=get_connector(connector, proxy)) as session:
+ input_text = messages[-1]["content"]
+ system_messages = " ".join(
+ message["content"] for message in messages if message["role"] == "system"
+ )
+ if system_messages:
+ input_text += f" {system_messages}"
+
+ data = {
+ "input": input_text,
+ "inputHistory": [
+ message["content"]
+ for message in messages[:-1]
+ if message["role"] == "user"
+ ],
+ "outputHistory": [
+ message["content"]
+ for message in messages
+ if message["role"] == "assistant"
+ ],
+ "model": model,
+ }
+
+ async with session.post(f"{cls.api_endpoint}", json=data, proxy=proxy) as response:
+ await raise_for_status(response)
+ async for chunk in cls._parse_event_stream(response):
+ yield chunk
+
+ @staticmethod
+ async def _parse_event_stream(response: ClientResponse) -> AsyncGenerator[Dict[str, Any], None]:
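+        # Each SSE line has the form "data: {json}"; strip the 6-byte prefix and parse the JSON payload.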
+ async for chunk in response.content:
+ if chunk.startswith(b"data: "):
+ yield json.loads(chunk[6:])
diff --git a/g4f/Provider/not_working/MyShell.py b/g4f/Provider/not_working/MyShell.py
new file mode 100644
index 0000000000000000000000000000000000000000..02e182d46f879e8fa9f15522724b6fab3b8f7909
--- /dev/null
+++ b/g4f/Provider/not_working/MyShell.py
@@ -0,0 +1,76 @@
+from __future__ import annotations
+
+import time, json
+
+from ...typing import CreateResult, Messages
+from ..base_provider import AbstractProvider
+from ..helper import format_prompt
+from ...webdriver import WebDriver, WebDriverSession, bypass_cloudflare
+
+class MyShell(AbstractProvider):
+ url = "https://app.myshell.ai/chat"
+ working = False
+ supports_gpt_35_turbo = True
+ supports_stream = True
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ proxy: str = None,
+ timeout: int = 120,
+ webdriver: WebDriver = None,
+ **kwargs
+ ) -> CreateResult:
+ with WebDriverSession(webdriver, "", proxy=proxy) as driver:
+ bypass_cloudflare(driver, cls.url, timeout)
+
+ # Send request with message
+ data = {
+ "botId": "4738",
+ "conversation_scenario": 3,
+ "message": format_prompt(messages),
+ "messageType": 1
+ }
+ script = """
+response = await fetch("https://api.myshell.ai/v1/bot/chat/send_message", {
+ "headers": {
+ "accept": "application/json",
+ "content-type": "application/json",
+ "myshell-service-name": "organics-api",
+ "visitor-id": localStorage.getItem("mix_visitorId")
+ },
+ "body": '{body}',
+ "method": "POST"
+})
+window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
+"""
+ driver.execute_script(script.replace("{body}", json.dumps(data)))
+ script = """
+chunk = await window._reader.read();
+if (chunk.done) {
+ return null;
+}
+content = '';
+chunk.value.split('\\n').forEach((line, index) => {
+ if (line.startsWith('data: ')) {
+ try {
+ const data = JSON.parse(line.substring('data: '.length));
+ if ('content' in data) {
+ content += data['content'];
+ }
+ } catch(e) {}
+ }
+});
+return content;
+"""
+ while True:
+ chunk = driver.execute_script(script)
+ if chunk:
+ yield chunk
+ elif chunk != "":
+ break
+ else:
+ time.sleep(0.1)
diff --git a/g4f/Provider/not_working/__init__.py b/g4f/Provider/not_working/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1bfe7ed952d0ebb9c0ff74e68ecf00e3e4ea00e9
--- /dev/null
+++ b/g4f/Provider/not_working/__init__.py
@@ -0,0 +1,15 @@
+from .AI365VIP import AI365VIP
+from .AIChatFree import AIChatFree
+from .Ai4Chat import Ai4Chat
+from .AiChatOnline import AiChatOnline
+from .AiChats import AiChats
+from .Allyfy import Allyfy
+from .Aura import Aura
+from .Chatgpt4o import Chatgpt4o
+from .Chatgpt4Online import Chatgpt4Online
+from .ChatgptFree import ChatgptFree
+from .FlowGpt import FlowGpt
+from .FreeNetfly import FreeNetfly
+from .GPROChat import GPROChat
+from .Koala import Koala
+from .MyShell import MyShell
diff --git a/g4f/Provider/npm/package-lock.json b/g4f/Provider/npm/package-lock.json
new file mode 100644
index 0000000000000000000000000000000000000000..c22943c62c5ec661e4be35215e91cb4ce01e58f8
--- /dev/null
+++ b/g4f/Provider/npm/package-lock.json
@@ -0,0 +1,24 @@
+{
+ "name": "npm",
+ "lockfileVersion": 2,
+ "requires": true,
+ "packages": {
+ "": {
+ "dependencies": {
+ "crypto-js": "^4.2.0"
+ }
+ },
+ "node_modules/crypto-js": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/crypto-js/-/crypto-js-4.2.0.tgz",
+ "integrity": "sha512-KALDyEYgpY+Rlob/iriUtjV6d5Eq+Y191A5g4UqLAi8CyGP9N1+FdVbkc1SxKc2r4YAYqG8JzO2KGL+AizD70Q=="
+ }
+ },
+ "dependencies": {
+ "crypto-js": {
+ "version": "4.2.0",
+ "resolved": "https://registry.npmjs.org/crypto-js/-/crypto-js-4.2.0.tgz",
+ "integrity": "sha512-KALDyEYgpY+Rlob/iriUtjV6d5Eq+Y191A5g4UqLAi8CyGP9N1+FdVbkc1SxKc2r4YAYqG8JzO2KGL+AizD70Q=="
+ }
+ }
+}
diff --git a/g4f/Provider/npm/package.json b/g4f/Provider/npm/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..9bd309cbd122f297267e7debbdb19b19fc216fa3
--- /dev/null
+++ b/g4f/Provider/npm/package.json
@@ -0,0 +1,5 @@
+{
+ "dependencies": {
+ "crypto-js": "^4.2.0"
+ }
+}
diff --git a/g4f/Provider/openai/__init__.py b/g4f/Provider/openai/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/g4f/Provider/openai/crypt.py b/g4f/Provider/openai/crypt.py
new file mode 100644
index 0000000000000000000000000000000000000000..43156f3d733ae7721c34efe4ac647256f11ffc07
--- /dev/null
+++ b/g4f/Provider/openai/crypt.py
@@ -0,0 +1,68 @@
+from __future__ import annotations
+
+import json
+import base64
+import hashlib
+import random
+from Crypto.Cipher import AES
+
+def pad(data: str) -> bytes:
+ # Convert the string to bytes and calculate the number of bytes to pad
+ data_bytes = data.encode()
+ padding = 16 - (len(data_bytes) % 16)
+ # Append the padding bytes with their value
+ return data_bytes + bytes([padding] * padding)
+
+def encrypt(data, key):
+ salt = ""
+ salted = ""
+ dx = bytes()
+
+ # Generate salt, as 8 random lowercase letters
+ salt = "".join(random.choice("abcdefghijklmnopqrstuvwxyz") for _ in range(8))
+
+ # Our final key and IV come from the key and salt being repeatedly hashed
+ for x in range(3):
+ dx = hashlib.md5(dx + key.encode() + salt.encode()).digest()
+ salted += dx.hex()
+
+ # Pad the data before encryption
+ data = pad(data)
+
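+    # The first 32 bytes of the hash chain become the AES-256 key and the next 16 the CBC IV (an EVP_BytesToKey-style derivation).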
+ aes = AES.new(
+ bytes.fromhex(salted[:64]), AES.MODE_CBC, bytes.fromhex(salted[64:96])
+ )
+
+ return json.dumps(
+ {
+ "ct": base64.b64encode(aes.encrypt(data)).decode(),
+ "iv": salted[64:96],
+ "s": salt.encode().hex(),
+ }
+ )
+
+def unpad(data: bytes) -> bytes:
+ # Extract the padding value from the last byte and remove padding
+ padding_value = data[-1]
+ return data[:-padding_value]
+
+def decrypt(data: str, key: str):
+ # Parse JSON data
+ parsed_data = json.loads(base64.b64decode(data))
+ ct = base64.b64decode(parsed_data["ct"])
+ iv = bytes.fromhex(parsed_data["iv"])
+ salt = bytes.fromhex(parsed_data["s"])
+
+ salted = ''
+ dx = b''
+ for x in range(3):
+ dx = hashlib.md5(dx + key.encode() + salt).digest()
+ salted += dx.hex()
+
+ aes = AES.new(
+ bytes.fromhex(salted[:64]), AES.MODE_CBC, iv
+ )
+
+ data = aes.decrypt(ct)
+ if data.startswith(b'[{"key":'):
+ return unpad(data).decode()
\ No newline at end of file
diff --git a/g4f/Provider/openai/har_file.py b/g4f/Provider/openai/har_file.py
new file mode 100644
index 0000000000000000000000000000000000000000..e863b6acf7765e4f3059540c6c813c48b4cf6db9
--- /dev/null
+++ b/g4f/Provider/openai/har_file.py
@@ -0,0 +1,157 @@
+from __future__ import annotations
+
+import base64
+import json
+import os
+import re
+import time
+import uuid
+import random
+from urllib.parse import unquote
+from copy import deepcopy
+
+from .crypt import decrypt, encrypt
+from ...requests import StreamSession
+from ...cookies import get_cookies_dir
+from ... import debug
+
+arkose_url = "https://tcr9i.chat.openai.com/fc/gt2/public_key/35536E1E-65B4-4D96-9D97-6ADB7EFF8147"
+backend_url = "https://chatgpt.com/backend-api/conversation"
+backend_anon_url = "https://chatgpt.com/backend-anon/conversation"
+start_url = "https://chatgpt.com/"
+conversation_url = "https://chatgpt.com/c/"
+
+class NoValidHarFileError(Exception):
+ pass
+
+class RequestConfig:
+    access_request_id: str = None
+    access_token: str = None
+    proof_token: list = None
+    turnstile_token: str = None
+    arkose_request: arkReq = None
+    arkose_token: str = None
+    headers: dict = {}
+    cookies: dict = {}
+
+class arkReq:
+ def __init__(self, arkURL, arkBx, arkHeader, arkBody, arkCookies, userAgent):
+ self.arkURL = arkURL
+ self.arkBx = arkBx
+ self.arkHeader = arkHeader
+ self.arkBody = arkBody
+ self.arkCookies = arkCookies
+ self.userAgent = userAgent
+
+def readHAR():
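+    # Walk the cookies directory for browser-exported .har files and harvest tokens, headers and cookies from them.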
+ harPath = []
+ for root, _, files in os.walk(get_cookies_dir()):
+ for file in files:
+ if file.endswith(".har"):
+ harPath.append(os.path.join(root, file))
+ if not harPath:
+ raise NoValidHarFileError("No .har file found")
+ for path in harPath:
+ with open(path, 'rb') as file:
+ try:
+ harFile = json.loads(file.read())
+ except json.JSONDecodeError:
+ # Error: not a HAR file!
+ continue
+ for v in harFile['log']['entries']:
+ v_headers = get_headers(v)
+ if arkose_url == v['request']['url']:
+ RequestConfig.arkose_request = parseHAREntry(v)
+ elif v['request']['url'].startswith(start_url):
+ try:
+ match = re.search(r'"accessToken":"(.*?)"', v["response"]["content"]["text"])
+ if match:
+ RequestConfig.access_token = match.group(1)
+ except KeyError:
+ pass
+ try:
+ if "openai-sentinel-proof-token" in v_headers:
+ RequestConfig.headers = v_headers
+ RequestConfig.proof_token = json.loads(base64.b64decode(
+ v_headers["openai-sentinel-proof-token"].split("gAAAAAB", 1)[-1].encode()
+ ).decode())
+ if "openai-sentinel-turnstile-token" in v_headers:
+ RequestConfig.turnstile_token = v_headers["openai-sentinel-turnstile-token"]
+ if "authorization" in v_headers:
+ RequestConfig.access_token = v_headers["authorization"].split(" ")[1]
+ RequestConfig.cookies = {c['name']: c['value'] for c in v['request']['cookies']}
+ except Exception as e:
+ debug.log(f"Error on read headers: {e}")
+ if RequestConfig.proof_token is None:
+ raise NoValidHarFileError("No proof_token found in .har files")
+
+def get_headers(entry) -> dict:
+ return {h['name'].lower(): h['value'] for h in entry['request']['headers'] if h['name'].lower() not in ['content-length', 'cookie'] and not h['name'].startswith(':')}
+
+def parseHAREntry(entry) -> arkReq:
+ tmpArk = arkReq(
+ arkURL=entry['request']['url'],
+ arkBx="",
+ arkHeader=get_headers(entry),
+ arkBody={p['name']: unquote(p['value']) for p in entry['request']['postData']['params'] if p['name'] not in ['rnd']},
+ arkCookies={c['name']: c['value'] for c in entry['request']['cookies']},
+ userAgent=""
+ )
+ tmpArk.userAgent = tmpArk.arkHeader.get('user-agent', '')
+ bda = tmpArk.arkBody["bda"]
+ bw = tmpArk.arkHeader['x-ark-esync-value']
+ tmpArk.arkBx = decrypt(bda, tmpArk.userAgent + bw)
+ return tmpArk
+
+def genArkReq(chatArk: arkReq) -> arkReq:
+ tmpArk: arkReq = deepcopy(chatArk)
+ if tmpArk is None or not tmpArk.arkBody or not tmpArk.arkHeader:
+ raise RuntimeError("The .har file is not valid")
+ bda, bw = getBDA(tmpArk)
+
+ tmpArk.arkBody['bda'] = base64.b64encode(bda.encode()).decode()
+ tmpArk.arkBody['rnd'] = str(random.random())
+ tmpArk.arkHeader['x-ark-esync-value'] = bw
+ return tmpArk
+
+async def sendRequest(tmpArk: arkReq, proxy: str = None) -> str:
+ async with StreamSession(headers=tmpArk.arkHeader, cookies=tmpArk.arkCookies, proxies={"https": proxy}) as session:
+ async with session.post(tmpArk.arkURL, data=tmpArk.arkBody) as response:
+ data = await response.json()
+ arkose = data.get("token")
+ if "sup=1|rid=" not in arkose:
+ return RuntimeError("No valid arkose token generated")
+ return arkose
+
+def getBDA(ark: arkReq) -> tuple[str, str]:
+    bx = ark.arkBx
+
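+    # Refresh the timestamp-based "n" field and the device UUID inside the decrypted fingerprint before re-encrypting it.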
+ bx = re.sub(r'"key":"n","value":"\S*?"', f'"key":"n","value":"{getN()}"', bx)
+ oldUUID_search = re.search(r'"key":"4b4b269e68","value":"(\S*?)"', bx)
+ if oldUUID_search:
+ oldUUID = oldUUID_search.group(1)
+ newUUID = str(uuid.uuid4())
+ bx = bx.replace(oldUUID, newUUID)
+
+ bw = getBw(getBt())
+    encrypted_bx = encrypt(bx, ark.userAgent + bw)
+ return encrypted_bx, bw
+
+def getBt() -> int:
+ return int(time.time())
+
+def getBw(bt: int) -> str:
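+    # Floor the timestamp to the current 6-hour (21600 s) window, matching the browser's x-ark-esync-value.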
+ return str(bt - (bt % 21600))
+
+def getN() -> str:
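+    # The fingerprint's "n" value is just the base64-encoded Unix timestamp.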
+ timestamp = str(int(time.time()))
+ return base64.b64encode(timestamp.encode()).decode()
+
+async def get_request_config(proxy: str) -> RequestConfig:
+ if RequestConfig.proof_token is None:
+ readHAR()
+ if RequestConfig.arkose_request is not None:
+ RequestConfig.arkose_token = await sendRequest(genArkReq(RequestConfig.arkose_request), proxy)
+ return RequestConfig
\ No newline at end of file
diff --git a/g4f/Provider/openai/new.py b/g4f/Provider/openai/new.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4d8e13d35ecb0bb13ffc5f1bb192f8e40677e82
--- /dev/null
+++ b/g4f/Provider/openai/new.py
@@ -0,0 +1,730 @@
+import hashlib
+import base64
+import random
+import json
+import time
+import uuid
+
+from collections import OrderedDict, defaultdict
+from typing import Any, Callable, Dict, List
+
+from datetime import (
+ datetime,
+ timedelta,
+ timezone
+)
+
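+# Candidate hardwareConcurrency values and screen sizes used when generating the proof-of-work token.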
+cores = [16, 24, 32]
+screens = [3000, 4000, 6000]
+maxAttempts = 500000
+
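+# Navigator property/value pairs (minus-sign separated) as captured from an Edge 125 browser; sampled when building the proof-of-work fingerprint.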
+navigator_keys = [
+ "registerProtocolHandlerāfunction registerProtocolHandler() { [native code] }",
+ "storageā[object StorageManager]",
+ "locksā[object LockManager]",
+ "appCodeNameāMozilla",
+ "permissionsā[object Permissions]",
+ "appVersionā5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
+ "shareāfunction share() { [native code] }",
+ "webdriverāfalse",
+ "managedā[object NavigatorManagedData]",
+ "canShareāfunction canShare() { [native code] }",
+ "vendorāGoogle Inc.",
+ "vendorāGoogle Inc.",
+ "mediaDevicesā[object MediaDevices]",
+ "vibrateāfunction vibrate() { [native code] }",
+ "storageBucketsā[object StorageBucketManager]",
+ "mediaCapabilitiesā[object MediaCapabilities]",
+ "getGamepadsāfunction getGamepads() { [native code] }",
+ "bluetoothā[object Bluetooth]",
+ "shareāfunction share() { [native code] }",
+ "cookieEnabledātrue",
+ "virtualKeyboardā[object VirtualKeyboard]",
+ "productāGecko",
+ "mediaDevicesā[object MediaDevices]",
+ "canShareāfunction canShare() { [native code] }",
+ "getGamepadsāfunction getGamepads() { [native code] }",
+ "productāGecko",
+ "xrā[object XRSystem]",
+ "clipboardā[object Clipboard]",
+ "storageBucketsā[object StorageBucketManager]",
+ "unregisterProtocolHandlerāfunction unregisterProtocolHandler() { [native code] }",
+ "productSubā20030107",
+ "loginā[object NavigatorLogin]",
+ "vendorSubā",
+ "loginā[object NavigatorLogin]",
+ "userAgentāMozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
+ "getInstalledRelatedAppsāfunction getInstalledRelatedApps() { [native code] }",
+ "userAgentāMozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
+ "mediaDevicesā[object MediaDevices]",
+ "locksā[object LockManager]",
+ "webkitGetUserMediaāfunction webkitGetUserMedia() { [native code] }",
+ "vendorāGoogle Inc.",
+ "xrā[object XRSystem]",
+ "mediaDevicesā[object MediaDevices]",
+ "virtualKeyboardā[object VirtualKeyboard]",
+ "userAgentāMozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
+ "virtualKeyboardā[object VirtualKeyboard]",
+ "appNameāNetscape",
+ "storageBucketsā[object StorageBucketManager]",
+ "presentationā[object Presentation]",
+ "onLineātrue",
+ "mimeTypesā[object MimeTypeArray]",
+ "credentialsā[object CredentialsContainer]",
+ "presentationā[object Presentation]",
+ "getGamepadsāfunction getGamepads() { [native code] }",
+ "vendorSubā",
+ "virtualKeyboardā[object VirtualKeyboard]",
+ "serviceWorkerā[object ServiceWorkerContainer]",
+ "xrā[object XRSystem]",
+ "productāGecko",
+ "keyboardā[object Keyboard]",
+ "gpuā[object GPU]",
+ "getInstalledRelatedAppsāfunction getInstalledRelatedApps() { [native code] }",
+ "webkitPersistentStorageā[object DeprecatedStorageQuota]",
+ "doNotTrack",
+ "clearAppBadgeāfunction clearAppBadge() { [native code] }",
+ "presentationā[object Presentation]",
+ "serialā[object Serial]",
+ "locksā[object LockManager]",
+ "requestMIDIAccessāfunction requestMIDIAccess() { [native code] }",
+ "locksā[object LockManager]",
+ "requestMediaKeySystemAccessāfunction requestMediaKeySystemAccess() { [native code] }",
+ "vendorāGoogle Inc.",
+ "pdfViewerEnabledātrue",
+ "languageāzh-CN",
+ "setAppBadgeāfunction setAppBadge() { [native code] }",
+ "geolocationā[object Geolocation]",
+ "userAgentDataā[object NavigatorUAData]",
+ "mediaCapabilitiesā[object MediaCapabilities]",
+ "requestMIDIAccessāfunction requestMIDIAccess() { [native code] }",
+ "getUserMediaāfunction getUserMedia() { [native code] }",
+ "mediaDevicesā[object MediaDevices]",
+ "webkitPersistentStorageā[object DeprecatedStorageQuota]",
+ "userAgentāMozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
+ "sendBeaconāfunction sendBeacon() { [native code] }",
+ "hardwareConcurrencyā32",
+ "appVersionā5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
+ "credentialsā[object CredentialsContainer]",
+ "storageā[object StorageManager]",
+ "cookieEnabledātrue",
+ "pdfViewerEnabledātrue",
+ "windowControlsOverlayā[object WindowControlsOverlay]",
+ "schedulingā[object Scheduling]",
+ "pdfViewerEnabledātrue",
+ "hardwareConcurrencyā32",
+ "xrā[object XRSystem]",
+ "userAgentāMozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 Edg/125.0.0.0",
+ "webdriverāfalse",
+ "getInstalledRelatedAppsāfunction getInstalledRelatedApps() { [native code] }",
+ "getInstalledRelatedAppsāfunction getInstalledRelatedApps() { [native code] }",
+ "bluetoothā[object Bluetooth]"
+]
+
+window_keys = [
+ "0",
+ "window",
+ "self",
+ "document",
+ "name",
+ "location",
+ "customElements",
+ "history",
+ "navigation",
+ "locationbar",
+ "menubar",
+ "personalbar",
+ "scrollbars",
+ "statusbar",
+ "toolbar",
+ "status",
+ "closed",
+ "frames",
+ "length",
+ "top",
+ "opener",
+ "parent",
+ "frameElement",
+ "navigator",
+ "origin",
+ "external",
+ "screen",
+ "innerWidth",
+ "innerHeight",
+ "scrollX",
+ "pageXOffset",
+ "scrollY",
+ "pageYOffset",
+ "visualViewport",
+ "screenX",
+ "screenY",
+ "outerWidth",
+ "outerHeight",
+ "devicePixelRatio",
+ "clientInformation",
+ "screenLeft",
+ "screenTop",
+ "styleMedia",
+ "onsearch",
+ "isSecureContext",
+ "trustedTypes",
+ "performance",
+ "onappinstalled",
+ "onbeforeinstallprompt",
+ "crypto",
+ "indexedDB",
+ "sessionStorage",
+ "localStorage",
+ "onbeforexrselect",
+ "onabort",
+ "onbeforeinput",
+ "onbeforematch",
+ "onbeforetoggle",
+ "onblur",
+ "oncancel",
+ "oncanplay",
+ "oncanplaythrough",
+ "onchange",
+ "onclick",
+ "onclose",
+ "oncontentvisibilityautostatechange",
+ "oncontextlost",
+ "oncontextmenu",
+ "oncontextrestored",
+ "oncuechange",
+ "ondblclick",
+ "ondrag",
+ "ondragend",
+ "ondragenter",
+ "ondragleave",
+ "ondragover",
+ "ondragstart",
+ "ondrop",
+ "ondurationchange",
+ "onemptied",
+ "onended",
+ "onerror",
+ "onfocus",
+ "onformdata",
+ "oninput",
+ "oninvalid",
+ "onkeydown",
+ "onkeypress",
+ "onkeyup",
+ "onload",
+ "onloadeddata",
+ "onloadedmetadata",
+ "onloadstart",
+ "onmousedown",
+ "onmouseenter",
+ "onmouseleave",
+ "onmousemove",
+ "onmouseout",
+ "onmouseover",
+ "onmouseup",
+ "onmousewheel",
+ "onpause",
+ "onplay",
+ "onplaying",
+ "onprogress",
+ "onratechange",
+ "onreset",
+ "onresize",
+ "onscroll",
+ "onsecuritypolicyviolation",
+ "onseeked",
+ "onseeking",
+ "onselect",
+ "onslotchange",
+ "onstalled",
+ "onsubmit",
+ "onsuspend",
+ "ontimeupdate",
+ "ontoggle",
+ "onvolumechange",
+ "onwaiting",
+ "onwebkitanimationend",
+ "onwebkitanimationiteration",
+ "onwebkitanimationstart",
+ "onwebkittransitionend",
+ "onwheel",
+ "onauxclick",
+ "ongotpointercapture",
+ "onlostpointercapture",
+ "onpointerdown",
+ "onpointermove",
+ "onpointerrawupdate",
+ "onpointerup",
+ "onpointercancel",
+ "onpointerover",
+ "onpointerout",
+ "onpointerenter",
+ "onpointerleave",
+ "onselectstart",
+ "onselectionchange",
+ "onanimationend",
+ "onanimationiteration",
+ "onanimationstart",
+ "ontransitionrun",
+ "ontransitionstart",
+ "ontransitionend",
+ "ontransitioncancel",
+ "onafterprint",
+ "onbeforeprint",
+ "onbeforeunload",
+ "onhashchange",
+ "onlanguagechange",
+ "onmessage",
+ "onmessageerror",
+ "onoffline",
+ "ononline",
+ "onpagehide",
+ "onpageshow",
+ "onpopstate",
+ "onrejectionhandled",
+ "onstorage",
+ "onunhandledrejection",
+ "onunload",
+ "crossOriginIsolated",
+ "scheduler",
+ "alert",
+ "atob",
+ "blur",
+ "btoa",
+ "cancelAnimationFrame",
+ "cancelIdleCallback",
+ "captureEvents",
+ "clearInterval",
+ "clearTimeout",
+ "close",
+ "confirm",
+ "createImageBitmap",
+ "fetch",
+ "find",
+ "focus",
+ "getComputedStyle",
+ "getSelection",
+ "matchMedia",
+ "moveBy",
+ "moveTo",
+ "open",
+ "postMessage",
+ "print",
+ "prompt",
+ "queueMicrotask",
+ "releaseEvents",
+ "reportError",
+ "requestAnimationFrame",
+ "requestIdleCallback",
+ "resizeBy",
+ "resizeTo",
+ "scroll",
+ "scrollBy",
+ "scrollTo",
+ "setInterval",
+ "setTimeout",
+ "stop",
+ "structuredClone",
+ "webkitCancelAnimationFrame",
+ "webkitRequestAnimationFrame",
+ "chrome",
+ "g_opr",
+ "opr",
+ "ethereum",
+ "caches",
+ "cookieStore",
+ "ondevicemotion",
+ "ondeviceorientation",
+ "ondeviceorientationabsolute",
+ "launchQueue",
+ "documentPictureInPicture",
+ "getScreenDetails",
+ "queryLocalFonts",
+ "showDirectoryPicker",
+ "showOpenFilePicker",
+ "showSaveFilePicker",
+ "originAgentCluster",
+ "credentialless",
+ "speechSynthesis",
+ "onscrollend",
+ "webkitRequestFileSystem",
+ "webkitResolveLocalFileSystemURL",
+ "__remixContext",
+ "__oai_SSR_TTI",
+ "__remixManifest",
+ "__reactRouterVersion",
+ "DD_RUM",
+ "__REACT_INTL_CONTEXT__",
+ "filterCSS",
+ "filterXSS",
+ "__SEGMENT_INSPECTOR__",
+ "DD_LOGS",
+ "regeneratorRuntime",
+ "_g",
+ "__remixRouteModules",
+ "__remixRouter",
+ "__STATSIG_SDK__",
+ "__STATSIG_JS_SDK__",
+ "__STATSIG_RERENDER_OVERRIDE__",
+ "_oaiHandleSessionExpired"
+]
+
+def get_parse_time():
+ now = datetime.now(timezone(timedelta(hours=-5)))
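+    # Note: the captured fingerprint pairs a UTC-5 clock with a hardcoded
+    # "GMT+0200" label; the mismatch is kept as-is, since only the overall
+    # string format appears to matter.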
+ return now.strftime("%a %b %d %Y %H:%M:%S") + " GMT+0200 (Central European Summer Time)"
+
+def get_config(user_agent):
+
+ core = random.choice(cores)
+ screen = random.choice(screens)
+
+ # partially hardcoded config
+ config = [
+ core + screen,
+ get_parse_time(),
+ 4294705152,
+ random.random(),
+ user_agent,
+ None,
+ "remix-prod-15f1ec0f78ad898b9606a88d384ef76345b82b82", #document.documentElement.getAttribute("data-build"),
+ "en-US",
+ "en-US,es-US,en,es",
+ 0,
+ random.choice(navigator_keys),
+ 'location',
+ random.choice(window_keys),
+ time.perf_counter(),
+ str(uuid.uuid4()),
+ ]
+
+ return config
+
+
+def get_answer_token(seed, diff, config):
+ answer, solved = generate_answer(seed, diff, config)
+
+ if solved:
+ return "gAAAAAB" + answer
+ else:
+ raise Exception("Failed to solve 'gAAAAAB' challenge")
+
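+# Hashcash-style proof of work: config slots 3 and 9 are excluded from the
+# static parts p1/p2/p3 and replaced by the counters d1 and d2; the counter is
+# brute-forced until sha3-512(seed + base64(config)) meets the difficulty prefix.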
+def generate_answer(seed, diff, config):
+    diff_len = len(diff) // 2  # diff is a hex string; compare the matching number of raw digest bytes
+ seed_encoded = seed.encode()
+ p1 = (json.dumps(config[:3], separators=(',', ':'), ensure_ascii=False)[:-1] + ',').encode()
+ p2 = (',' + json.dumps(config[4:9], separators=(',', ':'), ensure_ascii=False)[1:-1] + ',').encode()
+ p3 = (',' + json.dumps(config[10:], separators=(',', ':'), ensure_ascii=False)[1:]).encode()
+
+ target_diff = bytes.fromhex(diff)
+
+ for i in range(maxAttempts):
+ d1 = str(i).encode()
+ d2 = str(i >> 1).encode()
+
+ string = (
+ p1
+ + d1
+ + p2
+ + d2
+ + p3
+ )
+
+ base_encode = base64.b64encode(string)
+ hash_value = hashlib.new("sha3_512", seed_encoded + base_encode).digest()
+
+ if hash_value[:diff_len] <= target_diff:
+ return base_encode.decode(), True
+
+ return 'wQ8Lk5FbGpA2NcR9dShT6gYjU7VxZ4D' + base64.b64encode(f'"{seed}"'.encode()).decode(), False
+
+def get_requirements_token(config):
+ require, solved = generate_answer(format(random.random()), "0fffff", config)
+
+ if solved:
+ return 'gAAAAAC' + require
+ else:
+ raise Exception("Failed to solve 'gAAAAAC' challenge")
+
+
+### processing turnstile token
+
+class OrderedMap:
+ def __init__(self):
+ self.map = OrderedDict()
+
+ def add(self, key: str, value: Any):
+ self.map[key] = value
+
+ def to_json(self):
+ return json.dumps(self.map)
+
+ def __str__(self):
+ return self.to_json()
+
+
+TurnTokenList = List[List[Any]]
+FloatMap = Dict[float, Any]
+StringMap = Dict[str, Any]
+FuncType = Callable[..., Any]
+
+start_time = time.time()
+
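+# The Turnstile token arrives base64 encoded and XOR-encrypted with the key p;
+# decoding yields a JSON list of opcodes that is interpreted below.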
+def get_turnstile_token(dx: str, p: str) -> str:
+ decoded_bytes = base64.b64decode(dx)
+ # print(decoded_bytes.decode())
+ return process_turnstile_token(decoded_bytes.decode(), p)
+
+
+def process_turnstile_token(dx: str, p: str) -> str:
+ result = []
+ p_length = len(p)
+ if p_length != 0:
+ for i, r in enumerate(dx):
+ result.append(chr(ord(r) ^ ord(p[i % p_length])))
+ else:
+ result = list(dx)
+ return "".join(result)
+
+
+def is_slice(input_val: Any) -> bool:
+ return isinstance(input_val, (list, tuple))
+
+
+def is_float(input_val: Any) -> bool:
+ return isinstance(input_val, float)
+
+
+def is_string(input_val: Any) -> bool:
+ return isinstance(input_val, str)
+
+
+def to_str(input_val: Any) -> str:
+ if input_val is None:
+ return "undefined"
+ elif is_float(input_val):
+ return f"{input_val:.16g}"
+ elif is_string(input_val):
+ special_cases = {
+ "window.Math": "[object Math]",
+ "window.Reflect": "[object Reflect]",
+ "window.performance": "[object Performance]",
+ "window.localStorage": "[object Storage]",
+ "window.Object": "function Object() { [native code] }",
+ "window.Reflect.set": "function set() { [native code] }",
+ "window.performance.now": "function () { [native code] }",
+ "window.Object.create": "function create() { [native code] }",
+ "window.Object.keys": "function keys() { [native code] }",
+ "window.Math.random": "function random() { [native code] }",
+ }
+ return special_cases.get(input_val, input_val)
+ elif isinstance(input_val, list) and all(
+ isinstance(item, str) for item in input_val
+ ):
+ return ",".join(input_val)
+ else:
+ # print(f"Type of input is: {type(input_val)}")
+ return str(input_val)
+
+
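+# Opcode table for the token interpreter: each handler reads and writes a shared
+# register map keyed by float ids, mirroring the obfuscated script that runs in
+# the browser.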
+def get_func_map() -> FloatMap:
+ process_map: FloatMap = defaultdict(lambda: None)
+
+ def func_1(e: float, t: float):
+ e_str = to_str(process_map[e])
+ t_str = to_str(process_map[t])
+ if e_str is not None and t_str is not None:
+ res = process_turnstile_token(e_str, t_str)
+ process_map[e] = res
+ else:
+ pass
+ # print(f"Warning: Unable to process func_1 for e={e}, t={t}")
+
+ def func_2(e: float, t: Any):
+ process_map[e] = t
+
+ def func_5(e: float, t: float):
+ n = process_map[e]
+ tres = process_map[t]
+ if n is None:
+ process_map[e] = tres
+ elif is_slice(n):
+ nt = n + [tres] if tres is not None else n
+ process_map[e] = nt
+ else:
+ if is_string(n) or is_string(tres):
+ res = to_str(n) + to_str(tres)
+ elif is_float(n) and is_float(tres):
+ res = n + tres
+ else:
+ res = "NaN"
+ process_map[e] = res
+
+ def func_6(e: float, t: float, n: float):
+ tv = process_map[t]
+ nv = process_map[n]
+ if is_string(tv) and is_string(nv):
+ res = f"{tv}.{nv}"
+ if res == "window.document.location":
+ process_map[e] = "https://chatgpt.com/"
+ else:
+ process_map[e] = res
+ else:
+ pass
+ # print("func type 6 error")
+
+ def func_24(e: float, t: float, n: float):
+ tv = process_map[t]
+ nv = process_map[n]
+ if is_string(tv) and is_string(nv):
+ process_map[e] = f"{tv}.{nv}"
+ else:
+ pass
+ # print("func type 24 error")
+
+ def func_7(e: float, *args):
+ n = [process_map[arg] for arg in args]
+ ev = process_map[e]
+ if isinstance(ev, str):
+ if ev == "window.Reflect.set":
+ obj = n[0]
+ key_str = str(n[1])
+ val = n[2]
+ obj.add(key_str, val)
+ elif callable(ev):
+ ev(*n)
+
+ def func_17(e: float, t: float, *args):
+ i = [process_map[arg] for arg in args]
+ tv = process_map[t]
+ res = None
+ if isinstance(tv, str):
+ if tv == "window.performance.now":
+ current_time = time.time_ns()
+ elapsed_ns = current_time - int(start_time * 1e9)
+ res = (elapsed_ns + random.random()) / 1e6
+ elif tv == "window.Object.create":
+ res = OrderedMap()
+ elif tv == "window.Object.keys":
+ if isinstance(i[0], str) and i[0] == "window.localStorage":
+ res = [
+ "STATSIG_LOCAL_STORAGE_INTERNAL_STORE_V4",
+ "STATSIG_LOCAL_STORAGE_STABLE_ID",
+ "client-correlated-secret",
+ "oai/apps/capExpiresAt",
+ "oai-did",
+ "STATSIG_LOCAL_STORAGE_LOGGING_REQUEST",
+ "UiState.isNavigationCollapsed.1",
+ ]
+ elif tv == "window.Math.random":
+ res = random.random()
+ elif callable(tv):
+ res = tv(*i)
+ process_map[e] = res
+
+ def func_8(e: float, t: float):
+ process_map[e] = process_map[t]
+
+ def func_14(e: float, t: float):
+ tv = process_map[t]
+ if is_string(tv):
+ try:
+ token_list = json.loads(tv)
+ process_map[e] = token_list
+ except json.JSONDecodeError:
+ # print(f"Warning: Unable to parse JSON for key {t}")
+ process_map[e] = None
+ else:
+ # print(f"Warning: Value for key {t} is not a string")
+ process_map[e] = None
+
+ def func_15(e: float, t: float):
+ tv = process_map[t]
+ process_map[e] = json.dumps(tv)
+
+ def func_18(e: float):
+ ev = process_map[e]
+ e_str = to_str(ev)
+ decoded = base64.b64decode(e_str).decode()
+ process_map[e] = decoded
+
+ def func_19(e: float):
+ ev = process_map[e]
+ e_str = to_str(ev)
+ encoded = base64.b64encode(e_str.encode()).decode()
+ process_map[e] = encoded
+
+ def func_20(e: float, t: float, n: float, *args):
+ o = [process_map[arg] for arg in args]
+ ev = process_map[e]
+ tv = process_map[t]
+ if ev == tv:
+ nv = process_map[n]
+ if callable(nv):
+ nv(*o)
+ else:
+ pass
+ # print("func type 20 error")
+
+ def func_21(*args):
+ pass
+
+ def func_23(e: float, t: float, *args):
+ i = list(args)
+ ev = process_map[e]
+ tv = process_map[t]
+ if ev is not None and callable(tv):
+ tv(*i)
+
+ process_map.update(
+ {
+ 1: func_1,
+ 2: func_2,
+ 5: func_5,
+ 6: func_6,
+ 7: func_7,
+ 8: func_8,
+ 10: "window",
+ 14: func_14,
+ 15: func_15,
+ 17: func_17,
+ 18: func_18,
+ 19: func_19,
+ 20: func_20,
+ 21: func_21,
+ 23: func_23,
+ 24: func_24,
+ }
+ )
+
+ return process_map
+
+
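+# Run the instruction list: register 9 holds the decoded token, register 16 the
+# key, and opcode 3 captures the final result as base64.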
+def process_turnstile(dx: str, p: str) -> str:
+ tokens = get_turnstile_token(dx, p)
+ res = ""
+ token_list = json.loads(tokens)
+ process_map = get_func_map()
+
+ def func_3(e: str):
+ nonlocal res
+ res = base64.b64encode(e.encode()).decode()
+
+ process_map[3] = func_3
+ process_map[9] = token_list
+ process_map[16] = p
+
+ for token in token_list:
+ try:
+ e = token[0]
+ t = token[1:]
+ f = process_map.get(e)
+ if callable(f):
+ f(*t)
+ else:
+ pass
+ # print(f"Warning: No function found for key {e}")
+ except Exception as exc:
+ raise Exception(f"Error processing token {token}: {exc}")
+ # print(f"Error processing token {token}: {exc}")
+
+ return res
\ No newline at end of file
diff --git a/g4f/Provider/openai/proofofwork.py b/g4f/Provider/openai/proofofwork.py
new file mode 100644
index 0000000000000000000000000000000000000000..4294c99a3756fe27d859afc7985fb9695a147ee7
--- /dev/null
+++ b/g4f/Provider/openai/proofofwork.py
@@ -0,0 +1,38 @@
+import random
+import hashlib
+import json
+import base64
+from datetime import datetime, timezone
+
+def generate_proof_token(required: bool, seed: str = "", difficulty: str = "", user_agent: str = None, proof_token: str = None):
+ if not required:
+ return
+
+ if proof_token is None:
+ screen = random.choice([3008, 4010, 6000]) * random.choice([1, 2, 4])
+ # Get current UTC time
+ now_utc = datetime.now(timezone.utc)
+ parse_time = now_utc.strftime('%a, %d %b %Y %H:%M:%S GMT')
+ proof_token = [
+ screen, parse_time,
+ None, 0, user_agent,
+ "https://tcr9i.chat.openai.com/v2/35536E1E-65B4-4D96-9D97-6ADB7EFF8147/api.js",
+ "dpl=1440a687921de39ff5ee56b92807faaadce73f13","en","en-US",
+ None,
+ "pluginsā[object PluginArray]",
+ random.choice(["_reactListeningcfilawjnerp", "_reactListening9ne2dfo1i47", "_reactListening410nzwhan2a"]),
+ random.choice(["alert", "ontransitionend", "onprogress"])
+ ]
+
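+    # Brute-force the counter in slot 3 until the hex digest meets the
+    # difficulty prefix; the token is "gAAAAAB" plus the base64 config.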
+ diff_len = len(difficulty)
+ for i in range(100000):
+ proof_token[3] = i
+ json_data = json.dumps(proof_token)
+ base = base64.b64encode(json_data.encode()).decode()
+ hash_value = hashlib.sha3_512((seed + base).encode()).digest()
+
+ if hash_value.hex()[:diff_len] <= difficulty:
+ return "gAAAAAB" + base
+
+ fallback_base = base64.b64encode(f'"{seed}"'.encode()).decode()
+ return "gAAAAABwQ8Lk5FbGpA2NcR9dShT6gYjU7VxZ4D" + fallback_base
diff --git a/g4f/Provider/selenium/PerplexityAi.py b/g4f/Provider/selenium/PerplexityAi.py
new file mode 100644
index 0000000000000000000000000000000000000000..d965dbf70dfb15189f918dcd42a2be67ba35a40c
--- /dev/null
+++ b/g4f/Provider/selenium/PerplexityAi.py
@@ -0,0 +1,108 @@
+from __future__ import annotations
+
+import time
+
+try:
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support.ui import WebDriverWait
+ from selenium.webdriver.support import expected_conditions as EC
+except ImportError:
+ pass
+
+from ...typing import CreateResult, Messages
+from ..base_provider import AbstractProvider
+from ..helper import format_prompt
+from ...webdriver import WebDriver, WebDriverSession, element_send_text
+
+class PerplexityAi(AbstractProvider):
+ url = "https://www.perplexity.ai"
+ working = False
+ supports_gpt_35_turbo = True
+ supports_stream = True
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ proxy: str = None,
+ timeout: int = 120,
+ webdriver: WebDriver = None,
+ virtual_display: bool = True,
+ copilot: bool = False,
+ **kwargs
+ ) -> CreateResult:
+ with WebDriverSession(webdriver, "", virtual_display=virtual_display, proxy=proxy) as driver:
+ prompt = format_prompt(messages)
+
+ driver.get(f"{cls.url}/")
+ wait = WebDriverWait(driver, timeout)
+
+ # Is page loaded?
+ wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "textarea[placeholder='Ask anything...']")))
+
+ # Register WebSocket hook
+ script = """
+window._message = window._last_message = "";
+window._message_finished = false;
+const _socket_send = WebSocket.prototype.send;
+WebSocket.prototype.send = function(...args) {
+    if (!window._socket_onmessage) {
+ window._socket_onmessage = this;
+ this.addEventListener("message", (event) => {
+ if (event.data.startsWith("42")) {
+ let data = JSON.parse(event.data.substring(2));
+ if (data[0] =="query_progress" || data[0] == "query_answered") {
+ let content = JSON.parse(data[1]["text"]);
+ if (data[1]["mode"] == "copilot") {
+ content = content[content.length-1]["content"]["answer"];
+ content = JSON.parse(content);
+ }
+ window._message = content["answer"];
+ if (!window._message_finished) {
+ window._message_finished = data[0] == "query_answered";
+ }
+ }
+ }
+ });
+ }
+ return _socket_send.call(this, ...args);
+};
+"""
+ driver.execute_script(script)
+
+ if copilot:
+ try:
+ # Check for account
+ driver.find_element(By.CSS_SELECTOR, "img[alt='User avatar']")
+ # Enable copilot
+ driver.find_element(By.CSS_SELECTOR, "button[data-testid='copilot-toggle']").click()
+                except Exception:
+                    raise RuntimeError("You need an account to use Copilot")
+
+ # Submit prompt
+ element_send_text(driver.find_element(By.CSS_SELECTOR, "textarea[placeholder='Ask anything...']"), prompt)
+
+ # Stream response
+ script = """
+if(window._message && window._message != window._last_message) {
+ try {
+ return window._message.substring(window._last_message.length);
+ } finally {
+ window._last_message = window._message;
+ }
+} else if(window._message_finished) {
+ return null;
+} else {
+ return '';
+}
+"""
+ while True:
+ chunk = driver.execute_script(script)
+ if chunk:
+ yield chunk
+ elif chunk != "":
+ break
+ else:
+ time.sleep(0.1)
diff --git a/g4f/Provider/selenium/Phind.py b/g4f/Provider/selenium/Phind.py
new file mode 100644
index 0000000000000000000000000000000000000000..b6f7cc072404bc62b7ca61656954ee55660b6c9b
--- /dev/null
+++ b/g4f/Provider/selenium/Phind.py
@@ -0,0 +1,103 @@
+from __future__ import annotations
+
+import time
+from urllib.parse import quote
+
+from ...typing import CreateResult, Messages
+from ..base_provider import AbstractProvider
+from ..helper import format_prompt
+from ...webdriver import WebDriver, WebDriverSession
+
+class Phind(AbstractProvider):
+ url = "https://www.phind.com"
+ working = False
+ supports_gpt_4 = True
+ supports_stream = True
+
+ @classmethod
+ def create_completion(
+ cls,
+ model: str,
+ messages: Messages,
+ stream: bool,
+ proxy: str = None,
+ timeout: int = 120,
+ webdriver: WebDriver = None,
+ creative_mode: bool = None,
+ **kwargs
+ ) -> CreateResult:
+ with WebDriverSession(webdriver, "", proxy=proxy) as driver:
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.support.ui import WebDriverWait
+ from selenium.webdriver.support import expected_conditions as EC
+
+ # Register fetch hook
+ source = """
+window._fetch = window.fetch;
+window.fetch = async (url, options) => {
+ const response = await window._fetch(url, options);
+ if (url != "/api/infer/answer") {
+ return response;
+ }
+ copy = response.clone();
+ window._reader = response.body.pipeThrough(new TextDecoderStream()).getReader();
+ return copy;
+}
+"""
+ driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
+ "source": source
+ })
+
+ prompt = quote(format_prompt(messages))
+ driver.get(f"{cls.url}/search?q={prompt}&source=searchbox")
+
+ # Need to change settings
+ wait = WebDriverWait(driver, timeout)
+ def open_dropdown():
+ # Open settings dropdown
+ wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, "button.text-dark.dropdown-toggle")))
+ driver.find_element(By.CSS_SELECTOR, "button.text-dark.dropdown-toggle").click()
+ # Wait for dropdown toggle
+ wait.until(EC.visibility_of_element_located((By.XPATH, "//button[text()='GPT-4']")))
+ if model.startswith("gpt-4") or creative_mode:
+ # Enable GPT-4
+ if model.startswith("gpt-4"):
+ open_dropdown()
+ driver.find_element(By.XPATH, "//button[text()='GPT-4']").click()
+ # Enable creative mode
+                if creative_mode or creative_mode is None:
+ open_dropdown()
+ driver.find_element(By.ID, "Creative Mode").click()
+ # Submit changes
+ driver.find_element(By.CSS_SELECTOR, ".search-bar-input-group button[type='submit']").click()
+ # Wait for page reload
+ wait.until(EC.visibility_of_element_located((By.CSS_SELECTOR, ".search-container")))
+
+ while True:
+ chunk = driver.execute_script("""
+if(window._reader) {
+ chunk = await window._reader.read();
+ if (chunk['done']) {
+ return null;
+ }
+ content = '';
+ chunk['value'].split('\\r\\n').forEach((line, index) => {
+ if (line.startsWith('data: ')) {
+ line = line.substring('data: '.length);
+                    if (!line.startsWith('<PHIND_METADATA>')) {
+                        content += line;
+                    }
+                }
+            });
+            return content;
+        } else {
+            return '';
+        }
+""")
+                if chunk:
+                    yield chunk
+                elif chunk != "":
+                    break
+                else:
+                    time.sleep(0.1)
diff --git a/g4f/api/__init__.py b/g4f/api/__init__.py
new file mode 100644
--- /dev/null
+++ b/g4f/api/__init__.py
+def create_app(g4f_api_key: str = None):
+    app = FastAPI()
+
+    # Add CORS middleware so browser clients can call the API directly
+    app.add_middleware(
+        CORSMiddleware,
+        allow_origin_regex=".*",
+        allow_credentials=True,
+        allow_methods=["*"],
+        allow_headers=["*"],
+    )
+
+    api = Api(app, g4f_api_key=g4f_api_key)
+
+    if AppConfig.gui:
+        @app.get("/")
+        async def home():
+            return HTMLResponse(f'g4f v-{g4f.version.utils.current_version}:<br><br>'
+                                'Start to chat: <a href="/chat/">/chat/</a><br>'
+                                'Open Swagger UI at: '
+                                '<a href="/docs">/docs</a>')
+
+ api.register_routes()
+ api.register_authorization()
+ api.register_validation_exception_handler()
+
+ if AppConfig.gui:
+ gui_app = WSGIMiddleware(get_gui_app())
+ app.mount("/", gui_app)
+
+ # Read cookie files if not ignored
+ if not AppConfig.ignore_cookie_files:
+ read_cookie_files()
+
+ return app
+
+def create_app_debug(g4f_api_key: str = None):
+ g4f.debug.logging = True
+ return create_app(g4f_api_key)
+
+class ChatCompletionsConfig(BaseModel):
+ messages: Messages = Field(examples=[[{"role": "system", "content": ""}, {"role": "user", "content": ""}]])
+ model: str = Field(default="")
+ provider: Optional[str] = None
+ stream: bool = False
+ image: Optional[str] = None
+ image_name: Optional[str] = None
+ temperature: Optional[float] = None
+ max_tokens: Optional[int] = None
+ stop: Union[list[str], str, None] = None
+ api_key: Optional[str] = None
+ web_search: Optional[bool] = None
+ proxy: Optional[str] = None
+ conversation_id: Optional[str] = None
+
+class ImageGenerationConfig(BaseModel):
+ prompt: str
+ model: Optional[str] = None
+ provider: Optional[str] = None
+ response_format: str = "url"
+ api_key: Optional[str] = None
+ proxy: Optional[str] = None
+
+class ProviderResponseModel(BaseModel):
+ id: str
+ object: str = "provider"
+ created: int
+ url: Optional[str]
+ label: Optional[str]
+
+class ProviderResponseDetailModel(ProviderResponseModel):
+ models: list[str]
+ image_models: list[str]
+ vision_models: list[str]
+ params: list[str]
+
+class ModelResponseModel(BaseModel):
+ id: str
+ object: str = "model"
+ created: int
+ owned_by: Optional[str]
+
+class ErrorResponseModel(BaseModel):
+ error: ErrorResponseMessageModel
+ model: Optional[str] = None
+ provider: Optional[str] = None
+
+class ErrorResponseMessageModel(BaseModel):
+ message: str
+
+class FileResponseModel(BaseModel):
+ filename: str
+
+class ErrorResponse(Response):
+ media_type = "application/json"
+
+ @classmethod
+ def from_exception(cls, exception: Exception,
+ config: Union[ChatCompletionsConfig, ImageGenerationConfig] = None,
+ status_code: int = HTTP_500_INTERNAL_SERVER_ERROR):
+ return cls(format_exception(exception, config), status_code)
+
+ @classmethod
+ def from_message(cls, message: str, status_code: int = HTTP_500_INTERNAL_SERVER_ERROR):
+ return cls(format_exception(message), status_code)
+
+ def render(self, content) -> bytes:
+ return str(content).encode(errors="ignore")
+
+class AppConfig:
+ ignored_providers: Optional[list[str]] = None
+ g4f_api_key: Optional[str] = None
+ ignore_cookie_files: bool = False
+    model: str = None
+ provider: str = None
+ image_provider: str = None
+ proxy: str = None
+ gui: bool = False
+
+ @classmethod
+ def set_config(cls, **data):
+ for key, value in data.items():
+ setattr(cls, key, value)
+
+list_ignored_providers: list[str] = None
+
+def set_list_ignored_providers(ignored: list[str]):
+ global list_ignored_providers
+ list_ignored_providers = ignored
+
+class Api:
+ def __init__(self, app: FastAPI, g4f_api_key=None) -> None:
+ self.app = app
+ self.client = AsyncClient()
+ self.g4f_api_key = g4f_api_key
+ self.get_g4f_api_key = APIKeyHeader(name="g4f-api-key")
+ self.conversations: dict[str, dict[str, BaseConversation]] = {}
+
+ security = HTTPBearer(auto_error=False)
+
+ def register_authorization(self):
+ @self.app.middleware("http")
+ async def authorization(request: Request, call_next):
+ if self.g4f_api_key and request.url.path not in ("/", "/v1"):
+ try:
+ user_g4f_api_key = await self.get_g4f_api_key(request)
+ except HTTPException as e:
+ if e.status_code == 403:
+ return ErrorResponse.from_message("G4F API key required", HTTP_401_UNAUTHORIZED)
+ if not secrets.compare_digest(self.g4f_api_key, user_g4f_api_key):
+ return ErrorResponse.from_message("Invalid G4F API key", HTTP_403_FORBIDDEN)
+ return await call_next(request)
+
+ def register_validation_exception_handler(self):
+ @self.app.exception_handler(RequestValidationError)
+ async def validation_exception_handler(request: Request, exc: RequestValidationError):
+ details = exc.errors()
+ modified_details = []
+ for error in details:
+ modified_details.append({
+ "loc": error["loc"],
+ "message": error["msg"],
+ "type": error["type"],
+ })
+ return JSONResponse(
+ status_code=HTTP_422_UNPROCESSABLE_ENTITY,
+ content=jsonable_encoder({"detail": modified_details}),
+ )
+
+ def register_routes(self):
+ @self.app.get("/")
+ async def read_root():
+ return RedirectResponse("/v1", 302)
+
+ @self.app.get("/v1")
+ async def read_root_v1():
+            return HTMLResponse('g4f API: Go to '
+                                '<a href="/v1/models">models</a>, '
+                                '<a href="/v1/chat/completions">chat/completions</a>, or '
+                                '<a href="/v1/images/generate">images/generate</a> <br><br>'
+                                'Open Swagger UI at: '
+                                '<a href="/docs">/docs</a>')
+
+ @self.app.get("/v1/models", responses={
+ HTTP_200_OK: {"model": List[ModelResponseModel]},
+ })
+ async def models():
+ model_list = dict(
+ (model, g4f.models.ModelUtils.convert[model])
+ for model in g4f.Model.__all__()
+ )
+ return [{
+ 'id': model_id,
+ 'object': 'model',
+ 'created': 0,
+ 'owned_by': model.base_provider
+ } for model_id, model in model_list.items()]
+
+ @self.app.get("/v1/models/{model_name}", responses={
+ HTTP_200_OK: {"model": ModelResponseModel},
+ HTTP_404_NOT_FOUND: {"model": ErrorResponseModel},
+ })
+ async def model_info(model_name: str) -> ModelResponseModel:
+ if model_name in g4f.models.ModelUtils.convert:
+ model_info = g4f.models.ModelUtils.convert[model_name]
+ return JSONResponse({
+ 'id': model_name,
+ 'object': 'model',
+ 'created': 0,
+ 'owned_by': model_info.base_provider
+ })
+ return ErrorResponse.from_message("The model does not exist.", HTTP_404_NOT_FOUND)
+
+ @self.app.post("/v1/chat/completions", responses={
+ HTTP_200_OK: {"model": ChatCompletion},
+ HTTP_401_UNAUTHORIZED: {"model": ErrorResponseModel},
+ HTTP_404_NOT_FOUND: {"model": ErrorResponseModel},
+ HTTP_422_UNPROCESSABLE_ENTITY: {"model": ErrorResponseModel},
+ HTTP_500_INTERNAL_SERVER_ERROR: {"model": ErrorResponseModel},
+ })
+ async def chat_completions(
+ config: ChatCompletionsConfig,
+ credentials: Annotated[HTTPAuthorizationCredentials, Depends(Api.security)] = None,
+ provider: str = None
+ ):
+ try:
+ config.provider = provider if config.provider is None else config.provider
+ if config.provider is None:
+ config.provider = AppConfig.provider
+ if credentials is not None:
+ config.api_key = credentials.credentials
+
+ conversation = return_conversation = None
+ if config.conversation_id is not None and config.provider is not None:
+ return_conversation = True
+ if config.conversation_id in self.conversations:
+ if config.provider in self.conversations[config.conversation_id]:
+ conversation = self.conversations[config.conversation_id][config.provider]
+
+ if config.image is not None:
+ try:
+ is_data_uri_an_image(config.image)
+                except ValueError:
+                    return ErrorResponse.from_message("The image you send must be a data URI. Example: data:image/webp;base64,...", status_code=HTTP_422_UNPROCESSABLE_ENTITY)
+
+ # Create the completion response
+ response = self.client.chat.completions.create(
+ **filter_none(
+ **{
+ "model": AppConfig.model,
+ "provider": AppConfig.provider,
+ "proxy": AppConfig.proxy,
+ **config.model_dump(exclude_none=True),
+ **{
+ "conversation_id": None,
+ "return_conversation": return_conversation,
+ "conversation": conversation
+ }
+ },
+ ignored=AppConfig.ignored_providers
+ ),
+ )
+
+ if not config.stream:
+ return await response
+
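+            # Stream as server-sent events; conversation objects are cached
+            # server-side under (conversation_id, provider) rather than being
+            # forwarded to the client.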
+ async def streaming():
+ try:
+ async for chunk in response:
+ if isinstance(chunk, BaseConversation):
+ if config.conversation_id is not None and config.provider is not None:
+ if config.conversation_id not in self.conversations:
+ self.conversations[config.conversation_id] = {}
+ self.conversations[config.conversation_id][config.provider] = chunk
+ else:
+ yield f"data: {chunk.json()}\n\n"
+ except GeneratorExit:
+ pass
+ except Exception as e:
+ logger.exception(e)
+ yield f'data: {format_exception(e, config)}\n\n'
+ yield "data: [DONE]\n\n"
+
+ return StreamingResponse(streaming(), media_type="text/event-stream")
+
+ except (ModelNotFoundError, ProviderNotFoundError) as e:
+ logger.exception(e)
+ return ErrorResponse.from_exception(e, config, HTTP_404_NOT_FOUND)
+ except MissingAuthError as e:
+ logger.exception(e)
+ return ErrorResponse.from_exception(e, config, HTTP_401_UNAUTHORIZED)
+ except Exception as e:
+ logger.exception(e)
+ return ErrorResponse.from_exception(e, config, HTTP_500_INTERNAL_SERVER_ERROR)
+
+ responses = {
+ HTTP_200_OK: {"model": ImagesResponse},
+ HTTP_401_UNAUTHORIZED: {"model": ErrorResponseModel},
+ HTTP_404_NOT_FOUND: {"model": ErrorResponseModel},
+ HTTP_500_INTERNAL_SERVER_ERROR: {"model": ErrorResponseModel},
+ }
+
+ @self.app.post("/v1/images/generate", responses=responses)
+ @self.app.post("/v1/images/generations", responses=responses)
+ async def generate_image(
+ request: Request,
+ config: ImageGenerationConfig,
+ credentials: Annotated[HTTPAuthorizationCredentials, Depends(Api.security)] = None
+ ):
+ if credentials is not None:
+ config.api_key = credentials.credentials
+ try:
+ response = await self.client.images.generate(
+ prompt=config.prompt,
+ model=config.model,
+ provider=AppConfig.image_provider if config.provider is None else config.provider,
+ **filter_none(
+ response_format = config.response_format,
+ api_key = config.api_key,
+ proxy = config.proxy
+ )
+ )
+ for image in response.data:
+ if hasattr(image, "url") and image.url.startswith("/"):
+ image.url = f"{request.base_url}{image.url.lstrip('/')}"
+ return response
+ except (ModelNotFoundError, ProviderNotFoundError) as e:
+ logger.exception(e)
+ return ErrorResponse.from_exception(e, config, HTTP_404_NOT_FOUND)
+ except MissingAuthError as e:
+ logger.exception(e)
+ return ErrorResponse.from_exception(e, config, HTTP_401_UNAUTHORIZED)
+ except Exception as e:
+ logger.exception(e)
+ return ErrorResponse.from_exception(e, config, HTTP_500_INTERNAL_SERVER_ERROR)
+
+ @self.app.get("/v1/providers", responses={
+ HTTP_200_OK: {"model": List[ProviderResponseModel]},
+ })
+ async def providers():
+ return [{
+ 'id': provider.__name__,
+ 'object': 'provider',
+ 'created': 0,
+ 'url': provider.url,
+ 'label': getattr(provider, "label", None),
+ } for provider in __providers__ if provider.working]
+
+ @self.app.get("/v1/providers/{provider}", responses={
+ HTTP_200_OK: {"model": ProviderResponseDetailModel},
+ HTTP_404_NOT_FOUND: {"model": ErrorResponseModel},
+ })
+ async def providers_info(provider: str):
+ if provider not in ProviderUtils.convert:
+ return ErrorResponse.from_message("The provider does not exist.", 404)
+ provider: ProviderType = ProviderUtils.convert[provider]
+ def safe_get_models(provider: ProviderType) -> list[str]:
+ try:
+ return provider.get_models() if hasattr(provider, "get_models") else []
+                except Exception:
+ return []
+ return {
+ 'id': provider.__name__,
+ 'object': 'provider',
+ 'created': 0,
+ 'url': provider.url,
+ 'label': getattr(provider, "label", None),
+ 'models': safe_get_models(provider),
+ 'image_models': getattr(provider, "image_models", []) or [],
+ 'vision_models': [model for model in [getattr(provider, "default_vision_model", None)] if model],
+ 'params': [*provider.get_parameters()] if hasattr(provider, "get_parameters") else []
+ }
+
+ @self.app.post("/v1/upload_cookies", responses={
+ HTTP_200_OK: {"model": List[FileResponseModel]},
+ })
+ def upload_cookies(files: List[UploadFile]):
+ response_data = []
+ for file in files:
+ try:
+ if file and file.filename.endswith(".json") or file.filename.endswith(".har"):
+ filename = os.path.basename(file.filename)
+ with open(os.path.join(get_cookies_dir(), filename), 'wb') as f:
+ shutil.copyfileobj(file.file, f)
+ response_data.append({"filename": filename})
+ finally:
+ file.file.close()
+ return response_data
+
+ @self.app.get("/v1/synthesize/{provider}", responses={
+ HTTP_200_OK: {"content": {"audio/*": {}}},
+ HTTP_404_NOT_FOUND: {"model": ErrorResponseModel},
+ HTTP_422_UNPROCESSABLE_ENTITY: {"model": ErrorResponseModel},
+ })
+ async def synthesize(request: Request, provider: str):
+ try:
+ provider_handler = convert_to_provider(provider)
+ except ProviderNotFoundError as e:
+ return ErrorResponse.from_exception(e, status_code=HTTP_404_NOT_FOUND)
+ if not hasattr(provider_handler, "synthesize"):
+ return ErrorResponse.from_message("Provider doesn't support synthesize", HTTP_404_NOT_FOUND)
+ if len(request.query_params) == 0:
+ return ErrorResponse.from_message("Missing query params", HTTP_422_UNPROCESSABLE_ENTITY)
+ response_data = provider_handler.synthesize({**request.query_params})
+ content_type = getattr(provider_handler, "synthesize_content_type", "application/octet-stream")
+ return StreamingResponse(response_data, media_type=content_type)
+
+ @self.app.get("/images/{filename}", response_class=FileResponse, responses={
+ HTTP_200_OK: {"content": {"image/*": {}}},
+ HTTP_404_NOT_FOUND: {}
+ })
+ async def get_image(filename):
+ target = os.path.join(images_dir, filename)
+
+ if not os.path.isfile(target):
+ return Response(status_code=404)
+
+ with open(target, "rb") as f:
+ content_type = is_accepted_format(f.read(12))
+
+ return FileResponse(target, media_type=content_type)
+
+def format_exception(e: Union[Exception, str], config: Union[ChatCompletionsConfig, ImageGenerationConfig] = None, image: bool = False) -> str:
+ last_provider = {} if not image else g4f.get_last_provider(True)
+ provider = (AppConfig.image_provider if image else AppConfig.provider)
+ model = AppConfig.model
+ if config is not None:
+ if config.provider is not None:
+ provider = config.provider
+ if config.model is not None:
+ model = config.model
+ if isinstance(e, str):
+ message = e
+ else:
+ message = f"{e.__class__.__name__}: {e}"
+ return json.dumps({
+ "error": {"message": message},
+ "model": last_provider.get("model") if model is None else model,
+ **filter_none(
+ provider=last_provider.get("name") if provider is None else provider
+ )
+ })
+
+def run_api(
+ host: str = '0.0.0.0',
+ port: int = None,
+ bind: str = None,
+ debug: bool = False,
+ workers: int = None,
+ use_colors: bool = None,
+ reload: bool = False
+) -> None:
+ print(f'Starting server... [g4f v-{g4f.version.utils.current_version}]' + (" (debug)" if debug else ""))
+ if use_colors is None:
+ use_colors = debug
+ if bind is not None:
+ host, port = bind.split(":")
+ if port is None:
+ port = DEFAULT_PORT
+ uvicorn.run(
+ f"g4f.api:create_app{'_debug' if debug else ''}",
+ host=host,
+ port=int(port),
+ workers=workers,
+ use_colors=use_colors,
+ factory=True,
+ reload=reload
+ )
diff --git a/g4f/api/_logging.py b/g4f/api/_logging.py
new file mode 100644
index 0000000000000000000000000000000000000000..884d75295fe996a3b4034ea88d005eb6b61c4d24
--- /dev/null
+++ b/g4f/api/_logging.py
@@ -0,0 +1,32 @@
+import sys, logging
+
+#from loguru import logger
+
+def __exception_handle(e_type, e_value, e_traceback):
+ if issubclass(e_type, KeyboardInterrupt):
+ print('\nBye...')
+ sys.exit(0)
+
+ sys.__excepthook__(e_type, e_value, e_traceback)
+
+#class __InterceptHandler(logging.Handler):
+# def emit(self, record):
+# try:
+# level = logger.level(record.levelname).name
+# except ValueError:
+# level = record.levelno
+#
+# frame, depth = logging.currentframe(), 2
+# while frame.f_code.co_filename == logging.__file__:
+# frame = frame.f_back
+# depth += 1
+
+# logger.opt(depth=depth, exception=record.exc_info).log(
+# level, record.getMessage()
+# )
+
+def hook_except_handle():
+ sys.excepthook = __exception_handle
+
+#def hook_logging(**kwargs):
+# logging.basicConfig(handlers=[__InterceptHandler()], **kwargs)
diff --git a/g4f/api/_tokenizer.py b/g4f/api/_tokenizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..de5877c47ad198c9db58c29c40389af1457c4f81
--- /dev/null
+++ b/g4f/api/_tokenizer.py
@@ -0,0 +1,9 @@
+# import tiktoken
+# from typing import Union
+
+# def tokenize(text: str, model: str = 'gpt-3.5-turbo') -> Union[int, str]:
+# encoding = tiktoken.encoding_for_model(model)
+# encoded = encoding.encode(text)
+# num_tokens = len(encoded)
+
+# return num_tokens, encoded
\ No newline at end of file
diff --git a/g4f/api/run.py b/g4f/api/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc1cbf92eb91886a23d9af180ba3dad6456118df
--- /dev/null
+++ b/g4f/api/run.py
@@ -0,0 +1,4 @@
+import g4f.api
+
+if __name__ == "__main__":
+ g4f.api.run_api(debug=True)
diff --git a/g4f/cli.py b/g4f/cli.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8a038a2ce2ea773085a3929a82391aa6d3e5dad
--- /dev/null
+++ b/g4f/cli.py
@@ -0,0 +1,67 @@
+from __future__ import annotations
+
+import argparse
+
+from g4f import Provider
+from g4f.gui.run import gui_parser, run_gui_args
+import g4f.cookies
+
+def main():
+ parser = argparse.ArgumentParser(description="Run gpt4free")
+ subparsers = parser.add_subparsers(dest="mode", help="Mode to run the g4f in.")
+ api_parser = subparsers.add_parser("api")
+ api_parser.add_argument("--bind", default=None, help="The bind string. (Default: 0.0.0.0:1337)")
+ api_parser.add_argument("--port", default=None, help="Change the port of the server.")
+ api_parser.add_argument("--debug", "-d", action="store_true", help="Enable verbose logging.")
+ api_parser.add_argument("--gui", "-g", default=False, action="store_true", help="Add gui to the api.")
+ api_parser.add_argument("--model", default=None, help="Default model for chat completion. (incompatible with --reload and --workers)")
+ api_parser.add_argument("--provider", choices=[provider.__name__ for provider in Provider.__providers__ if provider.working],
+ default=None, help="Default provider for chat completion. (incompatible with --reload and --workers)")
+ api_parser.add_argument("--image-provider", choices=[provider.__name__ for provider in Provider.__providers__ if provider.working and hasattr(provider, "image_models")],
+ default=None, help="Default provider for image generation. (incompatible with --reload and --workers)"),
+ api_parser.add_argument("--proxy", default=None, help="Default used proxy. (incompatible with --reload and --workers)")
+ api_parser.add_argument("--workers", type=int, default=None, help="Number of workers.")
+ api_parser.add_argument("--disable-colors", action="store_true", help="Don't use colors.")
+ api_parser.add_argument("--ignore-cookie-files", action="store_true", help="Don't read .har and cookie files. (incompatible with --reload and --workers)")
+ api_parser.add_argument("--g4f-api-key", type=str, default=None, help="Sets an authentication key for your API. (incompatible with --reload and --workers)")
+ api_parser.add_argument("--ignored-providers", nargs="+", choices=[provider.__name__ for provider in Provider.__providers__ if provider.working],
+ default=[], help="List of providers to ignore when processing request. (incompatible with --reload and --workers)")
+ api_parser.add_argument("--cookie-browsers", nargs="+", choices=[browser.__name__ for browser in g4f.cookies.browsers],
+ default=[], help="List of browsers to access or retrieve cookies from. (incompatible with --reload and --workers)")
+ api_parser.add_argument("--reload", action="store_true", help="Enable reloading.")
+ subparsers.add_parser("gui", parents=[gui_parser()], add_help=False)
+
+ args = parser.parse_args()
+ if args.mode == "api":
+ run_api_args(args)
+ elif args.mode == "gui":
+ run_gui_args(args)
+ else:
+ parser.print_help()
+ exit(1)
+
+def run_api_args(args):
+ from g4f.api import AppConfig, run_api
+
+ AppConfig.set_config(
+ ignore_cookie_files=args.ignore_cookie_files,
+ ignored_providers=args.ignored_providers,
+ g4f_api_key=args.g4f_api_key,
+ provider=args.provider,
+ image_provider=args.image_provider,
+ proxy=args.proxy,
+ model=args.model,
+ gui=args.gui,
+ )
+    g4f.cookies.browsers = [getattr(g4f.cookies, browser) for browser in args.cookie_browsers]
+ run_api(
+ bind=args.bind,
+ port=args.port,
+ debug=args.debug,
+ workers=args.workers,
+ use_colors=not args.disable_colors,
+ reload=args.reload
+ )
+
+if __name__ == "__main__":
+ main()
diff --git a/g4f/client/__init__.py b/g4f/client/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..86a810493b932ccd96a1e1859ae51a7258c816dc
--- /dev/null
+++ b/g4f/client/__init__.py
@@ -0,0 +1,508 @@
+from __future__ import annotations
+
+import os
+import time
+import random
+import string
+import asyncio
+import base64
+from typing import Union, AsyncIterator, Iterator, Coroutine, Optional
+
+from ..providers.base_provider import AsyncGeneratorProvider
+from ..image import ImageResponse, copy_images, images_dir
+from ..typing import Messages, Image, ImageType
+from ..providers.types import ProviderType
+from ..providers.response import ResponseType, FinishReason, BaseConversation, SynthesizeData
+from ..errors import NoImageResponseError, ModelNotFoundError
+from ..providers.retry_provider import IterListProvider
+from ..providers.asyncio import get_running_loop, to_sync_generator, async_generator_to_list
+from ..Provider.needs_auth import BingCreateImages, OpenaiAccount
+from .stubs import ChatCompletion, ChatCompletionChunk, Image, ImagesResponse
+from .image_models import ImageModels
+from .types import IterResponse, ImageProvider, Client as BaseClient
+from .service import get_model_and_provider, get_last_provider, convert_to_provider
+from .helper import find_stop, filter_json, filter_none, safe_aclose, to_async_iterator
+
+ChatCompletionResponseType = Iterator[Union[ChatCompletion, ChatCompletionChunk, BaseConversation]]
+AsyncChatCompletionResponseType = AsyncIterator[Union[ChatCompletion, ChatCompletionChunk, BaseConversation]]
+
+try:
+    anext  # Built-in since Python 3.10
+except NameError:
+    async def anext(aiter):
+        return await aiter.__anext__()
+
+# Synchronous iter_response function
+def iter_response(
+ response: Union[Iterator[Union[str, ResponseType]]],
+ stream: bool,
+ response_format: Optional[dict] = None,
+ max_tokens: Optional[int] = None,
+ stop: Optional[list[str]] = None
+) -> ChatCompletionResponseType:
+ content = ""
+ finish_reason = None
+ completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
+ idx = 0
+
+ if hasattr(response, '__aiter__'):
+ response = to_sync_generator(response)
+
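+    # Accumulate plain-text chunks; control objects (FinishReason,
+    # BaseConversation, SynthesizeData) are handled separately, and stop words
+    # or max_tokens can end the stream early.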
+ for chunk in response:
+ if isinstance(chunk, FinishReason):
+ finish_reason = chunk.reason
+ break
+ elif isinstance(chunk, BaseConversation):
+ yield chunk
+ continue
+ elif isinstance(chunk, SynthesizeData):
+ continue
+
+ chunk = str(chunk)
+ content += chunk
+
+ if max_tokens is not None and idx + 1 >= max_tokens:
+ finish_reason = "length"
+
+ first, content, chunk = find_stop(stop, content, chunk if stream else None)
+
+ if first != -1:
+ finish_reason = "stop"
+
+ if stream:
+ yield ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
+
+ if finish_reason is not None:
+ break
+
+ idx += 1
+
+ finish_reason = "stop" if finish_reason is None else finish_reason
+
+ if stream:
+ yield ChatCompletionChunk.model_construct(None, finish_reason, completion_id, int(time.time()))
+ else:
+ if response_format is not None and "type" in response_format:
+ if response_format["type"] == "json_object":
+ content = filter_json(content)
+ yield ChatCompletion.model_construct(content, finish_reason, completion_id, int(time.time()))
+
+# Synchronous iter_append_model_and_provider function
+def iter_append_model_and_provider(response: ChatCompletionResponseType) -> ChatCompletionResponseType:
+ last_provider = None
+
+ for chunk in response:
+ if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)):
+ last_provider = get_last_provider(True) if last_provider is None else last_provider
+ chunk.model = last_provider.get("model")
+ chunk.provider = last_provider.get("name")
+ yield chunk
+
+async def async_iter_response(
+ response: AsyncIterator[Union[str, ResponseType]],
+ stream: bool,
+ response_format: Optional[dict] = None,
+ max_tokens: Optional[int] = None,
+ stop: Optional[list[str]] = None
+) -> AsyncChatCompletionResponseType:
+ content = ""
+ finish_reason = None
+ completion_id = ''.join(random.choices(string.ascii_letters + string.digits, k=28))
+ idx = 0
+
+ try:
+ async for chunk in response:
+ if isinstance(chunk, FinishReason):
+ finish_reason = chunk.reason
+ break
+ elif isinstance(chunk, BaseConversation):
+ yield chunk
+ continue
+ elif isinstance(chunk, SynthesizeData):
+ continue
+
+ chunk = str(chunk)
+ content += chunk
+ idx += 1
+
+ if max_tokens is not None and idx >= max_tokens:
+ finish_reason = "length"
+
+ first, content, chunk = find_stop(stop, content, chunk if stream else None)
+
+ if first != -1:
+ finish_reason = "stop"
+
+ if stream:
+ yield ChatCompletionChunk.model_construct(chunk, None, completion_id, int(time.time()))
+
+ if finish_reason is not None:
+ break
+
+ finish_reason = "stop" if finish_reason is None else finish_reason
+
+ if stream:
+ yield ChatCompletionChunk.model_construct(None, finish_reason, completion_id, int(time.time()))
+ else:
+ if response_format is not None and "type" in response_format:
+ if response_format["type"] == "json_object":
+ content = filter_json(content)
+ yield ChatCompletion.model_construct(content, finish_reason, completion_id, int(time.time()))
+ finally:
+ await safe_aclose(response)
+
+async def async_iter_append_model_and_provider(
+ response: AsyncChatCompletionResponseType
+ ) -> AsyncChatCompletionResponseType:
+ last_provider = None
+ try:
+ async for chunk in response:
+ if isinstance(chunk, (ChatCompletion, ChatCompletionChunk)):
+ last_provider = get_last_provider(True) if last_provider is None else last_provider
+ chunk.model = last_provider.get("model")
+ chunk.provider = last_provider.get("name")
+ yield chunk
+ finally:
+ await safe_aclose(response)
+
+class Client(BaseClient):
+ def __init__(
+ self,
+ provider: Optional[ProviderType] = None,
+ image_provider: Optional[ImageProvider] = None,
+ **kwargs
+ ) -> None:
+ super().__init__(**kwargs)
+ self.chat: Chat = Chat(self, provider)
+ self.images: Images = Images(self, image_provider)
+
+class Completions:
+ def __init__(self, client: Client, provider: Optional[ProviderType] = None):
+ self.client: Client = client
+ self.provider: ProviderType = provider
+
+ def create(
+ self,
+ messages: Messages,
+ model: str,
+ provider: Optional[ProviderType] = None,
+ stream: Optional[bool] = False,
+ proxy: Optional[str] = None,
+ response_format: Optional[dict] = None,
+ max_tokens: Optional[int] = None,
+ stop: Optional[Union[list[str], str]] = None,
+ api_key: Optional[str] = None,
+ ignored: Optional[list[str]] = None,
+ ignore_working: Optional[bool] = False,
+ ignore_stream: Optional[bool] = False,
+ **kwargs
+ ) -> IterResponse:
+ model, provider = get_model_and_provider(
+ model,
+ self.provider if provider is None else provider,
+ stream,
+ ignored,
+ ignore_working,
+ ignore_stream,
+ )
+ stop = [stop] if isinstance(stop, str) else stop
+
+ response = provider.create_completion(
+ model,
+ messages,
+ stream=stream,
+ **filter_none(
+ proxy=self.client.proxy if proxy is None else proxy,
+ max_tokens=max_tokens,
+ stop=stop,
+ api_key=self.client.api_key if api_key is None else api_key
+ ),
+ **kwargs
+ )
+ if asyncio.iscoroutinefunction(provider.create_completion):
+ # Run the asynchronous function in an event loop
+ response = asyncio.run(response)
+ if stream and hasattr(response, '__aiter__'):
+ # It's an async generator, wrap it into a sync iterator
+ response = to_sync_generator(response)
+ elif hasattr(response, '__aiter__'):
+ # If response is an async generator, collect it into a list
+ response = asyncio.run(async_generator_to_list(response))
+ response = iter_response(response, stream, response_format, max_tokens, stop)
+ response = iter_append_model_and_provider(response)
+ if stream:
+ return response
+ else:
+ return next(response)
+
+class Chat:
+ completions: Completions
+
+ def __init__(self, client: Client, provider: Optional[ProviderType] = None):
+ self.completions = Completions(client, provider)
+
+class Images:
+ def __init__(self, client: Client, provider: Optional[ProviderType] = None):
+ self.client: Client = client
+ self.provider: Optional[ProviderType] = provider
+ self.models: ImageModels = ImageModels(client)
+
+ def generate(
+ self,
+ prompt: str,
+ model: str = None,
+ provider: Optional[ProviderType] = None,
+ response_format: str = "url",
+ proxy: Optional[str] = None,
+ **kwargs
+ ) -> ImagesResponse:
+ """
+ Synchronous generate method that runs the async_generate method in an event loop.
+ """
+ return asyncio.run(self.async_generate(prompt, model, provider, response_format, proxy, **kwargs))
+
+ async def get_provider_handler(self, model: Optional[str], provider: Optional[ImageProvider], default: ImageProvider) -> ImageProvider:
+ if provider is None:
+ provider_handler = self.provider
+ if provider_handler is None:
+ provider_handler = self.models.get(model, default)
+ elif isinstance(provider, str):
+ provider_handler = convert_to_provider(provider)
+ else:
+ provider_handler = provider
+ if provider_handler is None:
+ return default
+ if isinstance(provider_handler, IterListProvider):
+ if provider_handler.providers:
+ provider_handler = provider_handler.providers[0]
+ else:
+ raise ModelNotFoundError(f"IterListProvider for model {model} has no providers")
+ return provider_handler
+
+ async def async_generate(
+ self,
+ prompt: str,
+ model: Optional[str] = None,
+ provider: Optional[ProviderType] = None,
+ response_format: Optional[str] = "url",
+ proxy: Optional[str] = None,
+ **kwargs
+ ) -> ImagesResponse:
+ provider_handler = await self.get_provider_handler(model, provider, BingCreateImages)
+ if proxy is None:
+ proxy = self.client.proxy
+
+ response = None
+ if hasattr(provider_handler, "create_async_generator"):
+ messages = [{"role": "user", "content": f"Generate a image: {prompt}"}]
+ async for item in provider_handler.create_async_generator(model, messages, prompt=prompt, **kwargs):
+ if isinstance(item, ImageResponse):
+ response = item
+ break
+ elif hasattr(provider_handler, 'create'):
+ if asyncio.iscoroutinefunction(provider_handler.create):
+ response = await provider_handler.create(prompt)
+ else:
+ response = provider_handler.create(prompt)
+ if isinstance(response, str):
+ response = ImageResponse([response], prompt)
+ elif hasattr(provider_handler, "create_completion"):
+ get_running_loop(check_nested=True)
+ messages = [{"role": "user", "content": f"Generate a image: {prompt}"}]
+ for item in provider_handler.create_completion(model, messages, prompt=prompt, **kwargs):
+ if isinstance(item, ImageResponse):
+ response = item
+ break
+ else:
+ raise ValueError(f"Provider {getattr(provider_handler, '__name__')} does not support image generation")
+ if isinstance(response, ImageResponse):
+ return await self._process_image_response(
+ response,
+ response_format,
+ proxy,
+ model,
+ getattr(provider_handler, "__name__", None)
+ )
+ if response is None:
+ raise NoImageResponseError(f"No image response from {getattr(provider_handler, '__name__')}")
+ raise NoImageResponseError(f"Unexpected response type: {type(response)}")
+
+ def create_variation(
+ self,
+ image: Union[str, bytes],
+ model: str = None,
+ provider: Optional[ProviderType] = None,
+ response_format: str = "url",
+ **kwargs
+ ) -> ImagesResponse:
+ return asyncio.run(self.async_create_variation(
+ image, model, provider, response_format, **kwargs
+ ))
+
+ async def async_create_variation(
+ self,
+ image: ImageType,
+ model: Optional[str] = None,
+ provider: Optional[ProviderType] = None,
+ response_format: str = "url",
+ proxy: Optional[str] = None,
+ **kwargs
+ ) -> ImagesResponse:
+ provider_handler = await self.get_provider_handler(model, provider, OpenaiAccount)
+ if proxy is None:
+ proxy = self.client.proxy
+
+ response = None
+ if hasattr(provider_handler, "create_async_generator"):
+ messages = [{"role": "user", "content": "create a variation of this image"}]
+ generator = None
+ try:
+ generator = provider_handler.create_async_generator(model, messages, image=image, response_format=response_format, proxy=proxy, **kwargs)
+ async for chunk in generator:
+ if isinstance(chunk, ImageResponse):
+ response = chunk
+ break
+ finally:
+ await safe_aclose(generator)
+ elif hasattr(provider_handler, 'create_variation'):
+ if asyncio.iscoroutinefunction(provider_handler.create_variation):
+ response = await provider_handler.create_variation(image, model=model, response_format=response_format, proxy=proxy, **kwargs)
+ else:
+ response = provider_handler.create_variation(image, model=model, response_format=response_format, proxy=proxy, **kwargs)
+ else:
+ raise NoImageResponseError(f"Provider {provider} does not support image variation")
+
+ if isinstance(response, str):
+ response = ImageResponse([response])
+ if isinstance(response, ImageResponse):
+ return await self._process_image_response(response, response_format, proxy, model, getattr(provider_handler, "__name__", None))
+ if response is None:
+ raise NoImageResponseError(f"No image response from {getattr(provider, '__name__')}")
+ raise NoImageResponseError(f"Unexpected response type: {type(response)}")
+
+ async def _process_image_response(
+ self,
+ response: ImageResponse,
+ response_format: str,
+ proxy: str = None,
+ model: Optional[str] = None,
+ provider: Optional[str] = None
+ ) -> ImagesResponse:
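+ # For "url" and "b64_json" the images are first downloaded into the
+ # local images directory; any other format passes the original URLs
+ # through unchanged.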
+ if response_format in ("url", "b64_json"):
+ images = await copy_images(response.get_list(), response.options.get("cookies"), proxy)
+ async def process_image_item(image_file: str) -> Image:
+ if response_format == "b64_json":
+ with open(os.path.join(images_dir, os.path.basename(image_file)), "rb") as file:
+ image_data = base64.b64encode(file.read()).decode()
+ return Image.model_construct(url=image_file, b64_json=image_data, revised_prompt=response.alt)
+ return Image.model_construct(url=image_file, revised_prompt=response.alt)
+ images = await asyncio.gather(*[process_image_item(image) for image in images])
+ else:
+ images = [Image.model_construct(url=image, revised_prompt=response.alt) for image in response.get_list()]
+ last_provider = get_last_provider(True)
+ return ImagesResponse.model_construct(
+ images,
+ model=last_provider.get("model") if model is None else model,
+ provider=last_provider.get("name") if provider is None else provider
+ )
+
+class AsyncClient(BaseClient):
+ def __init__(
+ self,
+ provider: Optional[ProviderType] = None,
+ image_provider: Optional[ImageProvider] = None,
+ **kwargs
+ ) -> None:
+ super().__init__(**kwargs)
+ self.chat: AsyncChat = AsyncChat(self, provider)
+ self.images: AsyncImages = AsyncImages(self, image_provider)
+
+class AsyncChat:
+ completions: AsyncCompletions
+
+ def __init__(self, client: AsyncClient, provider: Optional[ProviderType] = None):
+ self.completions = AsyncCompletions(client, provider)
+
+class AsyncCompletions:
+ def __init__(self, client: AsyncClient, provider: Optional[ProviderType] = None):
+ self.client: AsyncClient = client
+ self.provider: ProviderType = provider
+
+ def create(
+ self,
+ messages: Messages,
+ model: str,
+ provider: Optional[ProviderType] = None,
+ stream: Optional[bool] = False,
+ proxy: Optional[str] = None,
+ response_format: Optional[dict] = None,
+ max_tokens: Optional[int] = None,
+ stop: Optional[Union[list[str], str]] = None,
+ api_key: Optional[str] = None,
+ ignored: Optional[list[str]] = None,
+ ignore_working: Optional[bool] = False,
+ ignore_stream: Optional[bool] = False,
+ **kwargs
+ ) -> Union[Coroutine[Any, Any, ChatCompletion], AsyncIterator[Union[ChatCompletionChunk, BaseConversation]]]:
+ model, provider = get_model_and_provider(
+ model,
+ self.provider if provider is None else provider,
+ stream,
+ ignored,
+ ignore_working,
+ ignore_stream,
+ )
+ stop = [stop] if isinstance(stop, str) else stop
+
+ if hasattr(provider, "create_async_generator"):
+ create_handler = provider.create_async_generator
+ else:
+ create_handler = provider.create_completion
+ response = create_handler(
+ model,
+ messages,
+ stream=stream,
+ **filter_none(
+ proxy=self.client.proxy if proxy is None else proxy,
+ max_tokens=max_tokens,
+ stop=stop,
+ api_key=self.client.api_key if api_key is None else api_key
+ ),
+ **kwargs
+ )
+
+ if not isinstance(response, AsyncIterator):
+ response = to_async_iterator(response)
+ response = async_iter_response(response, stream, response_format, max_tokens, stop)
+ response = async_iter_append_model_and_provider(response)
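+ # With stream=True the caller gets the async iterator itself; otherwise
+ # return the awaitable produced by anext(), which resolves to the fully
+ # assembled ChatCompletion.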
+ return response if stream else anext(response)
+
+class AsyncImages(Images):
+ def __init__(self, client: AsyncClient, provider: Optional[ProviderType] = None):
+ self.client: AsyncClient = client
+ self.provider: Optional[ProviderType] = provider
+ self.models: ImageModels = ImageModels(client)
+
+ async def generate(
+ self,
+ prompt: str,
+ model: Optional[str] = None,
+ provider: Optional[ProviderType] = None,
+ response_format: str = "url",
+ **kwargs
+ ) -> ImagesResponse:
+ return await self.async_generate(prompt, model, provider, response_format, **kwargs)
+
+ async def create_variation(
+ self,
+ image: ImageType,
+ model: str = None,
+ provider: ProviderType = None,
+ response_format: str = "url",
+ **kwargs
+ ) -> ImagesResponse:
+ return await self.async_create_variation(
+ image, model, provider, response_format, **kwargs
+ )
\ No newline at end of file
diff --git a/g4f/client/helper.py b/g4f/client/helper.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b022e8c8a8a5f22c0a20f02dd943004e70c1e46
--- /dev/null
+++ b/g4f/client/helper.py
@@ -0,0 +1,57 @@
+from __future__ import annotations
+
+import re
+import logging
+
+from typing import AsyncIterator, Iterator, AsyncGenerator, Optional
+
+def filter_json(text: str) -> str:
+ """
+ Parses JSON code block from a string.
+
+ Args:
+ text (str): A string containing a JSON code block.
+
+ Returns:
+ str: The content of the JSON code block, or the original string if no code block is found.
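+
+ Example (illustrative): given text that wraps JSON in a fenced ```json
+ code block, only the block's contents are returned.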
+ """
+ match = re.search(r"```(json|)\n(?P[\S\s]+?)\n```", text)
+ if match:
+ return match.group("code")
+ return text
+
+def find_stop(stop: Optional[list[str]], content: str, chunk: str = None):
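+ """
+ Cut `content` (and, if given, the current `chunk`) at the first stop
+ word found. Returns a tuple `(first, content, chunk)` where `first` is
+ the match position, or -1 if no stop word occurred.
+ """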
+ first = -1
+ word = None
+ if stop is not None:
+ for word in list(stop):
+ first = content.find(word)
+ if first != -1:
+ content = content[:first]
+ break
+ if chunk is not None and first != -1:
+ first = chunk.find(word)
+ if first != -1:
+ chunk = chunk[:first]
+ else:
+ first = 0
+ return first, content, chunk
+
+def filter_none(**kwargs) -> dict:
+ return {
+ key: value
+ for key, value in kwargs.items()
+ if value is not None
+ }
+
+async def safe_aclose(generator: AsyncGenerator) -> None:
+ try:
+ if generator and hasattr(generator, 'aclose'):
+ await generator.aclose()
+ except Exception as e:
+ logging.warning(f"Error while closing generator: {e}")
+
+# Helper function to convert a synchronous iterator to an async iterator
+async def to_async_iterator(iterator: Iterator) -> AsyncIterator:
+ for item in iterator:
+ yield item
\ No newline at end of file
diff --git a/g4f/client/image_models.py b/g4f/client/image_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b97a56b959f5d974114d47d3d2a27c6bbdd2ce4
--- /dev/null
+++ b/g4f/client/image_models.py
@@ -0,0 +1,14 @@
+from __future__ import annotations
+
+from ..models import ModelUtils
+
+class ImageModels():
+ def __init__(self, client):
+ self.client = client
+ self.models = ModelUtils.convert
+
+ def get(self, name, default=None):
+ model = self.models.get(name)
+ if model and model.best_provider:
+ return model.best_provider
+ return default
diff --git a/g4f/client/service.py b/g4f/client/service.py
new file mode 100644
index 0000000000000000000000000000000000000000..44533ece9d37a788e407860e5ff8f2c5236a9252
--- /dev/null
+++ b/g4f/client/service.py
@@ -0,0 +1,119 @@
+from __future__ import annotations
+
+from typing import Union
+
+from .. import debug, version
+from ..errors import ProviderNotFoundError, ModelNotFoundError, ProviderNotWorkingError, StreamNotSupportedError
+from ..models import Model, ModelUtils, default
+from ..Provider import ProviderUtils
+from ..providers.types import BaseRetryProvider, ProviderType
+from ..providers.retry_provider import IterProvider
+
+def convert_to_provider(provider: str) -> ProviderType:
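+ """
+ Resolve a provider name into a provider class. A space-separated list
+ of names is wrapped in an IterProvider that tries each one in order.
+ """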
+ if " " in provider:
+ provider_list = [ProviderUtils.convert[p] for p in provider.split() if p in ProviderUtils.convert]
+ if not provider_list:
+ raise ProviderNotFoundError(f'Providers not found: {provider}')
+ provider = IterProvider(provider_list)
+ elif provider in ProviderUtils.convert:
+ provider = ProviderUtils.convert[provider]
+ elif provider:
+ raise ProviderNotFoundError(f'Provider not found: {provider}')
+ return provider
+
+def get_model_and_provider(model : Union[Model, str],
+ provider : Union[ProviderType, str, None],
+ stream : bool,
+ ignored : list[str] = None,
+ ignore_working: bool = False,
+ ignore_stream: bool = False) -> tuple[str, ProviderType]:
+ """
+ Retrieves the model and provider based on input parameters.
+
+ Args:
+ model (Union[Model, str]): The model to use, either as an object or a string identifier.
+ provider (Union[ProviderType, str, None]): The provider to use, either as an object, a string identifier, or None.
+ stream (bool): Indicates if the operation should be performed as a stream.
+ ignored (list[str], optional): List of provider names to be ignored.
+ ignore_working (bool, optional): If True, ignores the working status of the provider.
+ ignore_stream (bool, optional): If True, ignores the streaming capability of the provider.
+
+ Returns:
+ tuple[str, ProviderType]: A tuple containing the model name and the provider type.
+
+ Raises:
+ ProviderNotFoundError: If the provider is not found.
+ ModelNotFoundError: If the model is not found.
+ ProviderNotWorkingError: If the provider is not working.
+ StreamNotSupportedError: If streaming is not supported by the provider.
+ """
+ if debug.version_check:
+ debug.version_check = False
+ version.utils.check_version()
+
+ if isinstance(provider, str):
+ provider = convert_to_provider(provider)
+
+ if isinstance(model, str):
+ if model in ModelUtils.convert:
+ model = ModelUtils.convert[model]
+
+ if not provider:
+ if not model:
+ model = default
+ elif isinstance(model, str):
+ raise ModelNotFoundError(f'Model not found: {model}')
+ provider = model.best_provider
+
+ if not provider:
+ raise ProviderNotFoundError(f'No provider found for model: {model}')
+
+ if isinstance(model, Model):
+ model = model.name
+
+ if not ignore_working and not provider.working:
+ raise ProviderNotWorkingError(f'{provider.__name__} is not working')
+
+ if isinstance(provider, BaseRetryProvider):
+ if not ignore_working:
+ provider.providers = [p for p in provider.providers if p.working]
+ if ignored:
+ provider.providers = [p for p in provider.providers if p.__name__ not in ignored]
+
+ if not ignore_stream and not provider.supports_stream and stream:
+ raise StreamNotSupportedError(f'{provider.__name__} does not support "stream" argument')
+
+ if model:
+ debug.log(f'Using {provider.__name__} provider and {model} model')
+ else:
+ debug.log(f'Using {provider.__name__} provider')
+
+ debug.last_provider = provider
+ debug.last_model = model
+
+ return model, provider
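+
+# Example (illustrative; the model name must be registered in ModelUtils):
+# model, provider = get_model_and_provider("gpt-4o-mini", None, stream=False)
+# This resolves the model object and falls back to its best_provider.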
+
+def get_last_provider(as_dict: bool = False) -> Union[ProviderType, dict[str, str], None]:
+ """
+ Retrieves the last used provider.
+
+ Args:
+ as_dict (bool, optional): If True, returns the provider information as a dictionary.
+
+ Returns:
+ Union[ProviderType, dict[str, str]]: The last used provider, either as an object or a dictionary.
+ """
+ last = debug.last_provider
+ if isinstance(last, BaseRetryProvider):
+ last = last.last_provider
+ if as_dict:
+ if last:
+ return {
+ "name": last.__name__,
+ "url": last.url,
+ "model": debug.last_model,
+ "label": getattr(last, "label", None) if hasattr(last, "label") else None
+ }
+ else:
+ return {}
+ return last
\ No newline at end of file
diff --git a/g4f/client/stubs.py b/g4f/client/stubs.py
new file mode 100644
index 0000000000000000000000000000000000000000..7367ac75d42dcc91642ee559c679407aa0cf0b6d
--- /dev/null
+++ b/g4f/client/stubs.py
@@ -0,0 +1,150 @@
+from __future__ import annotations
+
+from typing import Optional, List, Dict
+from time import time
+
+from .helper import filter_none
+
+try:
+ from pydantic import BaseModel, Field
+except ImportError:
+ class BaseModel():
+ @classmethod
+ def model_construct(cls, **data):
+ new = cls()
+ for key, value in data.items():
+ setattr(new, key, value)
+ return new
+ class Field():
+ def __init__(self, **config):
+ pass
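+
+# Note: pydantic's model_construct creates instances without validation; the
+# fallback classes above mirror just enough of that interface for the stubs
+# in this module to work when pydantic is not installed.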
+
+class ChatCompletionChunk(BaseModel):
+ id: str
+ object: str
+ created: int
+ model: str
+ provider: Optional[str]
+ choices: List[ChatCompletionDeltaChoice]
+
+ @classmethod
+ def model_construct(
+ cls,
+ content: str,
+ finish_reason: str,
+ completion_id: str = None,
+ created: int = None
+ ):
+ return super().model_construct(
+ id=f"chatcmpl-{completion_id}" if completion_id else None,
+ object="chat.completion.cunk",
+ created=created,
+ model=None,
+ provider=None,
+ choices=[ChatCompletionDeltaChoice.model_construct(
+ ChatCompletionDelta.model_construct(content),
+ finish_reason
+ )]
+ )
+
+class ChatCompletionMessage(BaseModel):
+ role: str
+ content: str
+
+ @classmethod
+ def model_construct(cls, content: str):
+ return super().model_construct(role="assistant", content=content)
+
+class ChatCompletionChoice(BaseModel):
+ index: int
+ message: ChatCompletionMessage
+ finish_reason: str
+
+ @classmethod
+ def model_construct(cls, message: ChatCompletionMessage, finish_reason: str):
+ return super().model_construct(index=0, message=message, finish_reason=finish_reason)
+
+class ChatCompletion(BaseModel):
+ id: str
+ object: str
+ created: int
+ model: str
+ provider: Optional[str]
+ choices: List[ChatCompletionChoice]
+ usage: Dict[str, int] = Field(examples=[{
+ "prompt_tokens": 0, #prompt_tokens,
+ "completion_tokens": 0, #completion_tokens,
+ "total_tokens": 0, #prompt_tokens + completion_tokens,
+ }])
+
+ @classmethod
+ def model_construct(
+ cls,
+ content: str,
+ finish_reason: str,
+ completion_id: str = None,
+ created: int = None
+ ):
+ return super().model_construct(
+ id=f"chatcmpl-{completion_id}" if completion_id else None,
+ object="chat.completion",
+ created=created,
+ model=None,
+ provider=None,
+ choices=[ChatCompletionChoice.model_construct(
+ ChatCompletionMessage.model_construct(content),
+ finish_reason
+ )],
+ usage={
+ "prompt_tokens": 0, #prompt_tokens,
+ "completion_tokens": 0, #completion_tokens,
+ "total_tokens": 0, #prompt_tokens + completion_tokens,
+ }
+ )
+
+class ChatCompletionDelta(BaseModel):
+ role: str
+ content: str
+
+ @classmethod
+ def model_construct(cls, content: Optional[str]):
+ return super().model_construct(role="assistant", content=content)
+
+class ChatCompletionDeltaChoice(BaseModel):
+ index: int
+ delta: ChatCompletionDelta
+ finish_reason: Optional[str]
+
+ @classmethod
+ def model_construct(cls, delta: ChatCompletionDelta, finish_reason: Optional[str]):
+ return super().model_construct(index=0, delta=delta, finish_reason=finish_reason)
+
+class Image(BaseModel):
+ url: Optional[str]
+ b64_json: Optional[str]
+ revised_prompt: Optional[str]
+
+ @classmethod
+ def model_construct(cls, url: str = None, b64_json: str = None, revised_prompt: str = None):
+ return super().model_construct(**filter_none(
+ url=url,
+ b64_json=b64_json,
+ revised_prompt=revised_prompt
+ ))
+
+class ImagesResponse(BaseModel):
+ data: List[Image]
+ model: str
+ provider: str
+ created: int
+
+ @classmethod
+ def model_construct(cls, data: List[Image], created: int = None, model: str = None, provider: str = None):
+ if created is None:
+ created = int(time())
+ return super().model_construct(
+ data=data,
+ model=model,
+ provider=provider,
+ created=created
+ )
diff --git a/g4f/client/types.py b/g4f/client/types.py
new file mode 100644
index 0000000000000000000000000000000000000000..5010e098c9666ca89b6f5364c1aea7334be9532b
--- /dev/null
+++ b/g4f/client/types.py
@@ -0,0 +1,33 @@
+from __future__ import annotations
+
+import os
+
+from .stubs import ChatCompletion, ChatCompletionChunk
+from ..providers.types import BaseProvider
+from typing import Union, Iterator, AsyncIterator
+
+ImageProvider = Union[BaseProvider, object]
+Proxies = Union[dict, str]
+IterResponse = Iterator[Union[ChatCompletion, ChatCompletionChunk]]
+AsyncIterResponse = AsyncIterator[Union[ChatCompletion, ChatCompletionChunk]]
+
+class Client():
+ def __init__(
+ self,
+ api_key: str = None,
+ proxies: Proxies = None,
+ **kwargs
+ ) -> None:
+ self.api_key: str = api_key
+ self.proxies: Proxies = proxies
+ self.proxy: str = self.get_proxy()
+
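+ # Proxy resolution order (see get_proxy below): an explicit proxies
+ # string, then the G4F_PROXY environment variable, then the "all" or
+ # "https" entries of a proxies dict.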
+ def get_proxy(self) -> Union[str, None]:
+ if isinstance(self.proxies, str):
+ return self.proxies
+ elif self.proxies is None:
+ return os.environ.get("G4F_PROXY")
+ elif "all" in self.proxies:
+ return self.proxies["all"]
+ elif "https" in self.proxies:
+ return self.proxies["https"]
\ No newline at end of file
diff --git a/g4f/cookies.py b/g4f/cookies.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c62d697ecfcb7eeb7649b11f8144e7ae8792f93
--- /dev/null
+++ b/g4f/cookies.py
@@ -0,0 +1,207 @@
+from __future__ import annotations
+
+import os
+import time
+import json
+
+try:
+ from platformdirs import user_config_dir
+ has_platformdirs = True
+except ImportError:
+ has_platformdirs = False
+try:
+ from browser_cookie3 import (
+ chrome, chromium, opera, opera_gx,
+ brave, edge, vivaldi, firefox,
+ _LinuxPasswordManager, BrowserCookieError
+ )
+
+ def _g4f(domain_name: str) -> list:
+ """
+ Load cookies from the 'g4f' browser (if exists).
+
+ Args:
+ domain_name (str): The domain for which to load cookies.
+
+ Returns:
+ list: List of cookies.
+ """
+ if not has_platformdirs:
+ return []
+ user_data_dir = user_config_dir("g4f")
+ cookie_file = os.path.join(user_data_dir, "Default", "Cookies")
+ return [] if not os.path.exists(cookie_file) else chrome(cookie_file, domain_name)
+
+ browsers = [
+ _g4f,
+ chrome, chromium, opera, opera_gx,
+ brave, edge, vivaldi, firefox,
+ ]
+ has_browser_cookie3 = True
+except ImportError:
+ has_browser_cookie3 = False
+ browsers = []
+
+from .typing import Dict, Cookies
+from .errors import MissingRequirementsError
+from . import debug
+
+class CookiesConfig():
+ cookies: Dict[str, Cookies] = {}
+ cookies_dir: str = "./har_and_cookies"
+
+DOMAINS = [
+ ".bing.com",
+ ".meta.ai",
+ ".google.com",
+ "www.whiterabbitneo.com",
+ "huggingface.co",
+ "chat.reka.ai",
+ "chatgpt.com"
+]
+
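+# Workaround for headless Linux (DBUS_SESSION_BUS_ADDRESS=/dev/null), where
+# browser_cookie3's keyring lookup cannot prompt for a password: stub the
+# password manager with a fixed secret so cookie reading can proceed.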
+if has_browser_cookie3 and os.environ.get('DBUS_SESSION_BUS_ADDRESS') == "/dev/null":
+ _LinuxPasswordManager.get_password = lambda a, b: b"secret"
+
+def get_cookies(domain_name: str = '', raise_requirements_error: bool = True, single_browser: bool = False) -> Dict[str, str]:
+ """
+ Load cookies for a given domain from all supported browsers and cache the results.
+
+ Args:
+ domain_name (str): The domain for which to load cookies.
+ raise_requirements_error (bool): If True, raise MissingRequirementsError when browser_cookie3 is not installed.
+ single_browser (bool): If True, stop after the first browser that returns cookies.
+
+ Returns:
+ Dict[str, str]: A dictionary of cookie names and values.
+ """
+ if domain_name in CookiesConfig.cookies:
+ return CookiesConfig.cookies[domain_name]
+
+ cookies = load_cookies_from_browsers(domain_name, raise_requirements_error, single_browser)
+ CookiesConfig.cookies[domain_name] = cookies
+ return cookies
+
+def set_cookies(domain_name: str, cookies: Cookies = None) -> None:
+ if cookies:
+ CookiesConfig.cookies[domain_name] = cookies
+ elif domain_name in CookiesConfig.cookies:
+ CookiesConfig.cookies.pop(domain_name)
+
+def load_cookies_from_browsers(domain_name: str, raise_requirements_error: bool = True, single_browser: bool = False) -> Cookies:
+ """
+ Helper function to load cookies from various browsers.
+
+ Args:
+ domain_name (str): The domain for which to load cookies.
+ raise_requirements_error (bool): If True, raise MissingRequirementsError when browser_cookie3 is not installed.
+ single_browser (bool): If True, stop after the first browser that returns cookies.
+
+ Returns:
+ Dict[str, str]: A dictionary of cookie names and values.
+ """
+ if not has_browser_cookie3:
+ if raise_requirements_error:
+ raise MissingRequirementsError('Install "browser_cookie3" package')
+ return {}
+ cookies = {}
+ for cookie_fn in browsers:
+ try:
+ cookie_jar = cookie_fn(domain_name=domain_name)
+ if len(cookie_jar) and debug.logging:
+ print(f"Read cookies from {cookie_fn.__name__} for {domain_name}")
+ for cookie in cookie_jar:
+ if cookie.name not in cookies:
+ if not cookie.expires or cookie.expires > time.time():
+ cookies[cookie.name] = cookie.value
+ if single_browser and len(cookie_jar):
+ break
+ except BrowserCookieError:
+ pass
+ except Exception as e:
+ if debug.logging:
+ print(f"Error reading cookies from {cookie_fn.__name__} for {domain_name}: {e}")
+ return cookies
+
+def set_cookies_dir(dir: str) -> None:
+ CookiesConfig.cookies_dir = dir
+
+def get_cookies_dir() -> str:
+ return CookiesConfig.cookies_dir
+
+def read_cookie_files(dirPath: str = None):
+ def get_domain(v: dict) -> str:
+ host = [h["value"] for h in v['request']['headers'] if h["name"].lower() in ("host", ":authority")]
+ if not host:
+ return
+ host = host.pop()
+ for d in DOMAINS:
+ if d in host:
+ return d
+
+ harFiles = []
+ cookieFiles = []
+ for root, _, files in os.walk(CookiesConfig.cookies_dir if dirPath is None else dirPath):
+ for file in files:
+ if file.endswith(".har"):
+ harFiles.append(os.path.join(root, file))
+ elif file.endswith(".json"):
+ cookieFiles.append(os.path.join(root, file))
+
+ CookiesConfig.cookies = {}
+ for path in harFiles:
+ with open(path, 'rb') as file:
+ try:
+ harFile = json.load(file)
+ except json.JSONDecodeError:
+ # Error: not a HAR file!
+ continue
+ if debug.logging:
+ print("Read .har file:", path)
+ new_cookies = {}
+ for v in harFile['log']['entries']:
+ domain = get_domain(v)
+ if domain is None:
+ continue
+ v_cookies = {}
+ for c in v['request']['cookies']:
+ v_cookies[c['name']] = c['value']
+ if len(v_cookies) > 0:
+ CookiesConfig.cookies[domain] = v_cookies
+ new_cookies[domain] = len(v_cookies)
+ if debug.logging:
+ for domain, new_values in new_cookies.items():
+ print(f"Cookies added: {new_values} from {domain}")
+ for path in cookieFiles:
+ with open(path, 'rb') as file:
+ try:
+ cookieFile = json.load(file)
+ except json.JSONDecodeError:
+ # Error: not a json file!
+ continue
+ if not isinstance(cookieFile, list):
+ continue
+ if debug.logging:
+ print("Read cookie file:", path)
+ new_cookies = {}
+ for c in cookieFile:
+ if isinstance(c, dict) and "domain" in c:
+ if c["domain"] not in new_cookies:
+ new_cookies[c["domain"]] = {}
+ new_cookies[c["domain"]][c["name"]] = c["value"]
+ for domain, new_values in new_cookies.items():
+ if debug.logging:
+ print(f"Cookies added: {len(new_values)} from {domain}")
+ CookiesConfig.cookies[domain] = new_values
+
diff --git a/g4f/debug.py b/g4f/debug.py
new file mode 100644
index 0000000000000000000000000000000000000000..c107cbdf97941386cda6a47ec70efda4470c1b97
--- /dev/null
+++ b/g4f/debug.py
@@ -0,0 +1,13 @@
+from .providers.types import ProviderType
+
+logging: bool = False
+version_check: bool = True
+last_provider: ProviderType = None
+last_model: str = None
+version: str = None
+log_handler: callable = print
+logs: list = []
+
+def log(text):
+ if logging:
+ log_handler(text)
\ No newline at end of file
diff --git a/g4f/errors.py b/g4f/errors.py
new file mode 100644
index 0000000000000000000000000000000000000000..3d553ba617094fc1c1a0b05f78bd832bf51ba387
--- /dev/null
+++ b/g4f/errors.py
@@ -0,0 +1,47 @@
+class ProviderNotFoundError(Exception):
+ ...
+
+class ProviderNotWorkingError(Exception):
+ ...
+
+class StreamNotSupportedError(Exception):
+ ...
+
+class ModelNotFoundError(Exception):
+ ...
+
+class ModelNotAllowedError(Exception):
+ ...
+
+class RetryProviderError(Exception):
+ ...
+
+class RetryNoProviderError(Exception):
+ ...
+
+class VersionNotFoundError(Exception):
+ ...
+
+class ModelNotSupportedError(Exception):
+ ...
+
+class MissingRequirementsError(Exception):
+ ...
+
+class NestAsyncioError(MissingRequirementsError):
+ ...
+
+class MissingAuthError(Exception):
+ ...
+
+class NoImageResponseError(Exception):
+ ...
+
+class RateLimitError(Exception):
+ ...
+
+class ResponseError(Exception):
+ ...
+
+class ResponseStatusError(Exception):
+ ...
\ No newline at end of file
diff --git a/g4f/gui/__init__.py b/g4f/gui/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..140711fae1bc6d8a75a4b89ee1def5273b7e5916
--- /dev/null
+++ b/g4f/gui/__init__.py
@@ -0,0 +1,43 @@
+from ..errors import MissingRequirementsError
+
+try:
+ from .server.app import app
+ from .server.website import Website
+ from .server.backend import Backend_Api
+ import_error = None
+except ImportError as e:
+ import_error = e
+
+def get_gui_app():
+ site = Website(app)
+ for route in site.routes:
+ app.add_url_rule(
+ route,
+ view_func=site.routes[route]['function'],
+ methods=site.routes[route]['methods'],
+ )
+
+ backend_api = Backend_Api(app)
+ for route in backend_api.routes:
+ app.add_url_rule(
+ route,
+ view_func=backend_api.routes[route]['function'],
+ methods=backend_api.routes[route]['methods'],
+ )
+ return app
+
+def run_gui(host: str = '0.0.0.0', port: int = 8080, debug: bool = False) -> None:
+ if import_error is not None:
+ raise MissingRequirementsError(f'Install "gui" requirements | pip install -U g4f[gui]\n{import_error}')
+
+ config = {
+ 'host' : host,
+ 'port' : port,
+ 'debug': debug
+ }
+
+ get_gui_app()
+
+ print(f"Running on port {config['port']}")
+ app.run(**config)
+ print(f"Closing port {config['port']}")
diff --git a/g4f/gui/client/index.html b/g4f/gui/client/index.html
new file mode 100644
index 0000000000000000000000000000000000000000..8cbcd57833807824522f9e354f684c632ad3a569
--- /dev/null
+++ b/g4f/gui/client/index.html
@@ -0,0 +1,282 @@
+function filter_message(text) {
+ return text.replaceAll(
+ /[\s\S]+/gm, ""
+ )
+}
+
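+// Fallback for contexts where the async Clipboard API is unavailable (for
+// example over plain HTTP): copy via a hidden textarea and the legacy
+// document.execCommand("copy").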
+function fallback_clipboard (text) {
+ var textBox = document.createElement("textarea");
+ textBox.value = text;
+ textBox.style.top = "0";
+ textBox.style.left = "0";
+ textBox.style.position = "fixed";
+ document.body.appendChild(textBox);
+ textBox.focus();
+ textBox.select();
+ try {
+ var success = document.execCommand('copy');
+ var msg = success ? 'succeeded' : 'failed';
+ console.log('Clipboard Fallback: Copying text command ' + msg);
+ } catch (e) {
+ console.error('Clipboard Fallback: Unable to copy', e);
+ }
+ document.body.removeChild(textBox);
+}
+
+hljs.addPlugin(new CopyButtonPlugin());
+let typesetPromise = Promise.resolve();
+const highlight = (container) => {
+ container.querySelectorAll('code:not(.hljs)').forEach((el) => {
+ if (el.className != "hljs") {
+ hljs.highlightElement(el);
+ }
+ });
+ if (window.MathJax) {
+ typesetPromise = typesetPromise.then(
+ () => MathJax.typesetPromise([container])
+ ).catch(
+ (err) => console.log('Typeset failed: ' + err.message)
+ );
+ }
+}
+
+const register_message_buttons = async () => {
+ document.querySelectorAll(".message .fa-xmark").forEach(async (el) => {
+ if (!("click" in el.dataset)) {
+ el.dataset.click = "true";
+ el.addEventListener("click", async () => {
+ const message_el = el.parentElement.parentElement;
+ await remove_message(window.conversation_id, message_el.dataset.index);
+ await safe_load_conversation(window.conversation_id, false);
+ });
+ }
+ });
+
+ document.querySelectorAll(".message .fa-clipboard").forEach(async (el) => {
+ if (!("click" in el.dataset)) {
+ el.dataset.click = "true";
+ el.addEventListener("click", async () => {
+ const message_el = el.parentElement.parentElement.parentElement;
+ const copyText = await get_message(window.conversation_id, message_el.dataset.index);
+ try {
+ if (!navigator.clipboard) {
+ throw new Error("navigator.clipboard: Clipboard API unavailable.");
+ }
+ await navigator.clipboard.writeText(copyText);
+ } catch (e) {
+ console.error(e);
+ console.error("Clipboard API writeText() failed! Fallback to document.exec(\"copy\")...");
+ fallback_clipboard(copyText);
+ }
+ el.classList.add("clicked");
+ setTimeout(() => el.classList.remove("clicked"), 1000);
+ })
+ }
+ });
+
+ document.querySelectorAll(".message .fa-volume-high").forEach(async (el) => {
+ if (!("click" in el.dataset)) {
+ el.dataset.click = "true";
+ el.addEventListener("click", async () => {
+ const message_el = el.parentElement.parentElement.parentElement;
+ let audio;
+ if (message_el.dataset.synthesize_url) {
+ el.classList.add("active");
+ setTimeout(()=>el.classList.remove("active"), 2000);
+ const media_player = document.querySelector(".media_player");
+ if (!media_player.classList.contains("show")) {
+ media_player.classList.add("show");
+ audio = new Audio(message_el.dataset.synthesize_url);
+ audio.controls = true;
+ media_player.appendChild(audio);
+ } else {
+ audio = media_player.querySelector("audio");
+ audio.src = message_el.dataset.synthesize_url;
+ }
+ audio.play();
+ return;
+ }
+ let playlist = [];
+ function play_next() {
+ const next = playlist.shift();
+ if (next && el.dataset.do_play) {
+ next.play();
+ }
+ }
+ if (el.dataset.stopped) {
+ el.classList.remove("blink")
+ delete el.dataset.stopped;
+ return;
+ }
+ if (el.dataset.running) {
+ el.dataset.stopped = true;
+ el.classList.add("blink")
+ playlist = [];
+ return;
+ }
+ el.dataset.running = true;
+ el.classList.add("blink")
+ el.classList.add("active")
+
+ let speechText = await get_message(window.conversation_id, message_el.dataset.index);
+
+ speechText = speechText.replaceAll(/([^0-9])\./gm, "$1.;");
+ speechText = speechText.replaceAll("?", "?;");
+ speechText = speechText.replaceAll(/\[(.+)\]\(.+\)/gm, "($1)");
+ speechText = speechText.replaceAll(/```[a-z]+/gm, "");
+ speechText = filter_message(speechText.replaceAll("`", "").replaceAll("#", ""))
+ const lines = speechText.trim().split(/\n|;/).filter(v => count_words(v));
+
+ window.onSpeechResponse = (url) => {
+ if (!el.dataset.stopped) {
+ el.classList.remove("blink")
+ }
+ if (url) {
+ var sound = document.createElement('audio');
+ sound.controls = 'controls';
+ sound.src = url;
+ sound.type = 'audio/wav';
+ sound.onended = function() {
+ el.dataset.do_play = true;
+ setTimeout(play_next, 1000);
+ };
+ sound.onplay = function() {
+ delete el.dataset.do_play;
+ };
+ var container = document.createElement('div');
+ container.classList.add("audio");
+ container.appendChild(sound);
+ content_el.appendChild(container);
+ if (!el.dataset.stopped) {
+ playlist.push(sound);
+ if (el.dataset.do_play) {
+ play_next();
+ }
+ }
+ }
+ let line = lines.length > 0 ? lines.shift() : null;
+ if (line && !el.dataset.stopped) {
+ handleGenerateSpeech(line);
+ } else {
+ el.classList.remove("active");
+ el.classList.remove("blink");
+ delete el.dataset.running;
+ }
+ }
+ el.dataset.do_play = true;
+ let line = lines.shift();
+ handleGenerateSpeech(line);
+ });
+ }
+ });
+ document.querySelectorAll(".message .fa-rotate").forEach(async (el) => {
+ if (!("click" in el.dataset)) {
+ el.dataset.click = "true";
+ el.addEventListener("click", async () => {
+ const message_el = el.parentElement.parentElement.parentElement;
+ el.classList.add("clicked");
+ setTimeout(() => el.classList.remove("clicked"), 1000);
+ await ask_gpt(get_message_id(), message_el.dataset.index);
+ });
+ }
+ });
+ document.querySelectorAll(".message .fa-whatsapp").forEach(async (el) => {
+ if (!el.parentElement.href) {
+ const text = el.parentElement.parentElement.parentElement.innerText;
+ el.parentElement.href = `https://wa.me/?text=${encodeURIComponent(text)}`;
+ }
+ });
+ document.querySelectorAll(".message .fa-print").forEach(async (el) => {
+ if (!("click" in el.dataset)) {
+ el.dataset.click = "true";
+ el.addEventListener("click", async () => {
+ const message_el = el.parentElement.parentElement.parentElement;
+ el.classList.add("clicked");
+ message_box.scrollTop = 0;
+ message_el.classList.add("print");
+ setTimeout(() => el.classList.remove("clicked"), 1000);
+ setTimeout(() => message_el.classList.remove("print"), 1000);
+ window.print()
+ })
+ }
+ });
+}
+
+const delete_conversations = async () => {
+ const remove_keys = [];
+ for (let i = 0; i < appStorage.length; i++){
+ let key = appStorage.key(i);
+ if (key.startsWith("conversation:")) {
+ remove_keys.push(key);
+ }
+ }
+ remove_keys.forEach((key)=>appStorage.removeItem(key));
+ hide_sidebar();
+ await new_conversation();
+};
+
+const handle_ask = async () => {
+ messageInput.style.height = "82px";
+ messageInput.focus();
+ await scroll_to_bottom();
+
+ let message = messageInput.value;
+ if (message.length <= 0) {
+ return;
+ }
+ messageInput.value = "";
+ await count_input()
+ await add_conversation(window.conversation_id, message);
+
+ if ("text" in fileInput.dataset) {
+ message += '\n```' + fileInput.dataset.type + '\n';
+ message += fileInput.dataset.text;
+ message += '\n```'
+ }
+ let message_index = await add_message(window.conversation_id, "user", message);
+ let message_id = get_message_id();
+
+ if (imageInput.dataset.src) URL.revokeObjectURL(imageInput.dataset.src);
+ const input = imageInput && imageInput.files.length > 0 ? imageInput : cameraInput
+ if (input.files.length > 0) imageInput.dataset.src = URL.createObjectURL(input.files[0]);
+ else delete imageInput.dataset.src
+
+ message_box.innerHTML += `
+ `;
+ highlight(message_box);
+ await ask_gpt(message_id);
+};
+
+async function safe_remove_cancel_button() {
+ for (let key in controller_storage) {
+ if (!controller_storage[key].signal.aborted) {
+ return;
+ }
+ }
+ stop_generating.classList.add("stop_generating-hidden");
+}
+
+regenerate.addEventListener("click", async () => {
+ regenerate.classList.add("regenerate-hidden");
+ setTimeout(()=>regenerate.classList.remove("regenerate-hidden"), 3000);
+ await hide_message(window.conversation_id);
+ await ask_gpt(get_message_id());
+});
+
+stop_generating.addEventListener("click", async () => {
+ stop_generating.classList.add("stop_generating-hidden");
+ regenerate.classList.remove("regenerate-hidden");
+ let key;
+ for (key in controller_storage) {
+ if (!controller_storage[key].signal.aborted) {
+ controller_storage[key].abort();
+ let message = message_storage[key];
+ if (message) {
+ content_storage[key].inner.innerHTML += " [aborted]";
+ message_storage[key] += " [aborted]";
+ console.log(`aborted ${window.conversation_id} #${key}`);
+ }
+ }
+ }
+ await load_conversation(window.conversation_id, false);
+});
+
+document.querySelector(".media_player .fa-x").addEventListener("click", ()=>{
+ const media_player = document.querySelector(".media_player");
+ media_player.classList.remove("show");
+ const audio = document.querySelector(".media_player audio");
+ media_player.removeChild(audio);
+});
+
+const prepare_messages = (messages, message_index = -1) => {
+ if (message_index >= 0) {
+ messages = messages.filter((_, index) => message_index >= index);
+ }
+
+ // Removes none user messages at end
+ let last_message;
+ while (last_message = messages.pop()) {
+ if (last_message["role"] == "user") {
+ messages.push(last_message);
+ break;
+ }
+ }
+
+ let new_messages = [];
+ if (systemPrompt?.value) {
+ new_messages.push({
+ "role": "system",
+ "content": systemPrompt.value
+ });
+ }
+
+ // Remove history, if it's selected
+ if (document.getElementById('history')?.checked) {
+ if (message_index == null) {
+ messages = [messages.pop(), messages.pop()];
+ } else {
+ messages = [messages.pop()];
+ }
+ }
+
+ messages.forEach((new_message) => {
+ // Include only not regenerated messages
+ if (new_message && !new_message.regenerate) {
+ // Remove generated images from history
+ new_message.content = filter_message(new_message.content);
+ delete new_message.provider;
+ delete new_message.synthesize;
+ new_messages.push(new_message)
+ }
+ });
+
+ return new_messages;
+}
+
+async function add_message_chunk(message, message_id) {
+ content_map = content_storage[message_id];
+ if (message.type == "conversation") {
+ console.info("Conversation used:", message.conversation)
+ } else if (message.type == "provider") {
+ provider_storage[message_id] = message.provider;
+ content_map.content.querySelector('.provider').innerHTML = `
+ ${message.provider.label ? message.provider.label : message.provider.name}
+ ${message.provider.model ? ' with ' + message.provider.model : ''}
+ `
+ } else if (message.type == "message") {
+ console.error(message.message)
+ } else if (message.type == "error") {
+ error_storage[message_id] = message.error
+ console.error(message.error);
+ content_map.inner.innerHTML += `An error occured: ${message.error}`;
+ }
+}
+
+ delete controller_storage[message_id];
+ if (!error_storage[message_id] && message_storage[message_id]) {
+ const message_provider = message_id in provider_storage ? provider_storage[message_id] : null;
+ await add_message(
+ window.conversation_id,
+ "assistant",
+ message_storage[message_id],
+ message_provider,
+ message_index,
+ synthesize_storage[message_id]
+ );
+ await safe_load_conversation(window.conversation_id, message_index == -1);
+ } else {
+ let cursorDiv = message_el.querySelector(".cursor");
+ if (cursorDiv) cursorDiv.parentNode.removeChild(cursorDiv);
+ }
+ if (message_index == -1) {
+ await scroll_to_bottom();
+ }
+ await safe_remove_cancel_button();
+ await register_message_buttons();
+ await load_conversations();
+ regenerate.classList.remove("regenerate-hidden");
+};
+
+async function scroll_to_bottom() {
+ window.scrollTo(0, 0);
+ message_box.scrollTop = message_box.scrollHeight;
+}
+
+const clear_conversations = async () => {
+ const elements = box_conversations.childNodes;
+ let index = elements.length;
+
+ if (index > 0) {
+ while (index--) {
+ const element = elements[index];
+ if (
+ element.nodeType === Node.ELEMENT_NODE &&
+ element.tagName.toLowerCase() !== `button`
+ ) {
+ box_conversations.removeChild(element);
+ }
+ }
+ }
+};
+
+const clear_conversation = async () => {
+ let messages = message_box.getElementsByTagName(`div`);
+
+ while (messages.length > 0) {
+ message_box.removeChild(messages[0]);
+ }
+};
+
+async function set_conversation_title(conversation_id, title) {
+ conversation = await get_conversation(conversation_id)
+ conversation.new_title = title;
+ appStorage.setItem(
+ `conversation:${conversation.id}`,
+ JSON.stringify(conversation)
+ );
+}
+
+const show_option = async (conversation_id) => {
+ const conv = document.getElementById(`conv-${conversation_id}`);
+ const choi = document.getElementById(`cho-${conversation_id}`);
+
+ conv.style.display = "none";
+ choi.style.display = "block";
+
+ const el = document.getElementById(`convo-${conversation_id}`);
+ const trash_el = el.querySelector(".fa-trash");
+ const title_el = el.querySelector("span.convo-title");
+ if (title_el) {
+ const left_el = el.querySelector(".left");
+ const input_el = document.createElement("input");
+ input_el.value = title_el.innerText;
+ input_el.classList.add("convo-title");
+ input_el.onfocus = () => trash_el.style.display = "none";
+ input_el.onchange = () => set_conversation_title(conversation_id, input_el.value);
+ left_el.removeChild(title_el);
+ left_el.appendChild(input_el);
+ }
+};
+
+const hide_option = async (conversation_id) => {
+ const conv = document.getElementById(`conv-${conversation_id}`);
+ const choi = document.getElementById(`cho-${conversation_id}`);
+
+ conv.style.display = "block";
+ choi.style.display = "none";
+
+ const el = document.getElementById(`convo-${conversation_id}`);
+ el.querySelector(".fa-trash").style.display = "";
+ const input_el = el.querySelector("input.convo-title");
+ if (input_el) {
+ const left_el = el.querySelector(".left");
+ const span_el = document.createElement("span");
+ span_el.innerText = input_el.value;
+ span_el.classList.add("convo-title");
+ span_el.onclick = () => set_conversation(conversation_id);
+ left_el.removeChild(input_el);
+ left_el.appendChild(span_el);
+ }
+};
+
+const delete_conversation = async (conversation_id) => {
+ appStorage.removeItem(`conversation:${conversation_id}`);
+
+ const conversation = document.getElementById(`convo-${conversation_id}`);
+ conversation.remove();
+
+ if (window.conversation_id == conversation_id) {
+ await new_conversation();
+ }
+
+ await load_conversations();
+};
+
+const set_conversation = async (conversation_id) => {
+ history.pushState({}, null, `/chat/${conversation_id}`);
+ window.conversation_id = conversation_id;
+
+ await clear_conversation();
+ await load_conversation(conversation_id);
+ load_conversations();
+ hide_sidebar();
+ log_storage.classList.add("hidden");
+};
+
+const new_conversation = async () => {
+ history.pushState({}, null, `/chat/`);
+ window.conversation_id = uuid();
+
+ await clear_conversation();
+ if (systemPrompt) {
+ systemPrompt.value = "";
+ }
+ load_conversations();
+ hide_sidebar();
+ log_storage.classList.add("hidden");
+ say_hello();
+};
+
+const load_conversation = async (conversation_id, scroll=true) => {
+ let conversation = await get_conversation(conversation_id);
+ let messages = conversation?.items || [];
+
+ if (!conversation) {
+ return;
+ }
+
+ if (systemPrompt) {
+ systemPrompt.value = conversation.system || "";
+ }
+
+ let elements = "";
+ let last_model = null;
+ for (i in messages) {
+ let item = messages[i];
+ last_model = item.provider?.model;
+ let next_i = parseInt(i) + 1;
+ let next_provider = item.provider ? item.provider : (messages.length > next_i ? messages[next_i].provider : null);
+ let provider_label = item.provider?.label ? item.provider.label : item.provider?.name;
+ let provider_link = item.provider?.name ? `${provider_label}` : "";
+ let provider = provider_link ? `