IliaLarchenko committed on
Commit
c468e6a
1 Parent(s): 28d8a08

Improved .env examples

env_examples/.env.huggingface.example CHANGED
@@ -15,6 +15,14 @@ LLM_URL=https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-70B-
 LLM_TYPE=HF_API
 LLM_NAME=Meta-Llama-3-70B-Instruct
 
+# If you want to use any other model serving provider, the configuration will be similar
+# Below is an example for Groq
+# GROQ_API_KEY=gsk_YOUR_GROQ_API_KEY
+# LLM_URL=https://api.groq.com/openai/v1
+# LLM_TYPE=GROQ_API
+# LLM_NAME=llama3-70b-8192
+
+
 # The OpenAI Whisper family with more models is available on HuggingFace:
 # https://huggingface.co/collections/openai/whisper-release-6501bba2cf999715fd953013
 # You can also use any other compatible STT model from HuggingFace
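For reference, a minimal sketch of talking to such an OpenAI-compatible provider with the openai Python client, assuming the Groq values from the commented example above and a GROQ_API_KEY exported in the environment (the client library and the test prompt are illustrative, not part of this config):

import os

from openai import OpenAI

# Point the standard OpenAI client at the Groq endpoint from the example above.
client = OpenAI(
    base_url="https://api.groq.com/openai/v1",   # LLM_URL
    api_key=os.environ["GROQ_API_KEY"],          # assumed to be set in the shell
)

response = client.chat.completions.create(
    model="llama3-70b-8192",                     # LLM_NAME
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(response.choices[0].message.content)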
env_examples/.env.local.example CHANGED
@@ -1,5 +1,5 @@
-# You can also run models locally or on you own server and use them instead if they are compatible with HuggingFace API
-# For local models seletct HF_API as a type because they usse HuggingFace API
+# You can run models locally or on your own server and use them instead if they are compatible with the HuggingFace API
+# For local models select HF_API as the type because they use the HuggingFace API
 
 # Most probably you don't need a key for your local model
 # But if you have some kind of authentication compatible with the HuggingFace API you can use it here
@@ -8,21 +8,35 @@ HF_API_KEY=None
 # The main use case for local models is locally running LLMs
 # You can serve any model using Text Generation Inference from HuggingFace
 # https://github.com/huggingface/text-generation-inference
-# This project uses Messages API that is compatible with Open AI API and allows you to just plug and play OS models
-# Don't gorget to add '/v1' to the end of the URL
+# It uses the Messages API, which is compatible with the OpenAI API and lets you just plug and play open-source models
+# Don't forget to add '/v1' to the end of the URL
 # Assuming you have the Meta-Llama-3-8B-Instruct model running on your local server, your configuration will look like this
 LLM_URL=http://192.168.1.1:8080/v1
 LLM_TYPE=HF_API
 LLM_NAME=Meta-Llama-3-8B-Instruct
 
+# Another popular alternative is Ollama (https://ollama.com/); it supports the same API and the usage is identical
+# OLLAMA_API_KEY=ollama # you don't really need it
+# LLM_URL=http://192.168.1.128:11434/v1
+# LLM_TYPE=OLLAMA_API
+# LLM_NAME=llama3
+
 # Running an STT model locally is not straightforward
-# But for example you can one of the whispers models on your laptop
+# But, for example, you can run one of the Whisper models on your laptop
 # It requires a simple wrapper over the model to make it compatible with the HuggingFace API. Maybe I will share one in the future
 # But assuming you manage to run a local whisper server, your configuration will look like this
 STT_URL=http://127.0.0.1:5000/transcribe
 STT_TYPE=HF_API
 STT_NAME=whisper-base.en
 
+# You can also run STT models locally without an API
+# It will use the transformers library to run the model
+# I would not recommend doing it on a machine without a GPU, it will be too slow
+# STT_URL=None
+# STT_TYPE=HF_LOCAL
+# STT_NAME=openai/whisper-base.en
+
+
 # I don't see much value in running TTS models locally given the quality of online models
 # But if you have some kind of TTS model running on your local server you can use it here
 TTS_URL=http://127.0.0.1:5001/read
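The same client call works against a locally served model, which is a quick way to confirm that the '/v1' Messages API endpoint is reachable. A minimal sketch, assuming the TGI values from the example above (for Ollama, swap in its URL and model name); the test prompt is illustrative only:

from openai import OpenAI

# Local TGI server from the example; Ollama would be e.g. http://192.168.1.128:11434/v1
client = OpenAI(
    base_url="http://192.168.1.1:8080/v1",       # LLM_URL, note the '/v1' suffix
    api_key="None",                              # placeholder, local servers usually ignore it
)

response = client.chat.completions.create(
    model="Meta-Llama-3-8B-Instruct",            # LLM_NAME ("llama3" for Ollama)
    messages=[{"role": "user", "content": "Introduce yourself in one sentence."}],
)
print(response.choices[0].message.content)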
env_examples/.env.openai.example CHANGED
@@ -4,11 +4,12 @@
 # Set up your key here: https://platform.openai.com/api-keys
 OPENAI_API_KEY=sk-YOUR_OPENAI_API_KEY
 
-# "gpt-3.5-turbo" - ~3 seconds delay with good quality, recommended model
-# "gpt-4-turbo","gpt-4", etc. 10+ seconds delay but higher quality of responses
+# "gpt-4o-mini" - the fastest and cheapest model
+# "gpt-4o" - a good balance between speed and quality
+# "gpt-4-turbo", "gpt-4", "gpt-3.5-turbo" - older models with different pros and cons
 LLM_URL=https://api.openai.com/v1
 LLM_TYPE=OPENAI_API
-LLM_NAME=gpt-3.5-turbo
+LLM_NAME=gpt-4o-mini
 
 # "whisper-1" is the only OpenAI STT model available with OpenAI API
 STT_URL=https://api.openai.com/v1
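A minimal sketch of exercising this OpenAI configuration, assuming OPENAI_API_KEY is set and interview.wav is a hypothetical local audio file (both the file name and the prompt are illustrative only):

import os

from openai import OpenAI

client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])  # LLM_URL/STT_URL default to https://api.openai.com/v1

chat = client.chat.completions.create(
    model="gpt-4o-mini",                                # LLM_NAME from the example
    messages=[{"role": "user", "content": "Give me one short coding interview question."}],
)
print(chat.choices[0].message.content)

with open("interview.wav", "rb") as audio:              # hypothetical recording
    transcript = client.audio.transcriptions.create(model="whisper-1", file=audio)  # STT_NAME
print(transcript.text)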