# File size: 6,004 Bytes
# 2544492
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# NOTE(review): removed an extraction artifact here — a run of stray line
# numbers (1-176) that are not valid INI and would be rejected as bare keys
# by strict parsers such as Python configparser.
[API]
anthropic_api_key = <anthropic_API_Key>
anthropic_model = claude-3-5-sonnet-20240620
cohere_api_key = <Cohere_API_Key>
cohere_model = command-r-plus
groq_api_key = <Groq_API_Key>
groq_model = llama3-70b-8192
openai_api_key = <OpenAI_API_Key>
openai_model = gpt-4o
huggingface_api_key = <huggingface_api_token>
huggingface_model = CohereForAI/c4ai-command-r-plus
openrouter_api_key = <OpenRouter_API_Key>
openrouter_model = mistralai/mistral-7b-instruct:free
deepseek_api_key = <DeepSeek_API_Key>
deepseek_model = deepseek-chat
mistral_model = mistral-large-latest
mistral_api_key = <mistral_api_key>
custom_openai_api = <key_here>
custom_openai_api_ip = <api_ip_here>

[Local-API]
kobold_api_key =
kobold_api_IP = http://127.0.0.1:5001/api/v1/generate
llama_api_key = <llama.cpp api key>
llama_api_IP = http://127.0.0.1:8080/completion
ooba_api_key = <ooba api key>
ooba_api_IP = http://127.0.0.1:5000/v1/chat/completions
tabby_api_IP = http://127.0.0.1:5000/v1/chat/completions
tabby_api_key = <tabbyapi key>
vllm_api_IP = http://127.0.0.1:8000/v1/chat/completions
vllm_model = <vllm model>
ollama_api_IP = http://127.0.0.1:11434/api/generate
ollama_api_key = <ollama api key>
ollama_model = <ollama model>
aphrodite_api_IP = http://127.0.0.1:8080/completion
aphrodite_api_key = <aphrodite_api_key>

[Paths]
output_path = Results
logging_file = Logs

[Processing]
processing_choice = cuda
# Can swap 'cuda' with 'cpu' if you want to use your CPU for processing

[Settings]
chunk_duration = 30
words_per_second = 3

[Prompts]
prompt_sample = "What is the meaning of life?"
video_summarize_prompt = "Above is the transcript of a video. Please read through the transcript carefully. Identify the main topics that are discussed over the course of the transcript. Then, summarize the key points about each main topic in bullet points. The bullet points should cover the key information conveyed about each topic in the video, but should be much shorter than the full transcript. Please output your bullet point summary inside <bulletpoints> tags. Do not repeat yourself while writing the summary."

[Database]
type = sqlite
sqlite_path = /Databases/media_summary.db
elasticsearch_host = localhost
elasticsearch_port = 9200
# Additionally you can use elasticsearch as the database type, just replace `sqlite` with `elasticsearch` for `type` and provide the `elasticsearch_host` and `elasticsearch_port` of your configured ES instance.
chroma_db_path = chroma_db

[Embeddings]
provider = openai
# Can be 'openai', 'local', or 'huggingface'
model = text-embedding-3-small
# Model name or path
api_key = your_api_key_here
api_url = http://localhost:8080/v1/embeddings
# Only needed for 'local' provider

[Chunking]
method = words
max_size = 400
overlap = 200
adaptive = false
multi_level = false
language = english

#[Comments]
#OpenAI Models:
#    gpt-4o
#    gpt-4-turbo
#Anthropic Models:
#    claude-3-5-sonnet-20240620
#Cohere Models:
#    command-r-plus
#DeepSeek Models:
#    deepseek-chat
#Groq Models:
#    llama3-70b-8192
#Mistral Models:
#    mistral-large-latest
#    open-mistral-nemo
#    codestral-latest
#    mistral-embed
#    open-mistral-7b
#    open-mixtral-8x7b
#    open-mixtral-8x22b
#    open-codestral-mamba


# --------------------------------------------------------------------------
# NOTE(review): everything below this line duplicates the sections above
# ([API], [Local-API], [Paths], [Processing], [Settings], [Prompts],
# [Database], [Embeddings], [Chunking]) with CONFLICTING values — e.g.
# anthropic_model (claude-3-5-sonnet-20240620 vs claude-3-sonnet-20240229),
# openai_model (gpt-4o vs gpt-4-turbo), and huggingface_api_key vs
# huggingface_api_token. Duplicate section headers are parser-dependent:
# Python configparser in its default strict mode raises
# DuplicateSectionError on this file, while last-wins parsers silently use
# the values below. Neither copy is a superset of the other (only the lower
# copy has backup_path; only the upper copy has the mistral, custom_openai
# and aphrodite keys), so confirm with the consuming code which set is
# current, merge into one copy of each section, and delete the other.
# --------------------------------------------------------------------------
[API]
anthropic_api_key = <anthropic_api_key>
anthropic_model = claude-3-sonnet-20240229
cohere_api_key = <your_cohere_api_key>
cohere_model = command-r-plus
groq_api_key = <your_groq_api_key>
groq_model = llama3-70b-8192
openai_api_key = <openai_api_key>
openai_model = gpt-4-turbo
huggingface_api_token = <huggingface_api_token>
huggingface_model = CohereForAI/c4ai-command-r-plus
openrouter_api_key = <openrouter_api_key>
openrouter_model = mistralai/mistral-7b-instruct:free
deepseek_api_key = <deepseek_api_key>
deepseek_model = deepseek-chat

[Local-API]
kobold_api_key = <kobold api key>
kobold_api_IP = http://127.0.0.1:5001/api/v1/generate
llama_api_key = <llama.cpp api key>
llama_api_IP = http://127.0.0.1:8080/completion
ooba_api_key = <ooba api key>
ooba_api_IP = http://127.0.0.1:5000/v1/chat/completions
tabby_api_IP = http://127.0.0.1:5000/v1/chat/completions
tabby_api_key = <tabbyapi key>
vllm_api_IP = http://127.0.0.1:8000/v1/chat/completions
vllm_model = <vllm model>
ollama_api_IP = http://127.0.0.1:11434/api/generate
ollama_api_key = <ollama api key>
ollama_model = <ollama model>

[Paths]
output_path = Results
logging_file = Logs

[Processing]
processing_choice = cuda

[Settings]
chunk_duration = 30
words_per_second = 3

[Prompts]
prompt_sample = "What is the meaning of life?"
video_summarize_prompt = "Above is the transcript of a video. Please read through the transcript carefully. Identify the main topics that are discussed over the course of the transcript. Then, summarize the key points about each main topic in bullet points. The bullet points should cover the key information conveyed about each topic in the video, but should be much shorter than the full transcript. Please output your bullet point summary inside <bulletpoints> tags. Do not repeat yourself while writing the summary."

[Database]
type = sqlite
sqlite_path = /Databases/media_summary.db
backup_path = /tldw_DB_Backups/
#Path to the backup location for the database. If the path does not exist, the backup will not be created.
elasticsearch_host = localhost
elasticsearch_port = 9200
# Additionally you can use elasticsearch as the database type, just replace `sqlite` with `elasticsearch` for `type` and provide the `elasticsearch_host` and `elasticsearch_port` of your configured ES instance.
chroma_db_path = chroma_db

[Embeddings]
provider = openai
# Can be 'openai', 'local', or 'huggingface'
model = text-embedding-3-small
# Model name or path
api_key = your_api_key_here
api_url = http://localhost:8080/v1/embeddings
# Only needed for 'local' provider

[Chunking]
method = words
max_size = 400
overlap = 200
adaptive = false
multi_level = false
language = english