davanstrien HF staff committed on
Commit
4dc4bf5
1 Parent(s): ff7e4c7

chore: Add requirements for vllm, outlines, llama_index, datasets, and rich

Browse files
notebooks/requirements.in ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ vllm>=0.5.0
2
+ outlines
3
+ llama_index
4
+ datasets>=2.18.0
5
+ rich
notebooks/requirements.txt ADDED
@@ -0,0 +1,517 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file was autogenerated by uv via the following command:
2
+ # uv pip compile requirements.in -o requirements.txt
3
+ aiohttp==3.9.5
4
+ # via
5
+ # datasets
6
+ # fsspec
7
+ # llama-index-core
8
+ # llama-index-legacy
9
+ # vllm
10
+ aiosignal==1.3.1
11
+ # via
12
+ # aiohttp
13
+ # ray
14
+ annotated-types==0.7.0
15
+ # via pydantic
16
+ anyio==4.4.0
17
+ # via
18
+ # httpx
19
+ # openai
20
+ # starlette
21
+ # watchfiles
22
+ async-timeout==4.0.3
23
+ # via aiohttp
24
+ attrs==23.2.0
25
+ # via
26
+ # aiohttp
27
+ # jsonschema
28
+ # referencing
29
+ beautifulsoup4==4.12.3
30
+ # via llama-index-readers-file
31
+ certifi==2024.6.2
32
+ # via
33
+ # httpcore
34
+ # httpx
35
+ # requests
36
+ charset-normalizer==3.3.2
37
+ # via requests
38
+ click==8.1.7
39
+ # via
40
+ # nltk
41
+ # ray
42
+ # typer
43
+ # uvicorn
44
+ cloudpickle==3.0.0
45
+ # via outlines
46
+ cmake==3.29.5.1
47
+ # via vllm
48
+ dataclasses-json==0.6.7
49
+ # via
50
+ # llama-index-core
51
+ # llama-index-legacy
52
+ datasets==2.19.2
53
+ # via
54
+ # -r requirements.in
55
+ # outlines
56
+ deprecated==1.2.14
57
+ # via
58
+ # llama-index-core
59
+ # llama-index-legacy
60
+ dill==0.3.7
61
+ # via
62
+ # datasets
63
+ # multiprocess
64
+ dirtyjson==1.0.8
65
+ # via
66
+ # llama-index-core
67
+ # llama-index-legacy
68
+ diskcache==5.6.3
69
+ # via outlines
70
+ distro==1.9.0
71
+ # via openai
72
+ dnspython==2.6.1
73
+ # via email-validator
74
+ email-validator==2.1.1
75
+ # via fastapi
76
+ exceptiongroup==1.2.1
77
+ # via anyio
78
+ fastapi==0.111.0
79
+ # via vllm
80
+ fastapi-cli==0.0.4
81
+ # via fastapi
82
+ filelock==3.15.1
83
+ # via
84
+ # datasets
85
+ # huggingface-hub
86
+ # ray
87
+ # torch
88
+ # transformers
89
+ # triton
90
+ # vllm
91
+ frozenlist==1.4.1
92
+ # via
93
+ # aiohttp
94
+ # aiosignal
95
+ # ray
96
+ fsspec==2024.3.1
97
+ # via
98
+ # datasets
99
+ # huggingface-hub
100
+ # llama-index-core
101
+ # llama-index-legacy
102
+ # torch
103
+ greenlet==3.0.3
104
+ # via sqlalchemy
105
+ h11==0.14.0
106
+ # via
107
+ # httpcore
108
+ # uvicorn
109
+ httpcore==1.0.5
110
+ # via httpx
111
+ httptools==0.6.1
112
+ # via uvicorn
113
+ httpx==0.27.0
114
+ # via
115
+ # fastapi
116
+ # llama-index-core
117
+ # llama-index-legacy
118
+ # llamaindex-py-client
119
+ # openai
120
+ huggingface-hub==0.23.3
121
+ # via
122
+ # datasets
123
+ # tokenizers
124
+ # transformers
125
+ idna==3.7
126
+ # via
127
+ # anyio
128
+ # email-validator
129
+ # httpx
130
+ # requests
131
+ # yarl
132
+ interegular==0.3.3
133
+ # via
134
+ # lm-format-enforcer
135
+ # outlines
136
+ jinja2==3.1.4
137
+ # via
138
+ # fastapi
139
+ # outlines
140
+ # torch
141
+ joblib==1.4.2
142
+ # via nltk
143
+ jsonschema==4.22.0
144
+ # via
145
+ # outlines
146
+ # ray
147
+ jsonschema-specifications==2023.12.1
148
+ # via jsonschema
149
+ lark==1.1.9
150
+ # via outlines
151
+ llama-index==0.10.44
152
+ # via -r requirements.in
153
+ llama-index-agent-openai==0.2.7
154
+ # via
155
+ # llama-index
156
+ # llama-index-program-openai
157
+ llama-index-cli==0.1.12
158
+ # via llama-index
159
+ llama-index-core==0.10.44
160
+ # via
161
+ # llama-index
162
+ # llama-index-agent-openai
163
+ # llama-index-cli
164
+ # llama-index-embeddings-openai
165
+ # llama-index-indices-managed-llama-cloud
166
+ # llama-index-llms-openai
167
+ # llama-index-multi-modal-llms-openai
168
+ # llama-index-program-openai
169
+ # llama-index-question-gen-openai
170
+ # llama-index-readers-file
171
+ # llama-index-readers-llama-parse
172
+ # llama-parse
173
+ llama-index-embeddings-openai==0.1.10
174
+ # via
175
+ # llama-index
176
+ # llama-index-cli
177
+ llama-index-indices-managed-llama-cloud==0.1.6
178
+ # via llama-index
179
+ llama-index-legacy==0.9.48
180
+ # via llama-index
181
+ llama-index-llms-openai==0.1.22
182
+ # via
183
+ # llama-index
184
+ # llama-index-agent-openai
185
+ # llama-index-cli
186
+ # llama-index-multi-modal-llms-openai
187
+ # llama-index-program-openai
188
+ # llama-index-question-gen-openai
189
+ llama-index-multi-modal-llms-openai==0.1.6
190
+ # via llama-index
191
+ llama-index-program-openai==0.1.6
192
+ # via
193
+ # llama-index
194
+ # llama-index-question-gen-openai
195
+ llama-index-question-gen-openai==0.1.3
196
+ # via llama-index
197
+ llama-index-readers-file==0.1.25
198
+ # via llama-index
199
+ llama-index-readers-llama-parse==0.1.4
200
+ # via llama-index
201
+ llama-parse==0.4.4
202
+ # via llama-index-readers-llama-parse
203
+ llamaindex-py-client==0.1.19
204
+ # via
205
+ # llama-index-core
206
+ # llama-index-indices-managed-llama-cloud
207
+ llvmlite==0.42.0
208
+ # via numba
209
+ lm-format-enforcer==0.10.1
210
+ # via vllm
211
+ markdown-it-py==3.0.0
212
+ # via rich
213
+ markupsafe==2.1.5
214
+ # via jinja2
215
+ marshmallow==3.21.3
216
+ # via dataclasses-json
217
+ mdurl==0.1.2
218
+ # via markdown-it-py
219
+ mpmath==1.3.0
220
+ # via sympy
221
+ msgpack==1.0.8
222
+ # via ray
223
+ multidict==6.0.5
224
+ # via
225
+ # aiohttp
226
+ # yarl
227
+ multiprocess==0.70.15
228
+ # via datasets
229
+ mypy-extensions==1.0.0
230
+ # via typing-inspect
231
+ nest-asyncio==1.6.0
232
+ # via
233
+ # llama-index-core
234
+ # llama-index-legacy
235
+ # outlines
236
+ networkx==3.2.1
237
+ # via
238
+ # llama-index-core
239
+ # llama-index-legacy
240
+ # torch
241
+ ninja==1.11.1.1
242
+ # via vllm
243
+ nltk==3.8.1
244
+ # via
245
+ # llama-index-core
246
+ # llama-index-legacy
247
+ numba==0.59.1
248
+ # via outlines
249
+ numpy==1.26.4
250
+ # via
251
+ # datasets
252
+ # llama-index-core
253
+ # llama-index-legacy
254
+ # numba
255
+ # outlines
256
+ # pandas
257
+ # pyarrow
258
+ # transformers
259
+ # vllm
260
+ # xformers
261
+ nvidia-cublas-cu12==12.1.3.1
262
+ # via
263
+ # nvidia-cudnn-cu12
264
+ # nvidia-cusolver-cu12
265
+ # torch
266
+ nvidia-cuda-cupti-cu12==12.1.105
267
+ # via torch
268
+ nvidia-cuda-nvrtc-cu12==12.1.105
269
+ # via torch
270
+ nvidia-cuda-runtime-cu12==12.1.105
271
+ # via torch
272
+ nvidia-cudnn-cu12==8.9.2.26
273
+ # via torch
274
+ nvidia-cufft-cu12==11.0.2.54
275
+ # via torch
276
+ nvidia-curand-cu12==10.3.2.106
277
+ # via torch
278
+ nvidia-cusolver-cu12==11.4.5.107
279
+ # via torch
280
+ nvidia-cusparse-cu12==12.1.0.106
281
+ # via
282
+ # nvidia-cusolver-cu12
283
+ # torch
284
+ nvidia-ml-py==12.555.43
285
+ # via vllm
286
+ nvidia-nccl-cu12==2.20.5
287
+ # via torch
288
+ nvidia-nvjitlink-cu12==12.5.40
289
+ # via
290
+ # nvidia-cusolver-cu12
291
+ # nvidia-cusparse-cu12
292
+ nvidia-nvtx-cu12==12.1.105
293
+ # via torch
294
+ openai==1.34.0
295
+ # via
296
+ # llama-index-agent-openai
297
+ # llama-index-core
298
+ # llama-index-legacy
299
+ # vllm
300
+ orjson==3.10.4
301
+ # via fastapi
302
+ outlines==0.0.43
303
+ # via
304
+ # -r requirements.in
305
+ # vllm
306
+ packaging==24.1
307
+ # via
308
+ # datasets
309
+ # huggingface-hub
310
+ # lm-format-enforcer
311
+ # marshmallow
312
+ # ray
313
+ # transformers
314
+ pandas==2.2.2
315
+ # via
316
+ # datasets
317
+ # llama-index-core
318
+ # llama-index-legacy
319
+ pillow==10.3.0
320
+ # via
321
+ # llama-index-core
322
+ # vllm
323
+ prometheus-client==0.20.0
324
+ # via
325
+ # prometheus-fastapi-instrumentator
326
+ # vllm
327
+ prometheus-fastapi-instrumentator==7.0.0
328
+ # via vllm
329
+ protobuf==5.27.1
330
+ # via ray
331
+ psutil==5.9.8
332
+ # via vllm
333
+ py-cpuinfo==9.0.0
334
+ # via vllm
335
+ pyairports==2.1.1
336
+ # via outlines
337
+ pyarrow==16.1.0
338
+ # via datasets
339
+ pyarrow-hotfix==0.6
340
+ # via datasets
341
+ pycountry==24.6.1
342
+ # via outlines
343
+ pydantic==2.7.4
344
+ # via
345
+ # fastapi
346
+ # llamaindex-py-client
347
+ # lm-format-enforcer
348
+ # openai
349
+ # outlines
350
+ # vllm
351
+ pydantic-core==2.18.4
352
+ # via pydantic
353
+ pygments==2.18.0
354
+ # via rich
355
+ pypdf==4.2.0
356
+ # via llama-index-readers-file
357
+ python-dateutil==2.9.0.post0
358
+ # via pandas
359
+ python-dotenv==1.0.1
360
+ # via uvicorn
361
+ python-multipart==0.0.9
362
+ # via fastapi
363
+ pytz==2024.1
364
+ # via pandas
365
+ pyyaml==6.0.1
366
+ # via
367
+ # datasets
368
+ # huggingface-hub
369
+ # llama-index-core
370
+ # lm-format-enforcer
371
+ # ray
372
+ # transformers
373
+ # uvicorn
374
+ ray==2.24.0
375
+ # via vllm
376
+ referencing==0.35.1
377
+ # via
378
+ # jsonschema
379
+ # jsonschema-specifications
380
+ # outlines
381
+ regex==2024.5.15
382
+ # via
383
+ # nltk
384
+ # tiktoken
385
+ # transformers
386
+ requests==2.32.3
387
+ # via
388
+ # datasets
389
+ # huggingface-hub
390
+ # llama-index-core
391
+ # llama-index-legacy
392
+ # outlines
393
+ # ray
394
+ # tiktoken
395
+ # transformers
396
+ # vllm
397
+ rich==13.7.1
398
+ # via
399
+ # -r requirements.in
400
+ # typer
401
+ rpds-py==0.18.1
402
+ # via
403
+ # jsonschema
404
+ # referencing
405
+ safetensors==0.4.3
406
+ # via transformers
407
+ sentencepiece==0.2.0
408
+ # via vllm
409
+ shellingham==1.5.4
410
+ # via typer
411
+ six==1.16.0
412
+ # via python-dateutil
413
+ sniffio==1.3.1
414
+ # via
415
+ # anyio
416
+ # httpx
417
+ # openai
418
+ soupsieve==2.5
419
+ # via beautifulsoup4
420
+ sqlalchemy==2.0.30
421
+ # via
422
+ # llama-index-core
423
+ # llama-index-legacy
424
+ starlette==0.37.2
425
+ # via
426
+ # fastapi
427
+ # prometheus-fastapi-instrumentator
428
+ striprtf==0.0.26
429
+ # via llama-index-readers-file
430
+ sympy==1.12.1
431
+ # via torch
432
+ tenacity==8.3.0
433
+ # via
434
+ # llama-index-core
435
+ # llama-index-legacy
436
+ tiktoken==0.7.0
437
+ # via
438
+ # llama-index-core
439
+ # llama-index-legacy
440
+ # vllm
441
+ tokenizers==0.19.1
442
+ # via
443
+ # transformers
444
+ # vllm
445
+ torch==2.3.0
446
+ # via
447
+ # vllm
448
+ # vllm-flash-attn
449
+ # xformers
450
+ tqdm==4.66.4
451
+ # via
452
+ # datasets
453
+ # huggingface-hub
454
+ # llama-index-core
455
+ # nltk
456
+ # openai
457
+ # outlines
458
+ # transformers
459
+ transformers==4.41.2
460
+ # via vllm
461
+ triton==2.3.0
462
+ # via torch
463
+ typer==0.12.3
464
+ # via fastapi-cli
465
+ typing-extensions==4.12.2
466
+ # via
467
+ # anyio
468
+ # fastapi
469
+ # huggingface-hub
470
+ # llama-index-core
471
+ # llama-index-legacy
472
+ # openai
473
+ # pydantic
474
+ # pydantic-core
475
+ # pypdf
476
+ # sqlalchemy
477
+ # starlette
478
+ # torch
479
+ # typer
480
+ # typing-inspect
481
+ # uvicorn
482
+ # vllm
483
+ typing-inspect==0.9.0
484
+ # via
485
+ # dataclasses-json
486
+ # llama-index-core
487
+ # llama-index-legacy
488
+ tzdata==2024.1
489
+ # via pandas
490
+ ujson==5.10.0
491
+ # via fastapi
492
+ urllib3==2.2.1
493
+ # via requests
494
+ uvicorn==0.30.1
495
+ # via
496
+ # fastapi
497
+ # vllm
498
+ uvloop==0.19.0
499
+ # via uvicorn
500
+ vllm==0.5.0
501
+ # via -r requirements.in
502
+ vllm-flash-attn==2.5.9
503
+ # via vllm
504
+ watchfiles==0.22.0
505
+ # via uvicorn
506
+ websockets==12.0
507
+ # via uvicorn
508
+ wrapt==1.16.0
509
+ # via
510
+ # deprecated
511
+ # llama-index-core
512
+ xformers==0.0.26.post1
513
+ # via vllm
514
+ xxhash==3.4.1
515
+ # via datasets
516
+ yarl==1.9.4
517
+ # via aiohttp