matt-tries-dl committed on
Commit
ce9f9a1
1 Parent(s): 39b2513

very basics

Browse files
Files changed (5) hide show
  1. .gitignore +1 -0
  2. README +15 -1
  3. llama_test.ipynb +93 -0
  4. requirements.txt +8 -0
  5. uninstall.txt +1 -0
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ .venv
README CHANGED
@@ -1 +1,15 @@
1
- #### checking git capabilities of hugging face
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Setup
2
+ This is for the GCP VM. Mine came installed with Python 3.7 and CUDA 11.3.
3
+ ### Create a venv
4
+ `python3 -m venv .venv`
5
+ `source .venv/bin/activate`
6
+ You may have to install python3-venv on the machine.
7
+ ### Install
8
+ ```
9
+ pip install -r requirements.txt
10
+ pip uninstall -r uninstall.txt
11
+ ```
12
+ I had to uninstall some CUDA packages that torch installed to make this work.
13
+
14
+
15
+
llama_test.ipynb ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "metadata": {},
7
+ "outputs": [
8
+ {
9
+ "data": {
10
+ "text/plain": [
11
+ "True"
12
+ ]
13
+ },
14
+ "execution_count": 1,
15
+ "metadata": {},
16
+ "output_type": "execute_result"
17
+ }
18
+ ],
19
+ "source": [
20
+ "import torch\n",
21
+ "torch.cuda.is_available()"
22
+ ]
23
+ },
24
+ {
25
+ "cell_type": "code",
26
+ "execution_count": 3,
27
+ "metadata": {},
28
+ "outputs": [
29
+ {
30
+ "name": "stderr",
31
+ "output_type": "stream",
32
+ "text": [
33
+ "The tokenizer class you load from this checkpoint is not the same type as the class this function is called from. It may result in unexpected tokenization. \n",
34
+ "The tokenizer class you load from this checkpoint is 'LLaMATokenizer'. \n",
35
+ "The class this function is called from is 'LlamaTokenizer'.\n"
36
+ ]
37
+ },
38
+ {
39
+ "data": {
40
+ "application/vnd.jupyter.widget-view+json": {
41
+ "model_id": "b687cc7da1a74a058775e5db887f0634",
42
+ "version_major": 2,
43
+ "version_minor": 0
44
+ },
45
+ "text/plain": [
46
+ "Loading checkpoint shards: 0%| | 0/33 [00:00<?, ?it/s]"
47
+ ]
48
+ },
49
+ "metadata": {},
50
+ "output_type": "display_data"
51
+ }
52
+ ],
53
+ "source": [
54
+ "from transformers import LlamaTokenizer, LlamaForCausalLM\n",
55
+ "tokenizer = LlamaTokenizer.from_pretrained(\"decapoda-research/llama-7b-hf\", add_eos_token=True)\n",
56
+ "\n",
57
+ "model = LlamaForCausalLM.from_pretrained(\n",
58
+ " \"decapoda-research/llama-7b-hf\",\n",
59
+ " load_in_8bit=True,\n",
60
+ " device_map=\"auto\",\n",
61
+ " torch_dtype=torch.float16\n",
62
+ ")\n"
63
+ ]
64
+ }
65
+ ],
66
+ "metadata": {
67
+ "kernelspec": {
68
+ "display_name": ".venv",
69
+ "language": "python",
70
+ "name": "python3"
71
+ },
72
+ "language_info": {
73
+ "codemirror_mode": {
74
+ "name": "ipython",
75
+ "version": 3
76
+ },
77
+ "file_extension": ".py",
78
+ "mimetype": "text/x-python",
79
+ "name": "python",
80
+ "nbconvert_exporter": "python",
81
+ "pygments_lexer": "ipython3",
82
+ "version": "3.7.3"
83
+ },
84
+ "orig_nbformat": 4,
85
+ "vscode": {
86
+ "interpreter": {
87
+ "hash": "6a381460736e8a0eabfb35eafae436ba15c06439de44e28b965ea473bd8dda90"
88
+ }
89
+ }
90
+ },
91
+ "nbformat": 4,
92
+ "nbformat_minor": 2
93
+ }
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ numpy
2
+ ipykernel
3
+ ipywidgets
4
+ torch
5
+ sentencepiece
6
+ transformers
7
+ accelerate
8
+ bitsandbytes
uninstall.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ nvidia_cublas_cu11