DHEIVER committed
Commit be76344
1 parent: 075619e

Upload 10 files
README.md ADDED
@@ -0,0 +1,56 @@
+ ---
+ tags:
+ - medical
+ license: mit
+ datasets:
+ - Mohammed-Altaf/medical-instruction-120k
+ language:
+ - en
+ library_name: transformers
+ ---
+ Please note that the chatbot is designed for research purposes only and is not intended for use in real medical settings. While the chatbot has been trained to provide accurate and helpful responses, it is not a substitute for professional medical advice, diagnosis, or treatment. The information provided by the chatbot should not be used to make medical decisions, and any health concerns should be addressed by a licensed healthcare provider.
+
+ ## Quickstart
+
+ ```python
+ import torch
+ from transformers import GPT2LMHeadModel, GPT2Tokenizer
+
+ path = "Mohammed-Altaf/Medical-ChatBot"
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ tokenizer = GPT2Tokenizer.from_pretrained(path)
+ model = GPT2LMHeadModel.from_pretrained(path).to(device)
+
+ prompt_input = (
+     "The conversation between human and AI assistant.\n"
+     "[|Human|] {input}\n"
+     "[|AI|]"
+ )
+ sentence = prompt_input.format_map({'input': "what is parkinson's disease?"})
+ inputs = tokenizer(sentence, return_tensors="pt").to(device)
+
+ with torch.no_grad():
+     beam_output = model.generate(
+         **inputs,
+         min_new_tokens=1,
+         max_length=512,
+         num_beams=3,
+         repetition_penalty=1.2,
+         early_stopping=True,
+         eos_token_id=198,
+     )
+ print(tokenizer.decode(beam_output[0], skip_special_tokens=True))
+ ```
+
+ ## Example Outputs
+ ```
+ The conversation between human and AI assistant.
+ [|Human|] what is parkinson's disease?
+ [|AI|] Parkinson's disease is a neurodegenerative disorder that affects movement. It is caused by the loss of dopamine-producing cells in the brain.
+ ```
+
+ ```
+ The conversation between human and AI assistant.
+ [|Human|] what type of honey is best for a bad covid cough?
+ [|AI|] Manuka honey has been shown to have anti-inflammatory and antibacterial properties that can help alleviate symptoms of a bad covid cough.
+ ```
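A note on the quickstart above: `eos_token_id=198` is presumably the newline token in this tokenizer, so beam search stops at the end of the single `[|AI|]` turn rather than at the model's configured EOS (`[PAD]`, id 28895). A minimal sketch to verify that assumption before relying on it:

```python
# Hedged check: is token id 198 really "\n" for this tokenizer?
from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("Mohammed-Altaf/Medical-ChatBot")
print(tokenizer.encode("\n"))  # expected: [198] if the assumption holds
```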
added_tokens.json ADDED
@@ -0,0 +1,3 @@
+ {
+   "[PAD]": 28895
+ }
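`added_tokens.json` appends `[PAD]` as id 28895, one past the base vocabulary; this matches the `bos_token_id`/`eos_token_id` in `config.json` below and explains its `vocab_size` of 28896. A quick sanity check (a sketch, using the repo id from the README):

```python
# Sketch: confirm [PAD] sits at id 28895 and the full vocabulary is 28896 tokens.
from transformers import GPT2Tokenizer

tok = GPT2Tokenizer.from_pretrained("Mohammed-Altaf/Medical-ChatBot")
print(tok.convert_tokens_to_ids("[PAD]"))  # expected: 28895
print(len(tok))                            # expected: 28896
```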
config.json ADDED
@@ -0,0 +1,39 @@
+ {
+   "_name_or_path": "biomedLM_baize/checkpoint-1000",
+   "activation_function": "gelu_new",
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 28895,
+   "embd_pdrop": 0.1,
+   "eos_token_id": 28895,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 1024,
+   "n_embd": 2560,
+   "n_head": 20,
+   "n_inner": null,
+   "n_layer": 32,
+   "n_positions": 1024,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": true,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.26.1",
+   "use_cache": false,
+   "vocab_size": 28896
+ }
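The config describes a 32-layer, 2560-wide GPT-2 variant (BioMedLM-sized, per the `biomedLM_baize` checkpoint path). A back-of-the-envelope sketch of the parameter count it implies:

```python
# Sketch: estimate the parameter count from config.json alone.
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("Mohammed-Altaf/Medical-ChatBot")
# Each GPT-2 block carries roughly 12 * n_embd^2 weights (attention + MLP);
# token and position embeddings come on top.
approx = 12 * cfg.n_layer * cfg.n_embd**2 + (cfg.vocab_size + cfg.n_positions) * cfg.n_embd
print(f"~{approx / 1e9:.1f}B parameters")  # ~2.6B, i.e. ~10.4 GB in float32
```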
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 28895,
+   "eos_token_id": 28895,
+   "transformers_version": "4.26.1",
+   "use_cache": false
+ }
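These defaults make `[PAD]` (28895) the stopping token; the README quickstart overrides this per call with `eos_token_id=198`. They can be inspected directly (a sketch):

```python
# Sketch: load the repo-level generation defaults.
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("Mohammed-Altaf/Medical-ChatBot")
print(gen_cfg.eos_token_id)  # 28895, i.e. the [PAD] token
```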
.gitattributes ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2cac4bd8d32e7fce74c7cc3dc5bba17fefa7c26942f36916557ee9a3a3d7f0ba
+ size 10410689701
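For reference, this ~10.4 GB pointer size is consistent with the ~2.6B float32 parameters estimated from `config.json` above.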
special_tokens_map.json ADDED
@@ -0,0 +1,18 @@
+ {
+   "bos_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": "[PAD]",
+   "pad_token": "[PAD]",
+   "unk_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
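Note that `eos_token` and `pad_token` here are `[PAD]`, while `tokenizer_config.json` below still carries `<|endoftext|>` and `null`; on loading, the entries in `special_tokens_map.json` should take precedence (an assumption worth verifying against the pinned transformers 4.26.1):

```python
# Sketch: check which special tokens win once the tokenizer is loaded.
from transformers import GPT2Tokenizer

tok = GPT2Tokenizer.from_pretrained("Mohammed-Altaf/Medical-ChatBot")
print(tok.eos_token, tok.pad_token)  # expected: [PAD] [PAD]
```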
tokenizer_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "add_bos_token": false,
+   "add_prefix_space": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "errors": "replace",
+   "model_max_length": 1024,
+   "name_or_path": "biomedLM_baize/checkpoint-1000",
+   "pad_token": null,
+   "special_tokens_map_file": null,
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
vocab.json ADDED
The diff for this file is too large to render. See raw diff