Upload 4 files

- README.md +38 -0
- my_dataset.py +46 -0
- test_datasets.jsonl +0 -0
- validation_datasets.jsonl +0 -0
README.md
CHANGED
@@ -1,3 +1,41 @@
 ---
 license: apache-2.0
+task_categories:
+- text-generation
+language:
+- zh
+tags:
+- medical
+size_categories:
+- 100K<n<1M
 ---
+# Dataset Card for Huatuo_encyclopedia_qa
+
+## Dataset Description
+
+- **Homepage:** https://www.huatuogpt.cn/
+- **Repository:** https://github.com/FreedomIntelligence/HuatuoGPT
+- **Paper:** https://arxiv.org/abs/2305.01526
+- **Leaderboard:**
+- **Point of Contact:**
+
+### Dataset Summary
+
+This dataset contains a total of 364,420 medical QA pairs, some of which ask the same question phrased in several different ways. The QA pairs were extracted from plain texts (e.g., medical encyclopedias and medical articles): we collected 8,699 encyclopedia entries for diseases and 2,736 encyclopedia entries for medicines from Chinese Wikipedia, and we crawled 226,432 high-quality medical articles from the Qianwen Health website.
+
+## Dataset Creation
+
+### Source Data
+
+- https://zh.wikipedia.org/wiki/
+- https://51zyzy.com/
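For reference, the loading script added below expects each line of the JSONL files to be an object with parallel string lists. A hypothetical record (invented values; field names taken from my_dataset.py) might look like:

```json
{"questions": ["What are the early symptoms of hypertension?", "How does hypertension usually first present?"], "answers": ["Early hypertension is often asymptomatic; some patients report headache or dizziness."]}
```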
my_dataset.py
ADDED
@@ -0,0 +1,46 @@
+from datasets import DatasetInfo, Features, Split, SplitGenerator, GeneratorBasedBuilder, Value, Sequence
+import json
+
+
+class MyDataset(GeneratorBasedBuilder):
+    def _info(self):
+        return DatasetInfo(
+            features=Features({
+                "questions": Sequence(Value("string")),
+                "answers": Sequence(Value("string")),
+            }),
+            supervised_keys=("questions", "answers"),
+            homepage="https://github.com/FreedomIntelligence/HuatuoGPT",
+            citation="...",
+        )
+
+    def _split_generators(self, dl_manager):
+        # The JSONL files live alongside this script in the repository.
+        train_path = "train_datasets.jsonl"
+        validation_path = "validation_datasets.jsonl"
+        test_path = "test_datasets.jsonl"
+
+        return [
+            SplitGenerator(name=Split.TRAIN, gen_kwargs={"filepath": train_path}),
+            SplitGenerator(name=Split.VALIDATION, gen_kwargs={"filepath": validation_path}),
+            SplitGenerator(name=Split.TEST, gen_kwargs={"filepath": test_path}),
+        ]
+
+    def _generate_examples(self, filepath):
+        with open(filepath, encoding="utf-8") as f:
+            for id_, row in enumerate(f):
+                # Each line is a JSON object with "questions" and "answers" string lists.
+                data = json.loads(row)
+                yield id_, {
+                    "questions": data["questions"],
+                    "answers": data["answers"],
+                }
+
+
+if __name__ == '__main__':
+    from datasets import load_dataset
+
+    dataset = load_dataset("my_dataset.py")
+
+    print(dataset)
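For context, a minimal sketch of loading this script with 🤗 Datasets, assuming the three JSONL files sit next to my_dataset.py (split names follow the script above; `trust_remote_code` may be required by newer `datasets` versions to run loading scripts):

```python
from datasets import load_dataset

# Load a single split through the loading script.
train = load_dataset("my_dataset.py", split="train", trust_remote_code=True)

example = train[0]
print(example["questions"][0])  # one phrasing of the question
print(example["answers"][0])    # the corresponding answer
```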
test_datasets.jsonl
ADDED
The diff for this file is too large to render. See raw diff.

validation_datasets.jsonl
ADDED
The diff for this file is too large to render. See raw diff.
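Since the JSONL diffs are too large to render here, one hypothetical way to inspect the uploaded files locally (file names as committed; assumes line-delimited JSON as my_dataset.py expects):

```python
import json

# Peek at the first record of each uploaded split file.
for path in ["validation_datasets.jsonl", "test_datasets.jsonl"]:
    with open(path, encoding="utf-8") as f:
        record = json.loads(f.readline())
    print(path, "->", list(record.keys()))
```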