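"""Tests for the WRIME dataset loading script (wrime.py)."""
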
import datasets as ds
import pytest


@pytest.fixture
def dataset_path() -> str:
    return "wrime.py"


@pytest.mark.parametrize(
    "dataset_name, expected_train_num_rows, expected_val_num_rows, expected_test_num_rows",
    (
        ("ver1", 40000, 1200, 2000),
        ("ver2", 30000, 2500, 2500),
    ),
)
def test_load_dataset(
    dataset_path: str,
    dataset_name: str,
    expected_train_num_rows: int,
    expected_val_num_rows: int,
    expected_test_num_rows: int,
) -> None:
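    """Load each dataset configuration and check split sizes, example keys, and label lengths."""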

    dataset = ds.load_dataset(path=dataset_path, name=dataset_name)

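    # Each split should contain the expected number of rows.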
    assert dataset["train"].num_rows == expected_train_num_rows  # type: ignore
    assert dataset["validation"].num_rows == expected_val_num_rows  # type: ignore
    assert dataset["test"].num_rows == expected_test_num_rows  # type: ignore

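    # Annotation fields: labels from the writer, the three readers, and the readers' average.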
    writer_readers = [
        "writer",
        "reader1",
        "reader2",
        "reader3",
        "avg_readers",
    ]
    expected_keys = ["sentence", "user_id", "datetime"] + writer_readers

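    # Every example should contain exactly the expected keys.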
    for split in ["train", "validation", "test"]:
        split_dataset = dataset[split]  # type: ignore

        for data in split_dataset:
            assert len(data.keys()) == len(expected_keys)
            for expected_key in expected_keys:
                assert expected_key in data.keys()

            for k in writer_readers:
                if dataset_name == "ver1":
                    assert len(data[k]) == 8  # 8 emotion intensities
                elif dataset_name == "ver2":
                    assert len(data[k]) == 8 + 1  # 8 emotion intensities + 1 sentiment polarity
                else:
                    raise ValueError(f"Invalid dataset version: {dataset_name}")