SCRIPT
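The script below downloads the SEC's bulk XBRL company-facts archive (companyfacts.zip), extracts one JSON file per filer, and converts each filer's dei, invest, srt, us-gaap, and ifrs-full facts into parquet files, one folder per company.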
from IPython.display import clear_output
import io, json, re, zipfile
import requests
import pandas as pd
from os import makedirs as mk, remove as rm, getcwd as cwd, listdir as ls
from os.path import join as osj

# The SEC requires a User-Agent that identifies the requester;
# fill in your own details here (placeholders, not real values).
name = 'Your Name'
email = 'you@example.com'

url = 'https://www.sec.gov/Archives/edgar/daily-index/xbrl/companyfacts.zip'
headers = {
    'User-Agent': f'{name} {email}',
    'Accept-Encoding': 'gzip, deflate',  # Accept-Encoding takes encodings, not MIME types
    'Host': 'www.sec.gov'
}
response = requests.get(url, headers=headers)
if response.status_code == 200:
    zip_file = zipfile.ZipFile(io.BytesIO(response.content))
    zip_file.extractall()
else:
    print(f"STATUS CODE: {response.status_code}")
def process_json_file(json_path):
    # Convert one companyfacts JSON file into per-concept parquet files
    # inside a folder named after the CIK and entity name.
    cik = json_path.split('.')[0]
    with open(json_path, 'r') as file:
        data = json.load(file)
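    # Expected shape of one companyfacts JSON, abridged (a sketch inferred
    # from the lookups below; field values are illustrative placeholders):
    # {
    #   "cik": ...,
    #   "entityName": "...",
    #   "facts": {
    #     "dei": {"EntityCommonStockSharesOutstanding": {
    #       "label": "...", "description": "...",
    #       "units": {"shares": [{"end": "YYYY-MM-DD", "val": ..., ...}]}}},
    #     "us-gaap": {...}, "srt": {...}, "ifrs-full": {...}, "invest": {...}
    #   }
    # }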
    if not data or 'facts' not in data:
        return
    fact_keys = data['facts'].keys()
    entity_name = data['entityName']
    # Build a filesystem-safe folder name, e.g. 'CIK0000320193_Some_Company_Inc'.
    cik_folder = re.sub(r'[^a-zA-Z0-9]+', '_', f'{cik}_{entity_name}').strip('_')
    mk(cik_folder, exist_ok=True)
    def process_key(key, units_data):
        # Flatten one 'dei' concept's per-unit observations into a DataFrame.
        if key not in data['facts']['dei']:
            return None
        label = data['facts']['dei'][key]['label']
        desc = data['facts']['dei'][key]['description']
        data_list = []
        for unit_key, unit_values in units_data.items():
            for unit in unit_values:
                data_list.append({**unit, 'label': label, 'description': desc, 'unit': unit_key})
        return pd.DataFrame(data_list)
    df_dei_EntityCommonStockSharesOutstanding = None
    df_dei_EntityListingParValuePerShare = None
    df_dei_EntityPublicFloat = None
    if 'dei' in fact_keys:
        dei_items = data['facts']['dei']
        if 'EntityCommonStockSharesOutstanding' in dei_items:
            df_dei_EntityCommonStockSharesOutstanding = process_key('EntityCommonStockSharesOutstanding', dei_items['EntityCommonStockSharesOutstanding']['units'])
        if 'EntityListingParValuePerShare' in dei_items:
            df_dei_EntityListingParValuePerShare = process_key('EntityListingParValuePerShare', dei_items['EntityListingParValuePerShare']['units'])
        if 'EntityPublicFloat' in dei_items:
            df_dei_EntityPublicFloat = process_key('EntityPublicFloat', dei_items['EntityPublicFloat']['units'])
    # Write each extracted 'dei' concept to its own parquet file.
    for df, dei_name in [
        (df_dei_EntityCommonStockSharesOutstanding, 'EntityCommonStockSharesOutstanding'),
        (df_dei_EntityListingParValuePerShare, 'EntityListingParValuePerShare'),
        (df_dei_EntityPublicFloat, 'EntityPublicFloat'),
    ]:
        if df is not None:
            df.to_parquet(osj(cik_folder, dei_name + '.parquet'), index=False)
    def process_fact_section(section_data):
        # Flatten every concept in one taxonomy section (e.g. 'us-gaap') into rows.
        fact_list = []
        for k, v in section_data.items():
            base_item = {'item': k, 'label': v['label'], 'description': v['description']}
            for unit_type, unit_values in v['units'].items():
                for unit in unit_values:
                    fact_list.append({**unit, **base_item, 'unit_type': unit_type})
        return pd.DataFrame(fact_list) if fact_list else None
    # Each recognized taxonomy section gets its own parquet file.
    for section_key, out_name in [
        ('invest', 'Facts_Invest'),
        ('srt', 'Facts_Srt'),
        ('us-gaap', 'Facts_UsGaap'),
        ('ifrs-full', 'Facts_IfrsFull'),
    ]:
        df_section = process_fact_section(data['facts'].get(section_key, {}))
        if df_section is not None:
            df_section.to_parquet(osj(cik_folder, out_name + '.parquet'), index=False)

    # Remove the source JSON once its parquet files are written.
    rm(json_path)
work = cwd()
json_files = [i for i in ls(work) if i.endswith('.json')]
for json_path in json_files:
    process_json_file(json_path)
    clear_output()  # keep notebook output tidy while iterating over many files
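After the script finishes, each company folder holds one parquet file per extracted concept. A quick spot check (a sketch; folder and file names depend on what was extracted) might be:

import pandas as pd
from pathlib import Path

# Pick the first output folder that contains parquet files and preview one.
folder = next(p for p in Path.cwd().iterdir()
              if p.is_dir() and any(p.glob('*.parquet')))
sample = next(folder.glob('*.parquet'))
print(pd.read_parquet(sample).head())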