---
dataset_info:
  features:
  - name: title
    dtype: string
  - name: link
    dtype: string
  - name: article
    dtype: string
  splits:
  - name: train
    num_bytes: 503475
    num_examples: 428
  download_size: 218936
  dataset_size: 503475
configs:
- config_name: default
  data_files:
  - split: train
    path: data/train-*
---
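
This dataset is a scrape of the community blog posts listed at https://huggingface.co/blog/community. It has a single `train` split with three string columns: `title`, `link`, and `article` (the raw HTML body of each post). A minimal loading sketch, assuming the `datasets` library is installed:

```py
from datasets import load_dataset

# Load the dataset from the Hub; it ships a single "train" split
dataset = load_dataset("ariG23498/community-blogs", split="train")

print(dataset)              # 428 rows with title, link, and article columns
print(dataset[0]["title"])  # title of the first scraped post
```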


The dataset was created with the following code:
```py
!pip install -Uq datasets beautifulsoup4

import requests
from bs4 import BeautifulSoup, Comment
import pandas as pd
from datasets import Dataset


def get_content(url):
    # Fetch and parse a page; fail loudly on HTTP errors instead of
    # returning an undefined variable when the status is not 200
    response = requests.get(url)
    response.raise_for_status()
    return BeautifulSoup(response.text, 'html.parser')

url = "https://huggingface.co/blog/community"

soup = get_content(url)
articles = soup.find_all("article")
# Each card exposes its title in an <h4> and its link in an anchor tag;
# the class selector is tied to the current page markup and may break
# if the site styling changes
titles = [article.h4.text for article in articles]
links = [
    f'https://hf.co{article.find("a", class_="block px-3 py-2 cursor-pointer").get("href")}'
    for article in articles
]

def get_article(soup):
    # Find all comments in the document
    comments = soup.find_all(string=lambda text: isinstance(text, Comment))

    # Initialize variables to store the start and end comments
    start_comment = None
    end_comment = None

    # Identify the start and end comments
    for comment in comments:
        comment_text = comment.strip()
        if comment_text == 'HTML_TAG_START':
            start_comment = comment
        elif comment_text == 'HTML_TAG_END':
            end_comment = comment

    # Check if both comments were found
    if start_comment and end_comment:
        # Collect all elements between the start and end comments
        contents = []
        current = start_comment.next_sibling
        while current and current != end_comment:
            contents.append(current)
            current = current.next_sibling

        # Convert the contents to a string
        between_content = ''.join(str(item) for item in contents)

        # Output the extracted content
        return between_content
    else:
        return "Start or end comment not found."

article_soups = [get_content(link) for link in links]
articles = [get_article(article_soup) for article_soup in article_soups]

# Combine the scraped titles, links, and article bodies into a DataFrame
df = pd.DataFrame({
    'title': titles,
    'link': links,
    'article': articles
})

# Create a Hugging Face Dataset object
dataset = Dataset.from_pandas(df)

# Push the dataset to the Hugging Face Hub
dataset.push_to_hub("ariG23498/community-blogs")
```
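
The `article` column stores the raw HTML captured between the `HTML_TAG_START` and `HTML_TAG_END` comments. A hedged sketch for stripping it down to plain text (the `html_to_text` helper and the added `text` column are illustrative, not part of the dataset):

```py
from bs4 import BeautifulSoup
from datasets import load_dataset

def html_to_text(example):
    # Illustrative helper: parse the stored HTML and keep only visible text
    soup = BeautifulSoup(example["article"], "html.parser")
    example["text"] = soup.get_text(separator="\n", strip=True)
    return example

dataset = load_dataset("ariG23498/community-blogs", split="train")
text_dataset = dataset.map(html_to_text)
print(text_dataset[0]["text"][:200])  # first 200 characters of plain text
```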