Datasets: mattismegevand committed
Commit 4d5d3e0 • Parent(s): bd272ca
init commit

Files changed:
- README.md +46 -3
- get_url.py +59 -0
- scrape_pitchfork.py +149 -0
README.md
CHANGED
@@ -1,3 +1,46 @@
# Pitchfork Music Reviews Dataset

This repository contains the code and dataset for scraping music reviews from Pitchfork.

## Dataset Overview

The Pitchfork Music Reviews dataset is a collection of music album reviews from the Pitchfork website. Each entry in the dataset represents a single review and includes the following attributes:

- `artist`: The artist of the album.
- `album`: The name of the album.
- `year_released`: The year the album was released.
- `rating`: The rating given to the album by the reviewer.
- `small_text`: A short snippet from the review.
- `review`: The full text of the review.
- `reviewer`: The name of the reviewer.
- `genre`: The genre(s) of the album.
- `label`: The record label that released the album.
- `reviewed`: The date the review was published.
- `album_art_url`: The URL of the album art.
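
For a quick look at the data, here is a minimal sketch that loads the scraped reviews into pandas. It assumes `scrape_pitchfork.py` (included below) has already been run and produced `reviews.db` in the current directory.

```python
import sqlite3
import pandas as pd

# Load every scraped review from the SQLite database the scraper writes.
conn = sqlite3.connect('reviews.db')
df = pd.read_sql_query('SELECT * FROM reviews', conn)
conn.close()

print(df.columns.tolist())                       # the attributes listed above
print(df[['artist', 'album', 'rating']].head())  # spot-check a few rows
```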

## Usage

This dataset is publicly available for research. The data is provided "as is", and you assume full responsibility for any legal or ethical issues that may arise from the use of the data.

## Scraping Process

The dataset was generated by scraping the Pitchfork website. The Python scripts below use the `requests` and `BeautifulSoup` libraries to send HTTP requests to the website and parse the resulting HTML. A typical end-to-end run is sketched just below.
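
The two scripts run in sequence: `get_url.py` collects review URLs into `urls.csv`, and `scrape_pitchfork.py` scrapes those URLs into `reviews.db`. The page and index ranges here are illustrative:

```python
import subprocess

# Collect review URLs from listing pages 1-5 into urls.csv.
subprocess.run(['python', 'get_url.py', '1', '5'], check=True)
# Scrape reviews 0-50 from urls.csv into reviews.db.
subprocess.run(['python', 'scrape_pitchfork.py', '0', '50'], check=True)
```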

The scraper saves the data in an SQLite database and can also export the data to a CSV file. Duplicate entries are avoided by checking for an existing entry with the same artist and album name before inserting a new one into the database; a condensed view of that check follows.
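
The sketch below isolates the duplicate check that `insert_into_db` in `scrape_pitchfork.py` inlines; the helper name is illustrative, not part of the scripts.

```python
import sqlite3

def already_stored(cursor, artist, album):
    ''' True if a review for this artist/album pair is already in the table. '''
    cursor.execute('SELECT 1 FROM reviews WHERE artist=? AND album=?', (artist, album))
    return cursor.fetchone() is not None

conn = sqlite3.connect('reviews.db')
cur = conn.cursor()
if not already_stored(cur, 'Radiohead', 'OK Computer'):
    ...  # safe to INSERT the new review
conn.close()
```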

## Potential Applications

This dataset can be used for a variety of research purposes, such as:

- Music information retrieval
- Text mining and sentiment analysis
- Music recommendation systems
- Music trend analysis (see the sketch after this list)
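
As one illustration of trend analysis, this sketch computes the average rating per album release year; it assumes `reviews.db` has been populated by the scraper.

```python
import sqlite3
import pandas as pd

# Average Pitchfork rating per album release year.
conn = sqlite3.connect('reviews.db')
trend = pd.read_sql_query('SELECT year_released, rating FROM reviews', conn)
conn.close()

print(trend.groupby('year_released')['rating'].mean().round(2))
```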

## Acknowledgments

The dataset is sourced from [Pitchfork](https://pitchfork.com/), a website that publishes daily reviews, features, and news stories about music.

## License

Please ensure you comply with Pitchfork's terms of service before using or distributing this data.
get_url.py
ADDED
@@ -0,0 +1,59 @@
import requests
import pandas as pd

from concurrent.futures import ThreadPoolExecutor
from bs4 import BeautifulSoup
from os.path import isfile
from sys import argv


def fetch(args):
    ''' Fetch a single url '''
    url, session = args
    response = session.get(url)
    page_number = url.split('=')[-1]
    soup = BeautifulSoup(response.text, 'lxml')
    error = soup.find('div', {'class': 'error-page'})
    if error:
        print(f'Error page: {url} does not exist')
        return []
    print('.', end='', flush=True)
    return [(page_number, f"https://pitchfork.com{e['href']}") for e in soup.find_all('a', {'class': 'review__link'})]

def get_urls(start, end):
    ''' Return a list of urls from the Pitchfork reviews page '''
    urls = [f'https://pitchfork.com/reviews/albums/?page={i}' for i in range(start, end+1)]
    reviews = []
    session = requests.Session()
    with ThreadPoolExecutor(max_workers=5) as executor:
        for result in executor.map(fetch, ((url, session) for url in urls)):
            reviews.extend(result)
    print()
    return reviews

def insert_into_df(data):
    ''' Insert data into a pandas dataframe '''
    df = pd.DataFrame(data, columns=['page', 'url'])
    df.drop_duplicates(subset='url', keep='first', inplace=True)
    return df

def main():
    start, end = int(argv[1]), int(argv[2])
    print(f'Fetching urls from pages {start} to {end}')
    data = get_urls(start, end)
    print(f'Fetched {len(data)} urls')
    df = insert_into_df(data)

    print('Writing to urls.csv')
    if isfile('urls.csv'):
        # Merge with any urls collected on previous runs.
        df_existing = pd.read_csv('urls.csv')
        df_combined = pd.concat([df_existing, df])
    else:
        df_combined = df

    df_combined.drop_duplicates(subset='url', keep='first', inplace=True)
    df_combined.to_csv('urls.csv', index=False)
    print('Done')

if __name__ == '__main__':
    main()
scrape_pitchfork.py
ADDED
@@ -0,0 +1,149 @@
import re
import requests
import sqlite3
import pandas as pd

from concurrent.futures import ThreadPoolExecutor
from bs4 import BeautifulSoup, SoupStrainer
from sys import argv

regexes = {
    'artist': re.compile(r'SplitScreenContentHeaderArtist-\w*'),
    'album': re.compile(r'SplitScreenContentHeaderHed-\w*'),
    'year_released': re.compile(r'SplitScreenContentHeaderReleaseYear-\w*'),
    'rating': re.compile(r'Rating-\w*'),
    'small_text': re.compile(r'SplitScreenContentHeaderDekDown-\w*'),
    'review': re.compile(r'body__inner-container'),
    'reviewer': re.compile(r'BylineName'),
    'genre': re.compile(r'SplitScreenContentHeaderInfoSlice-\w*'),
    'label': re.compile(r'SplitScreenContentHeaderInfoSlice-\w*'),
    'reviewed': re.compile(r'SplitScreenContentHeaderInfoSlice-\w*'),
    'album_art_url': re.compile(r'SplitScreenContentHeaderImage-\w*'),
}

def fetch(args):
    ''' Fetch a single url and return a dictionary of data from a Pitchfork review '''
    url, session = args
    response = session.get(url)
    if response.status_code == 200:
        # Parse only the review article to keep the soup small.
        soup_strainer = SoupStrainer('article', {'data-testid': 'ReviewPageArticle'})
        soup = BeautifulSoup(response.content, 'lxml', parse_only=soup_strainer)
        if soup.find('article', {'data-testid': 'ReviewPageArticle'}) is None:
            with open('not_done.txt', 'a') as f:
                f.write(url + '\n')
            return None
        print('.', end='', flush=True)
        result = data_from_soup(soup)
        if result is None:
            with open('not_done.txt', 'a') as f:
                f.write(url + '\n')
        return result
    else:
        with open('errors.txt', 'a') as f:
            f.write(url + '\n')
        return None

def get_reviews(urls):
    ''' Return a list of review data dictionaries from the provided urls '''
    reviews = []
    session = requests.Session()
    with ThreadPoolExecutor() as executor:
        for result in executor.map(fetch, ((url, session) for url in urls)):
            if result:  # skip pages that failed or lacked a release year
                reviews.append(result)
    print()
    return reviews

def data_from_soup(soup):
    ''' Return a dictionary of data from a Pitchfork review '''
    artist = soup.find('div', {'class': regexes['artist']}).text.strip()
    album = soup.find('h1', {'class': regexes['album']}).text.strip()
    year_released = soup.find('time', {'class': regexes['year_released']})
    if year_released:
        year_released = int(year_released.text.strip())
    else:
        return None
    rating = float(soup.find('p', {'class': regexes['rating']}).text.strip())
    small_text = soup.find('div', {'class': regexes['small_text']})
    small_text = small_text.text.strip() if small_text else 'N/A'
    review = "".join(e.text for e in soup.find('div', {'class': regexes['review']}).descendants if e.name == 'p')
    reviewer = soup.find('span', {'data-testid': regexes['reviewer']})
    reviewer = reviewer.text.strip()[3:] if reviewer else 'N/A'  # drop the leading 'By '
    # Info slices look like 'Genre: Rock'; split on the first colon only.
    misc = [e.text for e in soup.find('div', {'class': regexes['genre']}).descendants if e.name == 'li']
    misc = {'genre': 'N/A', 'label': 'N/A', 'reviewed': 'N/A'} | {e.split(':', 1)[0].strip().lower(): e.split(':', 1)[1].strip() for e in misc}
    album_art_url = soup.find('source', {'media': '(max-width: 767px)'})
    album_art_url = album_art_url['srcset'].split(',')[-2].strip() if album_art_url else 'N/A'
    return {
        'artist': artist, 'album': album, 'year_released': year_released,
        'rating': rating, 'small_text': small_text, 'review': review,
        'reviewer': reviewer, 'genre': misc['genre'], 'label': misc['label'],
        'reviewed': misc['reviewed'], 'album_art_url': album_art_url,
    }

def insert_into_db(data, cursor):
    ''' Insert data into a sqlite3 database '''
    for review in data:
        artist = review.get('artist')
        album = review.get('album')
        year_released = review.get('year_released')
        rating = review.get('rating')
        small_text = review.get('small_text')
        review_text = review.get('review')  # renamed to avoid shadowing the loop variable 'review'
        reviewer = review.get('reviewer')
        genre = review.get('genre')
        label = review.get('label')
        reviewed = review.get('reviewed')
        album_art_url = review.get('album_art_url')

        # Skip duplicates: only insert if no row exists for this artist/album pair.
        cursor.execute('SELECT * FROM reviews WHERE artist=? AND album=?', (artist, album))
        result = cursor.fetchone()
        if result is None:
            cursor.execute('''
                INSERT INTO reviews (
                    artist, album, year_released, rating, small_text,
                    review, reviewer, genre, label, reviewed, album_art_url
                ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
            ''', (
                artist, album, year_released, rating, small_text, review_text,
                reviewer, genre, label, reviewed, album_art_url
            ))

def main(start, end):
    conn = sqlite3.connect('reviews.db')
    c = conn.cursor()

    # Create table with all necessary fields
    c.execute('''
        CREATE TABLE IF NOT EXISTS reviews (
            artist TEXT,
            album TEXT,
            year_released INTEGER,
            rating REAL,
            small_text TEXT,
            review TEXT,
            reviewer TEXT,
            genre TEXT,
            label TEXT,
            reviewed TEXT,
            album_art_url TEXT
        )
    ''')

    # Read URLs from the CSV file produced by get_url.py.
    df = pd.read_csv('urls.csv')
    urls = df['url'].tolist()
    start, end = max(0, start), min(len(urls), end)
    urls = urls[start:end]

    print(f'Fetching {len(urls)} reviews')
    data = get_reviews(urls)
    print('Fetching complete. Inserting into database')
    insert_into_db(data, c)
    print('Done')

    conn.commit()
    conn.close()

if __name__ == '__main__':
    main(int(argv[1]), int(argv[2]))