KatriTaratuta committed
Commit · b404872
Parent(s): 7ca9a3b
getting pictures
Former-commit-id: 40ff9addb9a2bad9c06d23d4c8eb49b53e61906e
- .gitignore +1 -0
- picturedownloader/exampleThumb.py +23 -0
- picturedownloader/main.py +42 -0
- requirements.txt +6 -0
.gitignore
ADDED
@@ -0,0 +1 @@
+.idea/
picturedownloader/exampleThumb.py
ADDED
@@ -0,0 +1,23 @@
+import requests
+from bs4 import BeautifulSoup
+import os
+
+def save_images(save_dir, keywords):
+    os.makedirs(save_dir, exist_ok=True)
+    for keyword in keywords:
+        url = f"https://www.google.com/search?q={keyword}&tbm=isch"
+        res = requests.get(url)
+        soup = BeautifulSoup(res.text, "html.parser")
+        img_tags = soup.find_all("img")
+        for i, img in enumerate(img_tags):
+            try:
+                img_url = img["src"]
+                res = requests.get(img_url)
+                with open(f"{save_dir}/{keyword}{str(i).zfill(5)}.jpg", "wb") as f:
+                    f.write(res.content)
+            except:
+                continue
+
+keywords = ["cat"]
+save_dir = "train"
+save_images(save_dir, keywords)
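As committed, exampleThumb.py fetches the Google Images results page without a browser User-Agent, and the static HTML served to a plain requests client mostly carries tiny base64 data: thumbnails plus UI icons, so many of the saved .jpg files will be empty or invalid. A more defensive variant of the same approach (a sketch, not part of the commit; the header and timeout values are illustrative assumptions):

import os
import requests
from bs4 import BeautifulSoup

# Illustrative browser-like header; any common User-Agent string would do.
HEADERS = {"User-Agent": "Mozilla/5.0"}

def save_images(save_dir, keywords):
    os.makedirs(save_dir, exist_ok=True)
    for keyword in keywords:
        url = f"https://www.google.com/search?q={keyword}&tbm=isch"
        res = requests.get(url, headers=HEADERS, timeout=10)
        soup = BeautifulSoup(res.text, "html.parser")
        for i, img in enumerate(soup.find_all("img")):
            src = img.get("src", "")
            # Skip inline base64 thumbnails and relative UI assets.
            if not src.startswith("http"):
                continue
            try:
                data = requests.get(src, timeout=10).content
            except requests.RequestException:
                continue
            with open(os.path.join(save_dir, f"{keyword}{i:05d}.jpg"), "wb") as f:
                f.write(data)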
picturedownloader/main.py
ADDED
@@ -0,0 +1,42 @@
+from icrawler.builtin import BingImageCrawler
+import os
+
+imageFolder = 'images'
+
+
+def download_images(imageFolder, query, limit):
+    imageFolder = os.path.join(imageFolder, query)
+    os.makedirs(name=imageFolder,
+                exist_ok=True)
+    bing_crawler = BingImageCrawler(parser_threads=1,
+                                    downloader_threads=1,
+                                    storage={'root_dir': imageFolder})
+    # Parameters can be found in the icrawler documentation
+    # https://icrawler.readthedocs.io/en/latest/builtin.html
+    filters = dict(
+        type="photo",
+        size='large',
+        date="pastyear")
+    bing_crawler.crawl(keyword=query,
+                       max_num=limit,
+                       filters=filters)
+    return os.listdir(imageFolder)
+
+
+# Define the list of sights and the number of images to download
+sights = [
+    "Кинотеатр Художественный на Арбате",
+    "Театр им. Вахтангова",
+    "Центральный Дом Актера на Арбате",
+    "Мемориальная квартира А.С. Пушкина на Арбате",
+    "Памятник Пушкину и Гончаровой на Арбате",
+    "Памятник Окуджаве на Арбате",
+    "Хард-рок кафе на Арбате",
+    "Дома-книжки на Новом Арбате"
+]
+num_images = 200
+
+for sight in sights:
+    print(f"Downloading images for sight '{sight}':")
+    image_paths = download_images(imageFolder, sight, num_images)
+    print(f"Downloaded {len(image_paths)} images\n")
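requirements.txt (below) pins Pillow, but neither script imports it; a natural use here would be verifying the crawled files, since image crawlers routinely save truncated or non-image responses. A minimal sketch of such a cleanup pass (an assumed helper, not in the commit):

import os
from PIL import Image

def drop_corrupt_images(folder):
    # Remove files Pillow cannot parse as images; return how many were removed.
    removed = 0
    for name in os.listdir(folder):
        path = os.path.join(folder, name)
        try:
            with Image.open(path) as im:
                im.verify()  # integrity check without fully decoding the pixels
        except Exception:
            os.remove(path)
            removed += 1
    return removed

# e.g., over the per-query folders main.py creates:
# for sight in sights:
#     drop_corrupt_images(os.path.join(imageFolder, sight))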
requirements.txt
ADDED
@@ -0,0 +1,6 @@
+icrawler==0.6.7
+six==1.16.0
+Pillow
+bs4
+lxml
+requests
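The environment can be recreated with pip install -r requirements.txt; only icrawler (0.6.7) and six (1.16.0) are pinned, so the remaining packages resolve to their latest releases.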