-
Notifications
You must be signed in to change notification settings - Fork 3
Expand file tree
/
Copy pathscraper.py
More file actions
95 lines (77 loc) · 3.34 KB
/
scraper.py
File metadata and controls
95 lines (77 loc) · 3.34 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
# External Dependencies
from pymongo import MongoClient
from bs4 import BeautifulSoup
from selenium import webdriver
# Python Dependencies
from time import sleep
from random import randint
import config
import re
# MongoDB Configuration Setup
# Credentials live in config.py; they are interpolated into the host URI template.
dbuser = config.DATABASE_CONFIG['dbuser']
dbuserpassword = config.DATABASE_CONFIG['dbuserpassword']
client = MongoClient(config.DATABASE_CONFIG['host'].format(dbuser, dbuserpassword))
# Connect to the 'artistlogo' Database and the 'logo' collection
db = client.artistlogo
logos = db.logo
# Setup Selenium Chrome Driver (headless, Selenium 3-era keyword arguments;
# expects chromedriver.exe next to this script)
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("--headless")
driver = webdriver.Chrome(executable_path=r'chromedriver.exe', chrome_options=chrome_options)
# Set an initial artist seed, I've used my favorite artist Giuseppe Ottaviani
# (a Spotify artist-page path, appended to https://open.spotify.com in scrapArtist)
seed = "/artist/5B9q1NRokzWYB7nSgnlHyv"
# main will handling the loop and crawling logic
# main handles the crawl loop: a breadth-first walk over related-artist links.
def main(startingArtist):
    """Breadth-first crawl starting from *startingArtist*.

    Maintains a FIFO queue of artist links to visit and a set of links
    already seen, so no artist page is scraped — or even enqueued — twice.
    """
    artistLinks = [startingArtist]
    # A set gives O(1) membership tests; the original list made each check
    # O(n) and, because links were only marked after being scraped, the
    # same pending link could be appended to the queue multiple times.
    alreadySearched = {startingArtist}
    while artistLinks:
        currentArtist = artistLinks.pop(0)
        for link in scrapArtist(currentArtist):
            # Mark links at enqueue time so each artist is queued at most once.
            if link not in alreadySearched:
                alreadySearched.add(link)
                artistLinks.append(link)
        # Randomly sleep 1-2 seconds to not abuse the server.
        sleep(randint(1, 2))
# ScrapArtist will handle logic to parse html and collect data for a particular artist
# scrapArtist handles the logic to parse HTML and collect data for a particular artist.
def scrapArtist(artistLink):
    """Scrape the related-artists page for one artist.

    Loads https://open.spotify.com<artistLink>/related in the headless
    browser, scrolls to trigger lazy-loaded content, stores each unseen
    artist's name/logo/link in the MongoDB `logos` collection, and returns
    the list of related-artist links for future crawl iterations.
    """
    driver.get("https://open.spotify.com" + artistLink + "/related")
    # Scroll the page in small steps so lazily-loaded artist tiles render.
    SCROLL_PAUSE_TIME = 0.5
    SCROLL_LENGTH = 200
    page_height = int(driver.execute_script("return document.body.scrollHeight"))
    scrollPosition = 0
    while scrollPosition < page_height:
        scrollPosition = scrollPosition + SCROLL_LENGTH
        driver.execute_script("window.scrollTo(0, " + str(scrollPosition) + ");")
        sleep(SCROLL_PAUSE_TIME)
    # Store the rendered related-artists section of the page
    response = driver.find_element_by_class_name('related-artists').get_attribute('innerHTML')
    # Initialize the returning array to store artist links on the current page
    nextLinksToCrawl = []
    # Parse the page source to extract information
    html_soup = BeautifulSoup(response, 'html.parser')
    artist_container = html_soup.find_all('div', attrs={'class': 'media-object mo-artist'})
    for el in artist_container:
        rawStyle = el.find('div', {'class': 'cover-art-image cover-art-image-loaded'})['style']
        # The logo URL is embedded in the inline style as url("...").
        artistLogo = re.findall('"([^"]*)"', rawStyle)[0]
        # Look the anchor up once (the original performed the same find twice).
        info = el.find('a', {'class': 'mo-info-name'})
        artistName = info['title']
        artistLink = info['href']
        # Only insert artists whose logo is not already in the database.
        if logos.find_one({'logo': artistLogo}) is None:
            entry = {
                'artist': artistName,
                'logo': artistLogo,
                'link': artistLink
            }
            logos.insert_one(entry)
            print("Scraped {0}".format(artistName))
        else:
            print("{0} already in the Database".format(artistName))
        # Append the artist link to the array for future iterations
        nextLinksToCrawl.append(artistLink)
    return nextLinksToCrawl
# Run the main loop, guaranteeing the headless browser is shut down even
# if the crawl raises — the original leaked the Chrome process on error.
if __name__ == "__main__":
    try:
        main(seed)
    finally:
        driver.quit()