rafa-br34 committed
Commit 106d24b · verified · 1 Parent(s): 7cadcb3

Upload 2 files

Files changed (2)
  1. Scraper.py +561 -0
  2. state.pickle +3 -0
Scraper.py ADDED
@@ -0,0 +1,561 @@
+ import latest_user_agents
+ import threading
+ import requests
+ import pathlib
+ import tarfile
+ import urllib.parse as urlparse
+ import random
+ import pickle
+ import json
+ import time
+ import bs4
+ import PIL.Image as Image
+ import io
+ import re
+
+
+ c_imaging_threads = 30  # image download workers
+ c_paging_threads = 12   # workers that page through persisted queries
+ c_state_file = "state.pickle"
+ c_output_file = "./data.tar"
+
+
+ class Query:
+     name = None
+     mode = None
+     query_hash = None
+
+     def __init__(self, name, mode, query_hash):
+         self.name = name
+         self.mode = mode
+         self.query_hash = query_hash
+
+     def __repr__(self):
+         return f"Query<{self.name}, {self.mode}, {self.query_hash}>"
+
+     def _build_extension(self):
+         return json.dumps({
+             "persistedQuery": {
+                 "version": 1,
+                 "sha256Hash": self.query_hash
+             }
+         })
+
+     def build(self, url, variables):
+         # URL-encode the JSON blobs so quotes and spaces survive in the query string.
+         return f"{urlparse.urljoin(url, self.mode)}?operationName={self.name}&variables={urlparse.quote(json.dumps(variables))}&extensions={urlparse.quote(self._build_extension())}"
+
+
+ class Client:
+     client_version = None
+     client_token = None
+     client_auth = None
+     client_id = None
+
+     def __init__(self):
+         # Keep the session and query table per instance; a class-level session
+         # would be shared and mutated concurrently by every worker thread.
+         self.session = requests.session()
+         self.queries = {}
+
+         self._client_token_expiration = 0
+         self._client_token_renewal = 0
+         self._client_auth_expiration = 0
+
+     def _setup_session(self):
+         self.session.headers = {
+             "User-Agent": latest_user_agents.get_random_user_agent(),
+             "Accept-Encoding": "gzip, deflate, br, zstd",
+             "Accept-Language": "en-US,en;q=0.5",
+             "Accept": "*/*",
+             "Connection": "keep-alive",
+             "Sec-Fetch-Dest": "empty",
+             "Sec-Fetch-Mode": "no-cors",
+             "Sec-Fetch-Site": "same-origin",
+             "Referer": "https://open.spotify.com/",
+             "Origin": "https://open.spotify.com",
+             "Priority": "u=4",
+             "Sec-GPC": "1",
+             "DNT": "1"
+         }
+
+     def _acquire_client(self):
+         def get_key(source, name):
+             # Match e.g. clientVersion:"1.2.33.xxx" inside the web-player bundle.
+             result = re.search(name + r":\s*['\"](.+?)['\"]", source, re.UNICODE)
+
+             return result.group(1) if result else None
+
+         def get_queries(source):
+             # Persisted queries appear as ("name", "query"/"mutation", "sha256 hash") triples.
+             queries = {}
+             for section in re.finditer(r"\(\s*?['\"](\w+?)['\"],\s*?['\"](\w+?)['\"],\s*?['\"]([a-z\d]+?)['\"]", source, re.UNICODE):
+                 query = Query(*section.groups())
+                 queries[query.name] = query
+
+             return queries
+
+         document = bs4.BeautifulSoup(self.session.get("https://open.spotify.com/").text, "html.parser")
+
+         client_version = None
+         client_id = None
+
+         for script in document.find_all("script"):
+             source_link = script.get("src")
+             if source_link and "web-player" in source_link:
+                 source = self.session.get(source_link).text
+
+                 client_version = get_key(source, "clientVersion")
+                 client_id = get_key(source, "clientID")
+
+                 self.queries.update(get_queries(source))
+
+                 if client_version and client_id:
+                     break
+
+         assert client_version and client_id, "Couldn't find keys"
+         assert len(client_id) + len(client_version) < 1024, "Keys are too big (regex failure?)"
+
+         self.session.headers.update({
+             "Spotify-App-Version": client_version,
+             "App-Platform": "WebPlayer"
+         })
+
+         result = self.session.get("https://open.spotify.com/get_access_token?reason=transport&productType=web-player", headers={ "Accept": "application/json" })
+         assert result.status_code == 200, "Status code for get_access_token isn't 200"
+
+         parsed = result.json()
+         assert parsed["clientId"] == client_id, "client_id mismatch"
+
+         self.client_auth = parsed["accessToken"]
+         self._client_auth_expiration = float(parsed["accessTokenExpirationTimestampMs"]) / 1000
+
+         self.client_version = client_version
+         self.client_id = client_id
+
+     def _get_client_token(self):
+         # CORS preflight first, mirroring what the web player itself sends.
+         result = self.session.options("https://clienttoken.spotify.com/v1/clienttoken", headers={
+             "Access-Control-Request-Method": "POST",
+             "Access-Control-Request-Headers": "content-type"
+         })
+         assert result.status_code == 200, "Failed to configure v1/clienttoken for request"
+
+         result = self.session.post(
+             "https://clienttoken.spotify.com/v1/clienttoken",
+             headers={
+                 "Accept": "application/json",
+                 "TE": "trailers"
+             },
+             json={
+                 "client_data": {
+                     "client_version": self.client_version,
+                     "client_id": self.client_id,
+                     "js_sdk_data": {
+                         "device_brand": "unknown",
+                         "device_model": "unknown",
+                         "os": "windows",
+                         "os_version": "NT 10.0",
+                         "device_id": random.randbytes(16).hex(),
+                         "device_type": "computer"
+                     }
+                 }
+             }
+         )
+         assert result.status_code == 200, "Failed to acquire authorization from v1/clienttoken"
+         parsed = result.json()
+
+         response_type = parsed.get("response_type")
+         assert response_type == "RESPONSE_GRANTED_TOKEN_RESPONSE", f"Expected RESPONSE_GRANTED_TOKEN_RESPONSE got {response_type}"
+
+         granted_token = parsed["granted_token"]
+
+         self._client_token_expiration = time.time() + int(granted_token["expires_after_seconds"])
+         self._client_token_renewal = time.time() + int(granted_token["refresh_after_seconds"])
+         self.client_token = granted_token["token"]
+
+     def is_authenticated(self, slack=10):
+         return self._client_token_renewal > time.time() + slack and self._client_auth_expiration > time.time() + slack
+
+     def authenticate(self):
+         if not self.is_authenticated():
+             self._setup_session()
+             self._acquire_client()
+             self._get_client_token()
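+     # Auth lifecycle: authenticate() chains _setup_session() -> _acquire_client()
+     # (which scrapes clientVersion/clientID plus the persisted-query table out of
+     # the web-player bundle and fetches a bearer token) -> _get_client_token().
+     # is_authenticated() treats tokens as stale `slack` seconds early, so renewal
+     # happens before a request can fail mid-flight.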
+
+     def request(self, name, variables):
+         assert name in self.queries, f"Operation {name} not found"
+
+         self.authenticate()
+
+         return self.session.get(
+             self.queries[name].build("https://api-partner.spotify.com/pathfinder/v1/", variables),
+             headers={
+                 "authorization": f"Bearer {self.client_auth}",
+                 "client-token": self.client_token,
+                 "Accept": "application/json"
+             }
+         )
+
+ class ImageWriter:
+     def __init__(self, output_file="data.tar"):
+         self.current_file_index = 0
+         self.current_file = tarfile.open(output_file, mode="a")
+         self._lock = threading.Lock()
+
+     def write_file(self, name, value):
+         # Serialize tar appends; tarfile handles are not thread-safe.
+         with self._lock:
+             value.seek(0)
+
+             info = tarfile.TarInfo(name.format(index=self.current_file_index))
+             info.size = value.getbuffer().nbytes
+
+             self.current_file.addfile(info, value)
+             self.current_file_index += 1
+
+     def close(self):
+         with self._lock:
+             self.current_file.close()
+
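+ # ImageWriter usage (illustrative name): write_file("abc123-{index}.jpeg", buffer)
+ # expands {index} to the running counter, so concurrent workers never collide on
+ # archive member names.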
+
+ def can_iterate(obj):
+     # Only lists and dicts are worth descending into; recursive_scraping ignores
+     # everything else anyway (and this keeps strings from matching).
+     return isinstance(obj, (list, dict))
+
+ def traverse(obj, path):
+     # Walk nested dicts by a dotted path, e.g. "data.albumUnion.tracks.totalCount".
+     current = obj
+     for name in path.split('.'):
+         current = current.get(name)
+         if not current:
+             break
+
+     return current
+
+ def recursive_scraping(parent, uri_list, url_list):
+     if isinstance(parent, list):
+         for child in parent:
+             if can_iterate(child):
+                 recursive_scraping(child, uri_list, url_list)
+     elif isinstance(parent, dict):
+         # Image nodes are leaves; keep only sufficiently large renditions.
+         if "width" in parent and "height" in parent:
+             if int(parent["width"] or 0) > 500 and int(parent["height"] or 0) > 500:
+                 url_list.add(parent["url"])
+             return
+
+         for key, child in parent.items():
+             if key == "uri" or key == "_uri":
+                 uri_list.add(child)
+             elif key == "url":
+                 url_list.add(child)
+             elif can_iterate(child):
+                 recursive_scraping(child, uri_list, url_list)
+
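+ # recursive_scraping() is the fan-out step: "uri"/"_uri" values feed the paging
+ # queue and "url" values feed the image queue, so every response grows both sets.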
+ def parse_uri(uri, offset=0, limit=50):
+     if not uri or uri.count(':') != 2:
+         return None, None, None
+
+     [_spotify, section, _index] = uri.split(':')
+
+     total_count = None
+     variables = None
+     query = None
+
+     match section:
+         case "episode" | "chapter":
+             query = "getEpisodeOrChapter"
+             variables = { "uri": uri }
+         case "show":
+             query = "queryPodcastEpisodes"
+             variables = { "uri": uri, "offset": offset, "limit": limit }
+             total_count = "data.podcastUnionV2.episodesV2.totalCount"
+         case "album":
+             query = "getAlbum"
+             variables = { "uri": uri, "offset": offset, "limit": limit, "locale": "intl-pt" }
+             total_count = "data.albumUnion.tracks.totalCount"
+         case "playlist":
+             query = "fetchPlaylist"
+             variables = { "uri": uri, "offset": offset, "limit": limit }
+             total_count = "data.playlistV2.content.totalCount"
+         case "artist":
+             query = "queryArtistOverview"
+             variables = { "uri": uri, "includePrerelease": True, "locale": "intl-pt" }
+
+         case "track" | "section" | "concert" | "page" | "user" | "merch" | "prerelease":
+             # Leaf types: nothing further to page through.
+             query = None
+             variables = None
+
+         case _:
+             raise RuntimeError(f"Unknown section type \"{section}\" found in {uri}")
+
+     return query, variables, total_count
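+ # Example (placeholder id): parse_uri("spotify:album:<id>", 0, 50) returns
+ # ("getAlbum", {"uri": ..., "offset": 0, "limit": 50, "locale": "intl-pt"},
+ # "data.albumUnion.tracks.totalCount").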
+
+
+ def evaluate_uri(client, uri):
+     current = 0
+     limit = 50
+
+     uri_list = set()
+     url_list = set()
+
+     error_count = 0
+     while error_count < 10:
+         [query, variables, total_count_path] = parse_uri(uri, current, limit)
+
+         if not query:
+             break
+
+         result = None
+         parsed = None
+
+         try:
+             result = client.request(query, variables)
+             parsed = result.json()
+         except (requests.exceptions.ConnectionError, requests.exceptions.JSONDecodeError) as error:
+             if result:
+                 print(f"Failed to query uri {uri} with code {result.status_code}, error {error}")
+             else:
+                 print(f"Failed to query uri {uri} (no result), error {error}")
+
+             error_count += 1
+             continue
+
+         recursive_scraping(parsed, uri_list, url_list)
+
+         total = 0
+         if total_count_path:
+             total = traverse(parsed, total_count_path)
+
+         # Stop when the endpoint isn't paginated or the last page was fetched.
+         if not total or current + limit > total:
+             break
+
+         current += limit
+
+     return uri_list, url_list
+
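+ # evaluate_uri() walks a paginated container `limit` items at a time until the
+ # reported totalCount is exhausted, tolerating up to ten failed requests per URI.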
+
+ class _Queue:
+     def __init__(self):
+         self.pages_done = set()
+         self.pages = set()
+
+         self.sources_done = set()
+         self.sources = set()
+
+     def copy(self):
+         # Snapshot for background state saves.
+         val = type(self)()
+
+         val.pages = self.pages.copy()
+         val.pages_done = self.pages_done.copy()
+         val.sources = self.sources.copy()
+         val.sources_done = self.sources_done.copy()
+
+         return val
+
+ class _State:
+     update_event = threading.Condition()
+     running = True
+
+ g_image_writer = ImageWriter(c_output_file)
+ g_queue = _Queue()
+ g_state = _State()
+
+
+
+ def atomic_wait_item(lock, iterable):
+     # Pop one item, blocking on the shared Condition until work arrives or
+     # shutdown is signalled (in which case None is returned).
+     target = None
+
+     while not target and g_state.running:
+         if len(iterable):
+             try:
+                 target = iterable.pop()
+             except KeyError:
+                 # Another worker drained the set between the check and the pop.
+                 continue
+         else:
+             with lock:
+                 lock.wait()
+     return target
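+ # Producers must notify_all() under the same Condition after adding work, as the
+ # paging workers and main() both do; otherwise consumers can sleep indefinitely.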
+
+ def paging_worker_logic(thread_id):
+     global g_queue, g_state
+
+     client = Client()
+
+     update_event = g_state.update_event
+
+     pages_done = g_queue.pages_done
+     pages = g_queue.pages
+     sources_done = g_queue.sources_done
+     sources = g_queue.sources
+
+     print(f"W[{thread_id}] Started")
+     try:
+         while g_state.running:
+             #print(f"[{thread_id}] Waiting...")
+             target = atomic_wait_item(update_event, pages)
+             if not target:
+                 continue
+
+             pages_done.add(target)
+
+             if not client.is_authenticated():
+                 print(f"W[{thread_id}] Token renewal...")
+                 try:
+                     client.authenticate()
+                     print(f"W[{thread_id}] Acquired new tokens")
+                 except RuntimeError as error:
+                     print(error)
+
+             #print(f"[{thread_id}] Eval...")
+             [uri_list, url_list] = evaluate_uri(client, target)
+
+             # Queue only work that hasn't been done yet, then wake the other workers.
+             pages.update(uri_list.difference(pages_done))
+             sources.update(url_list.difference(sources_done))
+
+             #print(f"[{thread_id}] Fire...")
+             with update_event:
+                 update_event.notify_all()
+     finally:
+         print(f"W[{thread_id}] Halted")
+
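+ # The imaging workers below consume the URL queue that the paging workers fill:
+ # download, sanity-check with PIL, filter out anything 500px or smaller, then
+ # append to the shared tar archive.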
+ def imaging_worker_logic(thread_id):
+     global g_queue, g_state
+
+     update_event = g_state.update_event
+
+     sources_done = g_queue.sources_done
+     sources = g_queue.sources
+
+     session = requests.session()
+     session.headers["User-Agent"] = latest_user_agents.get_random_user_agent()
+
+     print(f"I[{thread_id}] Started")
+     try:
+         while g_state.running:
+             target = atomic_wait_item(update_event, sources)
+             if not target:
+                 continue
+
+             sources_done.add(target)
+
+             match = re.fullmatch(r"https?:\/\/(.*\.scdn\.co|image-cdn-ak\.spotifycdn\.com)\/image\/([a-z\d]+)", target)
+             if not match:
+                 continue
+
+             (_domain, name) = match.groups()
+
+             # Up to three download attempts per image.
+             data = None
+             for _ in range(3):
+                 try:
+                     data = io.BytesIO(session.get(target).content)
+                     break
+                 except (requests.exceptions.ConnectionError, requests.exceptions.ChunkedEncodingError):
+                     pass
+             if not data:
+                 print(f"I[{thread_id}] Failed to download image")
+                 continue
+
+             image = None
+             try:
+                 image = Image.open(data)
+             except Image.UnidentifiedImageError:
+                 pass
+
+             if not image:
+                 print(f"I[{thread_id}] Failed to identify image")
+                 continue
+
+             # Skip small renditions; only keep images larger than 500x500.
+             (x, y) = image.size
+             if x < 500 or y < 500:
+                 continue
+
+             g_image_writer.write_file(f"{name}-{{index}}.{image.format.lower()}", data)
+     finally:
+         print(f"I[{thread_id}] Halted")
+
+
+
+ def save_state(path, queue, image_writer):
+     with open(path, "wb") as file:
+         pickle.dump({
+             "sources_done": queue.sources_done,
+             "sources": queue.sources,
+             "pages_done": queue.pages_done,
+             "pages": queue.pages,
+             "current_file_index": image_writer.current_file_index,
+         }, file)
+
+ def load_state(path, queue, image_writer):
+     with open(path, "rb") as file:
+         loaded = pickle.load(file)
+
+     queue.sources_done = loaded["sources_done"]
+     queue.sources = loaded["sources"]
+     queue.pages_done = loaded["pages_done"]
+     queue.pages = loaded["pages"]
+
+     image_writer.current_file_index = loaded["current_file_index"]
+
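+ # save_state()/load_state() round-trip the four queue sets plus the tar index,
+ # which is what lets main() resume from state.pickle instead of re-seeding.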
+ def main():
+     global g_queue, g_state
+
+     if pathlib.Path(c_state_file).is_file():
+         load_state(c_state_file, g_queue, g_image_writer)
+     else:
+         # Fresh start: seed the queues from the web player's home page.
+         client = Client()
+         client.authenticate()
+
+         print("Client:")
+         print("\tVersion:", client.client_version)
+         print("\tID:", client.client_id)
+         print("\tToken:", client.client_token)
+         print("\tAuth:", client.client_auth)
+
+         result = client.request("home", {
+             "timeZone": "UTC",
+             "sp_t": random.randbytes(16).hex(),
+             "country": "US",
+             "facet": None,
+             "sectionItemsLimit": 15
+         })
+
+         recursive_scraping(result.json(), g_queue.pages, g_queue.sources)
+
+     threads = []
+
+     for thread_id in range(c_paging_threads):
+         thread = threading.Thread(target=paging_worker_logic, args=(thread_id,))
+         thread.start()
+         threads.append(thread)
+
+     for thread_id in range(c_imaging_threads):
+         thread = threading.Thread(target=imaging_worker_logic, args=(thread_id,))
+         thread.start()
+         threads.append(thread)
+
+     try:
+         start = time.time()
+         while g_state.running:
+             with g_state.update_event:
+                 g_state.update_event.wait()
+
+             print(f"URIs: {len(g_queue.pages)}/{len(g_queue.pages_done)} images: {len(g_queue.sources)}/{len(g_queue.sources_done)} written: {g_image_writer.current_file_index}")
+
+             # Periodic backup every 30 minutes.
+             if time.time() > start + 60 * 30:
+                 start = time.time()
+                 save_state(c_state_file + ".bak", g_queue.copy(), g_image_writer)
+     except KeyboardInterrupt:
+         print("Halting tasks...")
+         g_state.running = False
+
+     for thread in threads:
+         with g_state.update_event:
+             g_state.update_event.notify_all()
+
+         if thread.is_alive():
+             thread.join()
+
+     print("Saving state...")
+     g_image_writer.close()
+     save_state(c_state_file, g_queue, g_image_writer)
+
+
+ if __name__ == "__main__":
+     main()
state.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3a9ef0876cd28bbe937704927e0fb7407de5d8c424da3830a2972a8d4763efd7
+ size 682051187
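state.pickle is a Git LFS pointer: only the oid/size stub lives in the repository, while the pickled scraper state itself (roughly 650 MiB per the size field) is stored out-of-band.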