An example of downloading many links in parallel:
import os

from multiprocessing.pool import ThreadPool

import requests

def fetch_url(url):
    # Skip the download if the file is already cached locally.
    # Note: this assumes the URL is usable as a file name component.
    path = 'files/' + url + '.html'
    if not os.path.exists(path):
        r = requests.get(url, stream=True)
        if r.status_code == 200:
            with open(path, 'wb') as f:
                for chunk in r:
                    f.write(chunk)
    return path

# Consume the iterator so the script waits for every download to
# finish: pool worker threads are daemonic and die with the main thread.
pool = ThreadPool(50)
for path in pool.imap_unordered(fetch_url, compositions):
    print(path)
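
The same pattern can also be written with the standard-library concurrent.futures module, which is the more common idiom in modern code. A minimal sketch, reusing the fetch_url function above and assuming compositions is the same list of URLs:

from concurrent.futures import ThreadPoolExecutor, as_completed

with ThreadPoolExecutor(max_workers=50) as executor:
    # Submit one download task per URL; as_completed() yields futures
    # as they finish, much like imap_unordered above.
    futures = [executor.submit(fetch_url, url) for url in compositions]
    for future in as_completed(futures):
        print(future.result())

Unlike the raw ThreadPool, the with block here waits for all outstanding tasks before the script exits, so no explicit draining of results is required for completeness.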