# banchan86
# 10/22/2017 - 4:15 AM
#
# Crawling through web links with BeautifulSoup:
# iteratively go through a webpage full of links and follow the link at a
# fixed position, page after page, collecting the linked names in sequence.

# To run this, you can install BeautifulSoup
# https://pypi.python.org/pypi/beautifulsoup4

# Or download the file
# http://www.py4e.com/code3/bs4.zip
# and unzip it in the same directory as this file


from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl

# Ignore SSL certificate errors
# Build an SSL context that skips certificate validation so the crawler can
# fetch pages served over HTTPS with self-signed or otherwise invalid certs.
ctx = ssl.create_default_context()
ctx.check_hostname = False  # must be disabled before dropping verification below
ctx.verify_mode = ssl.CERT_NONE  # accept any certificate chain

def follow_links(start_url, position, hops, context=ctx):
    """Follow the anchor at index `position` across `hops` pages.

    Starting from `start_url`, fetch each page, take its `position`-th
    anchor tag, record that anchor's text, and continue from the anchor's
    href. Returns the list of `hops` collected names.

    Parameters:
        start_url: URL of the first page to fetch.
        position:  zero-based index into the page's <a> tags to follow.
        hops:      number of pages to visit (one name recorded per page).
        context:   ssl.SSLContext used for the HTTPS requests.
    """
    names = []
    url = start_url
    for _ in range(hops):
        # `with` closes the HTTP response even if parsing fails —
        # the original left every response unclosed.
        with urlopen(url, context=context) as response:
            soup = BeautifulSoup(response.read(), "html.parser")
        anchor = soup('a')[position]
        names.append(anchor.contents[0])  # the linked name (tag text)
        url = anchor.get('href', None)    # next page to visit
    return names


url = "http://py4e-data.dr-chuck.net/known_by_Danish.html"
# Start from "Danish", then follow the 18th link (index 17) seven times:
# one name from the starting page plus six more hops, as before.
name_list = ["Danish"] + follow_links(url, position=17, hops=7)

print(name_list)

# for tag in tags:
#     # Look at the parts of a tag
#     # print('TAG:', tag)
#     print('URL:', tag.get('href', None))
#     print('Contents:', tag.contents[0])
#     # print('Attrs:', tag.attrs)