import requests
from bs4 import BeautifulSoup
import csv
# Load ticker symbols from the exported CSV into link_list, one entry per file line.
# NOTE(review): the filename looks like a browser "duplicate download" name
# ("100_stocks.csv - 100_stocks.csv") — confirm it matches the file on disk.
link_list = []
with open('100_stocks.csv - 100_stocks.csv', newline='') as csvfile:
    # Space-delimited with '|' quoting, matching how the file was exported;
    # re-joining each row's fields keeps one list entry per source line.
    spamreader = csv.reader(csvfile, delimiter=' ', quotechar='|')
    for row in spamreader:
        link_list.append(', '.join(row))
# Fetch the NASDAQ earnings-report page for each symbol.
table_data = []  # will hold one dict per earnings-table row once parsing is enabled
# Only a two-symbol slice is fetched while the scraper is under development;
# index 0 is skipped — presumably the first CSV line is a header row (TODO confirm).
for link in link_list[1:3]:
    url = "http://www.nasdaq.com/earnings/report/%s" % link.lower()
    print(url)
    # A timeout keeps the script from hanging indefinitely on a stalled server.
    result = requests.get(url, timeout=10)
    print(result)
    # TODO(review): planned parsing step, currently disabled pending verification
    # of the page structure (div.genTable with a 5-column <tr> layout):
    # c = result.content
    # soup = BeautifulSoup(c, 'html.parser')
    # all_content = soup.find('div', class_="genTable")
    # rows = all_content.find_all('tr')
    # for row in rows[1:]:
    #     data = {}
    #     tds = row.find_all('td')
    #     data['url'] = url
    #     data['Fiscal Quarter End'] = tds[0].text
    #     data['Date Reported'] = tds[1].text
    #     data['Earnings Per Share'] = tds[2].text
    #     data['Consensus EPS* Forecast'] = tds[3].text
    #     data['%Surprise'] = tds[4].text
    #     table_data.append(data)

# TODO(review): planned CSV export of the scraped rows, currently disabled:
# keys = table_data[0].keys()
# with open('renata.csv', 'w') as output_file:
#     dict_writer = csv.DictWriter(output_file, keys)
#     dict_writer.writeheader()
#     dict_writer.writerows(table_data)