Python BeautifulSoup scrape tables
Question:
I am trying to scrape a table with BeautifulSoup. I wrote this Python code:
import urllib2
from bs4 import BeautifulSoup
url = "http://dofollow.netsons.org/table1.htm" # change to whatever your url is
page = urllib2.urlopen(url).read()
soup = BeautifulSoup(page)
for i in soup.find_all('form'):
print i.attrs['class']
I need to scrape Nome, Cognome, Email.
Answers:
Loop over the table rows (tr tags) and get the text of the cells (td tags) inside:
for tr in soup.find_all('tr')[2:]:
tds = tr.find_all('td')
print "Nome: %s, Cognome: %s, Email: %s" %
(tds[0].text, tds[1].text, tds[2].text)
prints:
Nome: Massimo, Cognome: Allegri, Email: [email protected]
Nome: Alessandra, Cognome: Anastasia, Email: [email protected]
...
FYI, the [2:] slice here is to skip the two header rows.
UPD, here is how you can save the results into a txt file:
# Write one "Nome: ..., Cognome: ..., Email: ..." line per data row.
# BUG FIX: the scraped snippet had "%sn" — the backslash of the newline
# escape was lost; restored to "%s\n" so each row ends on its own line.
with open('output.txt', 'w') as f:
    for tr in soup.find_all('tr')[2:]:
        tds = tr.find_all('td')
        f.write("Nome: %s, Cognome: %s, Email: %s\n" %
                (tds[0].text, tds[1].text, tds[2].text))
# Libray
# Library
from bs4 import BeautifulSoup

# Empty list — will hold one dict per table row
tabs = []

# File handling: read previously saved HTML from disk
with open('/home/rakesh/showHW/content.html', 'r') as fp:
    html_content = fp.read()
    table_doc = BeautifulSoup(html_content, 'html.parser')
    # Parse the first <table>: one dict per <tr>.
    # Hoisted tr.find_all('td') into a local so each row is scanned once
    # instead of three times.
    # NOTE(review): rows without three <td> cells (e.g. a <th> header row)
    # would raise IndexError — assumes every row has at least three cells.
    for tr in table_doc.table.find_all('tr'):
        tds = tr.find_all('td')
        tabs.append({
            'Nome': tds[0].string,
            'Cogname': tds[1].string,
            'Email': tds[2].string
        })

print(tabs)
The original link posted by OP is dead… but here’s how you might scrape table data with gazpacho:
Step 1 – import Soup
and download the html:
from gazpacho import Soup

# Step 1 - download the html
url = "https://en.wikipedia.org/wiki/List_of_multiple_Olympic_gold_medalists"
soup = Soup.get(url)

# Step 2 - find the table and its rows (skip the header row)
table = soup.find("table", {"class": "wikitable sortable"}, mode="first")
trs = table.find("tr")[1:]

# Step 3 - parse each row with a function to extract the desired data
def parse_tr(tr):
    """Return a dict with name, country and medal count for one table row."""
    return {
        "name": tr.find("td")[0].text,
        "country": tr.find("td")[1].text,
        "medals": int(tr.find("td")[-1].text),
    }

data = [parse_tr(tr) for tr in trs]
# BUG FIX: sorted() returns a new list — the original discarded it.
# Bind the result so the ordering is actually kept.
data = sorted(data, key=lambda x: x["medals"], reverse=True)
I am trying to scrape a table with BeautifulSoup. I wrote this Python code:
import urllib2
from bs4 import BeautifulSoup
url = "http://dofollow.netsons.org/table1.htm" # change to whatever your url is
page = urllib2.urlopen(url).read()
soup = BeautifulSoup(page)
for i in soup.find_all('form'):
print i.attrs['class']
I need to scrape Nome, Cognome, Email.
Loop over the table rows (tr tags) and get the text of the cells (td tags) inside:
for tr in soup.find_all('tr')[2:]:
tds = tr.find_all('td')
print "Nome: %s, Cognome: %s, Email: %s" %
(tds[0].text, tds[1].text, tds[2].text)
prints:
Nome: Massimo, Cognome: Allegri, Email: [email protected]
Nome: Alessandra, Cognome: Anastasia, Email: [email protected]
...
FYI, the [2:] slice here is to skip the two header rows.
UPD, here is how you can save the results into a txt file:
# Write one "Nome: ..., Cognome: ..., Email: ..." line per data row.
# BUG FIX: the scraped snippet had "%sn" — the backslash of the newline
# escape was lost; restored to "%s\n" so each row ends on its own line.
with open('output.txt', 'w') as f:
    for tr in soup.find_all('tr')[2:]:
        tds = tr.find_all('td')
        f.write("Nome: %s, Cognome: %s, Email: %s\n" %
                (tds[0].text, tds[1].text, tds[2].text))
# Libray
# Library
from bs4 import BeautifulSoup

# Empty list — will hold one dict per table row
tabs = []

# File handling: read previously saved HTML from disk
with open('/home/rakesh/showHW/content.html', 'r') as fp:
    html_content = fp.read()
    table_doc = BeautifulSoup(html_content, 'html.parser')
    # Parse the first <table>: one dict per <tr>.
    # Hoisted tr.find_all('td') into a local so each row is scanned once
    # instead of three times.
    # NOTE(review): rows without three <td> cells (e.g. a <th> header row)
    # would raise IndexError — assumes every row has at least three cells.
    for tr in table_doc.table.find_all('tr'):
        tds = tr.find_all('td')
        tabs.append({
            'Nome': tds[0].string,
            'Cogname': tds[1].string,
            'Email': tds[2].string
        })

print(tabs)
The original link posted by OP is dead… but here’s how you might scrape table data with gazpacho:
Step 1 – import Soup
and download the html:
from gazpacho import Soup

# Step 1 - download the html
url = "https://en.wikipedia.org/wiki/List_of_multiple_Olympic_gold_medalists"
soup = Soup.get(url)

# Step 2 - find the table and its rows (skip the header row)
table = soup.find("table", {"class": "wikitable sortable"}, mode="first")
trs = table.find("tr")[1:]

# Step 3 - parse each row with a function to extract the desired data
def parse_tr(tr):
    """Return a dict with name, country and medal count for one table row."""
    return {
        "name": tr.find("td")[0].text,
        "country": tr.find("td")[1].text,
        "medals": int(tr.find("td")[-1].text),
    }

data = [parse_tr(tr) for tr in trs]
# BUG FIX: sorted() returns a new list — the original discarded it.
# Bind the result so the ordering is actually kept.
data = sorted(data, key=lambda x: x["medals"], reverse=True)