HTTPError: HTTP Error 403: Forbidden

Question:

I am making a Python script for personal use, but it's not working for Wikipedia…

This works:

import urllib2, sys
from bs4 import BeautifulSoup

site = "http://youtube.com"
page = urllib2.urlopen(site)
soup = BeautifulSoup(page)
print soup

This does not work:

import urllib2, sys
from bs4 import BeautifulSoup

site = "http://en.wikipedia.org/wiki/StackOverflow"
page = urllib2.urlopen(site)
soup = BeautifulSoup(page)
print soup

This is the error:

Traceback (most recent call last):
  File "C:Python27wiki.py", line 5, in <module>
    page = urllib2.urlopen(site)
  File "C:Python27liburllib2.py", line 126, in urlopen
    return _opener.open(url, data, timeout)
  File "C:Python27liburllib2.py", line 406, in open
    response = meth(req, response)
  File "C:Python27liburllib2.py", line 519, in http_response
    'http', request, response, code, msg, hdrs)
  File "C:Python27liburllib2.py", line 444, in error
    return self._call_chain(*args)
  File "C:Python27liburllib2.py", line 378, in _call_chain
    result = func(*args)
  File "C:Python27liburllib2.py", line 527, in http_error_default
    raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
HTTPError: HTTP Error 403: Forbidden
Asked By: a1204773


Answers:

Modify the current code to send a User-Agent header with the request:

Python 2.X

import urllib2, sys
from bs4 import BeautifulSoup

site = "http://en.wikipedia.org/wiki/StackOverflow"
hdr = {'User-Agent': 'Mozilla/5.0'}
req = urllib2.Request(site, headers=hdr)
page = urllib2.urlopen(req)
soup = BeautifulSoup(page)
print soup

Python 3.X

from bs4 import BeautifulSoup
from urllib.request import Request, urlopen

site = "http://en.wikipedia.org/wiki/StackOverflow"
hdr = {'User-Agent': 'Mozilla/5.0'}
req = Request(site, headers=hdr)
page = urlopen(req)
soup = BeautifulSoup(page)
print(soup)

Python 3.X with Selenium (for pages that require JavaScript execution)

from selenium import webdriver

browser = webdriver.PhantomJS()
browser.get("http://en.wikipedia.org/wiki/StackOverflow")
assert "Stack Overflow - Wikipedia" in browser.title

The modified version works because Wikipedia checks the User-Agent header and rejects requests that don't appear to come from a popular browser.
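
As a minimal sketch of that check (Python 3 syntax; this assumes Wikipedia still answers the default Python-urllib agent with a 403, which may change on their side):

from urllib.request import Request, urlopen
from urllib.error import HTTPError

site = "http://en.wikipedia.org/wiki/StackOverflow"

# Without a User-Agent header, urllib identifies itself as "Python-urllib/x.y",
# which the server may reject outright.
try:
    urlopen(site)
except HTTPError as e:
    print(e.code)  # 403

# The same request with a browser-like User-Agent is accepted.
req = Request(site, headers={'User-Agent': 'Mozilla/5.0'})
print(urlopen(req).getcode())  # 200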

Answered By: Supreet Sethi

from urllib.request import Request, urlopen
from bs4 import BeautifulSoup as soup

url = 'yourlink'
req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})

webpage = urlopen(req).read()
page_soup = soup(webpage, "html.parser")

This worked for me, should also work for you!
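
For instance, once page_soup is built you might print the title or list the links (a hypothetical usage, assuming the page has a <title> tag; any bs4 query works here):

print(page_soup.title.string)         # text of the page's <title> tag

for link in page_soup.find_all("a"):  # iterate over every hyperlink
    print(link.get("href"))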

Answered By: Hemil Mehta

from urllib.request import urlopen, Request
from urllib.parse import urlparse
from bs4 import BeautifulSoup as soup


def checkURL(requested_url):
    if not urlparse(requested_url).scheme:
        requested_url = "https://" + requested_url
    return requested_url


def requestAndParse(requested_url):
    requested_url = checkURL(requested_url)
    try:
        # browser-like headers so the server does not reject the request as a bot
        headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) ' 
                        'AppleWebKit/537.11 (KHTML, like Gecko) '
                        'Chrome/23.0.1271.64 Safari/537.11',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
            'Accept-Encoding': 'none',
            'Accept-Language': 'en-US,en;q=0.8',
            'Connection': 'keep-alive'}
        request_obj = Request(url = requested_url, headers = headers)
        opened_url = urlopen(request_obj)
        page_html = opened_url.read()
        opened_url.close()
        page_soup = soup(page_html, "html.parser")
        return page_soup, requested_url

    except Exception as e:
        print(e)
        return None, requested_url

# Example (checkURL prepends "https://" when the scheme is missing):
page, url = requestAndParse("en.wikipedia.org/wiki/StackOverflow")

Try out the above code snippet for BeautifulSoup page loading.

Answered By: nipun

Scrape Data With Cleaned Text

import re
from urllib.request import Request, urlopen
from bs4 import BeautifulSoup as soup

url = "your_url"
req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})

webpage = urlopen(req).read()
new_soup = soup(webpage, "lxml")

text = new_soup.get_text()
text = re.sub(r"\n", " ", text)   # replace newlines with spaces
text = re.sub(r"\t", " ", text)   # replace tabs with spaces
text = re.sub(r"\s+", " ", text)  # collapse runs of whitespace
val = re.sub('[^a-zA-Z0-9@_,.$£+]', ' ', text).strip()  # blank out everything but the listed characters
print(val)
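
As a quick illustration of what the cleaning does, applied to a hypothetical input string:

sample = "Price:\n\t$5.00  (approx. £4)"
flat = re.sub(r"\s+", " ", sample)                       # "Price: $5.00 (approx. £4)"
print(re.sub('[^a-zA-Z0-9@_,.$£+]', ' ', flat).strip())  # colons and parentheses become spaces
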
Answered By: Hardik Vegad