Commit 9575e189 authored by Berker Peksag

Issue #12955: Change the urlopen() examples to use context managers where appropriate.

Patch by Martin Panter.
parent 8ad751e0
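
The whole patch applies a single pattern: wherever an example bound the result of ``urlopen()`` (or ``opener.open()``) to a name and read it without ever closing it, the call is wrapped in a ``with`` statement so the connection is closed as soon as the block exits. A minimal before/after sketch of that pattern (the example.com URL is a placeholder, not one taken from the patch)::

   import urllib.request

   # Before: closing the response is left to the garbage collector.
   response = urllib.request.urlopen('http://www.example.com/')
   html = response.read()

   # After: the response is a context manager, so the with statement
   # closes the underlying connection even if read() raises an exception.
   with urllib.request.urlopen('http://www.example.com/') as response:
       html = response.read()
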
@@ -687,6 +687,7 @@ Yes. Here's a simple example that uses urllib.request::
    ### connect and send the server a path
    req = urllib.request.urlopen('http://www.some-server.out-there'
                                 '/cgi-bin/some-cgi-script', data=qs)
-   msg, hdrs = req.read(), req.info()
+   with req:
+       msg, hdrs = req.read(), req.info()

 Note that in general for percent-encoded POST operations, query strings must be
......
@@ -53,7 +53,7 @@ Fetching URLs
 The simplest way to use urllib.request is as follows::

    import urllib.request
-   response = urllib.request.urlopen('http://python.org/')
-   html = response.read()
+   with urllib.request.urlopen('http://python.org/') as response:
+       html = response.read()

 If you wish to retrieve a resource via URL and store it in a temporary location,
@@ -79,7 +79,7 @@ response::
    import urllib.request

    req = urllib.request.Request('http://www.voidspace.org.uk')
-   response = urllib.request.urlopen(req)
-   the_page = response.read()
+   with urllib.request.urlopen(req) as response:
+       the_page = response.read()

 Note that urllib.request makes use of the same Request interface to handle all URL
@@ -117,7 +117,7 @@ library. ::
    data = urllib.parse.urlencode(values)
    data = data.encode('utf-8') # data should be bytes
    req = urllib.request.Request(url, data)
-   response = urllib.request.urlopen(req)
-   the_page = response.read()
+   with urllib.request.urlopen(req) as response:
+       the_page = response.read()

 Note that other encodings are sometimes required (e.g. for file upload from HTML
@@ -183,7 +183,7 @@ Explorer [#]_. ::
    data = urllib.parse.urlencode(values)
    data = data.encode('utf-8')
    req = urllib.request.Request(url, data, headers)
-   response = urllib.request.urlopen(req)
-   the_page = response.read()
+   with urllib.request.urlopen(req) as response:
+       the_page = response.read()

 The response also has two useful methods. See the section on `info and geturl`_
......
@@ -138,8 +138,8 @@ ThreadPoolExecutor Example
    # Retrieve a single page and report the url and contents
    def load_url(url, timeout):
-       conn = urllib.request.urlopen(url, timeout=timeout)
-       return conn.readall()
+       with urllib.request.urlopen(url, timeout=timeout) as conn:
+           return conn.read()

    # We can use a with statement to ensure threads are cleaned up promptly
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
......
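
The ThreadPoolExecutor hunk above is truncated after the ``executor`` line. For context, a ``load_url()`` helper of this shape is typically driven roughly as follows; this is only a sketch in the spirit of the surrounding concurrent.futures example, and the ``URLS`` list is hypothetical::

   import concurrent.futures
   import urllib.request

   URLS = ['http://www.example.com/', 'http://www.python.org/']   # hypothetical

   def load_url(url, timeout):
       # Same shape as the helper in the hunk above: the with statement
       # closes the connection as soon as the body has been read.
       with urllib.request.urlopen(url, timeout=timeout) as conn:
           return conn.read()

   # We can use a with statement to ensure threads are cleaned up promptly
   with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
       # Start the load operations and mark each future with its URL
       future_to_url = {executor.submit(load_url, url, 60): url for url in URLS}
       for future in concurrent.futures.as_completed(future_to_url):
           url = future_to_url[future]
           try:
               data = future.result()
           except Exception as exc:
               print('%r generated an exception: %s' % (url, exc))
           else:
               print('%r page is %d bytes' % (url, len(data)))
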
@@ -1048,8 +1048,9 @@ This example gets the python.org main page and displays the first 300 bytes of
 it. ::

    >>> import urllib.request
-   >>> f = urllib.request.urlopen('http://www.python.org/')
-   >>> print(f.read(300))
+   >>> with urllib.request.urlopen('http://www.python.org/') as f:
+   ...     print(f.read(300))
+   ...
    b'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
    "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n\n\n<html
    xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">\n\n<head>\n
@@ -1091,8 +1092,9 @@ when the Python installation supports SSL. ::
    >>> import urllib.request
    >>> req = urllib.request.Request(url='https://localhost/cgi-bin/test.cgi',
    ...                              data=b'This data is passed to stdin of the CGI')
-   >>> f = urllib.request.urlopen(req)
-   >>> print(f.read().decode('utf-8'))
+   >>> with urllib.request.urlopen(req) as f:
+   ...     print(f.read().decode('utf-8'))
+   ...
    Got Data: "This data is passed to stdin of the CGI"

 The code for the sample CGI used in the above example is::
@@ -1107,7 +1109,8 @@ Here is an example of doing a ``PUT`` request using :class:`Request`::
    import urllib.request
    DATA=b'some data'
    req = urllib.request.Request(url='http://localhost:8080', data=DATA,method='PUT')
-   f = urllib.request.urlopen(req)
+   with urllib.request.urlopen(req) as f:
+       pass
    print(f.status)
    print(f.reason)
@@ -1173,8 +1176,10 @@ containing parameters::
    >>> import urllib.request
    >>> import urllib.parse
    >>> params = urllib.parse.urlencode({'spam': 1, 'eggs': 2, 'bacon': 0})
-   >>> f = urllib.request.urlopen("http://www.musi-cal.com/cgi-bin/query?%s" % params)
-   >>> print(f.read().decode('utf-8'))
+   >>> url = "http://www.musi-cal.com/cgi-bin/query?%s" % params
+   >>> with urllib.request.urlopen(url) as f:
+   ...     print(f.read().decode('utf-8'))
+   ...

 The following example uses the ``POST`` method instead. Note that params output
 from urlencode is encoded to bytes before it is sent to urlopen as data::
@@ -1186,8 +1191,9 @@ from urlencode is encoded to bytes before it is sent to urlopen as data::
    >>> request = urllib.request.Request("http://requestb.in/xrbl82xr")
    >>> # adding charset parameter to the Content-Type header.
    >>> request.add_header("Content-Type","application/x-www-form-urlencoded;charset=utf-8")
-   >>> f = urllib.request.urlopen(request, data)
-   >>> print(f.read().decode('utf-8'))
+   >>> with urllib.request.urlopen(request, data) as f:
+   ...     print(f.read().decode('utf-8'))
+   ...

 The following example uses an explicitly specified HTTP proxy, overriding
 environment settings::
@@ -1195,15 +1201,17 @@ environment settings::
    >>> import urllib.request
    >>> proxies = {'http': 'http://proxy.example.com:8080/'}
    >>> opener = urllib.request.FancyURLopener(proxies)
-   >>> f = opener.open("http://www.python.org")
-   >>> f.read().decode('utf-8')
+   >>> with opener.open("http://www.python.org") as f:
+   ...     f.read().decode('utf-8')
+   ...

 The following example uses no proxies at all, overriding environment settings::

    >>> import urllib.request
    >>> opener = urllib.request.FancyURLopener({})
-   >>> f = opener.open("http://www.python.org/")
-   >>> f.read().decode('utf-8')
+   >>> with opener.open("http://www.python.org/") as f:
+   ...     f.read().decode('utf-8')
+   ...

 Legacy interface
......
@@ -153,7 +153,8 @@ protocols. Two of the simplest are :mod:`urllib.request` for retrieving data
 from URLs and :mod:`smtplib` for sending mail::

    >>> from urllib.request import urlopen
-   >>> for line in urlopen('http://tycho.usno.navy.mil/cgi-bin/timer.pl'):
-   ...     line = line.decode('utf-8') # Decoding the binary data to text.
-   ...     if 'EST' in line or 'EDT' in line: # look for Eastern Time
-   ...         print(line)
+   >>> with urlopen('http://tycho.usno.navy.mil/cgi-bin/timer.pl') as response:
+   ...     for line in response:
+   ...         line = line.decode('utf-8') # Decoding the binary data to text.
+   ...         if 'EST' in line or 'EDT' in line: # look for Eastern Time
+   ...             print(line)
......