path: root/requests/utils.py
author     SVN-Git Migration <python-modules-team@lists.alioth.debian.org>  2015-10-08 13:41:37 -0700
committer  SVN-Git Migration <python-modules-team@lists.alioth.debian.org>  2015-10-08 13:41:37 -0700
commit     b4a5af4fcbf4e5d0f741aaf9978c728235b11a56 (patch)
tree       d9f97f31578c0e47ab923f24e9da82136f52be42 /requests/utils.py
parent     2c79b40c98c83e352c5479223d581d69b0e7806c (diff)
download   python-requests-b4a5af4fcbf4e5d0f741aaf9978c728235b11a56.tar
           python-requests-b4a5af4fcbf4e5d0f741aaf9978c728235b11a56.tar.gz
Imported Upstream version 2.7.0
Diffstat (limited to 'requests/utils.py')
-rw-r--r--  requests/utils.py  35
1 file changed, 28 insertions(+), 7 deletions(-)
diff --git a/requests/utils.py b/requests/utils.py
index 1868f86..8fba62d 100644
--- a/requests/utils.py
+++ b/requests/utils.py
@@ -19,12 +19,14 @@ import re
import sys
import socket
import struct
+import warnings
from . import __version__
from . import certs
from .compat import parse_http_list as _parse_list_header
from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2,
- builtin_str, getproxies, proxy_bypass, urlunparse)
+ builtin_str, getproxies, proxy_bypass, urlunparse,
+ basestring)
from .cookies import RequestsCookieJar, cookiejar_from_dict
from .structures import CaseInsensitiveDict
from .exceptions import InvalidURL
@@ -114,7 +116,8 @@ def get_netrc_auth(url):
def guess_filename(obj):
"""Tries to guess the filename of the given object."""
name = getattr(obj, 'name', None)
- if name and name[0] != '<' and name[-1] != '>':
+ if (name and isinstance(name, basestring) and name[0] != '<' and
+ name[-1] != '>'):
return os.path.basename(name)
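
The new isinstance guard matters because file objects opened from a raw file
descriptor expose an integer .name, which the old name[0] indexing would crash
on. A minimal standalone sketch of the guarded helper (reimplemented outside
requests for illustration; FdBackedFile is a made-up stand-in):

import os

try:
    basestring          # exists on Python 2
except NameError:
    basestring = str    # Python 3 stand-in, for this sketch only

def guess_filename(obj):
    """Tries to guess the filename of the given object."""
    name = getattr(obj, 'name', None)
    if (name and isinstance(name, basestring) and name[0] != '<' and
            name[-1] != '>'):
        return os.path.basename(name)

class FdBackedFile(object):
    name = 3            # open(fd) reports the integer descriptor as .name

print(guess_filename(FdBackedFile()))   # None, instead of a TypeError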
@@ -287,6 +290,11 @@ def get_encodings_from_content(content):
:param content: bytestring to extract encodings from.
"""
+ warnings.warn((
+ 'In requests 3.0, get_encodings_from_content will be removed. For '
+ 'more information, please see the discussion on issue #2266. (This'
+ ' warning should only appear once.)'),
+ DeprecationWarning)
charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
pragma_re = re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I)
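
For context on what is being deprecated: the helper scans HTML for charset
declarations using the regexes above. Run standalone, the charset pattern
behaves like this (illustrative snippet, not part of the diff):

import re

charset_re = re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I)
content = '<html><head><meta charset="utf-8"></head></html>'
print(charset_re.findall(content))   # ['utf-8']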
@@ -354,6 +362,11 @@ def get_unicode_from_response(r):
2. fall back and replace all unicode characters
"""
+ warnings.warn((
+ 'In requests 3.0, get_unicode_from_response will be removed. For '
+ 'more information, please see the discussion on issue #2266. (This'
+ ' warning should only appear once.)'),
+ DeprecationWarning)
tried_encodings = []
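
Note that Python hides DeprecationWarning by default in most contexts, so
callers who want to see these new warnings have to opt in. A short
standard-library-only sketch:

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    warnings.warn('get_unicode_from_response will be removed.',
                  DeprecationWarning)

print(caught[0].category.__name__)   # DeprecationWarning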
@@ -407,10 +420,18 @@ def requote_uri(uri):
This function passes the given URI through an unquote/quote cycle to
ensure that it is fully and consistently quoted.
"""
- # Unquote only the unreserved characters
- # Then quote only illegal characters (do not quote reserved, unreserved,
- # or '%')
- return quote(unquote_unreserved(uri), safe="!#$%&'()*+,/:;=?@[]~")
+ safe_with_percent = "!#$%&'()*+,/:;=?@[]~"
+ safe_without_percent = "!#$&'()*+,/:;=?@[]~"
+ try:
+ # Unquote only the unreserved characters
+ # Then quote only illegal characters (do not quote reserved,
+ # unreserved, or '%')
+ return quote(unquote_unreserved(uri), safe=safe_with_percent)
+ except InvalidURL:
+ # We couldn't unquote the given URI, so let's try quoting it, but
+ # there may be unquoted '%'s in the URI. We need to make sure they're
+ # properly quoted so they do not cause issues elsewhere.
+ return quote(uri, safe=safe_without_percent)
def address_in_network(ip, net):
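
The effect of the new fallback shows up with a percent sign that starts an
invalid escape: '%zz' looks alphanumeric but is not valid hex, so
unquote_unreserved() raises InvalidURL and the second quote() call runs.
Assuming requests 2.7.0 or later is installed:

from requests.utils import requote_uri

print(requote_uri('http://example.com/path%zzfoo'))
# http://example.com/path%25zzfoo -- the bare '%' is quoted instead of raising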
@@ -567,7 +588,7 @@ def parse_header_links(value):
replace_chars = " '\""
- for val in value.split(","):
+ for val in re.split(", *<", value):
try:
url, params = val.split(";", 1)
except ValueError:
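
Splitting on ', *<' instead of a bare comma keeps a link together when one of
its quoted parameters itself contains a comma. Illustrative usage against the
public helper (the header value here is made up):

from requests.utils import parse_header_links

header = ('<http://example.com/?page=2>; rel="next"; title="a, b", '
          '<http://example.com/?page=5>; rel="last"')
for link in parse_header_links(header):
    print(link['url'], link['rel'])
# http://example.com/?page=2 next
# http://example.com/?page=5 last

With the old value.split(','), the comma inside title="a, b" would have
produced a bogus third entry.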