nemubot/nemubot/tools/web.py

# coding=utf-8
# Nemubot is a smart and modulable IM bot.
# Copyright (C) 2012-2015 Mercier Pierre-Olivier
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from urllib.parse import urlparse, urlsplit, urlunsplit

from nemubot.exception import IRCException


def isURL(url):
"""Return True if the URL can be parsed"""
o = urlparse(url)
    return o.netloc != "" or o.path != ""


def getNormalizedURL(url):
"""Return a normalized form for the given URL"""
    return urlunsplit(urlsplit(url, "http"))


def getScheme(url):
"""Return the protocol of a given URL"""
o = urlparse(url, "http")
    return o.scheme


def getHost(url):
"""Return the domain of a given URL"""
    return urlparse(url, "http").hostname


def getPort(url):
"""Return the port of a given URL"""
    return urlparse(url, "http").port


def getPath(url):
"""Return the page request of a given URL"""
    return urlparse(url, "http").path


def getUser(url):
"""Return the page request of a given URL"""
    return urlparse(url, "http").username


def getPassword(url):
    """Return the password of a given URL"""
return urlparse(url, "http").password
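
# Quick reference for the URL helpers above (the URL is only an illustrative
# example):
#   getScheme("https://user:pw@example.org:8080/path")    # -> "https"
#   getHost("https://user:pw@example.org:8080/path")      # -> "example.org"
#   getPort("https://user:pw@example.org:8080/path")      # -> 8080
#   getPath("https://user:pw@example.org:8080/path")      # -> "/path"
#   getUser("https://user:pw@example.org:8080/path")      # -> "user"
#   getPassword("https://user:pw@example.org:8080/path")  # -> "pw"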


# Get real pages
def getURLContent(url, body=None, timeout=7, header=None):
    """Return the page content corresponding to the given URL
    Arguments:
    url -- the URL to get
    body -- data to send as POST content
    timeout -- maximum number of seconds to wait before raising an exception
    header -- dict of extra HTTP headers to send (a User-agent is added when missing)
    """

    o = urlparse(url, "http")

import http.client
kwargs = {
'host': o.hostname,
'port': o.port,
'timeout': timeout
}
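    # Pick the connection class from the URL scheme; plain HTTP is the default
    # when no scheme is given.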
if o.scheme == "http":
conn = http.client.HTTPConnection(**kwargs)
elif o.scheme == "https":
        # For Python >= 3.4, restore the pre-3.4 behavior: no certificate verification
import ssl
if hasattr(ssl, "create_default_context"):
kwargs["context"] = ssl.create_default_context()
kwargs["context"].check_hostname = False
kwargs["context"].verify_mode = ssl.CERT_NONE
conn = http.client.HTTPSConnection(**kwargs)
elif o.scheme is None or o.scheme == "":
conn = http.client.HTTPConnection(**kwargs)
else:
        raise IRCException("Invalid URL")

from nemubot import __version__
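    # Always send a User-agent so remote servers can identify the bot, unless
    # the caller already provided one.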
if header is None:
header = {"User-agent": "Nemubot v%s" % __version__}
elif "User-agent" not in header:
header["User-agent"] = "Nemubot v%s" % __version__
    import socket
try:
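        # Issue a plain GET by default, or a POST when a body is provided; the
        # query string, when present, is appended to the requested path.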
if o.query != '':
conn.request("GET" if body is None else "POST",
o.path + "?" + o.query,
body,
header)
else:
conn.request("GET" if body is None else "POST",
o.path,
body,
header)
except OSError as e:
        raise IRCException(e.strerror)

    try:
res = conn.getresponse()
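        # Refuse responses that announce more than 512 KiB (524288 bytes) of
        # content, as well as content types that are neither text/* nor
        # application/* (e.g. images).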
        size = int(res.getheader("Content-Length", 524288))
        cntype = res.getheader("Content-Type")
        if size > 524288 or (cntype is not None and cntype[:4] != "text" and cntype[:4] != "appl"):
            raise IRCException("Content not retrieved: too large or unsupported content type")
        data = res.read(size)
        # Decode content: default to UTF-8, but honour an explicit charset=
        # parameter announced in the Content-Type header
        charset = "utf-8"
        if cntype is not None:
            lcharset = cntype.split(";")
            if len(lcharset) > 1:
                for c in lcharset:
                    ch = c.split("=")
                    if ch[0].strip().lower() == "charset" and len(ch) > 1:
                        cha = ch[1].split(".")
                        if len(cha) > 1:
                            charset = cha[1]
                        else:
                            charset = cha[0]
    except http.client.BadStatusLine:
        raise IRCException("Invalid HTTP response")
    finally:
conn.close()
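
    # 200 and 303 responses: return the decoded body; 301 and 302: follow the
    # Location header (recursively), unless it points back to the same URL;
    # any other status is reported as an error.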
    if res.status == http.client.OK or res.status == http.client.SEE_OTHER:
        return data.decode(charset).strip()
    elif ((res.status == http.client.FOUND or
           res.status == http.client.MOVED_PERMANENTLY) and
          res.getheader("Location") != url):
        return getURLContent(res.getheader("Location"), timeout=timeout)
    else:
        raise IRCException("An HTTP error occurred: %d - %s" %
                           (res.status, http.client.responses[res.status]))


def getXML(url, timeout=15):
    """Get the page content and return it as parsed XML
    Arguments:
    url -- the URL to get
    timeout -- maximum number of seconds to wait before raising an exception
    """

    cnt = getURLContent(url, timeout=timeout)
if cnt is None:
return None
    else:
        from nemubot.tools.xmlparser import parse_string
        return parse_string(cnt.encode())


def getJSON(url, timeout=15):
    """Get the page content and return it as parsed JSON
    Arguments:
    url -- the URL to get
    timeout -- maximum number of seconds to wait before raising an exception
    """
    import json

    cnt = getURLContent(url, timeout=timeout)
if cnt is None:
return None
else:
return json.loads(cnt)
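
# Example use of the fetch helpers from a module (the URL below is only an
# illustrative placeholder, not a real endpoint):
#   data = getJSON("http://api.example.org/search?q=nemubot", timeout=10)
#   if data is not None:
#       print(data)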


# Other utils
def striphtml(data):
"""Remove HTML tags from text
    Argument:
    data -- the string to strip
    """
    if not isinstance(data, str):
        return data

    try:
from html import unescape
except ImportError:
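        # Fallback for Python < 3.4, where html.unescape() does not exist:
        # decode numeric ("&#65;", "&#x41;") and named ("&amp;") character
        # references by hand.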
def _replace_charref(s):
s = s.group(1)
if s[0] == '#':
if s[1] in 'xX':
return chr(int(s[2:], 16))
else:
                    return chr(int(s[1:]))
else:
from html.entities import name2codepoint
return chr(name2codepoint[s])
# unescape exists from Python 3.4
def unescape(s):
if '&' not in s:
return s
import re
return re.sub('&([^;]+);', _replace_charref, s)
import re
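    # Strip tags, decode HTML entities, flatten newlines, then collapse runs
    # of spaces into a single space.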
r, _ = re.subn(r' +', ' ',
unescape(re.sub(r'<.*?>', '', data)).replace('\n', ' '))
return r