nemubot/tools/web.py

# coding=utf-8
# Nemubot is a modulable IRC bot, built around XML configuration files.
# Copyright (C) 2012 Mercier Pierre-Olivier
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import http.client
import json
import re
import socket
from urllib.parse import quote
from urllib.parse import urlparse
from urllib.request import urlopen

import xmlparser


def isURL(url):
    """Return True if the URL can be parsed"""
    o = urlparse(url)
    # An URL is considered parseable as soon as at least one component
    # (scheme, netloc or path) is present
    return o.scheme != "" or o.netloc != "" or o.path != ""

def getScheme(url):
    """Return the protocol of a given URL"""
    o = urlparse(url)
    return o.scheme

def getHost(url):
    """Return the domain of a given URL"""
    return urlparse(url).netloc

def getPort(url):
    """Return the port of a given URL"""
    return urlparse(url).port

def getPath(url):
    """Return the page request of a given URL"""
    return urlparse(url).path

def getUser(url):
    """Return the username of a given URL"""
    return urlparse(url).username

def getPassword(url):
    """Return the password of a given URL"""
    return urlparse(url).password
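
# A quick sketch of what the wrappers above return (example.org is a
# placeholder host; the values follow from urllib.parse.urlparse):
#   >>> isURL("http://example.org/page")
#   True
#   >>> getScheme("http://example.org:8080/page?q=1")
#   'http'
#   >>> getHost("http://example.org:8080/page?q=1")
#   'example.org:8080'
#   >>> getPort("http://example.org:8080/page?q=1")
#   8080
#   >>> getPath("http://example.org:8080/page?q=1")
#   '/page'
#   >>> getUser("http://alice:secret@example.org/")
#   'alice'
#   >>> getPassword("http://alice:secret@example.org/")
#   'secret'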

# Get real pages

def getURLContent(url, timeout=15):
    """Return page content corresponding to URL or None if any error occurs"""
    o = urlparse(url)
    if o.netloc == "":
        # No netloc? The URL was probably given without a scheme: retry
        o = urlparse("http://" + url)
    if o.scheme == "http":
        conn = http.client.HTTPConnection(o.netloc, port=o.port,
                                          timeout=timeout)
    elif o.scheme == "https":
        conn = http.client.HTTPSConnection(o.netloc, port=o.port,
                                           timeout=timeout)
    elif o.scheme is None or o.scheme == "":
        conn = http.client.HTTPConnection(o.netloc, port=80, timeout=timeout)
    else:
        return None

    try:
        if o.query != '':
            conn.request("GET", o.path + "?" + o.query, None,
                         {"User-agent": "Nemubot v3"})
        else:
            conn.request("GET", o.path, None, {"User-agent": "Nemubot v3"})
    except socket.timeout:
        return None
    except socket.gaierror:
        print("<tools.web> Unable to receive page %s on %s from %s."
              % (o.path, o.netloc, url))
        return None

    try:
        res = conn.getresponse()
        size = int(res.getheader("Content-Length", 200000))
        cntype = res.getheader("Content-Type")
        # Reject too large pages and anything that is neither text/* nor
        # application/*
        if size > 200000 or cntype is None or (cntype[:4] != "text" and
                                               cntype[:4] != "appl"):
            return None
        data = res.read(size)
        # Decode content: look for a charset parameter in the Content-Type
        # header, falling back to UTF-8
        charset = "utf-8"
        lcharset = cntype.split(";")
        if len(lcharset) > 1:
            for c in lcharset:
                ch = c.split("=")
                if ch[0].strip().lower() == "charset" and len(ch) > 1:
                    cha = ch[1].split(".")
                    if len(cha) > 1:
                        charset = cha[1]
                    else:
                        charset = cha[0]
    except http.client.BadStatusLine:
        return None
    finally:
        conn.close()

    if res.status == http.client.OK or res.status == http.client.SEE_OTHER:
        return data.decode(charset)
    elif res.status == http.client.FOUND or res.status == http.client.MOVED_PERMANENTLY:
        # Follow the redirection given in the Location header
        return getURLContent(res.getheader("Location"), timeout)
    else:
        return None
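
# A minimal usage sketch for getURLContent (example.org is a placeholder
# host; any network or HTTP error simply yields None):
#
#   page = getURLContent("http://example.org/")
#   if page is not None:
#       print(page[:80])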

def getXML(url, timeout=15):
    """Get content page and return XML parsed content"""
    cnt = getURLContent(url, timeout)
    if cnt is None:
        return None
    else:
        return xmlparser.parse_string(cnt)
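
# Usage sketch for getXML (hypothetical feed URL; the return value is
# whatever nemubot's internal xmlparser.parse_string produces, or None
# when the page could not be fetched):
#
#   tree = getXML("http://example.org/feed.xml")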

def getJSON(url, timeout=15):
    """Get content page and return JSON content"""
    cnt = getURLContent(url, timeout)
    if cnt is None:
        return None
    else:
        # getURLContent already returns a decoded str, so no .decode() here
        return json.loads(cnt)
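
# Usage sketch for getJSON (hypothetical endpoint; the parsed object is
# typically a dict or a list, or None when the fetch failed):
#
#   data = getJSON("http://example.org/api/status.json")
#   if data is not None:
#       print(data.get("status"))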

# Other utils

def striphtml(data):
    """Remove HTML tags from text"""
    p = re.compile(r'<.*?>')
    # Strip tags, then rewrite a few HTML hex entities (parentheses and
    # double quote) into printable equivalents
    return p.sub('', data).replace("&#x28;", "/(").replace("&#x29;", ")/").replace("&#x22;", "\"")
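
# Example (doctest-style sketch; &#x28;, &#x29; and &#x22; are the only
# entities striphtml rewrites):
#   >>> striphtml("<b>Hello</b> &#x22;world&#x22;")
#   'Hello "world"'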