1
0
Fork 0

Use new web tool into various modules

This commit is contained in:
Némunaire 2012-11-04 16:26:20 +01:00
parent 6b6f5b7e0b
commit 9b38b21898
7 changed files with 96 additions and 137 deletions

View File

@@ -1,19 +1,13 @@
# coding=utf-8 # coding=utf-8
import http.client
import re
from urllib.parse import quote from urllib.parse import quote
import xmlparser from tools import web
class DDGSearch: class DDGSearch:
def __init__(self, terms): def __init__(self, terms):
self.terms = terms self.terms = terms
(res, page) = getPage(terms) self.ddgres = web.getXML("http://api.duckduckgo.com/?q=%s&format=xml" % quote(terms))
if res == http.client.OK or res == http.client.SEE_OTHER:
self.ddgres = xmlparser.parse_string(page)
else:
self.ddgres = None
@property @property
def type(self): def type(self):
@@ -55,7 +49,7 @@ class DDGSearch:
@property @property
def answer(self): def answer(self):
try: try:
return striphtml(self.ddgres.getFirstNode("Answer").getContent()) return web.striphtml(self.ddgres.getFirstNode("Answer").getContent())
except: except:
return None return None
@@ -68,22 +62,3 @@ class DDGSearch:
return None return None
except: except:
return None return None
def striphtml(data):
p = re.compile(r'<.*?>')
return p.sub('', data).replace("&#x28;", "/(").replace("&#x29;", ")/").replace("&#x22;", "\"")
def getPage(terms):
conn = http.client.HTTPConnection("api.duckduckgo.com", timeout=5)
try:
conn.request("GET", "/?q=%s&format=xml" % quote(terms))
except socket.gaierror:
print ("impossible de récupérer la page %s."%(p))
return (http.client.INTERNAL_SERVER_ERROR, None)
res = conn.getresponse()
data = res.read()
conn.close()
return (res.status, data)

View File

@@ -1,19 +1,22 @@
# coding=utf-8 # coding=utf-8
import http.client
import re
import socket
from urllib.parse import quote from urllib.parse import quote
import xmlparser from tools import web
class WFASearch: class WFASearch:
def __init__(self, terms): def __init__(self, terms):
self.terms = terms self.terms = terms
(res, page) = getPage(terms) try:
if res == http.client.OK: self.wfares = web.getXML("http://api.wolframalpha.com/v2/query?"
self.wfares = xmlparser.parse_string(page) "input=%s&appid=%s"
else: % (quote(terms),
CONF.getNode("wfaapi")["key"]))
except (TypeError, KeyError):
print ("You need a Wolfram|Alpha API key in order to use this "
"module. Add it to the module configuration file:\n<wfaapi"
" key=\"XXXXXX-XXXXXXXXXX\" />\nRegister at "
"http://products.wolframalpha.com/api/")
self.wfares = None self.wfares = None
@property @property
@@ -25,7 +28,9 @@ class WFASearch:
@property @property
def error(self): def error(self):
if self.wfares["error"] == "true": if self.wfares is None:
return "An error occurs during computation."
elif self.wfares["error"] == "true":
return "An error occurs during computation: " + self.wfares.getNode("error").getNode("msg").getContent() return "An error occurs during computation: " + self.wfares.getNode("error").getNode("msg").getContent()
elif self.wfares.hasNode("didyoumeans"): elif self.wfares.hasNode("didyoumeans"):
start = "Did you mean: " start = "Did you mean: "
@@ -62,21 +67,3 @@ class WFASearch:
yield node["title"] + " " + subnode["title"] + ": " + subnode.getFirstNode("plaintext").getContent() yield node["title"] + " " + subnode["title"] + ": " + subnode.getFirstNode("plaintext").getContent()
except IndexError: except IndexError:
pass pass
def getPage(terms):
conn = http.client.HTTPConnection("api.wolframalpha.com", timeout=15)
try:
conn.request("GET", "/v2/query?input=%s&appid=%s" % (quote(terms), CONF.getNode("wfaapi")["key"]))
except socket.gaierror:
print ("impossible de récupérer la page Wolfram|Alpha.")
return (http.client.INTERNAL_SERVER_ERROR, None)
except (TypeError, KeyError):
print ("You need a Wolfram|Alpha API key in order to use this module. Add it to the module configuration file:\n<wfaapi key=\"XXXXXX-XXXXXXXXXX\" />\nRegister at http://products.wolframalpha.com/api/")
return (http.client.INTERNAL_SERVER_ERROR, None)
res = conn.getresponse()
data = res.read()
conn.close()
return (res.status, data)

View File

@@ -1,10 +1,10 @@
# coding=utf-8 # coding=utf-8
import http.client
import re import re
import socket
from urllib.parse import quote from urllib.parse import quote
from tools import web
nemubotversion = 3.3 nemubotversion = 3.3
def help_tiny (): def help_tiny ():
@@ -38,8 +38,8 @@ def cmd_syno(msg):
def get_synos(word): def get_synos(word):
(res, page) = getPage(word) page = web.getURLContent("http://www.crisco.unicaen.fr/des/synonymes/%s" % quote(word))
if res == http.client.OK: if page is not None:
synos = list() synos = list()
for line in page.decode().split("\n"): for line in page.decode().split("\n"):
if re.match("[ \t]*<tr[^>]*>.*</tr>[ \t]*</table>.*", line) is not None: if re.match("[ \t]*<tr[^>]*>.*</tr>[ \t]*</table>.*", line) is not None:
@@ -48,29 +48,3 @@ def get_synos(word):
return synos return synos
else: else:
return None return None
def getPage(terms):
conn = http.client.HTTPConnection("www.crisco.unicaen.fr", timeout=5)
try:
conn.request("GET", "/des/synonymes/%s" % quote(terms))
except socket.gaierror:
print ("impossible de récupérer la page Wolfram|Alpha.")
return (http.client.INTERNAL_SERVER_ERROR, None)
res = conn.getresponse()
data = res.read()
conn.close()
return (res.status, data)
if __name__ == "__main__":
import sys
if len(sys.argv) == 0:
print ("Usage: ./syno.py word [word ...]")
else:
for word in sys.argv:
synos = get_synos(word)
if synos is not None:
print ("Synonyme de %s : %s" % (word, ', '.join(synos)))

View File

@@ -1,11 +1,8 @@
# coding=utf-8 # coding=utf-8
import http.client
import re import re
from xml.dom.minidom import parseString
from event import ModuleEvent from tools import web
from xmlparser.node import ModuleState
nemubotversion = 3.3 nemubotversion = 3.3
@@ -26,33 +23,18 @@ def help_full ():
return "!velib /number/ ...: gives available bikes and slots at the station /number/." return "!velib /number/ ...: gives available bikes and slots at the station /number/."
def getPage (s, p):
conn = http.client.HTTPConnection(s, timeout=10)
try:
conn.request("GET", p)
except socket.gaierror:
print ("[%s] impossible de récupérer la page %s."%(s, p))
return None
res = conn.getresponse()
data = res.read()
conn.close()
return (res.status, data)
def station_status(station): def station_status(station):
"""Gets available and free status of a given station""" """Gets available and free status of a given station"""
(st, page) = getPage(CONF.getNode("server")["ip"], CONF.getNode("server")["url"] + station) response = web.getXML(CONF.getNode("server")["url"] + station)
if st == http.client.OK: if response is not None:
response = parseString(page) available = response.getNode("available").getContent()
available = response.documentElement.getElementsByTagName("available") if available is not None and len(available) > 0:
if len(available) > 0: available = int(available)
available = int(available[0].childNodes[0].nodeValue)
else: else:
available = 0 available = 0
free = response.documentElement.getElementsByTagName("free") free = response.getNode("free").getContent()
if len(free) > 0: if free is not None and len(free) > 0:
free = int(free[0].childNodes[0].nodeValue) free = int(free)
else: else:
free = 0 free = 0
return (available, free) return (available, free)

View File

@@ -1,5 +1,5 @@
<?xml version="1.0" ?> <?xml version="1.0" ?>
<nemubotmodule name="velib"> <nemubotmodule name="velib">
<server ip="www.velib.paris.fr" url="/service/stationdetails/paris/" /> <server url="http://www.velib.paris.fr/service/stationdetails/paris/" />
<message type="cmd" name="velib" call="ask_stations" /> <message type="cmd" name="velib" call="ask_stations" />
</nemubotmodule> </nemubotmodule>

0
tools/__init__.py Normal file
View File

View File

@@ -18,18 +18,30 @@
import http.client import http.client
import re import re
import socket
from urllib.parse import quote from urllib.parse import quote
import xmlparser
# Parse URL
def parseURL(url): def parseURL(url):
"""Separate protocol, domain, port and page request""" """Separate protocol, domain, port and page request"""
res = re.match("^(([^:]+)://)?([^:/]+)(:([0-9]{1,5}))?(.*)$", url) res = re.match("^(([^:]+)://)?([^:/]+)(:([0-9]{1,5}))?(.*)$", url)
if res is not None: if res is not None:
port = res.group(5) if res.group(5) is not None:
if port is None and res.group(2) is not None: port = int(res.group(5))
elif res.group(2) is not None:
if res.group(2) == "http": if res.group(2) == "http":
port = 80 port = 80
elif res.group(2) == "https": elif res.group(2) == "https":
port = 443 port = 443
else:
print ("<tools.web> WARNING: unknown protocol %s"
% res.group(2))
port = 0
else:
port = 0
return (res.group(2), res.group(3), port, res.group(6)) return (res.group(2), res.group(3), port, res.group(6))
else: else:
return (None, None, None, None) return (None, None, None, None)
@@ -40,38 +52,67 @@ def getDomain(url):
return domain return domain
def getProtocol(url): def getProtocol(url):
"""Return the domain of a given URL""" """Return the protocol of a given URL"""
(protocol, domain, port, page) = parseURL(url) (protocol, domain, port, page) = parseURL(url)
return protocol return protocol
def getURL(url): def getPort(url):
"""Return the port of a given URL"""
(protocol, domain, port, page) = parseURL(url)
return port
def getRequest(url):
"""Return the page request of a given URL"""
(protocol, domain, port, page) = parseURL(url)
return page
# Get real pages
def getURLContent(url, timeout=15):
"""Return page content corresponding to URL or None if any error occurs""" """Return page content corresponding to URL or None if any error occurs"""
conn = http.client.HTTPConnection("api.duckduckgo.com", timeout=5) (protocol, domain, port, page) = parseURL(url)
if port == 0: port = 80
conn = http.client.HTTPConnection(domain, port=port, timeout=15)
try: try:
conn.request("GET", "/?q=%s&format=xml" % quote(terms)) conn.request("GET", page, None, {"User-agent": "Nemubot v3"})
except socket.gaierror: except socket.gaierror:
print ("impossible de récupérer la page %s."%(p)) print ("<tools.web> Unable to receive page %s from %s on %d."
return (http.client.INTERNAL_SERVER_ERROR, None) % (page, domain, port))
return None
res = conn.getresponse() res = conn.getresponse()
data = res.read() data = res.read()
conn.close() conn.close()
return (res.status, data)
if res.status == http.client.OK or res.status == http.client.SEE_OTHER:
return data
#TODO: follow redirections
else:
return None
def getXML(url, timeout=15):
"""Get content page and return XML parsed content"""
cnt = getURLContent(url, timeout)
if cnt is None:
return None
else:
return xmlparser.parse_string(cnt)
# Other utils
def striphtml(data):
"""Remove HTML tags from text"""
p = re.compile(r'<.*?>')
return p.sub('', data).replace("&#x28;", "/(").replace("&#x29;", ")/").replace("&#x22;", "\"")
# Tests when called alone
if __name__ == "__main__": if __name__ == "__main__":
content1 = "" print(parseURL("www.nemunai.re"))
with open("rss.php.1", "r") as f: print(parseURL("www.nemunai.re/?p0m"))
for line in f: print(parseURL("http://www.nemunai.re/?p0m"))
content1 += line print(parseURL("http://www.nemunai.re:42/?p0m"))
content2 = "" print(parseURL("www.nemunai.re:42/?p0m"))
with open("rss.php", "r") as f: print(parseURL("http://www.nemunai.re/?p0m"))
for line in f:
content2 += line
a = Atom (content1)
print (a.updated)
b = Atom (content2)
print (b.updated)
diff = a.diff (b)
print (diff)