Merge pull request #1 from ClouDev/develop

this works? :/
This commit is contained in:
thenoodle68 2013-09-04 04:18:43 -07:00
commit caf45fbae2
4 changed files with 46 additions and 9 deletions

View file

@ -3,14 +3,21 @@ Thanks to everyone who has contributed to CloudBot! Come in IRC and ping me if I
Luke Rogers (lukeroge) Luke Rogers (lukeroge)
Neersighted Neersighted
blha303 blha303
KsaRedFx
urbels
cybojenix cybojenix
KsaRedFx
nathanblaney
thenoodle68
nasonfish
urbels
puffrfish
Sepero
TheFiZi
mikeleigh
Spudstabber Spudstabber
frozenMC frozenMC
frdmn frdmn
puffrfish
nasonfish
We are using code from the following projects: We are using code from the following projects:
./plugins/mlia.py - https://github.com/infinitylabs/UguuBot ./plugins/mlia.py - https://github.com/infinitylabs/UguuBot

View file

@ -19,7 +19,7 @@ def invite(paraml, conn=None):
# Identify to NickServ (or other service) # Identify to NickServ (or other service)
@hook.event('004') @hook.event('004')
def onjoin(conn=None, bot=None): def onjoin(paraml, conn=None, bot=None):
nickserv_password = conn.conf.get('nickserv_password', '') nickserv_password = conn.conf.get('nickserv_password', '')
nickserv_name = conn.conf.get('nickserv_name', 'nickserv') nickserv_name = conn.conf.get('nickserv_name', 'nickserv')
nickserv_command = conn.conf.get('nickserv_command', 'IDENTIFY %s') nickserv_command = conn.conf.get('nickserv_command', 'IDENTIFY %s')

View file

@ -1,4 +1,5 @@
from util import hook, http from util import hook, http, text
import re
api_url = "http://encyclopediadramatica.se/api.php?action=opensearch" api_url = "http://encyclopediadramatica.se/api.php?action=opensearch"
ed_url = "http://encyclopediadramatica.se/" ed_url = "http://encyclopediadramatica.se/"
@ -10,6 +11,7 @@ def drama(inp):
the Encyclopedia Dramatica article on <phrase>.""" the Encyclopedia Dramatica article on <phrase>."""
j = http.get_json(api_url, search=inp) j = http.get_json(api_url, search=inp)
if not j[1]: if not j[1]:
return "No results found." return "No results found."
article_name = j[1][0].replace(' ', '_').encode('utf8') article_name = j[1][0].replace(' ', '_').encode('utf8')
@ -20,8 +22,8 @@ def drama(inp):
for p in page.xpath('//div[@id="bodyContent"]/p'): for p in page.xpath('//div[@id="bodyContent"]/p'):
if p.text_content(): if p.text_content():
summary = " ".join(p.text_content().splitlines()) summary = " ".join(p.text_content().splitlines())
if len(summary) > 300: summary = re.sub("\[\d+\]", "", summary)
summary = summary[:summary.rfind(' ', 0, 300)] + "..." summary = text.truncate_str(summary, 220)
return "%s :: \x02%s\x02" % (summary, url) return "%s :: %s" % (summary, url)
return "Unknown Error." return "Unknown Error."

28
plugins/isup.py Normal file
View file

@ -0,0 +1,28 @@
import urlparse
from util import hook, http, urlnorm
@hook.command
def isup(inp):
    "isup <site> -- uses isup.me to see if <site> is up or not"

    # Pull the host out of whatever the user typed: if they gave a full URL
    # ("http://example.com/foo") the host is in the netloc ("auth") part; if
    # they gave a bare domain ("example.com") urlsplit puts it in the path.
    scheme, auth, path, query, fragment = urlparse.urlsplit(inp.strip())
    domain = auth.encode('utf-8') or path.encode('utf-8')
    # Normalized URL is only used for display in the replies below.
    url = urlnorm.normalize(domain, assume_scheme="http")

    try:
        soup = http.get_soup('http://isup.me/' + domain)
    except (http.HTTPError, http.URLError):
        # BUG FIX: the original `except http.HTTPError, http.URLError:` used
        # the py2 `except Type, name` form, which caught only HTTPError and
        # rebound it to the name `http.URLError`. A tuple catches both.
        return "Could not get status."

    # isup.me reports its verdict in the first <div> of the page.
    content = soup.find('div').text.strip()

    if "not just you" in content:
        return "It's not just you. {} looks \x02\x034down\x02\x0f from here!".format(url)
    elif "is up" in content:
        return "It's just you. {} is \x02\x033up\x02\x0f.".format(url)
    else:
        return "Huh? That doesn't look like a site on the interweb."