From b648706bfdee7f8cbd145953435f95777a9cd13a Mon Sep 17 00:00:00 2001
From: nasonfish
Date: Mon, 28 Jan 2013 00:00:38 -0700
Subject: [PATCH 1/5] Added a plugin to get data from snowy-evening.com with a regex when a link is sent.

---
 plugins/snowy-evening.py | 41 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 41 insertions(+)
 create mode 100644 plugins/snowy-evening.py

diff --git a/plugins/snowy-evening.py b/plugins/snowy-evening.py
new file mode 100644
index 0000000..a3c1e7a
--- /dev/null
+++ b/plugins/snowy-evening.py
@@ -0,0 +1,41 @@
+from util import hook, http, text
+import json, urllib2
+import re
+
+snowy_re = (r'(?:snowy-evening.com/)'
+            '([-_a-zA-Z0-9/]+)', re.I)
+
+
+def stripStrongTags(text):
+    return text.replace("<strong>", "").replace("</strong>", "")
+
+
+@hook.regex(*snowy_re)
+def snowy(match, nick="", reply=""):
+    try:
+
+        owner, name, id, nothing = match.group(1).split("/")
+        soup = http.get_soup("https://snowy-evening.com/%s/%s/%s" % (owner, name, id))
+        header = ''.join([unicode(tag) for tag in (soup.h2)])
+        header = header.replace("Issue Details", "").replace("#%s " % id, "")
+        info = soup.find_all('p', text=True)[0]
+        info = ''.join([unicode(tag) for tag in info])
+        if info.isspace():
+            info = "No details found"
+
+        reply("Project %s by %s: Issue #%s: %s - %s" % (name, owner, id, header, text.truncate_str(info, 150)))
+        stats = soup.find_all('ul', {'id':'stats'}, text=True)
+
+        priority, number, type, status, age, other = soup.find_all('strong', text=True)
+
+        data = {
+            "priority" : stripStrongTags(''.join(priority)),
+            "number" : stripStrongTags(''.join(number)),
+            "type" : stripStrongTags(''.join(type)),
+            "status" : stripStrongTags(''.join(status)),
+            "age" : stripStrongTags(''.join(age))
+        }
+        reply("This issue has a priority of \x02{priority}\x02. It's age is \x02{age}\x02 and it is a \x02{type}\x02. It's status is \x02{status}\x02.".format(**data))
+    except Exception:
+        reply("An error occured while trying to retrieve the data.")
+        raise
\ No newline at end of file

From 560f2e75163c9e52f4a44e891daeeea611081fa8 Mon Sep 17 00:00:00 2001
From: nasonfish
Date: Mon, 28 Jan 2013 00:09:59 -0700
Subject: [PATCH 2/5] cleanup, don't raise the error if you catch it

---
 plugins/snowy-evening.py | 15 ++++-----------
 1 file changed, 4 insertions(+), 11 deletions(-)

diff --git a/plugins/snowy-evening.py b/plugins/snowy-evening.py
index a3c1e7a..37787f3 100644
--- a/plugins/snowy-evening.py
+++ b/plugins/snowy-evening.py
@@ -13,21 +13,15 @@ def stripStrongTags(text):
 @hook.regex(*snowy_re)
 def snowy(match, nick="", reply=""):
     try:
-
-        owner, name, id, nothing = match.group(1).split("/")
+        owner, name, id, blankSpace = match.group(1).split("/")
         soup = http.get_soup("https://snowy-evening.com/%s/%s/%s" % (owner, name, id))
-        header = ''.join([unicode(tag) for tag in (soup.h2)])
-        header = header.replace("Issue Details", "").replace("#%s " % id, "")
-        info = soup.find_all('p', text=True)[0]
-        info = ''.join([unicode(tag) for tag in info])
+        header = ''.join([unicode(tag) for tag in (soup.h2)]).replace("Issue Details", "").replace("#%s " % id, "")
+        info = ''.join([unicode(tag) for tag in soup.p])
         if info.isspace():
            info = "No details found"
-
         reply("Project %s by %s: Issue #%s: %s - %s" % (name, owner, id, header, text.truncate_str(info, 150)))
         stats = soup.find_all('ul', {'id':'stats'}, text=True)
-
         priority, number, type, status, age, other = soup.find_all('strong', text=True)
-
         data = {
             "priority" : stripStrongTags(''.join(priority)),
             "number" : stripStrongTags(''.join(number)),
@@ -37,5 +31,4 @@ def snowy(match, nick="", reply=""):
         }
         reply("This issue has a priority of \x02{priority}\x02. It's age is \x02{age}\x02 and it is a \x02{type}\x02. It's status is \x02{status}\x02.".format(**data))
     except Exception:
-        reply("An error occured while trying to retrieve the data.")
-        raise
\ No newline at end of file
+        reply("An error occured while trying to retrieve the data.")
\ No newline at end of file

From 64974dc355f9f950aeb910612348cabf84336076 Mon Sep 17 00:00:00 2001
From: nasonfish
Date: Thu, 31 Jan 2013 08:02:27 -0700
Subject: [PATCH 3/5] more work on the snowy plugin.
---
 plugins/snowy-evening.py | 25 +++++++++++++++----------
 1 file changed, 15 insertions(+), 10 deletions(-)

diff --git a/plugins/snowy-evening.py b/plugins/snowy-evening.py
index 37787f3..62c1a19 100644
--- a/plugins/snowy-evening.py
+++ b/plugins/snowy-evening.py
@@ -1,6 +1,7 @@
 from util import hook, http, text
 import json, urllib2
 import re
+from bs4 import BeautifulSoup
 
 snowy_re = (r'(?:snowy-evening.com/)'
             '([-_a-zA-Z0-9/]+)', re.I)
@@ -13,22 +14,26 @@ def stripStrongTags(text):
 @hook.regex(*snowy_re)
 def snowy(match, nick="", reply=""):
     try:
-        owner, name, id, blankSpace = match.group(1).split("/")
-        soup = http.get_soup("https://snowy-evening.com/%s/%s/%s" % (owner, name, id))
-        header = ''.join([unicode(tag) for tag in (soup.h2)]).replace("Issue Details", "").replace("#%s " % id, "")
-        info = ''.join([unicode(tag) for tag in soup.p])
-        if info.isspace():
+
+        soup = http.get_soup("https://snowy-evening.com/" + match.group(1))
+        header = ''.join([unicode(tag) for tag in (soup.h2)]).replace("Issue Details", "")
+        print(tag for tag in soup.find_all('p'))
+        info = ''.join([unicode(tag) for tag in soup.find_all('p')[0]])
+        if info.isspace() or not info or info == "":
             info = "No details found"
-        reply("Project %s by %s: Issue #%s: %s - %s" % (name, owner, id, header, text.truncate_str(info, 150)))
+        stuff = soup.find_all('li', {'id':'proj-current'})[-1]
+        owner, name = stuff.find_all('a')[-1]['href'].replace("https://snowy-evening.com/", '').split('/')
+        reply("Project %s by %s: Issue %s - %s" % (name, owner, header, text.truncate_str(info, 150)))
         stats = soup.find_all('ul', {'id':'stats'}, text=True)
-        priority, number, type, status, age, other = soup.find_all('strong', text=True)
+        priority, number, type, status, age, name = soup.find_all('strong', text=True)[:6]
         data = {
             "priority" : stripStrongTags(''.join(priority)),
             "number" : stripStrongTags(''.join(number)),
             "type" : stripStrongTags(''.join(type)),
             "status" : stripStrongTags(''.join(status)),
-            "age" : stripStrongTags(''.join(age))
+            "age" : stripStrongTags(''.join(age)),
+            "name" : stripStrongTags(''.join(name))
         }
-        reply("This issue has a priority of \x02{priority}\x02. It's age is \x02{age}\x02 and it is a \x02{type}\x02. It's status is \x02{status}\x02.".format(**data))
+        reply("This issue has a priority of \x02{priority}\x02. It's age is \x02{age}\x02 and it is a \x02{type}\x02 and was created by \x02{name}\x02. It's status is \x02{status}\x02.".format(**data))
     except Exception:
-        reply("An error occured while trying to retrieve the data.")
\ No newline at end of file
+        raise
\ No newline at end of file

From 8d8260a2f7dcd85c6edb6967e44c2539cbd17908 Mon Sep 17 00:00:00 2001
From: Luke Rogers
Date: Thu, 7 Feb 2013 11:50:33 +1300
Subject: [PATCH 4/5] Edited Snowy Evening plugin.
---
 plugins/snowy-evening.py | 44 ++++++++++++++--------------------------
 1 file changed, 15 insertions(+), 29 deletions(-)

diff --git a/plugins/snowy-evening.py b/plugins/snowy-evening.py
index 62c1a19..333c23e 100644
--- a/plugins/snowy-evening.py
+++ b/plugins/snowy-evening.py
@@ -1,39 +1,25 @@
 from util import hook, http, text
 import json, urllib2
 import re
-from bs4 import BeautifulSoup
 
 snowy_re = (r'(?:snowy-evening.com/)'
             '([-_a-zA-Z0-9/]+)', re.I)
 
-
-def stripStrongTags(text):
-    return text.replace("<strong>", "").replace("</strong>", "")
-
 
 @hook.regex(*snowy_re)
 def snowy(match, nick="", reply=""):
-    try:
-
-        soup = http.get_soup("https://snowy-evening.com/" + match.group(1))
-        header = ''.join([unicode(tag) for tag in (soup.h2)]).replace("Issue Details", "")
-        print(tag for tag in soup.find_all('p'))
-        info = ''.join([unicode(tag) for tag in soup.find_all('p')[0]])
-        if info.isspace() or not info or info == "":
-            info = "No details found"
-        stuff = soup.find_all('li', {'id':'proj-current'})[-1]
-        owner, name = stuff.find_all('a')[-1]['href'].replace("https://snowy-evening.com/", '').split('/')
-        reply("Project %s by %s: Issue %s - %s" % (name, owner, header, text.truncate_str(info, 150)))
-        stats = soup.find_all('ul', {'id':'stats'}, text=True)
-        priority, number, type, status, age, name = soup.find_all('strong', text=True)[:6]
-        data = {
-            "priority" : stripStrongTags(''.join(priority)),
-            "number" : stripStrongTags(''.join(number)),
-            "type" : stripStrongTags(''.join(type)),
-            "status" : stripStrongTags(''.join(status)),
-            "age" : stripStrongTags(''.join(age)),
-            "name" : stripStrongTags(''.join(name))
-        }
-        reply("This issue has a priority of \x02{priority}\x02. It's age is \x02{age}\x02 and it is a \x02{type}\x02 and was created by \x02{name}\x02. It's status is \x02{status}\x02.".format(**data))
-    except Exception:
-        raise
\ No newline at end of file
+    owner, name, id, blankSpace = match.group(1).split("/")
+    soup = http.get_soup("https://snowy-evening.com/%s/%s/%s" % (owner, name, id))
+
+    header = soup.find('section', {'class': 'container'}).header.h2(text=True)[1].split(" ", 1)[1]
+    reply("Project {} by {}: Issue #{}: {}".format(name, owner, id, text.truncate_str(header, 150)))
+    stats = soup.find('ul', {'id':'stats'}).find_all('strong')
+    if len(stats) == 6:
+        priority, number, type, status, age, assignee = [i.contents[0].lower() for i in stats]
+    else:
+        priority, number, type, status, age = [i.contents[0].lower() for i in stats]
+
+    if status == "assigned":
+        reply("This issue has a priority of \x02{}\x02. It's age is \x02{}\x02 and it is a \x02{}\x02. This issue is \x02{}\x02 to \x02{}\x02.".format(priority, age, type, status, assignee))
+    else:
+        reply("This issue has a priority of \x02{}\x02. It's age is \x02{}\x02 and it is a \x02{}\x02. It's status is \x02{}\x02.".format(priority, age, type, status))
\ No newline at end of file

From 16210639bcb2a6000a0d2a36634311bd5e2a4fd6 Mon Sep 17 00:00:00 2001
From: Luke Rogers
Date: Thu, 7 Feb 2013 19:50:00 +1300
Subject: [PATCH 5/5] Fixed another issue.
---
 plugins/answers.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/plugins/answers.py b/plugins/answers.py
index b16c779..da846bc 100644
--- a/plugins/answers.py
+++ b/plugins/answers.py
@@ -9,6 +9,8 @@ def answer(inp):
 
     result = web.query(query, {"query": inp.strip()}).one()
 
     short_url = web.isgd(result["Link"])
-    answer = text.truncate_str(result["ChosenAnswer"], 80)
-    return u"\x02{}\x02 {} - {}".format(result["Subject"], answer, short_url)
\ No newline at end of file
+    # we split the answer and .join() it to remove newlines/extra spaces
+    answer = text.truncate_str(" ".join(result["ChosenAnswer"].split()), 80)
+
+    return u"\x02{}\x02 \"{}\" - {}".format(result["Subject"], answer, short_url)
\ No newline at end of file
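
A few standalone sketches of the techniques this series relies on follow; none of this code is part of the diffs above. The plugin is driven by the snowy_re pattern: @hook.regex fires snowy() whenever a snowy-evening.com issue link appears in chat, and match.group(1) is split on "/" to recover the owner, project and issue id. Below is a minimal sketch of that matching step using only the re module, outside the bot's hook framework; the example URL and names are illustrative, not taken from the patches.

import re

# Same pattern as snowy_re in the plugin, compiled directly with re.
snowy_re = re.compile(r'(?:snowy-evening.com/)' r'([-_a-zA-Z0-9/]+)', re.I)

# Hypothetical issue URL, purely for illustration.
match = snowy_re.search("https://snowy-evening.com/someowner/someproject/12/")
if match:
    # group(1) is "someowner/someproject/12/"; the trailing "/" yields an
    # empty fourth field, which is why the plugin unpacks a throwaway value
    # (called "nothing" and later "blankSpace" in the patches).
    owner, name, issue_id, _ = match.group(1).split("/")
    print("%s %s %s" % (owner, name, issue_id))  # -> someowner someproject 12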
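
PATCH 4/5 reads the issue's stats by collecting every <strong> tag inside the <ul id="stats"> element and lower-casing its text, with a sixth entry present only when the issue is assigned. Here is a rough self-contained sketch of that scraping step against a hand-written HTML fragment; the real markup served by snowy-evening.com is an assumption here, as are the sample values.

from bs4 import BeautifulSoup

# Hand-written stand-in for the stats list on an issue page.
html = """
<ul id="stats">
  <li><strong>High</strong></li>
  <li><strong>7</strong></li>
  <li><strong>Bug</strong></li>
  <li><strong>Assigned</strong></li>
  <li><strong>2 weeks</strong></li>
  <li><strong>someuser</strong></li>
</ul>
"""

soup = BeautifulSoup(html, "html.parser")
stats = soup.find('ul', {'id': 'stats'}).find_all('strong')
if len(stats) == 6:
    # a sixth <strong> only shows up when the issue is assigned to someone
    priority, number, issue_type, status, age, assignee = [i.contents[0].lower() for i in stats]
    print("priority=%s status=%s to %s" % (priority, status, assignee))
else:
    priority, number, issue_type, status, age = [i.contents[0].lower() for i in stats]
    print("priority=%s status=%s" % (priority, status))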
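
The comment added to answers.py in the last patch explains the whitespace cleanup: str.split() with no argument breaks the answer on any run of whitespace, including newlines, and " ".join() stitches the words back together with single spaces before truncation. A quick illustration with a made-up answer string:

# Collapse all runs of whitespace (spaces, tabs, newlines) to single spaces.
raw = "This is   the chosen\nanswer,\n\n  wrapped over several lines."
clean = " ".join(raw.split())
print(clean)  # -> This is the chosen answer, wrapped over several lines.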