further pepping, spelling and other such things

Luke Rogers 2014-02-13 15:02:44 +13:00
parent 6cc7554cd8
commit cd5ae1d32b
12 changed files with 52 additions and 56 deletions

View file

@@ -15,11 +15,11 @@ def spell(inp):
     if len(inp.split(" ")) > 1:
         # input is a sentence
-        chkr = SpellChecker(locale)
-        chkr.set_text(inp)
+        checker = SpellChecker(locale)
+        checker.set_text(inp)

         offset = 0
-        for err in chkr:
+        for err in checker:
             # find the location of the incorrect word
             start = err.wordpos + offset
             finish = start + len(err.word)
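
For reference, the loop being renamed here is pyenchant's checker interface: SpellChecker walks the text and yields one error object per misspelled word. A minimal standalone sketch of that pattern, assuming pyenchant and an "en_US" dictionary are installed:

    # minimal sketch of the pyenchant checker loop, outside the bot
    from enchant.checker import SpellChecker

    def list_errors(text):
        checker = SpellChecker("en_US")
        checker.set_text(text)
        # each iteration yields one misspelled word with its position
        return [(err.word, err.wordpos, err.suggest()[:3]) for err in checker]

    print(list_errors("I havv a speling problem"))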

View file

@@ -56,7 +56,8 @@ def get_steam_info(url):
     data["price"] = soup.find('div', {'class': 'game_purchase_price price'}).text.strip()

-    return u"\x02{name}\x02: {desc}, \x02Genre\x02: {genre}, \x02Release Date\x02: {release date}, \x02Price\x02: {price}".format(**data)
+    return u"\x02{name}\x02: {desc}, \x02Genre\x02: {genre}, \x02Release Date\x02: {release date}," \
+           u" \x02Price\x02: {price}".format(**data)


 @hook.regex(*steam_re)
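
The new two-line return relies on Python joining adjacent string literals at compile time, so splitting the long line changes nothing at runtime. A tiny illustration with hypothetical keys:

    # adjacent literals concatenate before .format() ever runs
    out = u"Release Date: {date}," \
          u" Price: {price}".format(date="2014-02-13", price="$9.99")
    assert out == u"Release Date: 2014-02-13, Price: $9.99"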

View file

@@ -9,7 +9,7 @@ def stock(inp):
     query = "SELECT * FROM yahoo.finance.quote WHERE symbol=@symbol LIMIT 1"
     quote = web.query(query, {"symbol": sym}).one()

-    # if we dont get a company name back, the symbol doesn't match a company
+    # if we don't get a company name back, the symbol doesn't match a company
     if quote['Change'] is None:
         return "Unknown ticker symbol: {}".format(sym)

View file

@ -14,9 +14,9 @@ def title(inp):
except (http.HTTPError, http.URLError): except (http.HTTPError, http.URLError):
return "Could not fetch page." return "Could not fetch page."
title = soup.find('title').contents[0] page_title = soup.find('title').contents[0]
if not title: if not page_title:
return "Could not find title." return "Could not find title."
return u"{} [{}]".format(title, real_url) return u"{} [{}]".format(page_title, real_url)

View file

@@ -13,7 +13,6 @@ from util import hook, http

 base_url = "http://thetvdb.com/api/"
-api_key = "469B73127CA0C411"


 def get_zipped_xml(*args, **kwargs):
@@ -25,11 +24,11 @@ def get_zipped_xml(*args, **kwargs):
     return etree.parse(ZipFile(zip_buffer, "r").open(path))


-def get_episodes_for_series(seriesname, api_key):
+def get_episodes_for_series(series_name, api_key):
     res = {"error": None, "ended": False, "episodes": None, "name": None}
     # http://thetvdb.com/wiki/index.php/API:GetSeries
     try:
-        query = http.get_xml(base_url + 'GetSeries.php', seriesname=seriesname)
+        query = http.get_xml(base_url + 'GetSeries.php', seriesname=series_name)
     except URLError:
         res["error"] = "error contacting thetvdb.com"
         return res
@@ -63,7 +62,7 @@ def get_episode_info(episode, api_key):
     first_aired = episode.findtext("FirstAired")

     try:
-        airdate = datetime.date(*map(int, first_aired.split('-')))
+        air_date = datetime.date(*map(int, first_aired.split('-')))
     except (ValueError, TypeError):
         return None
@@ -79,7 +78,7 @@ def get_episode_info(episode, api_key):
         episode_desc = '{}'.format(episode_num)
     if episode_name:
         episode_desc += ' - {}'.format(episode_name)
-    return first_aired, airdate, episode_desc
+    return first_aired, air_date, episode_desc


 @hook.command
@@ -111,15 +110,15 @@ def tv_next(inp, bot=None):
         if ep_info is None:
             continue

-        (first_aired, airdate, episode_desc) = ep_info
+        (first_aired, air_date, episode_desc) = ep_info

-        if airdate > today:
+        if air_date > today:
             next_eps = ['{} ({})'.format(first_aired, episode_desc)]
-        elif airdate == today:
+        elif air_date == today:
             next_eps = ['Today ({})'.format(episode_desc)] + next_eps
         else:
-            #we're iterating in reverse order with newest episodes last
-            #so, as soon as we're past today, break out of loop
+            # we're iterating in reverse order with newest episodes last
+            # so, as soon as we're past today, break out of loop
             break

     if not next_eps:
@@ -158,9 +157,9 @@ def tv_last(inp, bot=None):
         if ep_info is None:
             continue

-        (first_aired, airdate, episode_desc) = ep_info
+        (first_aired, air_date, episode_desc) = ep_info

-        if airdate < today:
+        if air_date < today:
             #iterating in reverse order, so the first episode encountered
             #before today was the most recently aired
             prev_ep = '{} ({})'.format(first_aired, episode_desc)
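
Behind the renames, tv_next and tv_last share one idea: the episode list arrives sorted oldest to newest, so walking it in reverse finds the next upcoming or most recently aired episode and can stop early. A self-contained sketch with hypothetical episode data:

    # standalone sketch of the reverse walk in tv_next/tv_last (made-up data)
    import datetime

    episodes = [  # oldest to newest: (air_date, description)
        (datetime.date(2014, 2, 6), "S01E01"),
        (datetime.date(2014, 2, 13), "S01E02"),
        (datetime.date(2014, 2, 20), "S01E03"),
    ]
    today = datetime.date(2014, 2, 13)

    next_eps, prev_ep = [], None
    for air_date, episode_desc in reversed(episodes):
        if air_date > today:
            next_eps = ['{} ({})'.format(air_date, episode_desc)]
        elif air_date == today:
            next_eps = ['Today ({})'.format(episode_desc)] + next_eps
        else:
            # past today while walking backwards; everything earlier is older
            prev_ep = '{} ({})'.format(air_date, episode_desc)
            break

    print(next_eps)  # -> ['Today (S01E02)', '2014-02-20 (S01E03)']
    print(prev_ep)   # -> '2014-02-06 (S01E01)'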

View file

@@ -10,37 +10,36 @@ def urban(inp):
     """urban <phrase> [id] -- Looks up <phrase> on urbandictionary.com."""

     # clean and split the input
-    input = inp.lower().strip()
-    parts = input.split()
+    inp = inp.lower().strip()
+    parts = inp.split()

     # if the last word is a number, set the ID to that number
     if parts[-1].isdigit():
-        id = int(parts[-1])
+        id_num = int(parts[-1])
         # remove the ID from the input string
         del parts[-1]
-        input = " ".join(parts)
+        inp = " ".join(parts)
     else:
-        id = 1
+        id_num = 1

     # fetch the definitions
-    page = http.get_json(base_url, term=input, referer="http://m.urbandictionary.com")
-    defs = page['list']
-    print page
+    page = http.get_json(base_url, term=inp, referer="http://m.urbandictionary.com")
+    definitions = page['list']

     if page['result_type'] == 'no_results':
         return 'Not found.'

     # try getting the requested definition
     try:
-        definition = defs[id - 1]['definition'].replace('\r\n', ' ')
+        definition = definitions[id_num - 1]['definition'].replace('\r\n', ' ')
         definition = re.sub('\s+', ' ', definition).strip()  # remove excess spaces
         definition = text.truncate_str(definition, 200)
     except IndexError:
         return 'Not found.'

-    url = defs[id - 1]['permalink']
+    url = definitions[id_num - 1]['permalink']

     output = u"[%i/%i] %s :: %s" % \
-             (id, len(defs), definition, url)
+             (id_num, len(definitions), definition, url)

     return output
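
The id to id_num rename also stops shadowing the built-in id(); the surrounding parse, peeling an optional trailing number off the phrase, is a reusable pattern:

    # sketch of the optional trailing-ID parse used above
    def split_term_and_id(raw, default=1):
        parts = raw.lower().strip().split()
        if parts and parts[-1].isdigit():
            return " ".join(parts[:-1]), int(parts[-1])
        return " ".join(parts), default

    print(split_term_and_id("some phrase 3"))  # -> ('some phrase', 3)
    print(split_term_and_id("some phrase"))    # -> ('some phrase', 1)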

View file

@@ -7,7 +7,7 @@ import re

 colors = collections.OrderedDict([
     ('red', '\x0304'),
-    ('ornage', '\x0307'),
+    ('orange', '\x0307'),
     ('yellow', '\x0308'),
     ('green', '\x0309'),
     ('cyan', '\x0303'),
@@ -24,8 +24,8 @@ colors = collections.OrderedDict([
 strip_re = re.compile("(\x03|\x02|\x1f)(?:,?\d{1,2}(?:,\d{1,2})?)?", re.UNICODE)

-def strip(text):
-    return strip_re.sub('', text)
+def strip(string):
+    return strip_re.sub('', string)


 # basic text tools
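
strip_re matches an IRC colour/bold/underline control code plus any trailing colour digits, so strip() returns the bare text. A quick demonstration using the regex from above:

    # the strip helper applied to an IRC-formatted string
    import re

    strip_re = re.compile("(\x03|\x02|\x1f)(?:,?\d{1,2}(?:,\d{1,2})?)?", re.UNICODE)

    def strip(string):
        return strip_re.sub('', string)

    print(strip("\x0304red\x03 and \x02bold\x02"))  # -> 'red and bold'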

View file

@@ -20,7 +20,7 @@ def validate(inp):
     status = info['x-w3c-validator-status'].lower()
     if status in ("valid", "invalid"):
-        errorcount = info['x-w3c-validator-errors']
-        warningcount = info['x-w3c-validator-warnings']
+        error_count = info['x-w3c-validator-errors']
+        warning_count = info['x-w3c-validator-warnings']
         return "{} was found to be {} with {} errors and {} warnings." \
-               " see: {}".format(inp, status, errorcount, warningcount, url)
+               " see: {}".format(inp, status, error_count, warning_count, url)

View file

@@ -8,7 +8,7 @@ def vimeo_url(match):
                              % match.group(1))

     if info:
-        info[0]["duration"] = timeformat.timeformat(info[0]["duration"])
+        info[0]["duration"] = timeformat.format_time(info[0]["duration"])
         info[0]["stats_number_of_likes"] = format(
             info[0]["stats_number_of_likes"], ",d")
         info[0]["stats_number_of_plays"] = format(

View file

@@ -13,7 +13,7 @@ def weather(inp, reply=None, db=None, nick=None, bot=None, notice=None):
     if not api_key:
         return "Error: No wunderground API details."

-    # initalise weather DB
+    # initialise weather DB
     db.execute("create table if not exists weather(nick primary key, loc)")

     # if there is no input, try getting the users last location from the DB
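
The db.execute line lazily creates a one-row-per-nick location table. A standalone sqlite3 equivalent of that pattern (in-memory database and sample nick are made up for the demo):

    # sqlite3 sketch of the per-nick location table
    import sqlite3

    db = sqlite3.connect(":memory:")
    db.execute("create table if not exists weather(nick primary key, loc)")
    db.execute("insert or replace into weather(nick, loc) values (?, ?)",
               ("luke", "Auckland, NZ"))
    print(db.execute("select loc from weather where nick = ?",
                     ("luke",)).fetchone())  # -> ('Auckland, NZ',)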

View file

@@ -11,6 +11,6 @@ def answer(inp):
     short_url = web.try_isgd(result["Link"])

     # we split the answer and .join() it to remove newlines/extra spaces
-    answer = text.truncate_str(' '.join(result["ChosenAnswer"].split()), 80)
-    return u'\x02{}\x02 "{}" - {}'.format(result["Subject"], answer, short_url)
+    answer_text = text.truncate_str(' '.join(result["ChosenAnswer"].split()), 80)
+    return u'\x02{}\x02 "{}" - {}'.format(result["Subject"], answer_text, short_url)
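
The split/.join() idiom named in the comment collapses every run of whitespace, newlines included, into single spaces:

    # whitespace normalisation via split()/join()
    messy = "too   much\n whitespace\t here"
    print(' '.join(messy.split()))  # -> 'too much whitespace here'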

View file

@@ -14,10 +14,7 @@ video_url = "http://youtu.be/%s"

 def plural(num=0, text=''):
-    return "{:,} {}{}".format(num, text, "s"[num==1:])
+    return "{:,} {}{}".format(num, text, "s"[num == 1:])


 def get_video_description(video_id):
@@ -41,22 +38,23 @@ def get_video_description(video_id):
         likes = plural(int(data['likeCount']), "like")
         dislikes = plural(data['ratingCount'] - int(data['likeCount']), "dislike")

-        percent = 100 * float(data['likeCount'])/float(data['ratingCount'])
+        percent = 100 * float(data['likeCount']) / float(data['ratingCount'])
         out += u' - {}, {} (\x02{:.1f}\x02%)'.format(likes,
                                                      dislikes, percent)

     if 'viewCount' in data:
         views = data['viewCount']
-        out += u' - \x02{:,}\x02 view{}'.format(views, "s"[views==1:])
+        out += u' - \x02{:,}\x02 view{}'.format(views, "s"[views == 1:])

     try:
-        uploader = http.get_json(base_url + "users/{}?alt=json".format(data["uploader"]))["entry"]["author"][0]["name"]["$t"]
+        uploader = http.get_json(base_url + "users/{}?alt=json".format(data["uploader"]))["entry"]["author"][0]["name"][
+            "$t"]
     except:
         uploader = data["uploader"]

     upload_time = time.strptime(data['uploaded'], "%Y-%m-%dT%H:%M:%S.000Z")
     out += u' - \x02{}\x02 on \x02{}\x02'.format(uploader,
                                                  time.strftime("%Y.%m.%d", upload_time))

     if 'contentRating' in data:
         out += u' - \x034NSFW\x02'
@@ -88,7 +86,6 @@ def youtube(inp):
     return get_video_description(video_id) + u" - " + video_url % video_id


 @hook.command('ytime')
 @hook.command
 def youtime(inp):
@@ -119,8 +116,8 @@ def youtime(inp):
     total_text = timeformat.format_time(total, accuracy=8)

     return u'The video \x02{}\x02 has a length of {} and has been viewed {:,} times for ' \
-           'a total run time of {}!'.format(data['title'], length_text, views, \
-           total_text)
+           u'a total run time of {}!'.format(data['title'], length_text, views,
+                                             total_text)


 ytpl_re = (r'(.*:)//(www.youtube.com/playlist|youtube.com/playlist)(:[0-9]+)?(.*)', re.I)
@@ -135,6 +132,6 @@ def ytplaylist_url(match):
         return "\x034\x02Invalid response."
     title = soup.find('title').text.split('-')[0].strip()
     author = soup.find('img', {'class': 'channel-header-profile-image'})['title']
-    numvideos = soup.find('ul', {'class': 'header-stats'}).findAll('li')[0].text.split(' ')[0]
+    num_videos = soup.find('ul', {'class': 'header-stats'}).findAll('li')[0].text.split(' ')[0]
     views = soup.find('ul', {'class': 'header-stats'}).findAll('li')[1].text.split(' ')[0]
-    return u"\x02%s\x02 - \x02%s\x02 views - \x02%s\x02 videos - \x02%s\x02" % (title, views, numvideos, author)
+    return u"\x02%s\x02 - \x02%s\x02 views - \x02%s\x02 videos - \x02%s\x02" % (title, views, num_videos, author)