Added random output, switched API URL
parent 97a3283eff
commit fb471bee17

3 changed files with 56 additions and 38 deletions
@@ -2,11 +2,11 @@ import random
 
 from util import hook, http, web
 
-METADATA_URL = "http://omnidator.appspot.com/microdata/json/?url={}"
+metadata_url = "http://omnidator.appspot.com/microdata/json/?url={}"
 
-BASE_URL = "http://www.cookstr.com"
-SEARCH_URL = BASE_URL + "/searches"
-RANDOM_URL = SEARCH_URL + "/surprise"
+base_url = "http://www.cookstr.com"
+search_url = base_url + "/searches"
+random_url = search_url + "/surprise"
 
 # set this to true to censor this plugin!
 censor = True
@@ -29,7 +29,7 @@ class ParseError(Exception):
 def get_data(url):
     """ Uses the omnidator API to parse the metadata from the provided URL """
     try:
-        omni = http.get_json(METADATA_URL.format(url))
+        omni = http.get_json(metadata_url.format(url))
     except (http.HTTPError, http.URLError) as e:
         raise ParseError(e)
     schemas = omni["@"]
@@ -47,7 +47,7 @@ def recipe(inp):
     if inp:
         # get the recipe URL by searching
         try:
-            search = http.get_soup(SEARCH_URL, query=inp.strip())
+            search = http.get_soup(search_url, query=inp.strip())
         except (http.HTTPError, http.URLError) as e:
             return "Could not get recipe: {}".format(e)
 
@@ -63,12 +63,12 @@ def recipe(inp):
         result = random.choice(results)
 
         # extract the URL from the result
-        url = BASE_URL + result.find('div', {'class': 'image-wrapper'}).find('a')['href']
+        url = base_url + result.find('div', {'class': 'image-wrapper'}).find('a')['href']
 
     else:
         # get a random recipe URL
         try:
-            page = http.open(RANDOM_URL)
+            page = http.open(random_url)
         except (http.HTTPError, http.URLError) as e:
             return "Could not get recipe: {}".format(e)
         url = page.geturl()
@@ -87,7 +87,7 @@ def recipe(inp):
 def dinner(inp):
     """dinner - WTF IS FOR DINNER"""
     try:
-        page = http.open(RANDOM_URL)
+        page = http.open(random_url)
     except (http.HTTPError, http.URLError) as e:
         return "Could not get recipe: {}".format(e)
     url = page.geturl()
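Note: the hunks above only lowercase the module-level constants in what looks like the cookstr recipe plugin; the request flow itself is unchanged. As a rough standalone sketch of that flow, using the Python 2 standard library instead of the bot's util.http wrapper (the endpoint URLs and the "@" key are taken from the plugin, the rest is illustrative):

    import json
    import urllib2

    base_url = "http://www.cookstr.com"
    random_url = base_url + "/searches/surprise"
    metadata_url = "http://omnidator.appspot.com/microdata/json/?url={}"

    # /searches/surprise redirects to a random recipe; geturl() gives the final URL
    page = urllib2.urlopen(random_url)
    recipe_url = page.geturl()

    # omnidator returns the page's schema.org microdata as JSON, keyed by "@"
    # (a real caller should URL-quote recipe_url before interpolating it)
    omni = json.load(urllib2.urlopen(metadata_url.format(recipe_url)))
    schemas = omni["@"]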
@@ -1,47 +1,66 @@
 import re
+import random
 
 from util import hook, http, text
 
 
-base_url = 'http://www.urbandictionary.com/iphone/search/define'
+base_url = 'http://api.urbandictionary.com/v0'
+define_url = base_url + "/define"
+random_url = base_url + "/random"
 
-@hook.command('u')
-@hook.command
+@hook.command('u', autohelp=False)
+@hook.command(autohelp=False)
 def urban(inp):
     """urban <phrase> [id] -- Looks up <phrase> on urbandictionary.com."""
 
-    # clean and split the input
-    inp = inp.lower().strip()
-    parts = inp.split()
-
-    # if the last word is a number, set the ID to that number
-    if parts[-1].isdigit():
-        id_num = int(parts[-1])
-        # remove the ID from the input string
-        del parts[-1]
-        inp = " ".join(parts)
-    else:
-        id_num = 1
-
-    # fetch the definitions
-    page = http.get_json(base_url, term=inp, referer="http://m.urbandictionary.com")
+    if inp:
+        # clean and split the input
+        inp = inp.lower().strip()
+        parts = inp.split()
+
+        # if the last word is a number, set the ID to that number
+        if parts[-1].isdigit():
+            id_num = int(parts[-1])
+            # remove the ID from the input string
+            del parts[-1]
+            inp = " ".join(parts)
+        else:
+            id_num = 1
+
+        # fetch the definitions
+        page = http.get_json(define_url, term=inp, referer="http://m.urbandictionary.com")
+
+        if page['result_type'] == 'no_results':
+            return 'Not found.'
+    else:
+        # get a random definition!
+        page = http.get_json(random_url, referer="http://m.urbandictionary.com")
+        id_num = None
 
     definitions = page['list']
 
-    if page['result_type'] == 'no_results':
-        return 'Not found.'
-
-    # try getting the requested definition
-    try:
-        definition = definitions[id_num - 1]['definition'].replace('\r\n', ' ')
-        definition = re.sub('\s+', ' ', definition).strip()  # remove excess spaces
-        definition = text.truncate_str(definition, 200)
-    except IndexError:
-        return 'Not found.'
-
-    url = definitions[id_num - 1]['permalink']
-
-    output = u"[%i/%i] %s :: %s" % \
-        (id_num, len(definitions), definition, url)
+    if id_num:
+        # try getting the requested definition
+        try:
+            definition = definitions[id_num - 1]['definition'].replace('\r\n', ' ')
+            definition = re.sub('\s+', ' ', definition).strip()  # remove excess spaces
+            definition = text.truncate_str(definition, 200)
+        except IndexError:
+            return 'Not found.'
+
+        url = definitions[id_num - 1]['permalink']
+
+        output = u"[%i/%i] %s :: %s" % \
+            (id_num, len(definitions), definition, url)
+    else:
+        definition = random.choice(definitions)
+
+        def_text = definition['definition'].replace('\r\n', ' ')
+        def_text = re.sub('\s+', ' ', def_text).strip()  # remove excess spaces
+        def_text = text.truncate_str(def_text, 200)
+
+        name = definition['word']
+        url = definition['permalink']
+        output = u"\x02{}\x02: {} :: {}".format(name, def_text, url)
 
     return output
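Note: the new Urban Dictionary endpoints and the JSON fields read above ('result_type', 'list', 'definition', 'word', 'permalink') come straight from the plugin; that they behave the same outside the bot is an assumption. A minimal standalone sketch of the lookup/random split, again using the Python 2 standard library in place of util.http, with urban_lookup being a made-up helper name:

    import json
    import random
    import urllib
    import urllib2

    base_url = 'http://api.urbandictionary.com/v0'
    define_url = base_url + "/define"
    random_url = base_url + "/random"

    def urban_lookup(term=None):
        # with a term we hit /define, otherwise /random (the new behaviour)
        if term:
            url = define_url + "?" + urllib.urlencode({"term": term})
        else:
            url = random_url
        # the plugin sends the same Referer header via util.http
        req = urllib2.Request(url, headers={"Referer": "http://m.urbandictionary.com"})
        page = json.load(urllib2.urlopen(req))
        if page.get('result_type') == 'no_results':
            return 'Not found.'
        entry = page['list'][0] if term else random.choice(page['list'])
        return u"\x02{}\x02: {}".format(entry['word'], entry['definition'])

    print urban_lookup("python")  # first definition for a term
    print urban_lookup()          # random definition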
@@ -34,7 +34,6 @@ def get_video_description(video_id):
     out += u' - length \x02{}\x02'.format(timeformat.format_time(length, simple=True))
 
     if 'ratingCount' in data:
-        # format
         likes = plural(int(data['likeCount']), "like")
         dislikes = plural(data['ratingCount'] - int(data['likeCount']), "dislike")
 