CloudBot/disabled_stuff/recipe.py

import random

from util import hook, http, web

# omnidator converts a page's schema.org microdata to JSON for us
metadata_url = "http://omnidator.appspot.com/microdata/json/?url={}"

base_url = "http://www.cookstr.com"
search_url = base_url + "/searches"
random_url = search_url + "/surprise"
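
# /searches/surprise redirects to a random recipe page; the handlers below
# open it with http.open() and read page.geturl() to recover the final URL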

# set this to True to censor this plugin!
censor = True
phrases = [
    u"EAT SOME FUCKING \x02{}\x02",
    u"YOU WON'T NOT MAKE SOME FUCKING \x02{}\x02",
    u"HOW ABOUT SOME FUCKING \x02{}?\x02",
    u"WHY DON'T YOU EAT SOME FUCKING \x02{}?\x02",
    u"MAKE SOME FUCKING \x02{}\x02",
    u"INDUCE FOOD COMA WITH SOME FUCKING \x02{}\x02"
]
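
# e.g. random.choice(phrases).format("BEEF STEW") could produce
# u"EAT SOME FUCKING \x02BEEF STEW\x02" (\x02 toggles bold in IRC clients)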


def clean_key(i):
    # strip the schema prefix, e.g. "http://schema.org/Recipe#name" -> "name"
    return i.split("#")[1]


class ParseError(Exception):
    pass


def get_data(url):
    """ Uses the omnidator API to parse the metadata from the provided URL """
    try:
        omni = http.get_json(metadata_url.format(url))
    except (http.HTTPError, http.URLError) as e:
        raise ParseError(e)

    schemas = omni["@"]
    for d in schemas:
        if d["a"] == "<http://schema.org/Recipe>":
            data = {clean_key(key): value for (key, value) in d.iteritems()
                    if key.startswith("http://schema.org/Recipe")}
            return data

    raise ParseError("No recipe data found")
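
# For reference (response shape assumed from the parsing above), omnidator
# returns JSON shaped roughly like
#   {"@": [{"a": "<http://schema.org/Recipe>",
#           "http://schema.org/Recipe#name": "Beef Stew", ...}]}
# so get_data() here would return {"name": "Beef Stew", ...}.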


@hook.command(autohelp=False)
def recipe(inp):
    """recipe [term] - Gets a recipe for [term], or gets a random recipe if [term] is not provided"""
    if inp:
        # get the recipe URL by searching
        try:
            search = http.get_soup(search_url, query=inp.strip())
        except (http.HTTPError, http.URLError) as e:
            return "Could not get recipe: {}".format(e)

        # find the list of results
        result_list = search.find('div', {'class': 'found_results'})

        if result_list:
            results = result_list.find_all('div', {'class': 'recipe_result'})
        else:
            return "No results"

        # pick a random front page result
        result = random.choice(results)

        # extract the URL from the result
        url = base_url + result.find('div', {'class': 'image-wrapper'}).find('a')['href']
    else:
        # get a random recipe URL
        try:
            page = http.open(random_url)
        except (http.HTTPError, http.URLError) as e:
            return "Could not get recipe: {}".format(e)
        url = page.geturl()

    # use get_data() to get the recipe info from the URL
    try:
        data = get_data(url)
    except ParseError as e:
        return "Could not parse recipe: {}".format(e)

    name = data["name"].strip()
    return u"Try eating \x02{}!\x02 - {}".format(name, web.try_isgd(url))
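
# Example (hypothetical): ".recipe lasagna" would pick a random front-page
# search result and reply with something like
#   Try eating \x02Classic Lasagna!\x02 - http://is.gd/xxxxx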


@hook.command(autohelp=False)
def dinner(inp):
    """dinner - WTF IS FOR DINNER"""
    try:
        page = http.open(random_url)
    except (http.HTTPError, http.URLError) as e:
        return "Could not get recipe: {}".format(e)
    url = page.geturl()

    try:
        data = get_data(url)
    except ParseError as e:
        return "Could not parse recipe: {}".format(e)

    name = data["name"].strip().upper()
    text = random.choice(phrases).format(name)

    if censor:
        text = text.replace("FUCK", "F**K")

    return u"{} - {}".format(text, web.try_isgd(url))
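
# Example (hypothetical): with censor left on, ".dinner" might reply
#   MAKE SOME F**KING \x02BEEF STEW\x02 - http://is.gd/xxxxx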