Merge pull request #73 from blha303/patch-5
Add autoresponse to some plugins, add rdio and newgrounds search/autoresponse
commit 536cfcb87c

4 changed files with 209 additions and 1 deletion
plugins/imdb.py

@@ -1,10 +1,26 @@
# IMDb lookup plugin by Ghetto Wizard (2011).
# IMDb lookup plugin by Ghetto Wizard (2011) and blha303 (2013)

from util import hook, http
import re
import json

id_re = re.compile("tt\d+")


def truncate(msg):
    # Keep only the first eight words; append "..." when the plot ran past
    # seven words.
    nmsg = msg.split(" ")
    out = None
    x = 0
    for i in nmsg:
        if x <= 7:
            if out:
                out = out + " " + nmsg[x]
            else:
                out = nmsg[x]
        x = x + 1
    if x <= 7:
        return out
    else:
        return out + "..."


@hook.command
def imdb(inp):
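As a quick aside, the new truncate() helper boils down to "first eight words, then an ellipsis". A self-contained sketch of the same idea (the plot string below is invented, not OMDb output):

# Invented example plot; mirrors what truncate() produces for long summaries.
plot = "A computer hacker learns from mysterious rebels about the true nature of his reality"

words = plot.split(" ")
short = " ".join(words[:8]) + ("..." if len(words) > 7 else "")
print(short)   # A computer hacker learns from mysterious rebels about...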
@@ -32,3 +48,26 @@ def imdb(inp):
        return out % content
    else:
        return 'Unknown error.'


imdb_re = (r'(.*:)//(imdb.com|www.imdb.com)(:[0-9]+)?(.*)', re.I)


@hook.regex(*imdb_re)
def imdb_url(match):
    id = match.group(4).split('/')[-1]
    if id == "":
        id = match.group(4).split('/')[-2]
    content = http.get_json("http://www.omdbapi.com/", i=id)
    if content.get('Error', None) == 'Movie not found!':
        return 'Movie not found!'
    elif content['Response'] == 'True':
        content['URL'] = 'http://www.imdb.com/title/%(imdbID)s' % content
        content['Plot'] = truncate(content['Plot'])
        out = '\x02%(Title)s\x02 (%(Year)s) (%(Genre)s): %(Plot)s'
        if content['Runtime'] != 'N/A':
            out += ' \x02%(Runtime)s\x02.'
        if content['imdbRating'] != 'N/A' and content['imdbVotes'] != 'N/A':
            out += ' \x02%(imdbRating)s/10\x02 with \x02%(imdbVotes)s\x02' \
                ' votes.'
        return out % content
    else:
        return 'Unknown error.'
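For context on the trailing-slash handling in imdb_url(), here is a standalone sketch of how the title ID comes out of the fourth capture group. The URL and the names pattern, parts, and movie_id are illustrative, not taken from the diff:

import re

# Same pattern as imdb_re above, compiled directly for a quick test.
pattern = re.compile(r'(.*:)//(imdb.com|www.imdb.com)(:[0-9]+)?(.*)', re.I)

m = pattern.match("http://www.imdb.com/title/tt0133093/")
parts = m.group(4).split('/')      # ['', 'title', 'tt0133093', '']
movie_id = parts[-1] or parts[-2]  # empty last piece -> use the one before it
print(movie_id)                    # tt0133093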
plugins/newgrounds.py  (new file, 32 lines)

@@ -0,0 +1,32 @@
import re
from util import hook, http
import json
from BeautifulSoup import BeautifulSoup
import urllib2


newgrounds_re = (r'(.*:)//(www.newgrounds.com|newgrounds.com)(:[0-9]+)?(.*)', re.I)
valid = set('0123456789')


def test(s):
    return set(s) <= valid


@hook.regex(*newgrounds_re)
def newgrounds_url(match):
    location = match.group(4).split("/")[-1]
    if not test(location):
        return "Not a valid Newgrounds portal ID. Example: http://www.newgrounds.com/portal/view/593993"
    try:
        urlobj = urllib2.urlopen("http://www.newgrounds.com/portal/view/" + location)
    except urllib2.HTTPError:
        return "\x034\x02Invalid response. Maybe Newgrounds is down for maintenance?"
    soup = BeautifulSoup(urlobj.read())
    try:
        title = soup.find('title').text
        author = soup.find('ul', {'class': 'authorlinks'}).find('img')['alt']
        rating = u"\x02%s\x02/\x025.0\x02" % soup.find('dd', {'class': 'star-variable'})['title'].split("Stars –")[0].strip()
        numofratings = soup.find('dd', {'class': 'star-variable'})['title'].split("Stars –")[1].replace("Votes", "").strip()
        views = soup.find('dl', {'class': 'contentdata'}).findAll('dd')[1].find('strong').text
        date = soup.find('dl', {'class': 'sidestats'}).find('dd').text
    except Exception:
        return "\x034\x02Could not find item information."
    return u"\x02%s\x02 - rated %s (%s) - \x02%s\x02 views - \x02%s\x02 on \x02%s\x02" % (title, rating, numofratings, views, author, date)
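A small aside on the test() helper above: it relies on the set-subset operator, so only all-digit strings pass. A standalone sketch, with made-up example inputs:

# Standalone copy of the plugin's digit check; example inputs are invented.
valid = set('0123456789')

def test(s):
    return set(s) <= valid

print(test("593993"))   # True  -> looks like a portal ID
print(test("audio"))    # False -> the handler replies with the usage hint
print(test(""))         # True  -> note: an empty path segment also passes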
plugins/rdio.py  (new file, 120 lines)

@@ -0,0 +1,120 @@
from util import hook
import oauth2 as oauth
import urllib, json

CONSUMER_KEY = "KEY"
CONSUMER_SECRET = "SECRET"


def getdata(inp, types):
    consumer = oauth.Consumer(CONSUMER_KEY, CONSUMER_SECRET)
    client = oauth.Client(consumer)
    response = client.request('http://api.rdio.com/1/', 'POST', urllib.urlencode({'method': 'search', 'query': inp, 'types': types, 'count': '1'}))
    data = json.loads(response[1])
    return data


def checkkeys():
    # True means the placeholder credentials have not been replaced yet.
    if CONSUMER_KEY == "KEY" or CONSUMER_SECRET == "SECRET":
        return True
    else:
        return False


@hook.command
def rdio(inp):
    """ rdio <search term> - alternatives: .rdiot (track), .rdioar (artist), .rdioal (album) """
    if checkkeys():
        return "This command requires an API key, please enter one in the config"
    data = getdata(inp, "Track,Album,Artist")
    try:
        info = data['result']['results'][0]
    except IndexError:
        return "No results."
    if 'name' in info:
        if 'artist' in info and 'album' in info:  # Track
            name = info['name']
            artist = info['artist']
            album = info['album']
            url = info['shortUrl']
            return u"\x02{}\x02 by \x02{}\x02 - {} {}".format(name, artist, album, url)
        elif 'artist' in info and not 'album' in info:  # Album
            name = info['name']
            artist = info['artist']
            url = info['shortUrl']
            return u"\x02{}\x02 by \x02{}\x02 - {}".format(name, artist, url)
        else:  # Artist
            name = info['name']
            url = info['shortUrl']
            return u"\x02{}\x02 {}".format(name, url)


@hook.command
def rdiot(inp):
    """ rdiot <search term> - Search for tracks on rdio """
    if checkkeys():
        return "This command requires an API key, please enter one in the config"
    data = getdata(inp, "Track")
    try:
        info = data['result']['results'][0]
    except IndexError:
        return "No results."
    name = info['name']
    artist = info['artist']
    album = info['album']
    url = info['shortUrl']
    return u"\x02{}\x02 by \x02{}\x02 - {} {}".format(name, artist, album, url)


@hook.command
def rdioar(inp):
    """ rdioar <search term> - Search for artists on rdio """
    if checkkeys():
        return "This command requires an API key, please enter one in the config"
    data = getdata(inp, "Artist")
    try:
        info = data['result']['results'][0]
    except IndexError:
        return "No results."
    name = info['name']
    url = info['shortUrl']
    return u"\x02{}\x02 {}".format(name, url)


@hook.command
def rdioal(inp):
    """ rdioal <search term> - Search for albums on rdio """
    if checkkeys():
        return "This command requires an API key, please enter one in the config"
    data = getdata(inp, "Album")
    try:
        info = data['result']['results'][0]
    except IndexError:
        return "No results."
    name = info['name']
    artist = info['artist']
    url = info['shortUrl']
    return u"\x02{}\x02 by \x02{}\x02 - {}".format(name, artist, url)


import re
import urllib2

rdio_re = (r'(.*:)//(rd.io|www.rdio.com|rdio.com)(:[0-9]+)?(.*)', re.I)


@hook.regex(*rdio_re)
def rdio_url(match):
    if checkkeys():
        return None
    url = match.group(1) + "//" + match.group(2) + match.group(4)
    consumer = oauth.Consumer(CONSUMER_KEY, CONSUMER_SECRET)
    client = oauth.Client(consumer)
    response = client.request('http://api.rdio.com/1/', 'POST', urllib.urlencode({'method': 'getObjectFromUrl', 'url': url}))
    data = json.loads(response[1])
    info = data['result']
    if 'name' in info:
        if 'artist' in info and 'album' in info:  # Track
            name = info['name']
            artist = info['artist']
            album = info['album']
            return u"Rdio track: \x02{}\x02 by \x02{}\x02 - {}".format(name, artist, album)
        elif 'artist' in info and not 'album' in info:  # Album
            name = info['name']
            artist = info['artist']
            return u"Rdio album: \x02{}\x02 by \x02{}\x02".format(name, artist)
        else:  # Artist
            name = info['name']
            return u"Rdio artist: \x02{}\x02".format(name)
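The branching in rdio() and rdio_url() keys off which fields are present in a result. A standalone sketch of that dispatch with a hand-made result dict (the field names come from the code above; the values are invented):

# Hand-made stand-in for one entry of data['result']['results'];
# only the keys the plugin inspects are included, values are invented.
info = {'name': 'Example Song',
        'artist': 'Example Artist',
        'album': 'Example Album',
        'shortUrl': 'http://rd.io/x/example'}

if 'artist' in info and 'album' in info:
    kind = 'track'    # both present -> "<name> by <artist> - <album> <url>"
elif 'artist' in info:
    kind = 'album'    # artist but no album field
else:
    kind = 'artist'   # only a name
print(kind)           # track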
plugins/youtube.py

@@ -83,3 +83,20 @@ def youtube(inp):
    video_id = request['data']['items'][0]['id']

    return get_video_description(video_id) + " - " + video_url % video_id


ytpl_re = (r'(.*:)//(www.youtube.com/playlist|youtube.com/playlist)(:[0-9]+)?(.*)', re.I)


@hook.regex(*ytpl_re)
def ytplaylist_url(match):
    location = match.group(4).split("=")[-1]
    try:
        soup = http.get_soup("https://www.youtube.com/playlist?list=" + location)
    except Exception:
        return "\x034\x02Invalid response."
    title = soup.find('title').text.split('-')[0].strip()
    author = soup.find('img', {'class': 'channel-header-profile-image'})['title']
    # numofratings and rating are computed here but are not included in the
    # line returned below.
    numofratings = int(soup.find('span', {'class': 'likes'}).text) + int(soup.find('span', {'class': 'dislikes'}).text)
    rating = (int(soup.find('span', {'class': 'likes'}).text) / numofratings) * 100 / 20
    numvideos = soup.find('ul', {'class': 'header-stats'}).findAll('li')[0].text.split(' ')[0]
    views = soup.find('ul', {'class': 'header-stats'}).findAll('li')[1].text.split(' ')[0]
    return u"\x02%s\x02 - \x02%s\x02 views - \x02%s\x02 videos - \x02%s\x02" % (title, views, numvideos, author)
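And the same kind of standalone check for the new playlist handler; the playlist ID below is a made-up placeholder:

import re

# Same pattern as ytpl_re above, compiled directly for a quick test.
pattern = re.compile(r'(.*:)//(www.youtube.com/playlist|youtube.com/playlist)(:[0-9]+)?(.*)', re.I)

m = pattern.match("https://www.youtube.com/playlist?list=PLEXAMPLE123")
location = m.group(4).split("=")[-1]
print(location)   # PLEXAMPLE123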