tidy cloudbot3/refresh code!

This commit is contained in:
Luke Rogers 2014-03-06 14:15:04 +13:00
parent f81cf21b57
commit 99e363cc99
12 changed files with 66 additions and 53 deletions

View File

@ -11,7 +11,7 @@ if sys.version_info < (3, 2, 0):
print("CloudBot3 requires Python 3.2 or newer.")
sys.exit(1)
# set up enviroment
# set up environment
os.chdir(sys.path[0] or '.') # do stuff relative to the install directory
# this is not the code you are looking for
@ -20,6 +20,7 @@ if os.path.exists(os.path.abspath('lib')):
print('CloudBot3 <http://git.io/cloudbotirc>')
def exit_gracefully(signum, frame):
# this doesn't really work at all
cloudbot.stop()
@ -42,7 +43,7 @@ while True:
else:
if cloudbot.do_restart:
# create a new bot thread and start it
# THIS DOES NOT WORK
# Todo: Make this work
del cloudbot
cloudbot = bot.Bot()
cloudbot.start()
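
The restart branch above is explicitly flagged as not working. A minimal sketch of how the signal handler and restart loop could fit together, assuming (as the diff suggests) that bot.Bot exposes start(), stop() and a do_restart flag; the import path is illustrative, not the project's actual main module:

import signal

from core import bot  # assumed import path for the Bot class shown above

cloudbot = bot.Bot()


def exit_gracefully(signum, frame):
    # ask the running bot to shut down cleanly on SIGINT (Ctrl-C)
    cloudbot.stop()


signal.signal(signal.SIGINT, exit_gracefully)

while True:
    cloudbot.start()  # blocks until the bot stops or asks for a restart
    if cloudbot.do_restart:
        # throw the old instance away and build a fresh one
        del cloudbot
        cloudbot = bot.Bot()
    else:
        break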

View File

@ -21,6 +21,7 @@ class Config(dict):
# start watcher
self.watcher()
def load_config(self):
"""(re)loads the bot config from the config file"""
if not os.path.exists(self.path):
@ -41,13 +42,11 @@ class Config(dict):
for conn in self.bot.connections:
conn.permissions.reload()
def save_config(self):
"""saves the contents of the config dict to the config file"""
json.dump(self, open(self.path, 'w'), sort_keys=True, indent=2)
self.logger.info("Config saved to file.")
def watcher(self):
"""starts the watchdog to automatically reload the config when it changes on disk"""
self.observer = Observer()
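
The watcher shown above uses the watchdog library's Observer. A minimal sketch of that reload-on-change pattern; the ConfigReloadHandler class and watcher() wiring here are illustrative, not taken from the diff:

import os

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer


class ConfigReloadHandler(FileSystemEventHandler):
    """calls config.load_config() whenever the watched file changes on disk"""

    def __init__(self, config):
        self.config = config

    def on_modified(self, event):
        if os.path.abspath(event.src_path) == os.path.abspath(self.config.path):
            self.config.load_config()


def watcher(config):
    # watch the directory containing the config file and start the observer thread
    observer = Observer()
    observer.schedule(ConfigReloadHandler(config), os.path.dirname(os.path.abspath(config.path)))
    observer.start()
    return observer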

View File

@ -73,7 +73,7 @@ class PluginLoader(object):
# remove plugins already loaded from this file
for name, data in self.bot.plugins.items():
self.bot.plugins[name] = [x for x in data
if x[0]._filename != filename]
# stop all currently running instances of the plugins from this file
for func, handler in list(self.bot.threads.items()):
@ -88,16 +88,15 @@ class PluginLoader(object):
if obj._thread:
self.bot.threads[obj] = main.Handler(self.bot, obj)
for type, data in obj._hook:
for plug_type, data in obj._hook:
# add plugin to the plugin list
self.bot.plugins[type] += [data]
self.bot.logger.info("Loaded plugin: {} ({})".format(format_plug(data), type))
self.bot.plugins[plug_type] += [data]
self.bot.logger.info("Loaded plugin: {} ({})".format(format_plug(data), plug_type))
# do a rebuild, unless the bot is loading all plugins (rebuild happens after load_all)
if not rebuild:
self.rebuild()
def unload_file(self, path):
"""unloads all loaded plugins from a specified file"""
filename = os.path.basename(path)
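
The reload logic keys plugins by hook type and remembers which file each hook came from. A standalone sketch of that bookkeeping; the registry layout is an assumption based on the comprehension above:

import os


def unload_file(plugins, path):
    """drop every hook whose function was loaded from the given source file

    plugins is assumed to map hook type -> list of (function, metadata) pairs,
    where each function carries a _filename attribute, as in the comprehension above.
    """
    filename = os.path.basename(path)
    for plug_type, data in plugins.items():
        plugins[plug_type] = [x for x in data
                              if getattr(x[0], '_filename', None) != filename]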

View File

@ -126,7 +126,7 @@ class Handler(object):
if uses_db:
# self.bot.logger.debug("Opened ST DB session for: {}".format(self.func._filename))
input.db = input.bot.db_session()
try:
run(self.bot, self.func, input)
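
input.db = input.bot.db_session() suggests a session factory opened per hook call. A sketch of that pattern assuming SQLAlchemy's sessionmaker; the engine URL and helper name are illustrative, not from the diff:

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

engine = create_engine("sqlite:///cloudbot.db")  # illustrative database URL
db_session = sessionmaker(bind=engine)


def run_with_db(func, input):
    # open a session for this hook call and always release it afterwards
    input.db = db_session()
    try:
        return func(input)
    finally:
        input.db.close()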

View File

@ -1,5 +1,6 @@
from fnmatch import fnmatch
class PermissionManager(object):
def __init__(self, bot, conn):
@ -13,7 +14,6 @@ class PermissionManager(object):
self.conn = conn
self.config = conn.config
self.group_perms = {}
self.group_users = {}
self.perm_users = {}
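
PermissionManager keeps group_perms, group_users and perm_users dictionaries, and the file now imports fnmatch, which points at hostmask-pattern matching. A self-contained sketch of that kind of check; the sample data and function name are illustrative:

from fnmatch import fnmatch

# illustrative layout: permission -> groups granting it, group -> hostmask patterns
perm_users = {"op": ["admins"]}
group_users = {"admins": ["*!*@trusted.example.com"]}


def has_perm_mask(user_mask, perm):
    """True if the user's hostmask matches any pattern of a group granting perm"""
    for group in perm_users.get(perm, []):
        for pattern in group_users.get(group, []):
            if fnmatch(user_mask.lower(), pattern.lower()):
                return True
    return False


print(has_perm_mask("luke!luke@trusted.example.com", "op"))  # True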

View File

@ -25,16 +25,17 @@ def sieve_suite(bot, input, func, kind, args):
_bucket = buckets[uid]
if _bucket.consume(MESSAGE_COST):
return input
pass
else:
print("pong!")
return None
disabled_commands = conn.config.get('disabled_commands', [])
if input.trigger in disabled_commands:
return None
return input
acl = conn.config.get('acls', {}).get(func.__name__)
if acl:
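
The _bucket.consume(MESSAGE_COST) call implies a per-user token bucket for rate limiting. A minimal sketch of such a bucket; the constants are illustrative, not the bot's real configuration:

import time

BUCKET_SIZE = 10.0   # maximum number of tokens a user can accumulate
REFILL_RATE = 1.0    # tokens regained per second
MESSAGE_COST = 5.0   # tokens spent per handled message


class TokenBucket:
    def __init__(self, size=BUCKET_SIZE, rate=REFILL_RATE):
        self.size = size
        self.rate = rate
        self.tokens = size
        self.updated = time.time()

    def consume(self, cost):
        # refill based on elapsed time, capped at the bucket size
        now = time.time()
        self.tokens = min(self.size, self.tokens + (now - self.updated) * self.rate)
        self.updated = now
        if self.tokens >= cost:
            self.tokens -= cost
            return True
        return False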

View File

@ -2,13 +2,19 @@
import http.cookiejar
import json
import urllib.request
import urllib.parse
import urllib.error
from urllib.parse import quote, quote_plus as _quote_plus
from bs4 import BeautifulSoup
from lxml import etree, html
from urllib.error import URLError, HTTPError
ua_cloudbot = 'Cloudbot/DEV http://github.com/CloudDev/CloudBot'
@ -92,7 +98,7 @@ def prepare_url(url, queries):
query = dict(urllib.parse.parse_qsl(query))
query.update(queries)
query = urllib.parse.urlencode(dict((to_utf8(key), to_utf8(value))
for key, value in query.items()))
url = urllib.parse.urlunsplit((scheme, netloc, path, query, fragment))
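
prepare_url merges extra query parameters into an existing URL before re-encoding it. A simplified, standalone version of that step (it drops the to_utf8 conversion used above):

import urllib.parse


def prepare_url(url, queries):
    scheme, netloc, path, query, fragment = urllib.parse.urlsplit(url)
    merged = dict(urllib.parse.parse_qsl(query))
    merged.update(queries)
    query = urllib.parse.urlencode(merged)
    return urllib.parse.urlunsplit((scheme, netloc, path, query, fragment))


print(prepare_url("http://example.com/search?q=irc", {"page": "2"}))
# http://example.com/search?q=irc&page=2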

View File

@ -112,15 +112,16 @@ def capitalize_first(line):
return ' '.join([s[0].upper() + s[1:] for s in line.split(' ')])
def multiword_replace(text, wordDic):
# TODO: rewrite to use a list of tuples
def multiword_replace(text, word_dic):
"""
take a text and replace words that match a key in a dictionary with
the associated value, return the changed text
"""
rc = re.compile('|'.join(map(re.escape, wordDic)))
rc = re.compile('|'.join(map(re.escape, word_dic)))
def translate(match):
return wordDic[match.group(0)]
return word_dic[match.group(0)]
return rc.sub(translate, text)
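
The renamed multiword_replace helper is small enough to show whole; for reference, a self-contained copy with a usage example:

import re


def multiword_replace(text, word_dic):
    """replace every key of word_dic found in text with its value, in a single pass"""
    rc = re.compile('|'.join(map(re.escape, word_dic)))

    def translate(match):
        return word_dic[match.group(0)]

    return rc.sub(translate, text)


print(multiword_replace("the quick brown fox", {"quick": "slow", "brown": "red"}))
# the slow red fox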

View File

@ -41,7 +41,7 @@ class TextGenerator(object):
return text
def generate_strings(self, amount, template=None):
def generate_strings(self, amount):
strings = []
for i in range(amount):
strings.append(self.generate_string())

View File

@ -1,5 +1,6 @@
from util import text
def format_time(seconds, count=3, accuracy=6, simple=False):
"""
Takes a length of time in seconds and returns a string describing that length of time.
@ -20,26 +21,26 @@ def format_time(seconds, count=3, accuracy=6, simple=False):
if simple:
periods = [
('c', 60 * 60 * 24 * 365 * 100),
('de', 60 * 60 * 24 * 365 * 10),
('y', 60 * 60 * 24 * 365),
('m', 60 * 60 * 24 * 30),
('d', 60 * 60 * 24),
('h', 60 * 60),
('m', 60),
('s', 1)
]
else:
periods = [
(('century', 'centuries'), 60 * 60 * 24 * 365 * 100),
(('decade', 'decades'), 60 * 60 * 24 * 365 * 10),
(('year', 'years'), 60 * 60 * 24 * 365),
(('month', 'months'), 60 * 60 * 24 * 30),
(('day', 'days'), 60 * 60 * 24),
(('hour', 'hours'), 60 * 60),
(('minute', 'minutes'), 60),
(('second', 'seconds'), 1)
]
periods = periods[-accuracy:]
@ -48,15 +49,15 @@ def format_time(seconds, count=3, accuracy=6, simple=False):
for period_name, period_seconds in periods:
if i < count:
if seconds > period_seconds:
period_value, seconds = divmod(seconds, period_seconds)
i += 1
if simple:
strings.append("{}{}".format(period_value, period_name))
else:
if period_value == 1:
strings.append("{} {}".format(period_value, period_name[0]))
else:
strings.append("{} {}".format(period_value, period_name[1]))
else:
break
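
The breakdown the loop performs is ordinary repeated divmod over the period table. A compact illustration with day/hour/minute/second periods (this sketch uses >= where the code above uses >):

seconds = 90061
parts = []
for name, length in [("d", 86400), ("h", 3600), ("m", 60), ("s", 1)]:
    if seconds >= length:
        value, seconds = divmod(seconds, length)
        parts.append("{}{}".format(value, name))
print(" ".join(parts))  # 1d 1h 1m 1s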

View File

@ -40,7 +40,9 @@ class Normalizer(object):
normalizers = (Normalizer(re.compile(
r'(?:https?://)?(?:[a-zA-Z0-9\-]+\.)?(?:amazon|amzn){1}\.(?P<tld>[a-zA-Z\.]{2,})\/(gp/(?:product|offer-listing|customer-media/product-gallery)/|exec/obidos/tg/detail/-/|o/ASIN/|dp/|(?:[A-Za-z0-9\-]+)/dp/)?(?P<ASIN>[0-9A-Za-z]{10})'),
lambda m: r'http://amazon.%s/dp/%s' % (m.group('tld'), m.group('ASIN'))),
Normalizer(re.compile(r'.*waffleimages\.com.*/([0-9a-fA-F]{40})'),
lambda m: r'http://img.waffleimages.com/%s' % m.group(1)),
@ -131,7 +133,7 @@ def normalize(url, assume_scheme=False):
if url.endswith("#") and query == "" and fragment == "":
path += "#"
normal_url = urllib.parse.urlunsplit((scheme, auth, path, query,
fragment)).replace("http:///", "http://")
for norm in normalizers:
m = norm.regex.match(normal_url)
if m:
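
The Amazon rule above rewrites any recognised product URL to a canonical /dp/ form. A trimmed, standalone sketch of that single rule (the pattern here covers only the plain and named /dp/ paths, not every alternative in the full regex):

import re

amazon = re.compile(r'(?:https?://)?(?:[a-zA-Z0-9\-]+\.)?(?:amazon|amzn)\.(?P<tld>[a-zA-Z\.]{2,})/'
                    r'(?:[A-Za-z0-9\-]+/)?dp/(?P<ASIN>[0-9A-Za-z]{10})')


def normalize_amazon(url):
    m = amazon.match(url)
    if m:
        return 'http://amazon.{}/dp/{}'.format(m.group('tld'), m.group('ASIN'))
    return url


print(normalize_amazon("https://www.amazon.com/Some-Product/dp/B000123456?tag=x"))
# http://amazon.com/dp/B000123456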

View File

@ -1,15 +1,18 @@
""" web.py - handy functions for web services """
import json
import urllib.request
import urllib.parse
import urllib.error
from . import http
from . import urlnorm
short_url = "http://is.gd/create.php"
paste_url = "http://hastebin.com"
class ShortenError(Exception):
def __init__(self, code, text):
self.code = code
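
For context, the is.gd endpoint above accepts a format=json parameter and returns either a shorturl field or an errorcode/errormessage pair. A minimal sketch of a shortener built on it, reusing the short_url constant and the ShortenError class from this file; error handling is deliberately simple:

import json
import urllib.parse
import urllib.request


def try_shorten(url):
    # ask is.gd for a JSON response and hand back the short link on success
    params = urllib.parse.urlencode({'format': 'json', 'url': url})
    with urllib.request.urlopen(short_url + '?' + params) as response:
        data = json.loads(response.read().decode())
    if 'shorturl' in data:
        return data['shorturl']
    raise ShortenError(data.get('errorcode'), data.get('errormessage'))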