fixed urltools.py
This commit is contained in:
parent
31699c840b
commit
b3f8556fbc
1 changed file with 11 additions and 9 deletions
|
@ -4,7 +4,9 @@ from urllib2 import urlopen, Request, HTTPError
|
||||||
import re
|
import re
|
||||||
import BeautifulSoup
|
import BeautifulSoup
|
||||||
|
|
||||||
# Links whose titles we never announce: search/video front pages and
# pastebin services all yield noisy, useless titles.
ignored_urls = ["http://google.com", "http://youtube.com",
                "http://pastebin.com", "http://mibpaste.com",
                "http://fpaste.com"]
||||||
|
|
||||||
wordDic = {
|
wordDic = {
|
||||||
'"': '"',
|
'"': '"',
|
||||||
|
@ -23,6 +25,7 @@ wordDic = {
|
||||||
'$': '$',
|
'$': '$',
|
||||||
' ': ' '}
|
' ': ' '}
|
||||||
|
|
||||||
|
|
||||||
def parse(match):
|
def parse(match):
|
||||||
url = urlnorm.normalize(match.encode('utf-8'))
|
url = urlnorm.normalize(match.encode('utf-8'))
|
||||||
if url not in ignored_urls:
|
if url not in ignored_urls:
|
||||||
|
@ -33,24 +36,26 @@ def parse(match):
|
||||||
except:
|
except:
|
||||||
return "fail"
|
return "fail"
|
||||||
|
|
||||||
|
|
||||||
def multiwordReplace(text, wordDic):
    """Replace every occurrence of each *wordDic* key in *text* with its value.

    The keys are regex-escaped and joined into one alternation, so the whole
    substitution happens in a single pass over *text* — a replacement's output
    is never re-scanned for further matches.
    """
    pattern = re.compile('|'.join(re.escape(key) for key in wordDic))
    return pattern.sub(lambda match: wordDic[match.group(0)], text)
|
||||||
|
|
||||||
|
|
||||||
#@hook.regex(r'^(?#Protocol)(?:(?:ht|f)tp(?:s?)\:\/\/|~\/|\/)?(?#Username:Password)(?:\w+:\w+@)?(?#Subdomains)(?:(?:[-\w]+\.)+(?#TopLevel Domains)(?:com|org|net|gov|mil|biz|info|mobi|name|aero|jobs|museum|travel|[a-z]{2}))(?#Port)(?::[\d]{1,5})?(?#Directories)(?:(?:(?:\/(?:[-\w~!$+|.,=]|%[a-f\d]{2})+)+|\/)+|\?|#)?(?#Query)(?:(?:\?(?:[-\w~!$+|.,*:]|%[a-f\d{2}])+=?(?:[-\w~!$+|.,*:=]|%[a-f\d]{2})*)(?:&(?:[-\w~!$+|.,*:]|%[a-f\d{2}])+=?(?:[-\w~!$+|.,*:=]|%[a-f\d]{2})*)*)*(?#Anchor)(?:#(?:[-\w~!$+|.,*:=]|%[a-f\d]{2})*)?$')
|
@hook.regex(r'([a-zA-Z]://|www\.)?[^ ]+(\.[a-z]+)+')
|
||||||
#@hook.regex(r'([a-zA-Z]+://|www\.)[^ ]+')
|
def urlparser(match, say=None):
|
||||||
def urlparser(match, say = None):
|
|
||||||
print "[debug] URL found"
|
|
||||||
url = urlnorm.normalize(match.group().encode('utf-8'))
|
url = urlnorm.normalize(match.group().encode('utf-8'))
|
||||||
|
if url[:7] != "http://":
|
||||||
|
if url[:8] != "https://":
|
||||||
|
url = "http://" + url
|
||||||
for x in ignored_urls:
|
for x in ignored_urls:
|
||||||
if x in url:
|
if x in url:
|
||||||
return
|
return
|
||||||
title = parse(url)
|
title = parse(url)
|
||||||
if title == "fail":
|
if title == "fail":
|
||||||
print "[url] No title found"
|
|
||||||
return
|
return
|
||||||
title = multiwordReplace(title, wordDic)
|
title = multiwordReplace(title, wordDic)
|
||||||
realurl = http.get_url(url)
|
realurl = http.get_url(url)
|
||||||
|
@ -60,6 +65,3 @@ def urlparser(match, say = None):
|
||||||
else:
|
else:
|
||||||
say("(Link) %s [%s]" % (title, realurl))
|
say("(Link) %s [%s]" % (title, realurl))
|
||||||
return
|
return
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
Reference in a new issue