diff --git a/.editorconfig b/.editorconfig deleted file mode 100644 index e044f79..0000000 --- a/.editorconfig +++ /dev/null @@ -1,18 +0,0 @@ -# CloudBot editor configuration normalization -# Copied from Drupal (GPL) -# @see http://editorconfig.org/ - -# This is the top-most .editorconfig file; do not search in parent directories. -root = true - -# All files. -[*] -end_of_line = LF -indent_style = space -indent_size = 4 - -# Not in the spec yet: -# @see https://github.com/editorconfig/editorconfig/wiki/EditorConfig-Properties -charset = utf-8 -trim_trailing_whitespace = true -insert_final_newline = true diff --git a/.gitignore b/.gitignore deleted file mode 100644 index 5fa7446..0000000 --- a/.gitignore +++ /dev/null @@ -1,15 +0,0 @@ -persist -config -config.ssl -gitflow -*.db -*.log -.*.swp -*.pyc -*.orig -.project -.geany -*.sublime-project -*.sublime-workspace -.idea/ -plugins/data/GeoLiteCity.dat diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 45407c1..0000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,55 +0,0 @@ -# How to contribute - -I like to encourage you to contribute to the repository. -This should be as easy as possible for you but there are a few things to consider when contributing. -The following guidelines for contribution should be followed if you want to submit a pull request. - -## TL;DR - -* Read [Github documentation](http://help.github.com/) and [Pull Request documentation](http://help.github.com/send-pull-requests/) -* Fork the repository -* Edit the files, add new files -* Check the files with [`pep8`](https://pypi.python.org/pypi/pep8), fix any reported errors -* Check that the files work as expected in CloudBot -* Create a new branch with a descriptive name for your feature (optional) -* Commit changes, push to your fork on GitHub -* Create a new pull request, provide a short summary of changes in the title line, with more information in the description field. 
-* After submitting the pull request, join the IRC channel (irc.esper.net #cloudbot) and paste a link to the pull request so people are aware of it -* After discussion, your pull request will be accepted or rejected. - -## How to prepare - -* You need a [GitHub account](https://github.com/signup/free) -* Submit an [issue ticket](https://github.com/ClouDev/CloudBot/issues) for your issue if the is no one yet. - * Describe the issue and include steps to reproduce if it's a bug. - * Ensure to mention the earliest version that you know is affected. -* If you are able and want to fix this, fork the repository on GitHub - -## Make Changes - -* In your forked repository, create a topic branch for your upcoming patch. (e.g. `feature--autoplay` or `bugfix--ios-crash`) - * Usually this is based on the develop branch. - * Create a branch based on master; `git branch - fix/develop/my_contribution develop` then checkout the new branch with `git - checkout fix/develop/my_contribution`. Please avoid working directly on the `develop` branch. -* Make sure you stick to the coding style that is used already. -* Make use of the [`.editorconfig`](http://editorconfig.org/) file. -* Make commits of logical units and describe them properly. -* Check for unnecessary whitespace with `git diff --check` before committing. -* Check your changes with [`pep8`](https://pypi.python.org/pypi/pep8). Ignore messages about line length. - -## Submit Changes - -* Push your changes to a topic branch in your fork of the repository. -* Open a pull request to the original repository and choose the right original branch you want to patch. - _Advanced users may use [`hub`](https://github.com/defunkt/hub#git-pull-request) gem for that._ -* If not done in commit messages (which you really should do) please reference and update your issue with the code changes. But _please do not close the issue yourself_. 
-_Notice: You can [turn your previously filed issues into a pull-request here](http://issue2pr.herokuapp.com/)._ -* Even if you have write access to the repository, do not directly push or merge pull-requests. Let another team member review your pull request and approve. - -# Additional Resources - -* [General GitHub documentation](http://help.github.com/) -* [GitHub pull request documentation](http://help.github.com/send-pull-requests/) -* [Read the Issue Guidelines by @necolas](https://github.com/necolas/issue-guidelines/blob/master/CONTRIBUTING.md) for more details -* [This CONTRIBUTING.md from here](https://github.com/anselmh/CONTRIBUTING.md) diff --git a/CONTRIBUTORS b/CONTRIBUTORS deleted file mode 100644 index 5495bc9..0000000 --- a/CONTRIBUTORS +++ /dev/null @@ -1,34 +0,0 @@ -Thanks to everyone who has contributed to CloudBot! Come in IRC and ping me if I forgot anyone. - -Luke Rogers (lukeroge) -Neersighted -blha303 -cybojenix -KsaRedFx -nathanblaney -thenoodle68 -nasonfish -urbels -puffrfish -Sepero -TheFiZi -mikeleigh -Spudstabber -frozenMC -frdmn - - - -We are using code from the following projects: -./plugins/mlia.py - https://github.com/infinitylabs/UguuBot -./plugins/horoscope.py - https://github.com/infinitylabs/UguuBot -color section in ./plugins/utility.py - https://github.com/hitzler/homero - -Special Thanks: -Rmmh (created skybot!) -lahwran (for his advice and stuff I stole from his skybot fork!) -TheNoodle (for helping with some plugins when I was first starting out) - -If any of your code is in here and you don't have credit, I'm sorry. I didn't keep track of a lot of code I added in the early days of the project. - -You are all awesome :) diff --git a/LICENSE b/LICENSE deleted file mode 100644 index 2f349f8..0000000 --- a/LICENSE +++ /dev/null @@ -1,186 +0,0 @@ -GNU GENERAL PUBLIC LICENSE - -Version 3, 29 June 2007 - -Copyright © 2007 Free Software Foundation, Inc. 
- -Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. - -Preamble - -The GNU General Public License is a free, copyleft license for software and other kinds of works. - -The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. - -When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. - -To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. - -For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. - -Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. 
- -For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. - -Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. - -Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. - -The precise terms and conditions for copying, distribution and modification follow. - -TERMS AND CONDITIONS - -0. Definitions. -“This License” refers to version 3 of the GNU General Public License. - -“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. - -“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. 
- -To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. - -A “covered work” means either the unmodified Program or a work based on the Program. - -To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. - -To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. - -An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. - -1. Source Code. -The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. - -A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. 
- -The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. - -The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. - -The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. - -The Corresponding Source for a work in source code form is that same work. - -2. Basic Permissions. -All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. 
The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. - -You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. - -Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. - -3. Protecting Users' Legal Rights From Anti-Circumvention Law. -No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. - -When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. - -4. Conveying Verbatim Copies. 
-You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. - -You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. - -5. Conveying Modified Source Versions. -You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: - -a) The work must carry prominent notices stating that you modified it, and giving a relevant date. -b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. -c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. -d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. 
-A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. - -6. Conveying Non-Source Forms. -You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: - -a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. -b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. -c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. 
-d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. -e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. -A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. - -A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. 
A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. - -“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. - -If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). - -The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. 
- -Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. - -7. Additional Terms. -“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. - -When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
- -Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: - -a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or -b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or -c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or -d) Limiting the use for publicity purposes of names of licensors or authors of the material; or -e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or -f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. -All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
- -If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. - -Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. - -8. Termination. -You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). - -However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. - -Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. - -Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. - -9. Acceptance Not Required for Having Copies. -You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. - -10. Automatic Licensing of Downstream Recipients. -Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. - -An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. - -You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. - -11. Patents. -A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's “contributor version”. - -A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. - -Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. - -In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. - -If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
“Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. - -If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. - -A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. - -Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. - -12. No Surrender of Others' Freedom. 
-If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. - -13. Use with the GNU Affero General Public License. -Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. - -14. Revised Versions of this License. -The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
- -If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. - -Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. - -15. Disclaimer of Warranty. -THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -16. Limitation of Liability. -IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -17. Interpretation of Sections 15 and 16. 
-If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. diff --git a/README.md b/README.md deleted file mode 100644 index 06d64ac..0000000 --- a/README.md +++ /dev/null @@ -1,96 +0,0 @@ -# CloudBot - -## About - -CloudBot is a Python IRC bot based on [Skybot](http://git.io/skybot) by [rmmh](http://git.io/rmmh). - -## Getting and using CloudBot - -### Download - -Get CloudBot at [https://github.com/ClouDev/CloudBot/zipball/develop](https://github.com/ClouDev/CloudBot/zipball/develop "Get CloudBot from Github!"). - -Unzip the resulting file, and continue to read this document. - -### Install - -Before you can run the bot, you need to install a few Python dependencies. LXML is required while Enchant and PyDNS are needed for several plugins. - - -These can be installed with `pip` (The Python package manager): - - [sudo] pip install -r requirements.txt - -If you use `pip`, you will also need the following packages on linux or `pip` will fail to install the requirements. - ```python, python-dev, libenchant-dev, libenchant1c2a, libxslt-dev, libxml2-dev.``` - -#### How to install `pip` - - curl -O http://python-distribute.org/distribute_setup.py # or download with your browser on windows - python distribute_setup.py - easy_install pip - -If you are unable to use pip, there are Windows installers for LXML available for [64 bit](https://pypi.python.org/packages/2.7/l/lxml/lxml-2.3.win-amd64-py2.7.exe) and [32 bit](https://pypi.python.org/packages/2.7/l/lxml/lxml-2.3.win32-py2.7.exe) versions of Python. - -### Run - -Before you run the bot, rename `config.default` to `config` and edit it with your preferred settings. 
- -Once you have installed the required dependencies and renamed the config file, you can run the bot! Make sure you are in the correct folder and run the following command: - -`python bot.py` - -On Windows you can usually just double-click `bot.py` to start the bot, as long as you have Python installed correctly. - -## Getting help with CloudBot - -### Documentation - -To configure your CloudBot, visit the [Config Wiki Page](http://git.io/cloudbotircconfig). - -To write your own plugins, visit the [Plugin Wiki Page](http://git.io/cloudbotircplugins). - -More at the [Wiki Main Page](http://git.io/cloudbotircwiki). - -(some of the information on the wiki is outdated and needs to be rewritten) - -### Support - -The developers reside in [#CloudBot](irc://irc.esper.net/cloudbot) on [EsperNet](http://esper.net) and would be glad to help you. - -If you think you have found a bug/have a idea/suggestion, please **open a issue** here on Github. - -### Requirements - -CloudBot runs on **Python** *2.7.x*. It is currently developed on **Windows** *8* with **Python** *2.7.5*. - -It **requires the Python module** lXML. -The module `Enchant` is needed for the spellcheck plugin. -The module `PyDNS` is needed for SRV record lookup in the mcping plugin. - -**Windows** users: Windows compatibility some plugins is **broken** (such as ping), but we do intend to add it. Eventually. - -## Example CloudBots - -You can find a number of example bots in [#CloudBot](irc://irc.esper.net/cloudbot "Connect via IRC to #CloudBot on irc.esper.net"). - -## License - -CloudBot is **licensed** under the **GPL v3** license. The terms are as follows. - - CloudBot - - Copyright © 2011-2013 Luke Rogers - - CloudBot is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. 
- - CloudBot is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with CloudBot. If not, see . diff --git a/cloudbot.py b/cloudbot.py deleted file mode 100755 index 91515db..0000000 --- a/cloudbot.py +++ /dev/null @@ -1,74 +0,0 @@ -#!/usr/bin/env python - -import os -import Queue -import sys -import time -import re - -sys.path += ['plugins', 'lib'] # add stuff to the sys.path for easy imports -os.chdir(sys.path[0] or '.') # do stuff relative to the install directory - - -class Bot(object): - pass - -print 'CloudBot DEV ' - -# create new bot object -bot = Bot() -bot.vars = {} - -# record start time for the uptime command -bot.start_time = time.time() - -print 'Begin Plugin Loading.' - -# bootstrap the reloader -eval(compile(open(os.path.join('core', 'reload.py'), 'U').read(), - os.path.join('core', 'reload.py'), 'exec')) -reload(init=True) - -config() -if not hasattr(bot, 'config'): - exit() - -print 'Connecting to IRC...' 
- -bot.conns = {} - -try: - for name, conf in bot.config['connections'].iteritems(): - # strip all spaces and capitalization from the connection name - name = name.replace(" ", "_") - name = re.sub('[^A-Za-z0-9_]+', '', name) - print 'Connecting to server: %s' % conf['server'] - if conf.get('ssl'): - bot.conns[name] = SSLIRC(name, conf['server'], conf['nick'], conf=conf, - port=conf.get('port', 6667), channels=conf['channels'], - ignore_certificate_errors=conf.get('ignore_cert', True)) - else: - bot.conns[name] = IRC(name, conf['server'], conf['nick'], conf=conf, - port=conf.get('port', 6667), channels=conf['channels']) -except Exception as e: - print 'ERROR: malformed config file', e - sys.exit() - -bot.persist_dir = os.path.abspath('persist') -if not os.path.exists(bot.persist_dir): - os.mkdir(bot.persist_dir) - -print 'Connection(s) made, starting main loop.' - -while True: - reload() # these functions only do things - config() # if changes have occured - - for conn in bot.conns.itervalues(): - try: - out = conn.out.get_nowait() - main(conn, out) - except Queue.Empty: - pass - while all(conn.out.empty() for conn in bot.conns.itervalues()): - time.sleep(.1) diff --git a/config.default b/config.default deleted file mode 100644 index 4bda1b0..0000000 --- a/config.default +++ /dev/null @@ -1,77 +0,0 @@ -{ - "connections": { - "hackint": { - "server": "irc.hackint.eu", - "nick": "antibot", - "user": "antibot", - "realname": "CloudBot - http://git.io/cloudbotirc", - "mode": "", - "_nickserv_password": "", - "-nickserv_user": "", - "channels": [ - "#ChaosChemnitz", - "#logbot" - ], - "invite_join": true, - "auto_rejoin": false, - "command_prefix": "." 
- } - }, - "disabled_plugins": [], - "disabled_commands": [], - "acls": {}, - "api_keys": { - "tvdb": "", - "wolframalpha": "", - "lastfm": "", - "rottentomatoes": "", - "soundcloud": "", - "twitter_consumer_key": "", - "twitter_consumer_secret": "", - "twitter_access_token": "", - "twitter_access_secret": "", - "wunderground": "", - "googletranslate": "", - "rdio_key": "", - "rdio_secret": "" - }, - "permissions": { - "admins": { - "perms": [ - "adminonly", - "addfactoid", - "delfactoid", - "ignore", - "botcontrol", - "permissions_users", - "op" - ], - "users": [ - "examplea!user@example.com", - "exampleb!user@example.com" - ] - }, - "moderators": { - "perms": [ - "addfactoid", - "delfactoid", - "ignore" - ], - "users": [ - "stummi!~Stummi@stummi.org" - ] - } - }, - "plugins": { - "factoids": { - "prefix": false - }, - "ignore": { - "ignored": [] - } - }, - "censored_strings": [ - "mypass", - "mysecret" - ] -} diff --git a/core/config.py b/core/config.py deleted file mode 100644 index c813ea5..0000000 --- a/core/config.py +++ /dev/null @@ -1,27 +0,0 @@ -import inspect -import json -import os - - -def save(conf): - json.dump(conf, open('config', 'w'), sort_keys=True, indent=2) - -if not os.path.exists('config'): - print "Please rename 'config.default' to 'config' to set up your bot!" - print "For help, see http://git.io/cloudbotirc" - print "Thank you for using CloudBot!" 
- sys.exit() - - -def config(): - # reload config from file if file has changed - config_mtime = os.stat('config').st_mtime - if bot._config_mtime != config_mtime: - try: - bot.config = json.load(open('config')) - bot._config_mtime = config_mtime - except ValueError, e: - print 'error: malformed config', e - - -bot._config_mtime = 0 diff --git a/core/db.py b/core/db.py deleted file mode 100644 index 6bdf8fa..0000000 --- a/core/db.py +++ /dev/null @@ -1,26 +0,0 @@ -import os -import sqlite3 -import thread - -threaddbs = {} - - -def get_db_connection(conn, name=''): - """returns an sqlite3 connection to a persistent database""" - - if not name: - name = '{}.db'.format(conn.name) - - threadid = thread.get_ident() - if name in threaddbs and threadid in threaddbs[name]: - return threaddbs[name][threadid] - filename = os.path.join(bot.persist_dir, name) - - db = sqlite3.connect(filename, timeout=10) - if name in threaddbs: - threaddbs[name][threadid] = db - else: - threaddbs[name] = {threadid: db} - return db - -bot.get_db_connection = get_db_connection diff --git a/core/irc.py b/core/irc.py deleted file mode 100644 index 40831e3..0000000 --- a/core/irc.py +++ /dev/null @@ -1,263 +0,0 @@ -import re -import socket -import time -import thread -import Queue - -from ssl import wrap_socket, CERT_NONE, CERT_REQUIRED, SSLError - - -def decode(txt): - for codec in ('utf-8', 'iso-8859-1', 'shift_jis', 'cp1252'): - try: - return txt.decode(codec) - except UnicodeDecodeError: - continue - return txt.decode('utf-8', 'ignore') - - -def censor(text): - text = text.replace('\n', '').replace('\r', '') - replacement = '[censored]' - if 'censored_strings' in bot.config: - if bot.config['censored_strings']: - words = map(re.escape, bot.config['censored_strings']) - regex = re.compile('({})'.format("|".join(words))) - text = regex.sub(replacement, text) - return text - - -class crlf_tcp(object): - """Handles tcp connections that consist of utf-8 lines ending with crlf""" - - def 
__init__(self, host, port, timeout=300): - self.ibuffer = "" - self.obuffer = "" - self.oqueue = Queue.Queue() # lines to be sent out - self.iqueue = Queue.Queue() # lines that were received - self.socket = self.create_socket() - self.host = host - self.port = port - self.timeout = timeout - - def create_socket(self): - return socket.socket(socket.AF_INET, socket.TCP_NODELAY) - - def run(self): - noerror = 0 - while 1: - try: - self.socket.connect((self.host, self.port)) - break - except socket.gaierror as e: - time.sleep(5) - except socket.timeout as e: - time.sleep(5) - - thread.start_new_thread(self.recv_loop, ()) - thread.start_new_thread(self.send_loop, ()) - - def recv_from_socket(self, nbytes): - return self.socket.recv(nbytes) - - def get_timeout_exception_type(self): - return socket.timeout - - def handle_receive_exception(self, error, last_timestamp): - print("Receive exception: %s" % (error)) - if time.time() - last_timestamp > self.timeout: - print("Receive timeout. Restart connection.") - self.iqueue.put(StopIteration) - self.socket.close() - return True - return False - - def handle_send_exception(self, error): - print("Send exception: %s" % (error)) - self.iqueue.put(StopIteration) - self.socket.close() - return True - - def recv_loop(self): - last_timestamp = time.time() - while True: - try: - data = self.recv_from_socket(4096) - self.ibuffer += data - if data: - last_timestamp = time.time() - else: - if time.time() - last_timestamp > self.timeout: - self.iqueue.put(StopIteration) - self.socket.close() - return - time.sleep(1) - except (self.get_timeout_exception_type(), socket.error) as e: - if self.handle_receive_exception(e, last_timestamp): - return - continue - except AttributeError: - return - - while '\r\n' in self.ibuffer: - line, self.ibuffer = self.ibuffer.split('\r\n', 1) - self.iqueue.put(decode(line)) - - def send_loop(self): - while True: - try: - line = self.oqueue.get().splitlines()[0][:500] - if line == StopIteration: - return - 
print ">>> %r" % line - self.obuffer += line.encode('utf-8', 'replace') + '\r\n' - while self.obuffer: - sent = self.socket.send(self.obuffer) - self.obuffer = self.obuffer[sent:] - - except socket.error as e: - self.handle_send_exception(e) - return - -class crlf_ssl_tcp(crlf_tcp): - """Handles ssl tcp connetions that consist of utf-8 lines ending with crlf""" - - def __init__(self, host, port, ignore_cert_errors, timeout=300): - self.ignore_cert_errors = ignore_cert_errors - crlf_tcp.__init__(self, host, port, timeout) - - def create_socket(self): - return wrap_socket(crlf_tcp.create_socket(self), server_side=False, - cert_reqs=CERT_NONE if self.ignore_cert_errors else - CERT_REQUIRED) - - def recv_from_socket(self, nbytes): - return self.socket.read(nbytes) - - def get_timeout_exception_type(self): - return SSLError - - def handle_receive_exception(self, error, last_timestamp): - # this is terrible - #if not "timed out" in error.args[0]: - # raise - return crlf_tcp.handle_receive_exception(self, error, last_timestamp) - - def handle_send_exception(self, error): - return crlf_tcp.handle_send_exception(self, error) - - -irc_prefix_rem = re.compile(r'(.*?) (.*?) (.*)').match -irc_noprefix_rem = re.compile(r'()(.*?) 
(.*)').match -irc_netmask_rem = re.compile(r':?([^!@]*)!?([^@]*)@?(.*)').match -irc_param_ref = re.compile(r'(?:^|(?<= ))(:.*|[^ ]+)').findall - - -class IRC(object): - """handles the IRC protocol""" - - def __init__(self, name, server, nick, port=6667, channels=[], conf={}): - self.name = name - self.channels = channels - self.conf = conf - self.server = server - self.port = port - self.nick = nick - self.history = {} - self.vars = {} - - self.out = Queue.Queue() # responses from the server are placed here - # format: [rawline, prefix, command, params, - # nick, user, host, paramlist, msg] - self.connect() - - thread.start_new_thread(self.parse_loop, ()) - - def create_connection(self): - return crlf_tcp(self.server, self.port) - - def connect(self): - self.conn = self.create_connection() - thread.start_new_thread(self.conn.run, ()) - self.set_pass(self.conf.get('server_password')) - self.set_nick(self.nick) - self.cmd("USER", - [conf.get('user', 'cloudbot'), "3", "*", conf.get('realname', - 'CloudBot - http://git.io/cloudbot')]) - - def parse_loop(self): - while True: - # get a message from the input queue - msg = self.conn.iqueue.get() - - if msg == StopIteration: - self.connect() - continue - - # parse the message - if msg.startswith(":"): # has a prefix - prefix, command, params = irc_prefix_rem(msg).groups() - else: - prefix, command, params = irc_noprefix_rem(msg).groups() - nick, user, host = irc_netmask_rem(prefix).groups() - mask = nick + "!" 
+ user + "@" + host - paramlist = irc_param_ref(params) - lastparam = "" - if paramlist: - if paramlist[-1].startswith(':'): - paramlist[-1] = paramlist[-1][1:] - lastparam = paramlist[-1] - # put the parsed message in the response queue - self.out.put([msg, prefix, command, params, nick, user, host, - mask, paramlist, lastparam]) - # if the server pings us, pong them back - if command == "PING": - self.cmd("PONG", paramlist) - - def set_pass(self, password): - if password: - self.cmd("PASS", [password]) - - def set_nick(self, nick): - self.cmd("NICK", [nick]) - - def join(self, channel): - """ makes the bot join a channel """ - self.send("JOIN {}".format(channel)) - if channel not in self.channels: - self.channels.append(channel) - - def part(self, channel): - """ makes the bot leave a channel """ - self.cmd("PART", [channel]) - if channel in self.channels: - self.channels.remove(channel) - - def msg(self, target, text): - """ makes the bot send a PRIVMSG to a target """ - self.cmd("PRIVMSG", [target, text]) - - def ctcp(self, target, ctcp_type, text): - """ makes the bot send a PRIVMSG CTCP to a target """ - out = u"\x01{} {}\x01".format(ctcp_type, text) - self.cmd("PRIVMSG", [target, out]) - - def cmd(self, command, params=None): - if params: - params[-1] = u':' + params[-1] - self.send(u"{} {}".format(command, ' '.join(params))) - else: - self.send(command) - - def send(self, str): - self.conn.oqueue.put(str) - - -class SSLIRC(IRC): - def __init__(self, name, server, nick, port=6667, channels=[], conf={}, - ignore_certificate_errors=True): - self.ignore_cert_errors = ignore_certificate_errors - IRC.__init__(self, name, server, nick, port, channels, conf) - - def create_connection(self): - return crlf_ssl_tcp(self.server, self.port, self.ignore_cert_errors) diff --git a/core/main.py b/core/main.py deleted file mode 100644 index 0054b0a..0000000 --- a/core/main.py +++ /dev/null @@ -1,195 +0,0 @@ -import thread -import traceback - - -thread.stack_size(1024 * 512) 
# reduce vm size - - -class Input(dict): - def __init__(self, conn, raw, prefix, command, params, - nick, user, host, mask, paraml, msg): - - chan = paraml[0].lower() - if chan == conn.nick.lower(): # is a PM - chan = nick - - def message(message, target=chan): - """sends a message to a specific or current channel/user""" - conn.msg(target, message) - - def reply(message, target=chan): - """sends a message to the current channel/user with a prefix""" - if target == nick: - conn.msg(target, message) - else: - conn.msg(target, u"({}) {}".format(nick, message)) - - def action(message, target=chan): - """sends an action to the current channel/user or a specific channel/user""" - conn.ctcp(target, "ACTION", message) - - def ctcp(message, ctcp_type, target=chan): - """sends an ctcp to the current channel/user or a specific channel/user""" - conn.ctcp(target, ctcp_type, message) - - def notice(message, target=nick): - """sends a notice to the current channel/user or a specific channel/user""" - conn.cmd('NOTICE', [target, message]) - - dict.__init__(self, conn=conn, raw=raw, prefix=prefix, command=command, - params=params, nick=nick, user=user, host=host, mask=mask, - paraml=paraml, msg=msg, server=conn.server, chan=chan, - notice=notice, message=message, reply=reply, bot=bot, - action=action, ctcp=ctcp, lastparam=paraml[-1]) - - # make dict keys accessible as attributes - def __getattr__(self, key): - return self[key] - - def __setattr__(self, key, value): - self[key] = value - - -def run(func, input): - args = func._args - - if 'inp' not in input: - input.inp = input.paraml - - if args: - if 'db' in args and 'db' not in input: - input.db = get_db_connection(input.conn) - if 'input' in args: - input.input = input - if 0 in args: - out = func(input.inp, **input) - else: - kw = dict((key, input[key]) for key in args if key in input) - out = func(input.inp, **kw) - else: - out = func(input.inp) - if out is not None: - input.reply(unicode(out)) - - -def do_sieve(sieve, bot, 
input, func, type, args): - try: - return sieve(bot, input, func, type, args) - except Exception: - print 'sieve error', - traceback.print_exc() - return None - - -class Handler(object): - """Runs plugins in their own threads (ensures order)""" - - def __init__(self, func): - self.func = func - self.input_queue = Queue.Queue() - thread.start_new_thread(self.start, ()) - - def start(self): - uses_db = 'db' in self.func._args - db_conns = {} - while True: - input = self.input_queue.get() - - if input == StopIteration: - break - - if uses_db: - db = db_conns.get(input.conn) - if db is None: - db = bot.get_db_connection(input.conn) - db_conns[input.conn] = db - input.db = db - - try: - run(self.func, input) - except: - import traceback - - traceback.print_exc() - - def stop(self): - self.input_queue.put(StopIteration) - - def put(self, value): - self.input_queue.put(value) - - -def dispatch(input, kind, func, args, autohelp=False): - for sieve, in bot.plugs['sieve']: - input = do_sieve(sieve, bot, input, func, kind, args) - if input is None: - return - - if not (not autohelp or not args.get('autohelp', True) or input.inp or not (func.__doc__ is not None)): - input.notice(input.conn.conf["command_prefix"] + func.__doc__) - return - - if func._thread: - bot.threads[func].put(input) - else: - thread.start_new_thread(run, (func, input)) - - -def match_command(command): - commands = list(bot.commands) - - # do some fuzzy matching - prefix = filter(lambda x: x.startswith(command), commands) - if len(prefix) == 1: - return prefix[0] - elif prefix and command not in prefix: - return prefix - - return command - - -def main(conn, out): - inp = Input(conn, *out) - command_prefix = conn.conf.get('command_prefix', '.') - - # EVENTS - for func, args in bot.events[inp.command] + bot.events['*']: - dispatch(Input(conn, *out), "event", func, args) - - if inp.command == 'PRIVMSG': - # COMMANDS - if inp.chan == inp.nick: # private message, no command prefix - prefix = 
'^(?:[{}]?|'.format(command_prefix) - else: - prefix = '^(?:[{}]|'.format(command_prefix) - - command_re = prefix + inp.conn.nick - command_re += r'[,;:]+\s+)(\w+)(?:$|\s+)(.*)' - - m = re.match(command_re, inp.lastparam) - - if m: - trigger = m.group(1).lower() - command = match_command(trigger) - - if isinstance(command, list): # multiple potential matches - input = Input(conn, *out) - input.notice("Did you mean {} or {}?".format - (', '.join(command[:-1]), command[-1])) - elif command in bot.commands: - input = Input(conn, *out) - input.trigger = trigger - input.inp_unstripped = m.group(2) - input.inp = input.inp_unstripped.strip() - - func, args = bot.commands[command] - dispatch(input, "command", func, args, autohelp=True) - - # REGEXES - for func, args in bot.plugs['regex']: - m = args['re'].search(inp.lastparam) - if m: - input = Input(conn, *out) - input.inp = m - - dispatch(input, "regex", func, args) diff --git a/core/reload.py b/core/reload.py deleted file mode 100644 index f1bfeb6..0000000 --- a/core/reload.py +++ /dev/null @@ -1,160 +0,0 @@ -import collections -import glob -import os -import re -import sys -import traceback - - -if 'mtimes' not in globals(): - mtimes = {} - -if 'lastfiles' not in globals(): - lastfiles = set() - - -def make_signature(f): - return f.func_code.co_filename, f.func_name, f.func_code.co_firstlineno - - -def format_plug(plug, kind='', lpad=0): - out = ' ' * lpad + '{}:{}:{}'.format(*make_signature(plug[0])) - if kind == 'command': - out += ' ' * (50 - len(out)) + plug[1]['name'] - - if kind == 'event': - out += ' ' * (50 - len(out)) + ', '.join(plug[1]['events']) - - if kind == 'regex': - out += ' ' * (50 - len(out)) + plug[1]['regex'] - - return out - - -def reload(init=False): - changed = False - - if init: - bot.plugs = collections.defaultdict(list) - bot.threads = {} - - core_fileset = set(glob.glob(os.path.join("core", "*.py"))) - - for filename in core_fileset: - mtime = os.stat(filename).st_mtime - if mtime != 
mtimes.get(filename): - mtimes[filename] = mtime - - changed = True - - try: - eval(compile(open(filename, 'U').read(), filename, 'exec'), - globals()) - except Exception: - traceback.print_exc() - if init: # stop if there's an error (syntax?) in a core - sys.exit() # script on startup - continue - - if filename == os.path.join('core', 'reload.py'): - reload(init=init) - return - - fileset = set(glob.glob(os.path.join('plugins', '*.py'))) - - # remove deleted/moved plugins - for name, data in bot.plugs.iteritems(): - bot.plugs[name] = [x for x in data if x[0]._filename in fileset] - - for filename in list(mtimes): - if filename not in fileset and filename not in core_fileset: - mtimes.pop(filename) - - for func, handler in list(bot.threads.iteritems()): - if func._filename not in fileset: - handler.stop() - del bot.threads[func] - - # compile new plugins - for filename in fileset: - mtime = os.stat(filename).st_mtime - if mtime != mtimes.get(filename): - mtimes[filename] = mtime - - changed = True - - try: - code = compile(open(filename, 'U').read(), filename, 'exec') - namespace = {} - eval(code, namespace) - except Exception: - traceback.print_exc() - continue - - # remove plugins already loaded from this filename - for name, data in bot.plugs.iteritems(): - bot.plugs[name] = [x for x in data - if x[0]._filename != filename] - - for func, handler in list(bot.threads.iteritems()): - if func._filename == filename: - handler.stop() - del bot.threads[func] - - for obj in namespace.itervalues(): - if hasattr(obj, '_hook'): # check for magic - if obj._thread: - bot.threads[obj] = Handler(obj) - - for type, data in obj._hook: - bot.plugs[type] += [data] - - if not init: - print '### new plugin (type: %s) loaded:' % \ - type, format_plug(data) - - if changed: - bot.commands = {} - for plug in bot.plugs['command']: - name = plug[1]['name'].lower() - if not re.match(r'^\w+$', name): - print '### ERROR: invalid command name "{}" ({})'.format(name, format_plug(plug)) - 
continue - if name in bot.commands: - print "### ERROR: command '{}' already registered ({}, {})".format(name, - format_plug(bot.commands[name]), - format_plug(plug)) - continue - bot.commands[name] = plug - - bot.events = collections.defaultdict(list) - for func, args in bot.plugs['event']: - for event in args['events']: - bot.events[event].append((func, args)) - - if init: - print ' plugin listing:' - - if bot.commands: - # hack to make commands with multiple aliases - # print nicely - - print ' command:' - commands = collections.defaultdict(list) - - for name, (func, args) in bot.commands.iteritems(): - commands[make_signature(func)].append(name) - - for sig, names in sorted(commands.iteritems()): - names.sort(key=lambda x: (-len(x), x)) # long names first - out = ' ' * 6 + '%s:%s:%s' % sig - out += ' ' * (50 - len(out)) + ', '.join(names) - print out - - for kind, plugs in sorted(bot.plugs.iteritems()): - if kind == 'command': - continue - print ' {}:'.format(kind) - for plug in plugs: - print format_plug(plug, kind=kind, lpad=6) - print diff --git a/disabled_stuff/attacks.py b/disabled_stuff/attacks.py deleted file mode 100644 index feb00b8..0000000 --- a/disabled_stuff/attacks.py +++ /dev/null @@ -1,72 +0,0 @@ -import random - -from util import hook - - -with open("plugins/data/larts.txt") as f: - larts = [line.strip() for line in f.readlines() - if not line.startswith("//")] - -with open("plugins/data/insults.txt") as f: - insults = [line.strip() for line in f.readlines() - if not line.startswith("//")] - -with open("plugins/data/flirts.txt") as f: - flirts = [line.strip() for line in f.readlines() - if not line.startswith("//")] - - -@hook.command -def lart(inp, action=None, nick=None, conn=None, notice=None): - """lart -- LARTs .""" - target = inp.strip() - - if " " in target: - notice("Invalid username!") - return - - # if the user is trying to make the bot slap itself, slap them - if target.lower() == conn.nick.lower() or target.lower() == "itself": - 
target = nick - - values = {"user": target} - phrase = random.choice(larts) - - # act out the message - action(phrase.format(**values)) - - -@hook.command -def insult(inp, nick=None, action=None, conn=None, notice=None): - """insult -- Makes the bot insult .""" - target = inp.strip() - - if " " in target: - notice("Invalid username!") - return - - if target == conn.nick.lower() or target == "itself": - target = nick - else: - target = inp - - out = 'insults {}... "{}"'.format(target, random.choice(insults)) - action(out) - - -@hook.command -def flirt(inp, action=None, conn=None, notice=None): - """flirt -- Make the bot flirt with .""" - target = inp.strip() - - if " " in target: - notice("Invalid username!") - return - - if target == conn.nick.lower() or target == "itself": - target = 'itself' - else: - target = inp - - out = 'flirts with {}... "{}"'.format(target, random.choice(flirts)) - action(out) diff --git a/disabled_stuff/brainfuck.py b/disabled_stuff/brainfuck.py deleted file mode 100644 index a7dc12e..0000000 --- a/disabled_stuff/brainfuck.py +++ /dev/null @@ -1,89 +0,0 @@ -"""brainfuck interpreter adapted from (public domain) code at -http://brainfuck.sourceforge.net/brain.py""" - -import re -import random - -from util import hook - - -BUFFER_SIZE = 5000 -MAX_STEPS = 1000000 - - -@hook.command('brainfuck') -@hook.command -def bf(inp): - """bf -- Executes as Brainfuck code.""" - - program = re.sub('[^][<>+-.,]', '', inp) - - # create a dict of brackets pairs, for speed later on - brackets = {} - open_brackets = [] - for pos in range(len(program)): - if program[pos] == '[': - open_brackets.append(pos) - elif program[pos] == ']': - if len(open_brackets) > 0: - brackets[pos] = open_brackets[-1] - brackets[open_brackets[-1]] = pos - open_brackets.pop() - else: - return 'unbalanced brackets' - if len(open_brackets) != 0: - return 'unbalanced brackets' - - # now we can start interpreting - ip = 0 # instruction pointer - mp = 0 # memory pointer - steps = 0 - 
memory = [0] * BUFFER_SIZE # initial memory area - rightmost = 0 - output = "" # we'll save the output here - - # the main program loop: - while ip < len(program): - c = program[ip] - if c == '+': - memory[mp] += 1 % 256 - elif c == '-': - memory[mp] -= 1 % 256 - elif c == '>': - mp += 1 - if mp > rightmost: - rightmost = mp - if mp >= len(memory): - # no restriction on memory growth! - memory.extend([0] * BUFFER_SIZE) - elif c == '<': - mp -= 1 % len(memory) - elif c == '.': - output += chr(memory[mp]) - if len(output) > 500: - break - elif c == ',': - memory[mp] = random.randint(1, 255) - elif c == '[': - if memory[mp] == 0: - ip = brackets[ip] - elif c == ']': - if memory[mp] != 0: - ip = brackets[ip] - - ip += 1 - steps += 1 - if steps > MAX_STEPS: - if output == '': - output = '(no output)' - output += '[exceeded {} iterations]'.format(MAX_STEPS) - break - - stripped_output = re.sub(r'[\x00-\x1F]', '', output) - - if stripped_output == '': - if output != '': - return 'no printable output' - return 'no output' - - return stripped_output[:430].decode('utf8', 'ignore') diff --git a/disabled_stuff/choose.py b/disabled_stuff/choose.py deleted file mode 100644 index f478328..0000000 --- a/disabled_stuff/choose.py +++ /dev/null @@ -1,18 +0,0 @@ -import re -import random - -from util import hook - - -@hook.command -def choose(inp): - """choose , [choice2], [choice3], [choice4], ... -- - Randomly picks one of the given choices.""" - - c = re.findall(r'([^,]+)', inp) - if len(c) == 1: - c = re.findall(r'(\S+)', inp) - if len(c) == 1: - return 'The decision is up to you!' - - return random.choice(c).strip() diff --git a/disabled_stuff/cleverbot.py b/disabled_stuff/cleverbot.py deleted file mode 100644 index 6604d8b..0000000 --- a/disabled_stuff/cleverbot.py +++ /dev/null @@ -1,121 +0,0 @@ -# from jessi bot -import urllib2 -import hashlib -import re -import unicodedata -from util import hook - -# these are just parts required -# TODO: Merge them. 
- -arglist = ['', 'y', '', '', '', '', '', '', '', '', 'wsf', '', - '', '', '', '', '', '', '', '0', 'Say', '1', 'false'] - -always_safe = ('ABCDEFGHIJKLMNOPQRSTUVWXYZ' - 'abcdefghijklmnopqrstuvwxyz' - '0123456789' '_.-') - -headers = {'X-Moz': 'prefetch', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', - 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1)Gecko/20100101 Firefox/7.0', - 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Referer': 'http://www.cleverbot.com', - 'Pragma': 'no-cache', 'Cache-Control': 'no-cache, no-cache', 'Accept-Language': 'en-us;q=0.8,en;q=0.5'} - -keylist = ['stimulus', 'start', 'sessionid', 'vText8', 'vText7', 'vText6', - 'vText5', 'vText4', 'vText3', 'vText2', 'icognoid', - 'icognocheck', 'prevref', 'emotionaloutput', 'emotionalhistory', - 'asbotname', 'ttsvoice', 'typing', 'lineref', 'fno', 'sub', - 'islearning', 'cleanslate'] - -MsgList = list() - - -def quote(s, safe='/'): # quote('abc def') -> 'abc%20def' - s = s.encode('utf-8') - s = s.decode('utf-8') - print "s= " + s - print "safe= " + safe - safe += always_safe - safe_map = dict() - for i in range(256): - c = chr(i) - safe_map[c] = (c in safe) and c or ('%%%02X' % i) - try: - res = map(safe_map.__getitem__, s) - except: - print "blank" - return '' - print "res= " + ''.join(res) - return ''.join(res) - - -def encode(keylist, arglist): - text = str() - for i in range(len(keylist)): - k = keylist[i] - v = quote(arglist[i]) - text += '&' + k + '=' + v - text = text[1:] - return text - - -def Send(): - data = encode(keylist, arglist) - digest_txt = data[9:29] - new_hash = hashlib.md5(digest_txt).hexdigest() - arglist[keylist.index('icognocheck')] = new_hash - data = encode(keylist, arglist) - req = urllib2.Request('http://www.cleverbot.com/webservicemin', - data, headers) - f = urllib2.urlopen(req) - reply = f.read() - return reply - - -def parseAnswers(text): - d = dict() - keys = ['text', 'sessionid', 'logurl', 'vText8', 'vText7', 
'vText6', - 'vText5', 'vText4', 'vText3', 'vText2', 'prevref', 'foo', - 'emotionalhistory', 'ttsLocMP3', 'ttsLocTXT', 'ttsLocTXT3', - 'ttsText', 'lineRef', 'lineURL', 'linePOST', 'lineChoices', - 'lineChoicesAbbrev', 'typingData', 'divert'] - values = text.split('\r') - i = 0 - for key in keys: - d[key] = values[i] - i += 1 - return d - - -def ask(inp): - arglist[keylist.index('stimulus')] = inp - if MsgList: - arglist[keylist.index('lineref')] = '!0' + str(len( - MsgList) / 2) - asw = Send() - MsgList.append(inp) - answer = parseAnswers(asw) - for k, v in answer.iteritems(): - try: - arglist[keylist.index(k)] = v - except ValueError: - pass - arglist[keylist.index('emotionaloutput')] = str() - text = answer['ttsText'] - MsgList.append(text) - return text - - -@hook.command("cb") -def cleverbot(inp, reply=None): - reply(ask(inp)) - - -''' # TODO: add in command to control extra verbose per channel -@hook.event('PRIVMSG') -def cbevent(inp, reply=None): - reply(ask(inp)) - -@hook.command("cbver", permissions=['cleverbot']) -def cleverbotverbose(inp, notice=None): - if on in input -''' diff --git a/disabled_stuff/cloudbot.sh b/disabled_stuff/cloudbot.sh deleted file mode 100644 index 877c4ea..0000000 --- a/disabled_stuff/cloudbot.sh +++ /dev/null @@ -1,126 +0,0 @@ -#!/bin/bash -echo "" -echo " ________ ______ __ " -echo " / ____/ /___ __ ______/ / __ )____ / /_" -echo " / / / / __ \/ / / / __ / __ / __ \/ __/" -echo "/ /___/ / /_/ / /_/ / /_/ / /_/ / /_/ / /_ " -echo "\____/_/\____/\__,_/\__,_/_____/\____/\__/ " -echo " http://git.io/cloudbotirc by ClouDev " -echo "" -locatefiles() { - botfile="/bot.py" - botfile=$(pwd)$botfile - logfile="/bot.log" - logfile=$(pwd)$logfile -} - -running() { - if [[ $(ps aux|grep bot.py|grep -v grep|grep -v daemon|grep -v SCREEN) != "" ]]; then - true - else - false - fi -} - -checkbackend() { - if dpkg -l| grep ^ii|grep daemon|grep 'turns other' > /dev/null; then - backend="daemon" - elif dpkg -l| grep ^ii|grep screen|grep 'terminal 
multi' > /dev/null; then - backend="screen" - else - backend="manual" - fi - return 0 -} - -setcommands() { - status() { - if running; then - echo "CloudBot is running!" - else - echo "CloudBot is not running!" - fi - } - clear() { - : > $logfile - } - if [ "$backend" == "daemon" ]; then - start() { - daemon -r -n cloudbot -O $logfile python $botfile - } - stop() { - daemon -n cloudbot --stop - } - elif [ "$backend" == "screen" ]; then - start() { - screen -d -m -S cloudbot -t cloudbot python $botfile > $logfile 2>&1 - } - stop() { - pid=`ps ax|grep -v grep|grep python|grep -v SCREEN|grep $botfile|awk '{print $1}'` - kill $pid - } - elif [ "$backend" == "manual" ]; then - start() { - $botfile - } - stop() { - pid=`ps ax|grep -v grep|grep python|grep $botfile|awk '{print $1}'` - kill $pid - } - fi -} - -processargs() { - case $1 in - start|-start|--start) - if running; then - echo "Cannot start! Bot is already running!" - exit 1 - else - echo "Starting CloudBot... ($backend)" - start - fi - ;; - stop|-stop|--stop) - if running; then - echo "Stopping CloudBot... ($backend)" - stop - else - echo "Cannot stop! Bot is not already running!" - exit 1 - fi - ;; - restart|-restart|--restart) - if running; then - echo "Restarting CloudBot... ($backend)" - stop - sleep 3 - start - else - echo "Cannot restart! Bot is not already running!" - exit 1 - fi - ;; - clear|-clear|--clear) - echo "Clearing logs..." 
- clear - ;; - status|-status|--status) - status - ;; - *) - usage="usage: ./cloudbot {start|stop|restart|clear|status}" - echo $usage - ;; - esac -} - -main() { - locatefiles - checkbackend - setcommands - processargs $1 -} - -main $* -exit 0 \ No newline at end of file diff --git a/disabled_stuff/coin.py b/disabled_stuff/coin.py deleted file mode 100644 index 7cc2a2a..0000000 --- a/disabled_stuff/coin.py +++ /dev/null @@ -1,25 +0,0 @@ -import random - -from util import hook - - -@hook.command(autohelp=False) -def coin(inp, action=None): - """coin [amount] -- Flips [amount] of coins.""" - - if inp: - try: - amount = int(inp) - except (ValueError, TypeError): - return "Invalid input!" - else: - amount = 1 - - if amount == 1: - action("flips a coin and gets {}.".format(random.choice(["heads", "tails"]))) - elif amount == 0: - action("makes a coin flipping motion with its hands.") - else: - heads = int(random.normalvariate(.5 * amount, (.75 * amount) ** .5)) - tails = amount - heads - action("flips {} coins and gets {} heads and {} tails.".format(amount, heads, tails)) diff --git a/disabled_stuff/correction.py b/disabled_stuff/correction.py deleted file mode 100644 index 7617e11..0000000 --- a/disabled_stuff/correction.py +++ /dev/null @@ -1,37 +0,0 @@ -from util import hook - -import re - -CORRECTION_RE = r'^(s|S)/.*/.*/?\S*$' - - -@hook.regex(CORRECTION_RE) -def correction(match, input=None, conn=None, message=None): - split = input.msg.split("/") - - if len(split) == 4: - nick = split[3].lower() - else: - nick = None - - find = split[1] - replace = split[2] - - for item in conn.history[input.chan].__reversed__(): - name, timestamp, msg = item - if msg.startswith("s/"): - # don't correct corrections, it gets really confusing - continue - if nick: - if nick != name.lower(): - continue - if find in msg: - if "\x01ACTION" in msg: - msg = msg.replace("\x01ACTION ", "/me ").replace("\x01", "") - message(u"Correction, <{}> {}".format(name, msg.replace(find, "\x02" + 
replace + "\x02"))) - return - else: - continue - - return u"Did not find {} in any recent messages.".format(find) - diff --git a/disabled_stuff/cryptocoins.py b/disabled_stuff/cryptocoins.py deleted file mode 100644 index 42d5945..0000000 --- a/disabled_stuff/cryptocoins.py +++ /dev/null @@ -1,60 +0,0 @@ -from util import http, hook - -## CONSTANTS - -exchanges = { - "blockchain": { - "api_url": "https://blockchain.info/ticker", - "func": lambda data: u"Blockchain // Buy: \x0307${:,.2f}\x0f -" - u" Sell: \x0307${:,.2f}\x0f".format(data["USD"]["buy"], data["USD"]["sell"]) - }, - "coinbase": { - "api_url": "https://coinbase.com/api/v1/prices/spot_rate", - "func": lambda data: u"Coinbase // Current: \x0307${:,.2f}\x0f".format(float(data['amount'])) - }, - "bitpay": { - "api_url": "https://bitpay.com/api/rates", - "func": lambda data: u"Bitpay // Current: \x0307${:,.2f}\x0f".format(data[0]['rate']) - }, - "bitstamp": { - "api_url": "https://www.bitstamp.net/api/ticker/", - "func": lambda data: u"BitStamp // Current: \x0307${:,.2f}\x0f - High: \x0307${:,.2f}\x0f -" - u" Low: \x0307${:,.2f}\x0f - Volume: {:,.2f} BTC".format(float(data['last']), - float(data['high']), - float(data['low']), - float(data['volume'])) - } -} - - -## HOOK FUNCTIONS - -@hook.command("btc", autohelp=False) -@hook.command(autohelp=False) -def bitcoin(inp): - """bitcoin -- Gets current exchange rate for bitcoins from several exchanges, default is Blockchain. 
- Supports MtGox, Bitpay, Coinbase and BitStamp.""" - inp = inp.lower() - - if inp: - if inp in exchanges: - exchange = exchanges[inp] - else: - return "Invalid Exchange" - else: - exchange = exchanges["blockchain"] - - data = http.get_json(exchange["api_url"]) - func = exchange["func"] - return func(data) - - -@hook.command("ltc", autohelp=False) -@hook.command(autohelp=False) -def litecoin(inp, message=None): - """litecoin -- gets current exchange rate for litecoins from BTC-E""" - data = http.get_json("https://btc-e.com/api/2/ltc_usd/ticker") - ticker = data['ticker'] - message("Current: \x0307${:,.2f}\x0f - High: \x0307${:,.2f}\x0f" - " - Low: \x0307${:,.2f}\x0f - Volume: {:,.2f} LTC".format(ticker['buy'], ticker['high'], ticker['low'], - ticker['vol_cur'])) diff --git a/disabled_stuff/cypher.py b/disabled_stuff/cypher.py deleted file mode 100644 index b54248a..0000000 --- a/disabled_stuff/cypher.py +++ /dev/null @@ -1,39 +0,0 @@ -import base64 - -from util import hook - - -def encode(key, clear): - enc = [] - for i in range(len(clear)): - key_c = key[i % len(key)] - enc_c = chr((ord(clear[i]) + ord(key_c)) % 256) - enc.append(enc_c) - return base64.urlsafe_b64encode("".join(enc)) - - -def decode(key, enc): - dec = [] - enc = base64.urlsafe_b64decode(enc.encode('ascii', 'ignore')) - for i in range(len(enc)): - key_c = key[i % len(key)] - dec_c = chr((256 + ord(enc[i]) - ord(key_c)) % 256) - dec.append(dec_c) - return "".join(dec) - - -@hook.command -def cypher(inp): - """cypher -- Cyphers with .""" - - passwd = inp.split(" ")[0] - inp = " ".join(inp.split(" ")[1:]) - return encode(passwd, inp) - - -@hook.command -def decypher(inp): - """decypher -- Decyphers with .""" - passwd = inp.split(" ")[0] - inp = " ".join(inp.split(" ")[1:]) - return decode(passwd, inp) diff --git a/disabled_stuff/data/8ball_responses.txt b/disabled_stuff/data/8ball_responses.txt deleted file mode 100644 index 87c7d6b..0000000 --- a/disabled_stuff/data/8ball_responses.txt +++ /dev/null 
@@ -1,26 +0,0 @@ -As I see it, yes -It is certain -It is decidedly so -Most likely -Outlook good -Signs point to yes -One would be wise to think so -Naturally -Without a doubt -Yes -Yes, definitely -You may rely on it -Reply hazy, try again -Ask again later -Better not tell you now -Cannot predict now -Concentrate and ask again -You know the answer better than I -Maybe... -You're kidding, right? -Don't count on it -In your dreams -My reply is no -My sources say no -Outlook not so good -Very doubtful diff --git a/disabled_stuff/data/GeoLiteCity.dat b/disabled_stuff/data/GeoLiteCity.dat deleted file mode 100644 index e94f60e..0000000 Binary files a/disabled_stuff/data/GeoLiteCity.dat and /dev/null differ diff --git a/disabled_stuff/data/flirts.txt b/disabled_stuff/data/flirts.txt deleted file mode 100644 index 6490da8..0000000 --- a/disabled_stuff/data/flirts.txt +++ /dev/null @@ -1,54 +0,0 @@ -I bet your name's Mickey, 'cause you're so fine. -Hey, pretty mama. You smell kinda pretty, wanna smell me? -I better get out my library card, 'cause I'm checkin' you out. -If you were a booger, I'd pick you. -If I could rearrange the alphabet, I would put U and I together. -I've been bad, take me to your room. -I think Heaven's missing an angel. -Are you a parking ticket? Because you've got FINE written all over you. -That shirt looks good on you, it'd look better on my bedroom floor. -Can I have your phone number? I seem to have lost mine. -I cant help to notice but you look a lot like my next girlfriend. -Aren't your feet tired? Because you've been running through my mind all day. -I must be asleep, 'cause you are a dream come true. -I like large posteriors and I cannot prevaricate. -How you doin'? -If I said you had a good body, would you hold it against me? -Hey, baby cakes. -Nice butt. -I love you like a fat kid loves cake. -Do you believe in love at first sight? Or should I walk by again...? -Do you have a map? I think I just got lost in your eyes. 
-Want to see my good side? Hah, that was a trick question, all I have are good sides. -You look like a woman who appreciates the finer things in life. Come over here and feel my velour bedspread. -Now you're officially my woman. Kudos! I can't say I don't envy you. -I find that the most erotic part of a woman is the boobies. -I wish I was one of your tears, so I could be born in your eye, run down your cheek, and die on your lips. -If you want to climb aboard the Love Train, you've got to stand on the Love Tracks. But you might just get smushed by a very sensual cow-catcher. -It’s a good thing I wore my gloves today; otherwise, you’d be too hot to handle. -Lets say you and I knock some very /sensual/ boots. -I lost my phone number, can I have yours? -Does this rag smell like chloroform to you? -I'm here, where are your other two wishes? -Apart from being sexy, what do you do for a living? -Hi, I'm Mr. Right. Someone said you were looking for me. -You got something on your chest: My eyes. -Are you from Tennessee? Cause you're the only TEN I see. -Are you an alien? Because you just abducted my heart. -Excuse me, but I think you dropped something!!! MY JAW!!! -If I followed you home, would you keep me? -I wish you were a Pony Carousel outside Walmart, so I could ride you all day long for a quarter. -Where have you been all my life? -I'm just a love machine, and I don't work for nobody but you. -Do you live on a chicken farm? Because you sure know how to raise cocks. -Are you wearing space pants? Because your ass is out of this world. -Nice legs. What time do they open? -Are you lost? Because it’s so strange to see an angel so far from heaven. -Your daddy must have been a baker, because you've got a nice set of buns. -You're so beautiful that last night you made me forget my pickup line. -I've never seen such dark eyes with so much light in them. -I think we should just be friends with sexual tension. -Whenever I see you I feel like a dog dying to get out of the car. 
-If I'd have held you any closer I'd be in back of you. -I wish I were on Facebook so I could poke you. -I want you like JFK wanted a car with a roof. diff --git a/disabled_stuff/data/fortunes.txt b/disabled_stuff/data/fortunes.txt deleted file mode 100644 index d425f26..0000000 --- a/disabled_stuff/data/fortunes.txt +++ /dev/null @@ -1,57 +0,0 @@ -Help! I'm stuck in the fortune cookie factory! -He who laughs at himself never runs out of things to laugh at. -The world is your oyster. -Today will be a good day. -Life's short, party naked. -Haters gonna hate. -You are amazing and let no one tell you otherwise. -A starship ride has been promised to you by the galactic wizard. -That wasn’t chicken. -You will become a hermit and be sad and lonely for the rest of your life. -Don’t fry bacon in the nude. -The road to riches is paved with homework. -Take calculated risks. That is quite different from being rash. -DO THE IMPOSSIBLE, SEE THE INVISIBLE. -You cannot plough a field by turning it over in your mind. Unless you have telekinesis. -No one can make you feel inferior without your consent. -Never lose the ability to find beauty in ordinary things. -Ignore previous fortune. -Smile more. -YOU'RE THE BEST AROUND, NOTHIN'S GONNA EVER KEEP YA DOWN. -The cake is not a lie. -Never take life seriously. Nobody gets out alive anyway. -Friendship is like peeing on yourself: everyone can see it, but only you get the warm feeling that it brings. -Never go to a doctor whose office plants have died. -Always remember you're unique, just like everyone else. -What if everything is an illusion and nothing exists? (In that case, I definitely overpaid for my carpet) -Don’t be discouraged, because every wrong attempt discarded is another step forward. -Even if you are on the right track, you will get run over if you just sit there. -Think like a man of action, and act like a man of thought. -When in doubt, lubricate. 
-It is time for you to live up to your family name and face FULL LIFE CONSEQUENCES. -It's a good day to do what has to be done. -Move near the countryside and you will be friends of John Freeman. -If you can't beat 'em, mock 'em. -Use gun. And if that don't work, use more gun. -LOOK OUT BEHIND YOU -You will die, but become reanimated as a Zombie, and forever roam the earth in search of purpose... And brains, of course. -This message will self destruct in 10 seconds. -You will live a normal life, with a normal home, a normal job, and a normal future. -You'll never know what you can do until you try. -A person of words and not deeds is like a garden full of weeds. -You are talented in many ways -Be both a speaker of words and a doer of deeds. -A visit to a strange place will bring you renewed perspective. -A passionate new romance will appear in your life when you least expect it. -If you care enough for a result, you will most certainly attain it. -To be loved, be loveable. -Step away from the power position for one day. -If you want to get a sure crop with a big yield, sow wild oats. -It doesn't take guts to quit. -You can expect a change for the better in job or status in the future. -As the wallet grows, so do the needs. -You have a reputation for being straightforward and honest. -Learn a new language and get a new soul. -A dubious friend may be an enemy in camouflage. -A tall dark stranger will soon enter our life. -Keep staring. I'll do a trick. 
diff --git a/disabled_stuff/data/geoip_regions.json b/disabled_stuff/data/geoip_regions.json deleted file mode 100644 index 449d148..0000000 --- a/disabled_stuff/data/geoip_regions.json +++ /dev/null @@ -1 +0,0 @@ -{"BD": {"82": "Khulna", "83": "Rajshahi", "81": "Dhaka", "86": "Sylhet", "84": "Chittagong", "85": "Barisal"}, "BE": {"11": "Brussels Hoofdstedelijk Gewest", "03": "Hainaut", "13": "Flanders", "01": "Antwerpen", "06": "Luxembourg", "07": "Namur", "04": "Liege", "05": "Limburg", "08": "Oost-Vlaanderen", "09": "West-Vlaanderen", "12": "Vlaams-Brabant", "14": "Wallonia", "10": "Brabant Wallon"}, "BF": {"28": "Kouritenga", "50": "Gourma", "60": "Kourweogo", "61": "Leraba", "62": "Loroum", "63": "Mouhoun", "64": "Namentenga", "65": "Naouri", "66": "Nayala", "67": "Noumbiel", "68": "Oubritenga", "69": "Poni", "52": "Ioba", "20": "Ganzourgou", "21": "Gnagna", "48": "Bougouriba", "49": "Boulgou", "46": "Banwa", "47": "Bazega", "44": "Zoundweogo", "45": "Bale", "42": "Tapoa", "40": "Soum", "77": "Ziro", "76": "Yatenga", "75": "Yagha", "74": "Tuy", "73": "Sourou", "72": "Sissili", "71": "Seno", "70": "Sanmatenga", "59": "Koulpelogo", "78": "Zondoma", "15": "Bam", "58": "Kossi", "19": "Boulkiemde", "55": "Komoe", "57": "Kompienga", "56": "Komondjari", "51": "Houet", "36": "Sanguie", "53": "Kadiogo", "34": "Passore", "33": "Oudalan", "54": "Kenedougou"}, "BG": {"60": "Turgovishte", "61": "Varna", "62": "Veliko Turnovo", "63": "Vidin", "64": "Vratsa", "65": "Yambol", "48": "Pazardzhik", "49": "Pernik", "46": "Lovech", "47": "Montana", "44": "Kurdzhali", "45": "Kyustendil", "42": "Grad Sofiya", "43": "Khaskovo", "40": "Dobrich", "41": "Gabrovo", "39": "Burgas", "38": "Blagoevgrad", "59": "Stara Zagora", "58": "Sofiya", "33": "Mikhaylovgrad", "54": "Shumen", "57": "Smolyan", "56": "Sliven", "51": "Plovdiv", "50": "Pleven", "53": "Ruse", "52": "Razgrad", "55": "Silistra"}, "BA": {"02": "Republika Srpska", "01": "Federation of Bosnia and Herzegovina"}, "BB": {"02": 
"Saint Andrew", "03": "Saint George", "01": "Christ Church", "06": "Saint Joseph", "07": "Saint Lucy", "04": "Saint James", "05": "Saint John", "08": "Saint Michael", "09": "Saint Peter", "11": "Saint Thomas", "10": "Saint Philip"}, "BM": {"02": "Hamilton", "03": "Hamilton", "01": "Devonshire", "06": "Saint George", "07": "Saint George's", "04": "Paget", "05": "Pembroke", "08": "Sandys", "09": "Smiths", "11": "Warwick", "10": "Southampton"}, "BN": {"11": "Collines", "10": "Temburong", "13": "Donga", "12": "Kouffo", "15": "Tutong", "07": "Alibori", "17": "Plateau", "16": "Oueme", "18": "Zou", "08": "Belait", "09": "Brunei and Muara", "14": "Littoral"}, "BO": {"02": "Cochabamba", "03": "El Beni", "01": "Chuquisaca", "06": "Pando", "07": "Potosi", "04": "La Paz", "05": "Oruro", "08": "Santa Cruz", "09": "Tarija"}, "BH": {"02": "Al Manamah", "10": "Al Mintaqah ash Shamaliyah", "13": "Ar Rifa", "01": "Al Hadd", "06": "Sitrah", "14": "Madinat Hamad", "17": "Al Janubiyah", "05": "Jidd Hafs", "19": "Al Wusta", "18": "Ash Shamaliyah", "08": "Al Mintaqah al Gharbiyah", "09": "Mintaqat Juzur Hawar", "16": "Al Asimah", "12": "Madinat", "11": "Al Mintaqah al Wusta", "15": "Al Muharraq"}, "BI": {"02": "Bujumbura", "10": "Bururi", "13": "Gitega", "12": "Cibitoke", "15": "Kayanza", "14": "Karuzi", "17": "Makamba", "16": "Kirundo", "19": "Ngozi", "18": "Muyinga", "09": "Bubanza", "22": "Muramvya", "11": "Cankuzo", "20": "Rutana", "23": "Mwaro", "21": "Ruyigi"}, "BJ": {"11": "Collines", "10": "Borgou", "13": "Donga", "12": "Kouffo", "15": "Mono", "07": "Alibori", "17": "Plateau", "16": "Oueme", "18": "Zou", "08": "Atakora", "09": "Atlanyique", "14": "Littoral"}, "BT": {"11": "Lhuntshi", "10": "Ha", "13": "Paro", "12": "Mongar", "06": "Chhukha", "07": "Chirang", "17": "Samdrup", "05": "Bumthang", "19": "Tashigang", "18": "Shemgang", "08": "Daga", "09": "Geylegphug", "22": "Wangdi Phodrang", "21": "Tongsa", "20": "Thimphu", "16": "Samchi", "15": "Punakha", "14": "Pemagatsel"}, "JM": 
{"02": "Hanover", "10": "Saint Catherine", "13": "Saint Mary", "01": "Clarendon", "15": "Trelawny", "07": "Portland", "04": "Manchester", "16": "Westmoreland", "08": "Saint Andrew", "09": "Saint Ann", "12": "Saint James", "17": "Kingston", "11": "Saint Elizabeth", "14": "Saint Thomas"}, "BW": {"11": "North-West", "03": "Ghanzi", "01": "Central", "06": "Kweneng", "04": "Kgalagadi", "05": "Kgatleng", "08": "North-East", "09": "South-East", "10": "Southern"}, "WS": {"02": "Aiga-i-le-Tai", "03": "Atua", "06": "Va", "07": "Gagaifomauga", "04": "Fa", "05": "Gaga", "08": "Palauli", "09": "Satupa", "11": "Vaisigano", "10": "Tuamasaga"}, "BR": {"30": "Pernambuco", "02": "Alagoas", "03": "Amapa", "26": "Santa Catarina", "01": "Acre", "06": "Ceara", "07": "Distrito Federal", "04": "Amazonas", "05": "Bahia", "08": "Espirito Santo", "28": "Sergipe", "29": "Goias", "14": "Mato Grosso", "24": "Rondonia", "25": "Roraima", "27": "Sao Paulo", "20": "Piaui", "21": "Rio de Janeiro", "11": "Mato Grosso do Sul", "13": "Maranhao", "15": "Minas Gerais", "22": "Rio Grande do Norte", "17": "Paraiba", "16": "Para", "18": "Parana", "31": "Tocantins", "23": "Rio Grande do Sul"}, "BS": {"24": "Acklins and Crooked Islands", "10": "Exuma", "13": "Inagua", "27": "Governor's Harbour", "15": "Long Island", "23": "New Providence", "22": "Harbour Island", "16": "Mayaguana", "33": "Rock Sound", "18": "Ragged Island", "31": "Marsh Harbour", "05": "Bimini", "28": "Green Turtle Cay", "26": "Fresh Creek", "35": "San Salvador and Rum Cay", "29": "High Rock", "34": "Sandy Point", "25": "Freeport", "30": "Kemps Bay", "06": "Cat Island", "32": "Nichollstown and Berry Islands"}, "BY": {"02": "Homyel'skaya Voblasts'", "03": "Hrodzyenskaya Voblasts'", "01": "Brestskaya Voblasts'", "06": "Mahilyowskaya Voblasts'", "07": "Vitsyebskaya Voblasts'", "04": "Minsk", "05": "Minskaya Voblasts'"}, "BZ": {"02": "Cayo", "03": "Corozal", "01": "Belize", "06": "Toledo", "04": "Orange Walk", "05": "Stann Creek"}, "RU": {"24": 
"Kalmyk", "25": "Kaluga", "26": "Kamchatka", "27": "Karachay-Cherkess", "20": "Irkutsk", "21": "Ivanovo", "22": "Kabardin-Balkar", "23": "Kaliningrad", "28": "Karelia", "29": "Kemerovo", "59": "Primor'ye", "58": "Perm'", "55": "Orenburg", "54": "Omsk", "57": "Penza", "56": "Orel", "51": "Nizhegorod", "50": "Nenets", "53": "Novosibirsk", "52": "Novgorod", "88": "Yaroslavl'", "89": "Yevrey", "82": "Ust-Orda Buryat", "83": "Vladimir", "80": "Udmurt", "81": "Ul'yanovsk", "86": "Voronezh", "87": "Yamal-Nenets", "84": "Volgograd", "85": "Vologda", "02": "Aginsky Buryatsky AO", "03": "Gorno-Altay", "01": "Adygeya, Republic of", "06": "Arkhangel'sk", "07": "Astrakhan'", "04": "Altaisky krai", "05": "Amur", "08": "Bashkortostan", "09": "Belgorod", "39": "Krasnoyarsk", "38": "Krasnodar", "33": "Kirov", "32": "Khanty-Mansiy", "31": "Khakass", "30": "Khabarovsk", "37": "Kostroma", "36": "Koryak", "35": "Komi-Permyak", "34": "Komi", "60": "Pskov", "61": "Rostov", "62": "Ryazan'", "63": "Sakha", "64": "Sakhalin", "65": "Samara", "66": "Saint Petersburg City", "67": "Saratov", "68": "North Ossetia", "69": "Smolensk", "91": "Krasnoyarskiy Kray", "90": "Permskiy Kray", "93": "Zabaykal'skiy Kray", "92": "Kamchatskiy Kray", "11": "Buryat", "10": "Bryansk", "13": "Chelyabinsk", "12": "Chechnya", "15": "Chukot", "14": "Chita", "17": "Dagestan", "16": "Chuvashia", "19": "Ingush", "18": "Evenk", "48": "Moscow City", "49": "Murmansk", "46": "Mordovia", "47": "Moskva", "44": "Magadan", "45": "Mariy-El", "42": "Leningrad", "43": "Lipetsk", "40": "Kurgan", "41": "Kursk", "77": "Tver'", "76": "Tula", "75": "Tomsk", "74": "Taymyr", "73": "Tatarstan", "72": "Tambovskaya oblast", "71": "Sverdlovsk", "70": "Stavropol'", "79": "Tuva", "78": "Tyumen'"}, "RW": {"11": "Est", "13": "Nord", "12": "Kigali", "06": "Gitarama", "07": "Kibungo", "09": "Kigali", "01": "Butare", "15": "Sud", "14": "Ouest"}, "RS": {"02": "Vojvodina", "01": "Kosovo"}, "TM": {"02": "Balkan", "03": "Dashoguz", "01": "Ahal", "04": 
"Lebap", "05": "Mary"}, "TJ": {"02": "Khatlon", "03": "Sughd", "01": "Kuhistoni Badakhshon"}, "RO": {"30": "Prahova", "42": "Giurgiu", "43": "Ilfov", "02": "Arad", "03": "Arges", "26": "Mehedinti", "01": "Alba", "06": "Bistrita-Nasaud", "07": "Botosani", "04": "Bacau", "05": "Bihor", "08": "Braila", "09": "Brasov", "28": "Neamt", "29": "Olt", "40": "Vrancea", "41": "Calarasi", "14": "Constanta", "25": "Maramures", "39": "Valcea", "27": "Mures", "20": "Harghita", "38": "Vaslui", "21": "Hunedoara", "11": "Buzau", "10": "Bucuresti", "13": "Cluj", "12": "Caras-Severin", "15": "Covasna", "22": "Ialomita", "17": "Dolj", "16": "Dambovita", "19": "Gorj", "18": "Galati", "31": "Salaj", "23": "Iasi", "37": "Tulcea", "36": "Timis", "35": "Teleorman", "34": "Suceava", "33": "Sibiu", "32": "Satu Mare"}, "GW": {"02": "Quinara", "10": "Gabu", "01": "Bafata", "06": "Cacheu", "07": "Tombali", "04": "Oio", "05": "Bolama", "12": "Biombo", "11": "Bissau"}, "GT": {"02": "Baja Verapaz", "03": "Chimaltenango", "01": "Alta Verapaz", "06": "Escuintla", "07": "Guatemala", "04": "Chiquimula", "05": "El Progreso", "08": "Huehuetenango", "09": "Izabal", "14": "Quiche", "20": "Suchitepequez", "21": "Totonicapan", "11": "Jutiapa", "10": "Jalapa", "13": "Quetzaltenango", "12": "Peten", "15": "Retalhuleu", "22": "Zacapa", "17": "San Marcos", "16": "Sacatepequez", "19": "Solola", "18": "Santa Rosa"}, "GR": {"30": "Evritania", "42": "Lakonia", "48": "Samos", "45": "Iraklion", "43": "Khania", "49": "Kikladhes", "02": "Rodhopi", "03": "Xanthi", "26": "Levkas", "01": "Evros", "06": "Kilkis", "07": "Pella", "04": "Drama", "05": "Serrai", "46": "Lasithi", "47": "Dhodhekanisos", "08": "Florina", "09": "Kastoria", "28": "Zakinthos", "29": "Fthiotis", "40": "Messinia", "41": "Arkadhia", "14": "Kavala", "51": "Lesvos", "24": "Magnisia", "25": "Kerkira", "39": "Ilia", "27": "Kefallinia", "20": "Arta", "38": "Akhaia", "21": "Larisa", "11": "Kozani", "10": "Grevena", "13": "Thessaloniki", "12": "Imathia", "15": 
"Khalkidhiki", "22": "Trikala", "17": "Ioannina", "16": "Pieria", "19": "Preveza", "18": "Thesprotia", "31": "Aitolia kai Akarnania", "23": "Kardhitsa", "37": "Korinthia", "36": "Argolis", "35": "Attiki", "34": "Evvoia", "33": "Voiotia", "32": "Fokis", "44": "Rethimni", "50": "Khios"}, "GQ": {"03": "Annobon", "06": "Centro Sur", "07": "Kie-Ntem", "04": "Bioko Norte", "05": "Bioko Sur", "08": "Litoral", "09": "Wele-Nzas"}, "JP": {"30": "Oita", "42": "Toyama", "45": "Yamaguchi", "43": "Wakayama", "02": "Akita", "03": "Aomori", "26": "Nagano", "01": "Aichi", "06": "Fukui", "07": "Fukuoka", "04": "Chiba", "05": "Ehime", "46": "Yamanashi", "47": "Okinawa", "08": "Fukushima", "09": "Gifu", "28": "Nara", "29": "Niigata", "40": "Tokyo", "41": "Tottori", "14": "Ibaraki", "24": "Miyagi", "25": "Miyazaki", "39": "Tokushima", "27": "Nagasaki", "20": "Kochi", "38": "Tochigi", "21": "Kumamoto", "11": "Hiroshima", "10": "Gumma", "13": "Hyogo", "12": "Hokkaido", "15": "Ishikawa", "22": "Kyoto", "17": "Kagawa", "16": "Iwate", "19": "Kanagawa", "18": "Kagoshima", "31": "Okayama", "23": "Mie", "37": "Shizuoka", "36": "Shimane", "35": "Shiga", "34": "Saitama", "33": "Saga", "32": "Osaka", "44": "Yamagata"}, "GY": {"11": "Cuyuni-Mazaruni", "10": "Barima-Waini", "13": "East Berbice-Corentyne", "12": "Demerara-Mahaica", "15": "Mahaica-Berbice", "14": "Essequibo Islands-West Demerara", "17": "Potaro-Siparuni", "16": "Pomeroon-Supenaam", "19": "Upper Takutu-Upper Essequibo", "18": "Upper Demerara-Berbice"}, "GE": {"58": "Tsalenjikhis Raioni", "30": "Khonis Raioni", "54": "T'et'ritsqaros Raioni", "42": "P'ot'i", "48": "Samtrediis Raioni", "45": "Rust'avi", "43": "Qazbegis Raioni", "60": "Tsqaltubo", "61": "Vanis Raioni", "62": "Zestap'onis Raioni", "57": "Ts'ageris Raioni", "64": "Zugdidis Raioni", "49": "Senakis Raioni", "52": "T'elavis Raioni", "53": "T'erjolis Raioni", "02": "Abkhazia", "03": "Adigenis Raioni", "26": "Kaspis Raioni", "01": "Abashis Raioni", "06": "Akhalk'alak'is Raioni", 
"07": "Akhalts'ikhis Raioni", "04": "Ajaria", "05": "Akhalgoris Raioni", "46": "Sach'kheris Raioni", "47": "Sagarejos Raioni", "08": "Akhmetis Raioni", "09": "Ambrolauris Raioni", "28": "Khashuris Raioni", "29": "Khobis Raioni", "40": "Onis Raioni", "41": "Ozurget'is Raioni", "14": "Chiat'ura", "59": "Tsalkis Raioni", "51": "T'bilisi", "24": "Javis Raioni", "56": "Tqibuli", "25": "K'arelis Raioni", "39": "Ninotsmindis Raioni", "27": "Kharagaulis Raioni", "20": "Gardabanis Raioni", "38": "Mts'khet'is Raioni", "21": "Gori", "11": "Baghdat'is Raioni", "10": "Aspindzis Raioni", "13": "Borjomis Raioni", "12": "Bolnisis Raioni", "15": "Ch'khorotsqus Raioni", "22": "Goris Raioni", "17": "Dedop'listsqaros Raioni", "16": "Ch'okhatauris Raioni", "19": "Dushet'is Raioni", "18": "Dmanisis Raioni", "31": "K'ut'aisi", "23": "Gurjaanis Raioni", "37": "Mestiis Raioni", "36": "Martvilis Raioni", "35": "Marneulis Raioni", "34": "Lentekhis Raioni", "33": "Lanch'khut'is Raioni", "55": "T'ianet'is Raioni", "63": "Zugdidi", "32": "Lagodekhis Raioni", "44": "Qvarlis Raioni", "50": "Sighnaghis Raioni"}, "GD": {"02": "Saint David", "03": "Saint George", "01": "Saint Andrew", "06": "Saint Patrick", "04": "Saint John", "05": "Saint Mark"}, "GB": {"O9": "Waltham Forest", "O8": "Walsall", "O7": "Wakefield", "O6": "Trafford", "O5": "Tower Hamlets", "O4": "Torbay", "O3": "Thurrock", "O2": "Telford and Wrekin", "O1": "Tameside", "W2": "Renfrewshire", "G7": "Kingston upon Thames", "G6": "Kingston upon Hull, City of", "G5": "Kent", "G4": "Kensington and Chelsea", "G3": "Islington", "G2": "Isle of Wight", "G1": "Hounslow", "V2": "Glasgow City", "G9": "Knowsley", "G8": "Kirklees", "V3": "Highland", "V4": "Inverclyde", "W1": "Perth and Kinross", "V5": "Midlothian", "R4": "Carrickfergus", "R5": "Castlereagh", "R6": "Coleraine", "R7": "Cookstown", "R1": "Ballymoney", "R2": "Banbridge", "R3": "Belfast", "Z8": "Cheshire West and Chester", "V7": "North Ayrshire", "R8": "Craigavon", "R9": "Down", "V8": 
"North Lanarkshire", "V9": "Orkney", "Z6": "Central Bedfordshire", "Z7": "Cheshire East", "J8": "Nottingham", "J9": "Nottinghamshire", "J4": "North Somerset", "J5": "North Tyneside", "J6": "Northumberland", "J7": "North Yorkshire", "J1": "Northamptonshire", "J2": "North East Lincolnshire", "J3": "North Lincolnshire", "B4": "Bradford", "B5": "Brent", "B6": "Brighton and Hove", "B7": "Bristol, City of", "W3": "Shetland Islands", "B1": "Bolton", "B2": "Bournemouth", "B3": "Bracknell Forest", "Z3": "Vale of Glamorgan, The", "B8": "Bromley", "B9": "Buckinghamshire", "W9": "West Lothian", "W8": "Eilean Siar", "U9": "Falkirk", "U8": "Edinburgh, City of", "U5": "East Dunbartonshire", "U4": "East Ayrshire", "U7": "East Renfrewshire", "U6": "East Lothian", "U1": "Clackmannanshire", "U3": "Dundee City", "U2": "Dumfries and Galloway", "M5": "Southend-on-Sea", "M4": "Southampton", "M7": "South Tyneside", "M6": "South Gloucestershire", "M1": "Slough", "M3": "Somerset", "M2": "Solihull", "X9": "Denbighshire", "Z4": "Wrexham", "M9": "Staffordshire", "M8": "Southwark", "W7": "West Dunbartonshire", "W6": "Stirling", "W5": "South Lanarkshire", "W4": "South Ayrshire", "E9": "Halton", "E8": "Hackney", "Y6": "Newport", "E5": "Gateshead", "E4": "Essex", "E7": "Greenwich", "E6": "Gloucestershire", "E1": "East Riding of Yorkshire", "Z1": "Swansea", "E3": "Enfield", "E2": "East Sussex", "P2": "Warrington", "P3": "Warwickshire", "P1": "Wandsworth", "P6": "West Sussex", "P7": "Wigan", "P4": "West Berkshire", "P5": "Westminster", "P8": "Wiltshire", "P9": "Windsor and Maidenhead", "Z9": "Isles of Scilly", "H8": "Liverpool", "H9": "London, City of", "H2": "Lancashire", "H3": "Leeds", "H1": "Lambeth", "H6": "Lewisham", "H7": "Lincolnshire", "H4": "Leicester", "H5": "Leicestershire", "S9": "Newry and Mourne", "S8": "Moyle", "S3": "Larne", "S2": "Fermanagh", "S1": "Dungannon", "S7": "Magherafelt", "S6": "Derry", "S5": "Lisburn", "S4": "Limavady", "K3": "Peterborough", "K2": "Oxfordshire", "K1": 
"Oldham", "K7": "Reading", "K6": "Portsmouth", "K5": "Poole", "K4": "Plymouth", "K9": "Redcar and Cleveland", "K8": "Redbridge", "X8": "Conwy", "X2": "Blaenau Gwent", "C9": "Cumbria", "C8": "Croydon", "V6": "Moray", "Z5": "Bedfordshire", "C3": "Cambridgeshire", "C2": "Calderdale", "C1": "Bury", "X1": "Isle of Anglesey", "C7": "Coventry", "C6": "Cornwall", "C5": "Cheshire", "C4": "Camden", "Z2": "Torfaen", "N8": "Sutton", "N9": "Swindon", "X3": "Bridgend", "N1": "St. Helens", "N2": "Stockport", "N3": "Stockton-on-Tees", "N4": "Stoke-on-Trent", "N5": "Suffolk", "N6": "Sunderland", "N7": "Surrey", "F1": "Hammersmith and Fulham", "F2": "Hampshire", "F3": "Haringey", "F4": "Harrow", "F5": "Hartlepool", "F6": "Havering", "F7": "Herefordshire", "F8": "Hertford", "F9": "Hillingdon", "X7": "Carmarthenshire", "X4": "Caerphilly", "X5": "Cardiff", "Q1": "Wirral", "Q3": "Wolverhampton", "Q2": "Wokingham", "Q5": "York", "Q4": "Worcestershire", "Q7": "Ards", "Q6": "Antrim", "Q9": "Ballymena", "Q8": "Armagh", "I9": "Norfolk", "I8": "Newham", "X6": "Ceredigion", "I1": "Luton", "I3": "Medway", "I2": "Manchester", "I5": "Middlesbrough", "I4": "Merton", "I7": "Newcastle upon Tyne", "I6": "Milton Keynes", "A1": "Barking and Dagenham", "V1": "Fife", "A3": "Barnsley", "A2": "Barnet", "A5": "Bedfordshire", "A4": "Bath and North East Somerset", "A7": "Birmingham", "A6": "Bexley", "A9": "Blackpool", "A8": "Blackburn with Darwen", "T8": "Argyll and Bute", "T9": "Scottish Borders, The", "T6": "Aberdeenshire", "T7": "Angus", "T4": "Strabane", "T5": "Aberdeen City", "T2": "North Down", "T3": "Omagh", "T1": "Newtownabbey", "L6": "Shropshire", "L7": "Sandwell", "L4": "Rutland", "L5": "Salford", "L2": "Rochdale", "L3": "Rotherham", "L1": "Richmond upon Thames", "Y3": "Merthyr Tydfil", "L8": "Sefton", "L9": "Sheffield", "Y2": "Gwynedd", "Y1": "Flintshire", "Y5": "Neath Port Talbot", "Y4": "Monmouthshire", "Y9": "Rhondda Cynon Taff", "Y8": "Powys", "D8": "Durham", "D9": "Ealing", "D6": "Dorset", 
"D7": "Dudley", "D4": "Devon", "D5": "Doncaster", "D2": "Derby", "D3": "Derbyshire", "Y7": "Pembrokeshire", "D1": "Darlington"}, "GA": {"02": "Haut-Ogooue", "03": "Moyen-Ogooue", "01": "Estuaire", "06": "Ogooue-Ivindo", "07": "Ogooue-Lolo", "04": "Ngounie", "05": "Nyanga", "08": "Ogooue-Maritime", "09": "Woleu-Ntem"}, "SV": {"02": "Cabanas", "03": "Chalatenango", "13": "Sonsonate", "01": "Ahuachapan", "06": "La Paz", "07": "La Union", "04": "Cuscatlan", "05": "La Libertad", "08": "Morazan", "09": "San Miguel", "12": "San Vicente", "14": "Usulutan", "11": "Santa Ana", "10": "San Salvador"}, "GN": {"30": "Coyah", "02": "Boffa", "03": "Boke", "01": "Beyla", "06": "Dalaba", "07": "Dinguiraye", "04": "Conakry", "05": "Dabola", "09": "Faranah", "28": "Tougue", "29": "Yomou", "25": "Pita", "39": "Siguiri", "27": "Telimele", "38": "Nzerekore", "21": "Macenta", "11": "Fria", "10": "Forecariah", "13": "Gueckedou", "12": "Gaoual", "15": "Kerouane", "22": "Mali", "17": "Kissidougou", "16": "Kindia", "19": "Kouroussa", "18": "Koundara", "31": "Dubreka", "23": "Mamou", "37": "Mandiana", "36": "Lola", "35": "Lelouma", "34": "Labe", "33": "Koubia", "32": "Kankan"}, "GM": {"02": "Lower River", "03": "Central River", "01": "Banjul", "07": "North Bank", "04": "Upper River", "05": "Western"}, "GL": {"02": "Ostgronland", "03": "Vestgronland", "01": "Nordgronland"}, "GH": {"02": "Ashanti", "03": "Brong-Ahafo", "01": "Greater Accra", "06": "Northern", "04": "Central", "05": "Eastern", "08": "Volta", "09": "Western", "11": "Upper West", "10": "Upper East"}, "OM": {"02": "Al Batinah", "03": "Al Wusta", "01": "Ad Dakhiliyah", "06": "Masqat", "07": "Musandam", "04": "Ash Sharqiyah", "05": "Az Zahirah", "08": "Zufar"}, "TN": {"02": "Kasserine", "03": "Kairouan", "27": "Ben Arous", "06": "Jendouba", "22": "Siliana", "23": "Sousse", "28": "Madanin", "29": "Gabes", "10": "Qafsah", "39": "Manouba", "38": "Aiana", "15": "Al Mahdia", "14": "El Kef", "17": "Bajah", "16": "Al Munastir", "19": 
"Nabeul", "18": "Bizerte", "31": "Kebili", "37": "Zaghouan", "36": "Tunis", "35": "Tozeur", "34": "Tataouine", "33": "Sidi Bou Zid", "32": "Sfax"}, "JO": {"02": "Al Balqa'", "23": "Madaba", "12": "At Tafilah", "15": "Al Mafraq", "21": "Al Aqabah", "17": "Az Zaraqa", "16": "Amman", "19": "Ma'an", "18": "Irbid", "09": "Al Karak", "22": "Jarash", "20": "Ajlun"}, "HR": {"02": "Brodsko-Posavska", "03": "Dubrovacko-Neretvanska", "13": "Sibensko-Kninska", "01": "Bjelovarsko-Bilogorska", "06": "Koprivnicko-Krizevacka", "07": "Krapinsko-Zagorska", "04": "Istarska", "05": "Karlovacka", "19": "Zadarska", "18": "Vukovarsko-Srijemska", "08": "Licko-Senjska", "09": "Medimurska", "21": "Grad Zagreb", "20": "Zagrebacka", "16": "Varazdinska", "12": "Primorsko-Goranska", "17": "Viroviticko-Podravska", "14": "Sisacko-Moslavacka", "11": "Pozesko-Slavonska", "15": "Splitsko-Dalmatinska", "10": "Osjecko-Baranjska"}, "HT": {"11": "Ouest", "03": "Nord-Ouest", "13": "Sud-Est", "12": "Sud", "06": "Artibonite", "07": "Centre", "10": "Nord-Est", "09": "Nord", "15": "Nippes", "14": "Grand' Anse"}, "HU": {"30": "Kaposvar", "42": "Szekszard", "43": "Erd", "02": "Baranya", "03": "Bekes", "26": "Bekescsaba", "01": "Bacs-Kiskun", "06": "Csongrad", "07": "Debrecen", "04": "Borsod-Abauj-Zemplen", "05": "Budapest", "08": "Fejer", "09": "Gyor-Moson-Sopron", "28": "Eger", "29": "Hodmezovasarhely", "40": "Zalaegerszeg", "41": "Salgotarjan", "14": "Nograd", "24": "Zala", "25": "Gyor", "39": "Veszprem", "27": "Dunaujvaros", "20": "Jasz-Nagykun-Szolnok", "38": "Tatabanya", "21": "Tolna", "11": "Heves", "10": "Hajdu-Bihar", "13": "Miskolc", "12": "Komarom-Esztergom", "15": "Pecs", "22": "Vas", "17": "Somogy", "16": "Pest", "19": "Szeged", "18": "Szabolcs-Szatmar-Bereg", "31": "Kecskemet", "23": "Veszprem", "37": "Szombathely", "36": "Szolnok", "35": "Szekesfehervar", "34": "Sopron", "33": "Nyiregyhaza", "32": "Nagykanizsa"}, "HN": {"02": "Choluteca", "03": "Colon", "13": "Lempira", "01": "Atlantida", "06": 
"Cortes", "07": "El Paraiso", "04": "Comayagua", "05": "Copan", "18": "Yoro", "08": "Francisco Morazan", "09": "Gracias a Dios", "16": "Santa Barbara", "12": "La Paz", "17": "Valle", "14": "Ocotepeque", "11": "Islas de la Bahia", "15": "Olancho", "10": "Intibuca"}, "VE": {"02": "Anzoategui", "03": "Apure", "26": "Vargas", "01": "Amazonas", "06": "Bolivar", "07": "Carabobo", "04": "Aragua", "05": "Barinas", "08": "Cojedes", "09": "Delta Amacuro", "14": "Merida", "24": "Dependencias Federales", "25": "Distrito Federal", "20": "Tachira", "21": "Trujillo", "11": "Falcon", "13": "Lara", "12": "Guarico", "15": "Miranda", "22": "Yaracuy", "17": "Nueva Esparta", "16": "Monagas", "19": "Sucre", "18": "Portuguesa", "23": "Zulia"}, "PS": {"WE": "West Bank", "GZ": "Gaza"}, "PT": {"02": "Aveiro", "03": "Beja", "13": "Leiria", "06": "Castelo Branco", "07": "Coimbra", "04": "Braga", "05": "Braganca", "19": "Setubal", "18": "Santarem", "08": "Evora", "09": "Faro", "22": "Viseu", "21": "Vila Real", "16": "Portalegre", "17": "Porto", "14": "Lisboa", "11": "Guarda", "20": "Viana do Castelo", "23": "Azores", "10": "Madeira"}, "PY": {"02": "Amambay", "03": "Boqueron", "13": "Neembucu", "01": "Alto Parana", "06": "Central", "07": "Concepcion", "04": "Caaguazu", "05": "Caazapa", "19": "Canindeyu", "23": "Alto Paraguay", "08": "Cordillera", "16": "Presidente Hayes", "21": "Nueva Asuncion", "20": "Chaco", "12": "Misiones", "17": "San Pedro", "11": "Itapua", "15": "Paraguari", "10": "Guaira"}, "PA": {"02": "Chiriqui", "03": "Cocle", "01": "Bocas del Toro", "06": "Herrera", "07": "Los Santos", "04": "Colon", "05": "Darien", "08": "Panama", "09": "San Blas", "10": "Veraguas"}, "PG": {"02": "Gulf", "03": "Milne Bay", "13": "Manus", "01": "Central", "06": "Western", "07": "North Solomons", "04": "Northern", "05": "Southern Highlands", "19": "Enga", "18": "Sandaun", "08": "Chimbu", "09": "Eastern Highlands", "20": "National Capital", "16": "Western Highlands", "12": "Madang", "17": "West New 
Britain", "14": "Morobe", "11": "East Sepik", "15": "New Ireland", "10": "East New Britain"}, "PE": {"02": "Ancash", "03": "Apurimac", "01": "Amazonas", "06": "Cajamarca", "07": "Callao", "04": "Arequipa", "05": "Ayacucho", "08": "Cusco", "09": "Huancavelica", "14": "Lambayeque", "24": "Tumbes", "25": "Ucayali", "20": "Piura", "21": "Puno", "11": "Ica", "10": "Huanuco", "13": "La Libertad", "12": "Junin", "15": "Lima", "22": "San Martin", "17": "Madre de Dios", "16": "Loreto", "19": "Pasco", "18": "Moquegua", "23": "Tacna"}, "PK": {"02": "Balochistan", "03": "North-West Frontier", "01": "Federally Administered Tribal Areas", "06": "Azad Kashmir", "07": "Northern Areas", "04": "Punjab", "05": "Sindh", "08": "Islamabad"}, "PH": {"24": "Davao", "25": "Davao del Sur", "26": "Davao Oriental", "27": "Ifugao", "20": "Cavite", "21": "Cebu", "22": "Basilan", "23": "Eastern Samar", "28": "Ilocos Norte", "29": "Ilocos Sur", "G8": "Aurora", "F1": "Puerto Princesa", "F2": "Quezon City", "F3": "Roxas", "F4": "San Carlos", "F5": "San Carlos", "59": "Southern Leyte", "58": "Sorsogon", "55": "Samar", "54": "Romblon", "57": "North Cotabato", "56": "Maguindanao", "51": "Pangasinan", "50": "Pampanga", "53": "Rizal", "B4": "Caloocan", "B5": "Canlaon", "B6": "Cavite City", "B7": "Cebu City", "B1": "Cadiz", "B2": "Cagayan de Oro", "B3": "Calbayog", "B8": "Cotabato", "B9": "Dagupan", "M6": "San Juan", "M9": "Sarangani", "M8": "Santiago", "02": "Agusan del Norte", "03": "Agusan del Sur", "01": "Abra", "06": "Antique", "07": "Bataan", "04": "Aklan", "05": "Albay", "08": "Batanes", "09": "Batangas", "E9": "Pasay", "E8": "Palayan", "E5": "Oroquieta", "E4": "Ormoc", "E7": "Pagadian", "E6": "Ozamis", "E1": "Marawi", "E3": "Olongapo", "E2": "Naga", "G7": "Zamboanga", "F6": "San Jose", "G6": "Trece Martires", "G5": "Toledo", "G4": "Tangub", "G3": "Tagbilaran", "G2": "Tagaytay", "39": "Masbate", "38": "Marinduque", "G1": "Tacloban", "33": "Laguna", "32": "Kalinga-Apayao", "31": "Isabela", "30": 
"Iloilo", "37": "Leyte", "36": "La Union", "35": "Lanao del Sur", "34": "Lanao del Norte", "F8": "Silay", "60": "Sulu", "61": "Surigao del Norte", "62": "Surigao del Sur", "63": "Tarlac", "64": "Zambales", "65": "Zamboanga del Norte", "66": "Zamboanga del Sur", "67": "Northern Samar", "68": "Quirino", "69": "Siquijor", "F9": "Surigao", "C9": "Iloilo City", "C8": "Iligan", "C3": "Davao City", "C2": "Dapitan", "C1": "Danao", "C7": "Gingoog", "C6": "General Santos", "C5": "Dumaguete", "C4": "Dipolog", "11": "Bohol", "10": "Benguet", "13": "Bulacan", "12": "Bukidnon", "15": "Camarines Norte", "14": "Cagayan", "17": "Camiguin", "16": "Camarines Sur", "19": "Catanduanes", "18": "Capiz", "H2": "Quezon", "H3": "Negros Occidental", "48": "Nueva Vizcaya", "49": "Palawan", "46": "Negros Oriental", "47": "Nueva Ecija", "44": "Mountain", "45": "Negros Occidental", "42": "Misamis Occidental", "43": "Misamis Oriental", "40": "Mindoro Occidental", "41": "Mindoro Oriental", "A1": "Angeles", "A3": "Bago", "A2": "Bacolod", "A5": "Bais", "A4": "Baguio", "A7": "Batangas City", "A6": "Basilan City", "A9": "Cabanatuan", "A8": "Butuan", "72": "Tawitawi", "71": "Sultan Kudarat", "70": "South Cotabato", "F7": "San Pablo", "D8": "Mandaue", "D9": "Manila", "D6": "Lipa", "D7": "Lucena", "D4": "Lapu-Lapu", "D5": "Legaspi", "D2": "La Carlota", "D3": "Laoag", "D1": "Iriga"}, "PL": {"86": "Wielkopolskie", "84": "Swietokrzyskie", "77": "Malopolskie", "76": "Lubuskie", "75": "Lubelskie", "74": "Lodzkie", "73": "Kujawsko-Pomorskie", "72": "Dolnoslaskie", "82": "Pomorskie", "83": "Slaskie", "80": "Podkarpackie", "81": "Podlaskie", "85": "Warminsko-Mazurskie", "87": "Zachodniopomorskie", "79": "Opolskie", "78": "Mazowieckie"}, "ZM": {"02": "Central", "03": "Eastern", "01": "Western", "06": "North-Western", "07": "Southern", "04": "Luapula", "05": "Northern", "08": "Copperbelt", "09": "Lusaka"}, "EE": {"02": "Hiiumaa", "03": "Ida-Virumaa", "13": "Raplamaa", "01": "Harjumaa", "06": "Kohtla-Jarve", "07": 
"Laanemaa", "04": "Jarvamaa", "05": "Jogevamaa", "19": "Valgamaa", "18": "Tartumaa", "08": "Laane-Virumaa", "09": "Narva", "21": "Vorumaa", "20": "Viljandimaa", "16": "Tallinn", "12": "Polvamaa", "17": "Tartu", "14": "Saaremaa", "11": "Parnumaa", "15": "Sillamae", "10": "Parnu"}, "EG": {"02": "Al Bahr al Ahmar", "03": "Al Buhayrah", "26": "Janub Sina'", "01": "Ad Daqahliyah", "06": "Al Iskandariyah", "07": "Al Isma'iliyah", "04": "Al Fayyum", "05": "Al Gharbiyah", "08": "Al Jizah", "09": "Al Minufiyah", "14": "Ash Sharqiyah", "24": "Suhaj", "27": "Shamal Sina'", "20": "Dumyat", "21": "Kafr ash Shaykh", "11": "Al Qahirah", "10": "Al Minya", "13": "Al Wadi al Jadid", "12": "Al Qalyubiyah", "15": "As Suways", "22": "Matruh", "17": "Asyut", "16": "Aswan", "19": "Bur Sa'id", "18": "Bani Suwayf", "23": "Qina"}, "ZA": {"02": "KwaZulu-Natal", "03": "Free State", "01": "North-Western Province", "06": "Gauteng", "07": "Mpumalanga", "05": "Eastern Cape", "08": "Northern Cape", "09": "Limpopo", "11": "Western Cape", "10": "North-West"}, "EC": {"02": "Azuay", "03": "Bolivar", "01": "Galapagos", "06": "Chimborazo", "07": "Cotopaxi", "04": "Canar", "05": "Carchi", "08": "El Oro", "09": "Esmeraldas", "24": "Orellana", "20": "Zamora-Chinchipe", "14": "Manabi", "11": "Imbabura", "10": "Guayas", "13": "Los Rios", "12": "Loja", "15": "Morona-Santiago", "22": "Sucumbios", "17": "Pastaza", "19": "Tungurahua", "18": "Pichincha", "23": "Napo"}, "IT": {"02": "Basilicata", "03": "Calabria", "13": "Puglia", "01": "Abruzzi", "06": "Friuli-Venezia Giulia", "07": "Lazio", "04": "Campania", "05": "Emilia-Romagna", "19": "Valle d'Aosta", "18": "Umbria", "08": "Liguria", "09": "Lombardia", "20": "Veneto", "16": "Toscana", "12": "Piemonte", "17": "Trentino-Alto Adige", "14": "Sardegna", "11": "Molise", "15": "Sicilia", "10": "Marche"}, "VN": {"30": "Quang Ninh", "81": "Hung Yen", "45": "Ba Ria-Vung Tau", "60": "Ninh Thuan", "61": "Phu Yen", "62": "Quang Binh", "63": "Quang Ngai", "64": "Quang Tri", 
"49": "Gia Lai", "66": "Thua Thien-Hue", "67": "Tra Vinh", "68": "Tuyen Quang", "69": "Vinh Long", "80": "Ha Nam", "52": "Ha Tinh", "86": "Vinh Phuc", "53": "Hoa Binh", "84": "Quang Nam", "85": "Thai Nguyen", "24": "Long An", "03": "Ben Tre", "01": "An Giang", "20": "Ho Chi Minh", "21": "Kien Giang", "05": "Cao Bang", "46": "Binh Dinh", "47": "Binh Thuan", "44": "Ha Noi", "09": "Dong Thap", "43": "Dong Nai", "87": "Can Tho", "82": "Nam Dinh", "83": "Phu Tho", "39": "Lang Son", "77": "Ca Mau", "76": "Binh Phuoc", "75": "Binh Duong", "74": "Bac Ninh", "73": "Bac Lieu", "72": "Bac Kan", "71": "Bac Giang", "70": "Yen Bai", "91": "Dak Nong", "90": "Lao Cai", "93": "Hau Giang", "92": "Dien Bien", "79": "Hai Duong", "78": "Da Nang", "13": "Hai Phong", "59": "Ninh Binh", "58": "Nghe An", "33": "Tay Ninh", "32": "Son La", "23": "Lam Dong", "37": "Tien Giang", "50": "Ha Giang", "35": "Thai Binh", "34": "Thanh Hoa", "55": "Kon Tum", "89": "Lai Chau", "88": "Dac Lak", "54": "Khanh Hoa", "65": "Soc Trang"}, "SB": {"11": "Western", "03": "Malaita", "13": "Rennell and Bellona", "12": "Choiseul", "06": "Guadalcanal", "07": "Isabel", "10": "Central", "08": "Makira", "09": "Temotu"}, "ET": {"48": "Dire Dawa", "49": "Gambela Hizboch", "46": "Amara", "47": "Binshangul Gumuz", "44": "Adis Abeba", "45": "Afar", "51": "Oromiya", "50": "Hareri Hizb", "53": "Tigray", "52": "Sumale", "54": "YeDebub Biheroch Bihereseboch na Hizboch"}, "SO": {"02": "Banaadir", "03": "Bari", "13": "Shabeellaha Dhexe", "01": "Bakool", "06": "Gedo", "07": "Hiiraan", "04": "Bay", "05": "Galguduud", "19": "Togdheer", "18": "Nugaal", "08": "Jubbada Dhexe", "09": "Jubbada Hoose", "22": "Sool", "21": "Awdal", "16": "Woqooyi Galbeed", "12": "Sanaag", "14": "Shabeellaha Hoose", "11": "Nugaal", "20": "Woqooyi Galbeed", "10": "Mudug"}, "ZW": {"02": "Midlands", "03": "Mashonaland Central", "01": "Manicaland", "06": "Matabeleland North", "07": "Matabeleland South", "04": "Mashonaland East", "05": "Mashonaland West", "08": 
"Masvingo", "09": "Bulawayo", "10": "Harare"}, "SA": {"02": "Al Bahah", "10": "Ar Riyad", "13": "Ha'il", "06": "Ash Sharqiyah", "14": "Makkah", "17": "Jizan", "05": "Al Madinah", "19": "Tabuk", "08": "Al Qasim", "16": "Najran", "20": "Al Jawf", "11": "Asir Province", "15": "Al Hudud ash Shamaliyah"}, "ES": {"39": "Cantabria", "27": "La Rioja", "59": "Pais Vasco", "07": "Islas Baleares", "55": "Castilla y Leon", "32": "Navarra", "31": "Murcia", "56": "Catalonia", "51": "Andalucia", "29": "Madrid", "53": "Canarias", "34": "Asturias", "60": "Comunidad Valenciana", "54": "Castilla-La Mancha", "52": "Aragon", "57": "Extremadura", "58": "Galicia"}, "ER": {"02": "Debub", "03": "Debubawi K'eyih Bahri", "01": "Anseba", "06": "Semenawi K'eyih Bahri", "04": "Gash Barka", "05": "Ma'akel"}, "MD": {"60": "Balti", "61": "Basarabeasca", "62": "Bender", "63": "Briceni", "64": "Cahul", "65": "Cantemir", "66": "Calarasi", "67": "Causeni", "68": "Cimislia", "69": "Criuleni", "80": "Nisporeni", "81": "Ocnita", "86": "Soldanesti", "87": "Soroca", "84": "Riscani", "85": "Singerei", "82": "Orhei", "83": "Rezina", "77": "Hincesti", "76": "Glodeni", "75": "Floresti", "74": "Falesti", "73": "Edinet", "72": "Dubasari", "71": "Drochia", "70": "Donduseni", "91": "Telenesti", "90": "Taraclia", "92": "Ungheni", "79": "Leova", "78": "Ialoveni", "59": "Anenii Noi", "58": "Stinga Nistrului", "57": "Chisinau", "51": "Gagauzia", "88": "Stefan-Voda", "89": "Straseni"}, "MG": {"02": "Fianarantsoa", "03": "Mahajanga", "01": "Antsiranana", "06": "Toliara", "04": "Toamasina", "05": "Antananarivo"}, "MA": {"56": "Tadla-Azilal", "59": "La,youne-Boujdour-Sakia El Hamra", "58": "Taza-Al Hoceima-Taounate", "48": "Meknes-Tafilalet", "49": "Rabat-Sale-Zemmour-Zaer", "46": "Fes-Boulemane", "47": "Marrakech-Tensift-Al Haouz", "57": "Tanger-Tetouan", "45": "Grand Casablanca", "51": "Doukkala-Abda", "50": "Chaouia-Ouardigha", "53": "Guelmim-Es Smara", "52": "Gharb-Chrarda-Beni Hssen", "55": "Souss-Massa-Dr,a", "54": 
"Oriental"}, "MC": {"02": "Monaco", "03": "Monte-Carlo", "01": "La Condamine"}, "UZ": {"02": "Bukhoro", "03": "Farghona", "13": "Toshkent", "01": "Andijon", "06": "Namangan", "07": "Nawoiy", "04": "Jizzakh", "05": "Khorazm", "08": "Qashqadaryo", "09": "Qoraqalpoghiston", "12": "Surkhondaryo", "14": "Toshkent", "11": "Sirdaryo", "10": "Samarqand"}, "MM": {"02": "Chin State", "03": "Irrawaddy", "13": "Mon State", "01": "Rakhine State", "06": "Kayah State", "07": "Magwe", "04": "Kachin State", "05": "Karan State", "08": "Mandalay", "09": "Pegu", "12": "Tenasserim", "17": "Yangon", "14": "Rangoon", "11": "Shan State", "10": "Sagaing"}, "ML": {"03": "Kayes", "01": "Bamako", "06": "Sikasso", "07": "Koulikoro", "04": "Mopti", "05": "Segou", "08": "Tombouctou", "09": "Gao", "10": "Kidal"}, "MO": {"02": "Macau", "01": "Ilhas"}, "MN": {"02": "Bayanhongor", "03": "Bayan-Olgiy", "01": "Arhangay", "06": "Dornod", "07": "Dornogovi", "22": "Erdenet", "05": "Darhan", "08": "Dundgovi", "09": "Dzavhan", "24": "Govisumber", "25": "Orhon", "20": "Ulaanbaatar", "21": "Bulgan", "11": "Hentiy", "10": "Govi-Altay", "13": "Hovsgol", "12": "Hovd", "15": "Ovorhangay", "14": "Omnogovi", "17": "Suhbaatar", "16": "Selenge", "19": "Uvs", "18": "Tov", "23": "Darhan-Uul"}, "MK": {"24": "Demir Hisar", "25": "Demir Kapija", "26": "Dobrusevo", "27": "Dolna Banjica", "20": "Cucer-Sandevo", "21": "Debar", "22": "Delcevo", "23": "Delogozdi", "28": "Dolneni", "29": "Dorce Petrov", "59": "Lipkovo", "58": "Labunista", "55": "Kuklis", "54": "Krusevo", "57": "Kumanovo", "56": "Kukurecani", "51": "Kratovo", "50": "Kosel", "53": "Krivogastani", "52": "Kriva Palanka", "B4": "Vinica", "B5": "Vitoliste", "B6": "Vranestica", "B7": "Vrapciste", "B1": "Veles", "B2": "Velesta", "B3": "Vevcani", "B8": "Vratnica", "B9": "Vrutok", "88": "Rostusa", "89": "Samokov", "82": "Prilep", "83": "Probistip", "80": "Plasnica", "81": "Podares", "86": "Resen", "87": "Rosoman", "84": "Radovis", "85": "Rankovce", "02": "Bac", "03": 
"Belcista", "01": "Aracinovo", "06": "Bitola", "07": "Blatec", "04": "Berovo", "05": "Bistrica", "08": "Bogdanci", "09": "Bogomila", "39": "Kamenjane", "38": "Jegunovce", "33": "Gevgelija", "32": "Gazi Baba", "31": "Dzepciste", "30": "Drugovo", "37": "Izvor", "36": "Ilinden", "35": "Gradsko", "34": "Gostivar", "60": "Lozovo", "61": "Lukovo", "62": "Makedonska Kamenica", "63": "Makedonski Brod", "64": "Mavrovi Anovi", "65": "Meseista", "66": "Miravci", "67": "Mogila", "68": "Murtino", "69": "Negotino", "C3": "Zelino", "C2": "Zelenikovo", "C1": "Zajas", "C6": "Zrnovci", "C5": "Zletovo", "C4": "Zitose", "99": "Struga", "98": "Stip", "91": "Sipkovica", "90": "Saraj", "93": "Sopotnica", "92": "Sopiste", "95": "Staravina", "94": "Srbinovo", "97": "Staro Nagoricane", "96": "Star Dojran", "11": "Bosilovo", "10": "Bogovinje", "13": "Cair", "12": "Brvenica", "15": "Caska", "14": "Capari", "17": "Centar", "16": "Cegrane", "19": "Cesinovo", "18": "Centar Zupa", "48": "Kondovo", "49": "Konopiste", "46": "Kocani", "47": "Konce", "44": "Kisela Voda", "45": "Klecevce", "42": "Kavadarci", "43": "Kicevo", "40": "Karbinci", "41": "Karpos", "A1": "Strumica", "A3": "Suto Orizari", "A2": "Studenicani", "A5": "Tearce", "A4": "Sveti Nikole", "A7": "Topolcani", "A6": "Tetovo", "A9": "Vasilevo", "A8": "Valandovo", "77": "Oslomej", "76": "Orizari", "75": "Orasac", "74": "Ohrid", "73": "Oblesevo", "72": "Novo Selo", "71": "Novaci", "70": "Negotino-Polosko", "79": "Petrovec", "78": "Pehcevo"}, "MU": {"13": "Flacq", "12": "Black River", "15": "Moka", "14": "Grand Port", "17": "Plaines Wilhems", "16": "Pamplemousses", "19": "Riviere du Rempart", "18": "Port Louis", "22": "Cargados Carajos", "20": "Savanne", "23": "Rodrigues", "21": "Agalega Islands"}, "MW": {"30": "Phalombe", "02": "Chikwawa", "03": "Chiradzulu", "26": "Balaka", "27": "Likoma", "06": "Dedza", "07": "Dowa", "04": "Chitipa", "05": "Thyolo", "08": "Karonga", "09": "Kasungu", "28": "Machinga", "29": "Mulanje", "24": "Blantyre", 
"25": "Mwanza", "20": "Ntchisi", "21": "Rumphi", "11": "Lilongwe", "13": "Mchinji", "12": "Mangochi", "15": "Mzimba", "22": "Salima", "17": "Nkhata Bay", "16": "Ntcheu", "19": "Nsanje", "18": "Nkhotakota", "23": "Zomba"}, "MV": {"39": "Lhaviyani", "01": "Seenu", "43": "Noonu", "05": "Laamu", "33": "Faafu ", "32": "Dhaalu", "31": "Baa", "30": "Alifu", "37": "Haa Dhaalu", "36": "Haa Alifu", "35": "Gaafu Dhaalu", "34": "Gaafu Alifu", "46": "Thaa", "38": "Kaafu", "40": "Maale", "47": "Vaavu", "45": "Shaviyani", "41": "Meemu", "44": "Raa", "42": "Gnaviyani"}, "MS": {"02": "Saint Georges", "03": "Saint Peter", "01": "Saint Anthony"}, "MR": {"02": "Hodh El Gharbi", "03": "Assaba", "01": "Hodh Ech Chargui", "06": "Trarza", "07": "Adrar", "04": "Gorgol", "05": "Brakna", "08": "Dakhlet Nouadhibou", "09": "Tagant", "12": "Inchiri", "11": "Tiris Zemmour", "10": "Guidimaka"}, "UG": {"56": "Mubende", "42": "Kiboga", "29": "Bushenyi", "60": "Pallisa", "61": "Rakai", "88": "Moroto", "89": "Mpigi", "65": "Adjumani", "66": "Bugiri", "67": "Busia", "82": "Kanungu", "69": "Katakwi", "80": "Kaberamaido", "81": "Kamwenge", "86": "Mayuge", "87": "Mbale", "84": "Kitgum", "85": "Kyenjojo", "26": "Apac", "46": "Kumi", "47": "Lira", "45": "Kotido", "28": "Bundibugyo", "43": "Kisoro", "40": "Kasese", "41": "Kibale", "97": "Yumbe", "96": "Wakiso", "83": "Kayunga", "77": "Arua", "76": "Tororo", "74": "Sembabule", "73": "Nakasongola", "72": "Moyo", "71": "Masaka", "70": "Luwero", "91": "Nakapiripirit", "90": "Mukono", "93": "Rukungiri", "92": "Pader", "95": "Soroti", "94": "Sironko", "79": "Kabarole", "78": "Iganga", "39": "Kapchorwa", "38": "Kamuli", "59": "Ntungamo", "58": "Nebbi", "33": "Jinja", "31": "Hoima", "30": "Gulu", "37": "Kampala", "36": "Kalangala", "52": "Mbarara", "50": "Masindi"}, "MY": {"02": "Kedah", "03": "Kelantan", "13": "Terengganu", "01": "Johor", "06": "Pahang", "07": "Perak", "04": "Melaka", "05": "Negeri Sembilan", "08": "Perlis", "09": "Pulau Pinang", "16": "Sabah", 
"12": "Selangor", "17": "Putrajaya", "11": "Sarawak", "15": "Labuan", "14": "Kuala Lumpur"}, "MX": {"30": "Veracruz-Llave", "02": "Baja California", "03": "Baja California Sur", "26": "Sonora", "01": "Aguascalientes", "06": "Chihuahua", "07": "Coahuila de Zaragoza", "04": "Campeche", "05": "Chiapas", "08": "Colima", "09": "Distrito Federal", "28": "Tamaulipas", "29": "Tlaxcala", "14": "Jalisco", "24": "San Luis Potosi", "25": "Sinaloa", "27": "Tabasco", "20": "Oaxaca", "21": "Puebla", "11": "Guanajuato", "10": "Durango", "13": "Hidalgo", "12": "Guerrero", "15": "Mexico", "22": "Queretaro de Arteaga", "17": "Morelos", "16": "Michoacan de Ocampo", "19": "Nuevo Leon", "18": "Nayarit", "31": "Yucatan", "23": "Quintana Roo", "32": "Zacatecas"}, "IL": {"02": "HaMerkaz", "03": "HaZafon", "01": "HaDarom", "06": "Yerushalayim", "04": "Hefa", "05": "Tel Aviv"}, "FR": {"A1": "Bourgogne", "A3": "Centre", "A2": "Bretagne", "A5": "Corse", "A4": "Champagne-Ardenne", "A7": "Haute-Normandie", "A6": "Franche-Comte", "A9": "Languedoc-Roussillon", "A8": "Ile-de-France", "C1": "Alsace", "99": "Basse-Normandie", "98": "Auvergne", "97": "Aquitaine", "B4": "Nord-Pas-de-Calais", "B5": "Pays de la Loire", "B6": "Picardie", "B7": "Poitou-Charentes", "B1": "Limousin", "B2": "Lorraine", "B3": "Midi-Pyrenees", "B8": "Provence-Alpes-Cote d'Azur", "B9": "Rhone-Alpes"}, "SH": {"02": "Saint Helena", "03": "Tristan da Cunha", "01": "Ascension"}, "FI": {"13": "Southern Finland", "01": "Aland", "15": "Western Finland", "14": "Eastern Finland", "08": "Oulu", "06": "Lapland"}, "FJ": {"02": "Eastern", "03": "Northern", "01": "Central", "04": "Rotuma", "05": "Western"}, "FM": {"02": "Pohnpei", "03": "Chuuk", "01": "Kosrae", "04": "Yap"}, "NI": {"02": "Carazo", "03": "Chinandega", "13": "Nueva Segovia", "01": "Boaco", "06": "Granada", "07": "Jinotega", "04": "Chontales", "05": "Esteli", "18": "Region Autonoma Atlantico Sur", "08": "Leon", "09": "Madriz", "16": "Zelaya", "12": "Matagalpa", "17": "Autonoma 
Atlantico Norte", "14": "Rio San Juan", "11": "Masaya", "15": "Rivas", "10": "Managua"}, "NL": {"02": "Friesland", "03": "Gelderland", "01": "Drenthe", "06": "Noord-Brabant", "07": "Noord-Holland", "04": "Groningen", "05": "Limburg", "09": "Utrecht", "16": "Flevoland", "11": "Zuid-Holland", "15": "Overijssel", "10": "Zeeland"}, "NO": {"02": "Aust-Agder", "10": "Nord-Trondelag", "13": "Ostfold", "01": "Akershus", "06": "Hedmark", "07": "Hordaland", "04": "Buskerud", "05": "Finnmark", "19": "Vest-Agder", "18": "Troms", "08": "More og Romsdal", "09": "Nordland", "20": "Vestfold", "16": "Sor-Trondelag", "12": "Oslo", "17": "Telemark", "11": "Oppland", "15": "Sogn og Fjordane", "14": "Rogaland"}, "NA": {"30": "Hardap", "02": "Caprivi Oos", "03": "Boesmanland", "26": "Mariental", "01": "Bethanien", "06": "Kaokoland", "07": "Karibib", "04": "Gobabis", "05": "Grootfontein", "08": "Keetmanshoop", "09": "Luderitz", "28": "Caprivi", "29": "Erongo", "14": "Outjo", "24": "Hereroland Wes", "25": "Kavango", "39": "Otjozondjupa", "27": "Namaland", "20": "Karasburg", "38": "Oshikoto", "21": "Windhoek", "11": "Okahandja", "10": "Maltahohe", "13": "Otjiwarongo", "12": "Omaruru", "15": "Owambo", "22": "Damaraland", "17": "Swakopmund", "16": "Rehoboth", "33": "Ohangwena", "18": "Tsumeb", "31": "Karas", "23": "Hereroland Oos", "37": "Oshana", "36": "Omusati", "35": "Omaheke", "34": "Okavango", "32": "Kunene"}, "VU": {"11": "Paama", "10": "Malakula", "13": "Sanma", "12": "Pentecote", "06": "Aoba", "07": "Torba", "17": "Penama", "05": "Ambrym", "18": "Shefa", "08": "Efate", "09": "Epi", "16": "Malampa", "15": "Tafea", "14": "Shepherd"}, "NE": {"02": "Diffa", "03": "Dosso", "01": "Agadez", "06": "Tahoua", "07": "Zinder", "04": "Maradi", "05": "Niamey", "08": "Niamey"}, "NG": {"56": "Nassarawa", "42": "Osun", "43": "Taraba", "49": "Plateau", "53": "Ebonyi", "24": "Katsina", "25": "Anambra", "26": "Benue", "27": "Borno", "21": "Akwa Ibom", "22": "Cross River", "23": "Kaduna", "46": "Bauchi", 
"47": "Enugu", "44": "Yobe", "45": "Abia", "28": "Imo", "29": "Kano", "40": "Kebbi", "41": "Kogi", "05": "Lagos", "11": "Federal Capital Territory", "51": "Sokoto", "39": "Jigawa", "48": "Ondo", "16": "Ogun", "55": "Gombe", "32": "Oyo", "31": "Niger", "30": "Kwara", "37": "Edo", "36": "Delta", "35": "Adamawa", "52": "Bayelsa", "54": "Ekiti", "57": "Zamfara", "50": "Rivers"}, "NZ": {"10": "Chatham Islands", "F2": "Hawke's Bay", "F3": "Manawatu-Wanganui", "F4": "Marlborough", "F5": "Nelson", "F6": "Northland", "F1": "Gisborne", "F8": "Southland", "F9": "Taranaki", "F7": "Otago", "E9": "Canterbury", "E8": "Bay of Plenty", "G3": "West Coast", "E7": "Auckland", "G1": "Waikato", "G2": "Wellington"}, "NP": {"02": "Bheri", "03": "Dhawalagiri", "13": "Sagarmatha", "01": "Bagmati", "06": "Karnali", "07": "Kosi", "04": "Gandaki", "05": "Janakpur", "08": "Lumbini", "09": "Mahakali", "12": "Rapti", "14": "Seti", "11": "Narayani", "10": "Mechi"}, "NR": {"02": "Anabar", "03": "Anetan", "13": "Uaboe", "01": "Aiwo", "06": "Boe", "07": "Buada", "04": "Anibare", "05": "Baiti", "08": "Denigomodu", "09": "Ewa", "12": "Nibok", "14": "Yaren", "11": "Meneng", "10": "Ijuw"}, "CI": {"91": "Worodougou", "92": "Zanzan", "86": "N'zi-Comoe", "88": "Sud-Bandama", "77": "Denguele", "76": "Bas-Sassandra", "75": "Bafing", "74": "Agneby", "89": "Sud-Comoe", "84": "Moyen-Cavally", "90": "Vallee du Bandama", "82": "Lagunes", "83": "Marahoue", "80": "Haut-Sassandra", "81": "Lacs", "85": "Moyen-Comoe", "87": "Savanes", "79": "Fromager", "78": "Dix-Huit Montagnes"}, "CH": {"02": "Ausser-Rhoden", "03": "Basel-Landschaft", "26": "Jura", "01": "Aargau", "06": "Fribourg", "07": "Geneve", "04": "Basel-Stadt", "05": "Bern", "08": "Glarus", "09": "Graubunden", "14": "Obwalden", "24": "Zug", "25": "Zurich", "20": "Ticino", "21": "Uri", "11": "Luzern", "10": "Inner-Rhoden", "13": "Nidwalden", "12": "Neuchatel", "15": "Sankt Gallen", "22": "Valais", "17": "Schwyz", "16": "Schaffhausen", "19": "Thurgau", "18": 
"Solothurn", "23": "Vaud"}, "CO": {"22": "Putumayo", "02": "Antioquia", "03": "Arauca", "26": "Santander", "01": "Amazonas", "20": "Narino", "21": "Norte de Santander", "04": "Atlantico", "23": "Quindio", "08": "Caqueta", "09": "Cauca", "28": "Tolima", "29": "Valle del Cauca", "24": "Risaralda", "25": "San Andres y Providencia", "27": "Sucre", "38": "Magdalena", "11": "Choco", "10": "Cesar", "12": "Cordoba", "15": "Guainia", "14": "Guaviare", "17": "La Guajira", "16": "Huila", "19": "Meta", "32": "Casanare", "31": "Vichada", "30": "Vaupes", "37": "Caldas", "36": "Boyaca", "35": "Bolivar", "34": "Distrito Especial", "33": "Cundinamarca"}, "CN": {"30": "Guangdong", "02": "Zhejiang", "03": "Jiangxi", "26": "Shaanxi", "01": "Anhui", "06": "Qinghai", "07": "Fujian", "04": "Jiangsu", "05": "Jilin", "08": "Heilongjiang", "09": "Henan", "28": "Tianjin", "29": "Yunnan", "14": "Xizang", "24": "Shanxi", "25": "Shandong", "20": "Nei Mongol", "21": "Ningxia", "11": "Hunan", "10": "Hebei", "13": "Xinjiang", "12": "Hubei", "15": "Gansu", "22": "Beijing", "16": "Guangxi", "19": "Liaoning", "18": "Guizhou", "31": "Hainan", "23": "Shanghai", "33": "Chongqing", "32": "Sichuan"}, "CM": {"11": "Centre", "10": "Adamaoua", "13": "Nord", "12": "Extreme-Nord", "07": "Nord-Ouest", "04": "Est", "05": "Littoral", "08": "Ouest", "09": "Sud-Ouest", "14": "Sud"}, "CL": {"02": "Aisen del General Carlos Ibanez del Campo", "03": "Antofagasta", "13": "Tarapaca", "01": "Valparaiso", "06": "Bio-Bio", "07": "Coquimbo", "04": "Araucania", "05": "Atacama", "08": "Libertador General Bernardo O'Higgins", "09": "Los Lagos", "16": "Arica y Parinacota", "12": "Region Metropolitana", "17": "Los Rios", "14": "Los Lagos", "11": "Maule", "15": "Tarapaca", "10": "Magallanes y de la Antartica Chilena"}, "CA": {"ON": "Ontario", "AB": "Alberta", "NL": "Newfoundland", "MB": "Manitoba", "NB": "New Brunswick", "BC": "British Columbia", "YT": "Yukon Territory", "SK": "Saskatchewan", "QC": "Quebec", "PE": "Prince Edward 
Island", "NS": "Nova Scotia", "NT": "Northwest Territories", "NU": "Nunavut"}, "CG": {"11": "Pool", "10": "Sangha", "13": "Cuvette", "01": "Bouenza", "06": "Likouala", "07": "Niari", "04": "Kouilou", "05": "Lekoumou", "08": "Plateaux", "12": "Brazzaville", "14": "Cuvette-Ouest"}, "CF": {"02": "Basse-Kotto", "03": "Haute-Kotto", "13": "Ouham-Pende", "01": "Bamingui-Bangoran", "06": "Kemo", "07": "Lobaye", "04": "Mambere-Kadei", "05": "Haut-Mbomou", "18": "Bangui", "08": "Mbomou", "09": "Nana-Mambere", "16": "Sangha-Mbaere", "12": "Ouham", "17": "Ombella-Mpoko", "11": "Ouaka", "15": "Nana-Grebizi", "14": "Cuvette-Ouest"}, "CD": {"02": "Equateur", "10": "Maniema", "01": "Bandundu", "06": "Kinshasa", "04": "Kasai-Oriental", "05": "Katanga", "08": "Bas-Congo", "09": "Orientale", "12": "Sud-Kivu", "11": "Nord-Kivu"}, "CZ": {"86": "Pardubicky kraj", "52": "Hlavni mesto Praha", "88": "Stredocesky kraj", "89": "Ustecky kraj", "84": "Olomoucky kraj", "90": "Zlinsky kraj", "82": "Kralovehradecky kraj", "83": "Liberecky kraj", "80": "Vysocina", "81": "Karlovarsky kraj", "85": "Moravskoslezsky kraj", "87": "Plzensky kraj", "79": "Jihocesky kraj", "78": "Jihomoravsky kraj"}, "CY": {"02": "Kyrenia", "03": "Larnaca", "01": "Famagusta", "06": "Paphos", "04": "Nicosia", "05": "Limassol"}, "CR": {"02": "Cartago", "03": "Guanacaste", "01": "Alajuela", "06": "Limon", "07": "Puntarenas", "04": "Heredia", "08": "San Jose"}, "CV": {"02": "Brava", "10": "Sao Nicolau", "13": "Mosteiros", "01": "Boa Vista", "15": "Santa Catarina", "07": "Ribeira Grande", "04": "Maio", "05": "Paul", "19": "Sao Miguel", "18": "Sao Filipe", "08": "Sal", "16": "Santa Cruz", "17": "Sao Domingos", "11": "Sao Vicente", "20": "Tarrafal", "14": "Praia"}, "CU": {"02": "Ciudad de la Habana", "03": "Matanzas", "13": "Las Tunas", "01": "Pinar del Rio", "15": "Santiago de Cuba", "07": "Ciego de Avila", "04": "Isla de la Juventud", "05": "Camaguey", "08": "Cienfuegos", "09": "Granma", "16": "Villa Clara", "12": "Holguin", 
"14": "Sancti Spiritus", "11": "La Habana", "10": "Guantanamo"}, "SZ": {"02": "Lubombo", "03": "Manzini", "01": "Hhohho", "04": "Shiselweni", "05": "Praslin"}, "SY": {"02": "Al Ladhiqiyah", "03": "Al Qunaytirah", "13": "Dimashq", "01": "Al Hasakah", "06": "Dar", "07": "Dayr az Zawr", "04": "Ar Raqqah", "05": "As Suwayda'", "08": "Rif Dimashq", "09": "Halab", "12": "Idlib", "14": "Tartus", "11": "Hims", "10": "Hamah"}, "KG": {"02": "Chuy", "03": "Jalal-Abad", "01": "Bishkek", "06": "Talas", "07": "Ysyk-Kol", "04": "Naryn", "05": "Osh", "08": "Osh", "09": "Batken"}, "KE": {"02": "Coast", "03": "Eastern", "01": "Central", "06": "North-Eastern", "07": "Nyanza", "05": "Nairobi Area", "08": "Rift Valley", "09": "Western"}, "SR": {"11": "Commewijne", "10": "Brokopondo", "13": "Marowijne", "12": "Coronie", "15": "Para", "14": "Nickerie", "17": "Saramacca", "16": "Paramaribo", "19": "Wanica", "18": "Sipaliwini"}, "KI": {"02": "Line Islands", "03": "Phoenix Islands", "01": "Gilbert Islands"}, "KH": {"02": "Kampong Cham", "03": "Kampong Chhnang", "01": "Batdambang", "06": "Kampot", "07": "Kandal", "04": "Kampong Speu", "05": "Kampong Thum", "08": "Koh Kong", "09": "Kracheh", "29": "Batdambang", "25": "Banteay Meanchey", "11": "Phnum Penh", "10": "Mondulkiri", "13": "Preah Vihear", "12": "Pursat", "15": "Ratanakiri Kiri", "14": "Prey Veng", "17": "Stung Treng", "16": "Siem Reap", "19": "Takeo", "18": "Svay Rieng", "30": "Pailin"}, "KN": {"02": "Saint Anne Sandy Point", "03": "Saint George Basseterre", "13": "Saint Thomas Middle Island", "01": "Christ Church Nichola Town", "06": "Saint John Capisterre", "07": "Saint John Figtree", "04": "Saint George Gingerland", "05": "Saint James Windward", "08": "Saint Mary Cayon", "09": "Saint Paul Capisterre", "12": "Saint Thomas Lowland", "11": "Saint Peter Basseterre", "15": "Trinity Palmetto Point", "10": "Saint Paul Charlestown"}, "KM": {"02": "Grande Comore", "03": "Moheli", "01": "Anjouan"}, "ST": {"02": "Sao Tome", "01": 
"Principe"}, "SK": {"02": "Bratislava", "03": "Kosice", "01": "Banska Bystrica", "06": "Trencin", "07": "Trnava", "04": "Nitra", "05": "Presov", "08": "Zilina"}, "KR": {"11": "Seoul-t'ukpyolsi", "03": "Cholla-bukto", "13": "Kyonggi-do", "01": "Cheju-do", "06": "Kangwon-do", "14": "Kyongsang-bukto", "17": "Ch'ungch'ong-namdo", "10": "Pusan-jikhalsi", "19": "Taejon-jikhalsi", "18": "Kwangju-jikhalsi", "05": "Ch'ungch'ong-bukto", "20": "Kyongsang-namdo", "16": "Cholla-namdo", "12": "Inch'on-jikhalsi", "15": "Taegu-jikhalsi", "21": "Ulsan-gwangyoksi"}, "SI": {"I2": "Kuzma Commune", "I5": "Litija Commune", "I7": "Loska Dolina Commune", "O1": "Cirkulane Commune", "24": "Dornava Commune", "25": "Dravograd Commune", "26": "Duplek Commune", "27": "Gorenja vas-Poljane Commune", "20": "Dobrepolje Commune", "G2": "Dobje Commune", "22": "Dol pri Ljubljani Commune", "28": "Gorisnica Commune", "29": "Gornja Radgona Commune", "G9": "Hajdina Commune", "G8": "Grad Commune", "N6": "Zetale Commune", "N7": "Zirovnica Commune", "F2": "Ziri Commune", "F3": "Zrece Commune", "F4": "Benedikt Commune", "F5": "Bistrica ob Sotli Commune", "J8": "Oplotnica Commune", "F6": "Bloke Commune", "K2": "Podvelka Commune", "55": "Kungota Commune", "54": "Krsko Commune", "57": "Lasko Commune", "F7": "Braslovce Commune", "51": "Kozje Commune", "50": "Koper-Capodistria Urban Commune", "53": "Kranjska Gora Commune", "52": "Kranj Commune", "B4": "Sentjernej Commune", "F8": "Cankova Commune", "B6": "Sevnica Commune", "B7": "Sezana Commune", "L6": "Sempeter-Vrtojba Commune", "B1": "Semic Commune", "B2": "Sencur Commune", "B3": "Sentilj Commune", "K7": "Ptuj Urban Commune", "B8": "Skocjan Commune", "B9": "Skofja Loka Commune", "N5": "Zalec Commune", "J7": "Novo mesto Urban Commune", "K5": "Preddvor Commune", "J1": "Majsperk Commune", "M5": "Tabor Commune", "M4": "Sveti Andraz v Slovenskih goricah Commune", "88": "Osilnica Commune", "89": "Pesnica Commune", "M1": "Sodrazica Commune", "J2": "Maribor Commune", 
"M3": "Sveta Ana Commune", "M2": "Solcava Commune", "82": "Naklo Commune", "83": "Nazarje Commune", "80": "Murska Sobota Urban Commune", "81": "Muta Commune", "86": "Odranci Commune", "87": "Ormoz Commune", "84": "Nova Gorica Urban Commune", "02": "Beltinci Commune", "03": "Bled Commune", "01": "Ajdovscina Commune", "06": "Bovec Commune", "07": "Brda Commune", "04": "Bohinj Commune", "05": "Borovnica Commune", "08": "Brezice Commune", "09": "Brezovica Commune", "M8": "Trzin Commune", "E9": "Zavrc Commune", "E5": "Vrhnika Commune", "E7": "Zagorje ob Savi Commune", "E6": "Vuzenica Commune", "E1": "Vipava Commune", "E3": "Vodice Commune", "E2": "Vitanje Commune", "G7": "Domzale Commune", "G6": "Dolenjske Toplice Commune", "G5": "Dobrovnik-Dobronak Commune", "G4": "Dobrova-Horjul-Polhov Gradec Commune", "J3": "Markovci Commune", "G3": "Dobrna Commune", "M9": "Velika Polana Commune", "K1": "Podlehnik Commune", "39": "Ivancna Gorica Commune", "38": "Ilirska Bistrica Commune", "G1": "Destrnik Commune", "L7": "Sentjur pri Celju Commune", "H2": "Hodos-Hodos Commune", "32": "Grosuplje Commune", "31": "Gornji Petrovci Commune", "30": "Gornji Grad Commune", "37": "Ig Commune", "36": "Idrija Commune", "35": "Hrpelje-Kozina Commune", "34": "Hrastnik Commune", "L5": "Selnica ob Dravi Commune", "L2": "Ribnica na Pohorju Commune", "L3": "Ruse Commune", "K3": "Polzela Commune", "61": "Ljubljana Urban Commune", "62": "Ljubno Commune", "64": "Logatec Commune", "K6": "Prevalje Commune", "66": "Loski Potok Commune", "K4": "Prebold Commune", "68": "Lukovica Commune", "K9": "Razkrizje Commune", "K8": "Ravne na Koroskem Commune", "N3": "Vojnik Commune", "C9": "Store Commune", "C8": "Starse Commune", "L8": "Slovenska Bistrica Commune", "C2": "Slovenj Gradec Urban Commune", "C1": "Skofljica Commune", "L9": "Smartno pri Litiji Commune", "C7": "Sostanj Commune", "C6": "Smartno ob Paki Commune", "C5": "Smarje pri Jelsah Commune", "C4": "Slovenske Konjice Commune", "H8": "Komenda Commune", "99": 
"Radece Commune", "98": "Race-Fram Commune", "H9": "Kostel Commune", "91": "Pivka Commune", "N1": "Verzej Commune", "N2": "Videm Commune", "92": "Podcetrtek Commune", "N4": "Vransko Commune", "94": "Postojna Commune", "97": "Puconci Commune", "I3": "Lenart Commune", "11": "Celje Commune", "F1": "Zelezniki Commune", "13": "Cerknica Commune", "12": "Cerklje na Gorenjskem Commune", "15": "Crensovci Commune", "14": "Cerkno Commune", "17": "Crnomelj Commune", "16": "Crna na Koroskem Commune", "19": "Divaca Commune", "F9": "Cerkvenjak Commune", "M7": "Trnovska vas Commune", "M6": "Tisina Commune", "H3": "Horjul Commune", "J9": "Piran-Pirano Commune", "H1": "Hoce-Slivnica Commune", "H6": "Kamnik Commune", "H7": "Kocevje Commune", "H4": "Jesenice Commune", "H5": "Jezersko Commune", "I9": "Luce Commune", "I8": "Lovrenc na Pohorju Commune", "49": "Komen Commune", "46": "Kobarid Commune", "47": "Kobilje Commune", "44": "Kanal Commune", "45": "Kidricevo Commune", "42": "Jursinci Commune", "I4": "Lendava-Lendva Commune", "40": "Izola-Isola Commune", "I6": "Ljutomer Commune", "A1": "Radenci Commune", "A3": "Radovljica Commune", "A2": "Radlje ob Dravi Commune", "A7": "Rogaska Slatina Commune", "A6": "Rogasovci Commune", "A8": "Rogatec Commune", "77": "Moravce Commune", "76": "Mislinja Commune", "L4": "Salovci Commune", "74": "Mezica Commune", "73": "Metlika Commune", "72": "Menges Commune", "71": "Medvode Commune", "L1": "Ribnica Commune", "J4": "Miklavz na Dravskem polju Commune", "79": "Mozirje Commune", "78": "Moravske Toplice Commune", "N8": "Zuzemberk Commune", "J6": "Mirna Pec Commune", "I1": "Krizevci Commune", "D8": "Velike Lasce Commune", "J5": "Miren-Kostanjevica Commune", "D6": "Turnisce Commune", "D7": "Velenje Urban Commune", "D4": "Trebnje Commune", "D5": "Trzic Commune", "D2": "Tolmin Commune", "D3": "Trbovlje Commune", "N9": "Apace Commune", "D1": "Sveti Jurij Commune"}, "KP": {"11": "P'yongan-bukto", "03": "Hamgyong-namdo", "13": "Yanggang-do", "01": 
"Chagang-do", "06": "Hwanghae-namdo", "07": "Hwanghae-bukto", "17": "Hamgyong-bukto", "18": "Najin Sonbong-si", "08": "Kaesong-si", "09": "Kangwon-do", "12": "P'yongyang-si", "15": "P'yongan-namdo", "14": "Namp'o-si"}, "KW": {"02": "Al Kuwayt", "01": "Al Ahmadi", "07": "Al Farwaniyah", "05": "Al Jahra", "08": "Hawalli", "09": "Mubarak al Kabir"}, "SN": {"11": "Kolda", "03": "Diourbel", "13": "Louga", "01": "Dakar", "15": "Matam", "07": "Thies", "10": "Kaolack", "09": "Fatick", "12": "Ziguinchor", "05": "Tambacounda", "14": "Saint-Louis"}, "SM": {"02": "Chiesanuova", "03": "Domagnano", "01": "Acquaviva", "06": "Borgo Maggiore", "07": "San Marino", "04": "Faetano", "05": "Fiorentino", "08": "Monte Giardino", "09": "Serravalle"}, "SL": {"02": "Northern", "03": "Southern", "01": "Eastern", "04": "Western Area"}, "SC": {"02": "Anse Boileau", "03": "Anse Etoile", "01": "Anse aux Pins", "06": "Baie Lazare", "07": "Baie Sainte Anne", "04": "Anse Louis", "05": "Anse Royale", "08": "Beau Vallon", "09": "Bel Air", "14": "Grand' Anse", "20": "Pointe La Rue", "21": "Port Glaud", "11": "Cascade", "10": "Bel Ombre", "13": "Grand' Anse", "12": "Glacis", "15": "La Digue", "22": "Saint Louis", "17": "Mont Buxton", "16": "La Riviere Anglaise", "19": "Plaisance", "18": "Mont Fleuri", "23": "Takamaka"}, "KZ": {"02": "Almaty City", "03": "Aqmola", "13": "Qostanay", "01": "Almaty", "06": "Atyrau", "07": "West Kazakhstan", "04": "Aqtobe", "05": "Astana", "08": "Bayqonyr", "09": "Mangghystau", "16": "North Kazakhstan", "12": "Qaraghandy", "17": "Zhambyl", "14": "Qyzylorda", "11": "Pavlodar", "15": "East Kazakhstan", "10": "South Kazakhstan"}, "KY": {"02": "Eastern", "03": "Midland", "01": "Creek", "06": "Stake Bay", "07": "West End", "04": "South Town", "05": "Spot Bay", "08": "Western"}, "SE": {"02": "Blekinge Lan", "03": "Gavleborgs Lan", "23": "Vasterbottens Lan", "12": "Kronobergs Lan", "06": "Hallands Lan", "07": "Jamtlands Lan", "22": "Varmlands Lan", "05": "Gotlands Lan", "24": 
"Vasternorrlands Lan", "18": "Sodermanlands Lan", "08": "Jonkopings Lan", "09": "Kalmar Lan", "28": "Vastra Gotaland", "21": "Uppsala Lan", "27": "Skane Lan", "16": "Ostergotlands Lan", "26": "Stockholms Lan", "25": "Vastmanlands Lan", "14": "Norrbottens Lan", "15": "Orebro Lan", "10": "Dalarnas Lan"}, "SD": {"27": "Al Wusta", "33": "Darfur", "32": "Bahr al Ghazal", "31": "Ash Sharqiyah", "30": "Ash Shamaliyah", "28": "Al Istiwa'iyah", "29": "Al Khartum", "35": "Upper Nile", "34": "Kurdufan", "40": "Al Wahadah State", "44": "Central Equatoria State"}, "DO": {"30": "La Vega", "02": "Baoruco", "03": "Barahona", "26": "Santiago Rodriguez", "01": "Azua", "06": "Duarte", "21": "Sanchez Ramirez", "04": "Dajabon", "23": "San Juan", "08": "Espaillat", "09": "Independencia", "28": "El Seibo", "29": "Hato Mayor", "24": "San Pedro De Macoris", "25": "Santiago", "27": "Valverde", "20": "Samana", "11": "Elias Pina", "10": "La Altagracia", "12": "La Romana", "15": "Monte Cristi", "14": "Maria Trinidad Sanchez", "17": "Peravia", "16": "Pedernales", "19": "Salcedo", "18": "Puerto Plata", "31": "Monsenor Nouel", "05": "Distrito Nacional", "37": "Santo Domingo", "36": "San Jose de Ocoa", "35": "Peravia", "34": "Distrito Nacional", "33": "San Cristobal", "32": "Monte Plata"}, "DM": {"02": "Saint Andrew", "03": "Saint David", "06": "Saint Joseph", "07": "Saint Luke", "04": "Saint George", "05": "Saint John", "08": "Saint Mark", "09": "Saint Patrick", "11": "Saint Peter", "10": "Saint Paul"}, "DJ": {"01": "Ali Sabieh", "06": "Dikhil", "07": "Djibouti", "04": "Obock", "05": "Tadjoura", "08": "Arta"}, "DK": {"19": "Nordjylland", "18": "Midtjylland", "20": "Sjelland", "21": "Syddanmark", "17": "Hovedstaden"}, "DE": {"02": "Bayern", "03": "Bremen", "13": "Sachsen", "01": "Baden-Wurttemberg", "06": "Niedersachsen", "07": "Nordrhein-Westfalen", "04": "Hamburg", "05": "Hessen", "08": "Rheinland-Pfalz", "09": "Saarland", "16": "Berlin", "12": "Mecklenburg-Vorpommern", "14": "Sachsen-Anhalt", 
"11": "Brandenburg", "15": "Thuringen", "10": "Schleswig-Holstein"}, "YE": {"02": "Adan", "03": "Al Mahrah", "01": "Abyan", "06": "Lahij", "07": "Al Bayda'", "04": "Hadramawt", "05": "Shabwah", "08": "Al Hudaydah", "09": "Al Jawf", "14": "Ma'rib", "24": "Lahij", "25": "Taizz", "20": "Al Bayda'", "21": "Al Jawf", "11": "Dhamar", "10": "Al Mahwit", "13": "Ibb", "12": "Hajjah", "15": "Sa'dah", "22": "Hajjah", "17": "Taizz", "16": "San'a'", "19": "Amran", "18": "Ad Dali", "23": "Ibb"}, "DZ": {"45": "Ghardaia", "54": "Tindouf", "51": "Relizane", "43": "El Oued", "49": "Naama", "52": "Souk Ahras", "53": "Tamanghasset", "24": "Jijel", "03": "Batna", "26": "Mascara", "01": "Alger", "06": "Medea", "07": "Mostaganem", "04": "Constantine", "23": "Guelma", "46": "Illizi", "47": "Khenchela", "44": "El Tarf", "09": "Oran", "42": "El Bayadh", "29": "Oum el Bouaghi", "40": "Boumerdes", "41": "Chlef", "14": "Tizi Ouzou", "56": "Tissemsilt", "25": "Laghouat", "39": "Bordj Bou Arreridj", "27": "M'sila", "20": "Blida", "21": "Bouira", "10": "Saida", "13": "Tiaret", "12": "Setif", "15": "Tlemcen", "22": "Djelfa", "38": "Bechar", "19": "Biskra", "18": "Bejaia", "31": "Skikda", "30": "Sidi Bel Abbes", "37": "Annaba", "36": "Ain Temouchent", "35": "Ain Defla", "34": "Adrar", "33": "Tebessa", "55": "Tipaza", "48": "Mila", "50": "Ouargla"}, "US": {"WA": "Washington", "VA": "Virginia", "DE": "Delaware", "DC": "District of Columbia", "WI": "Wisconsin", "WV": "West Virginia", "HI": "Hawaii", "CO": "Colorado", "FL": "Florida", "FM": "Federated States of Micronesia", "WY": "Wyoming", "NH": "New Hampshire", "NJ": "New Jersey", "NM": "New Mexico", "TX": "Texas", "LA": "Louisiana", "NC": "North Carolina", "ND": "North Dakota", "NE": "Nebraska", "TN": "Tennessee", "NY": "New York", "PA": "Pennsylvania", "CA": "California", "NV": "Nevada", "AA": "Armed Forces Americas", "PR": "Puerto Rico", "GU": "Guam", "AE": "Armed Forces Europe, Middle East, & Canada", "PW": "Palau", "VI": "Virgin Islands", "AK": 
"Alaska", "AL": "Alabama", "AP": "Armed Forces Pacific", "AS": "American Samoa", "AR": "Arkansas", "VT": "Vermont", "IL": "Illinois", "GA": "Georgia", "IN": "Indiana", "IA": "Iowa", "OK": "Oklahoma", "AZ": "Arizona", "ID": "Idaho", "CT": "Connecticut", "ME": "Maine", "MD": "Maryland", "MA": "Massachusetts", "OH": "Ohio", "UT": "Utah", "MO": "Missouri", "MN": "Minnesota", "MI": "Michigan", "MH": "Marshall Islands", "RI": "Rhode Island", "KS": "Kansas", "MT": "Montana", "MP": "Northern Mariana Islands", "MS": "Mississippi", "SC": "South Carolina", "KY": "Kentucky", "OR": "Oregon", "SD": "South Dakota"}, "UY": {"02": "Canelones", "03": "Cerro Largo", "13": "Rivera", "01": "Artigas", "06": "Flores", "07": "Florida", "04": "Colonia", "05": "Durazno", "19": "Treinta y Tres", "18": "Tacuarembo", "08": "Lavalleja", "09": "Maldonado", "16": "San Jose", "12": "Rio Negro", "17": "Soriano", "14": "Rocha", "11": "Paysandu", "15": "Salto", "10": "Montevideo"}, "LB": {"02": "Al Janub", "03": "Liban-Nord", "01": "Beqaa", "06": "Liban-Sud", "07": "Nabatiye", "04": "Beyrouth", "05": "Mont-Liban", "08": "Beqaa", "09": "Liban-Nord", "11": "Baalbek-Hermel", "10": "Aakk,r"}, "LC": {"02": "Dauphin", "03": "Castries", "01": "Anse-la-Raye", "06": "Gros-Islet", "07": "Laborie", "04": "Choiseul", "05": "Dennery", "08": "Micoud", "09": "Soufriere", "11": "Praslin", "10": "Vieux-Fort"}, "LA": {"02": "Champasak", "03": "Houaphan", "13": "Xaignabouri", "01": "Attapu", "07": "Oudomxai", "04": "Khammouan", "05": "Louang Namtha", "08": "Phongsali", "09": "Saravan", "17": "Louangphrabang", "14": "Xiangkhoang", "11": "Vientiane", "10": "Savannakhet"}, "TW": {"02": "Kao-hsiung", "03": "T'ai-pei", "01": "Fu-chien", "04": "T'ai-wan"}, "TT": {"02": "Caroni", "03": "Mayaro", "01": "Arima", "06": "Saint Andrew", "07": "Saint David", "04": "Nariva", "05": "Port-of-Spain", "08": "Saint George", "09": "Saint Patrick", "12": "Victoria", "11": "Tobago", "10": "San Fernando"}, "TR": {"45": "Manisa", "54": 
"Sakarya", "58": "Sivas", "60": "Tokat", "61": "Trabzon", "62": "Tunceli", "57": "Sinop", "64": "Usak", "49": "Mus", "66": "Yozgat", "83": "Gaziantep", "68": "Ankara", "69": "Gumushane", "80": "Sirnak", "52": "Ordu", "86": "Ardahan", "53": "Rize", "84": "Kars", "85": "Zonguldak", "02": "Adiyaman", "03": "Afyonkarahisar", "26": "Eskisehir", "81": "Adana", "20": "Denizli", "07": "Antalya", "04": "Agri", "05": "Amasya", "46": "Kahramanmaras", "08": "Artvin", "09": "Aydin", "28": "Giresun", "43": "Kutahya", "40": "Kirsehir", "41": "Kocaeli", "82": "Cankiri", "79": "Kirikkale", "14": "Bolu", "78": "Karaman", "87": "Bartin", "24": "Erzincan", "89": "Karabuk", "25": "Erzurum", "39": "Kirklareli", "77": "Bayburt", "76": "Batman", "75": "Aksaray", "38": "Kayseri", "73": "Nigde", "72": "Mardin", "71": "Konya", "70": "Hakkari", "91": "Osmaniye", "59": "Tekirdag", "93": "Duzce", "92": "Yalova", "74": "Siirt", "21": "Diyarbakir", "11": "Bilecik", "10": "Balikesir", "13": "Bitlis", "12": "Bingol", "15": "Burdur", "22": "Edirne", "17": "Canakkale", "16": "Bursa", "19": "Corum", "32": "Mersin", "31": "Hatay", "23": "Elazig", "37": "Kastamonu", "50": "Nevsehir", "35": "Izmir", "34": "Istanbul", "33": "Isparta", "55": "Samsun", "63": "Sanliurfa", "88": "Igdir", "48": "Mugla", "44": "Malatya", "90": "Kilis", "65": "Van"}, "LK": {"02": "Anuradhapura", "03": "Badulla", "26": "Mannar", "01": "Amparai", "06": "Galle", "07": "Hambantota", "04": "Batticaloa", "23": "Colombo", "09": "Kalutara", "28": "Vavuniya", "29": "Central", "24": "Gampaha", "25": "Jaffna", "27": "Mullaittivu", "20": "Ratnapura", "21": "Trincomalee", "11": "Kegalla", "10": "Kandy", "12": "Kurunegala", "15": "Matara", "14": "Matale", "17": "Nuwara Eliya", "16": "Moneragala", "19": "Puttalam", "18": "Polonnaruwa", "31": "Northern", "30": "North Central", "36": "Western", "35": "Uva", "34": "Southern", "33": "Sabaragamuwa", "32": "North Western"}, "LI": {"02": "Eschen", "03": "Gamprin", "01": "Balzers", "06": "Ruggell", 
"07": "Schaan", "04": "Mauren", "05": "Planken", "08": "Schellenberg", "09": "Triesen", "22": "River Gee", "21": "Gbarpolu", "11": "Vaduz", "10": "Triesenberg"}, "LV": {"30": "Valkas", "02": "Aluksnes", "03": "Balvu", "26": "Rigas", "01": "Aizkraukles", "06": "Daugavpils", "07": "Daugavpils", "04": "Bauskas", "05": "Cesu", "08": "Dobeles", "09": "Gulbenes", "28": "Talsu", "29": "Tukuma", "14": "Kraslavas", "24": "Rezeknes", "25": "Riga", "27": "Saldus", "20": "Madonas", "21": "Ogres", "11": "Jelgava", "10": "Jekabpils", "13": "Jurmala", "12": "Jelgavas", "15": "Kuldigas", "22": "Preilu", "17": "Liepajas", "16": "Liepaja", "19": "Ludzas", "18": "Limbazu", "31": "Valmieras", "23": "Rezekne", "33": "Ventspils", "32": "Ventspils"}, "TO": {"02": "Tongatapu", "03": "Vava", "01": "Ha"}, "LT": {"59": "Marijampoles Apskritis", "58": "Klaipedos Apskritis", "57": "Kauno Apskritis", "56": "Alytaus Apskritis", "60": "Panevezio Apskritis", "61": "Siauliu Apskritis", "62": "Taurages Apskritis", "63": "Telsiu Apskritis", "64": "Utenos Apskritis", "65": "Vilniaus Apskritis"}, "LU": {"02": "Grevenmacher", "03": "Luxembourg", "01": "Diekirch"}, "LR": {"11": "Grand Bassa", "10": "Sino", "13": "Maryland", "01": "Bong", "06": "Maryland", "07": "Monrovia", "04": "Grand Cape Mount", "05": "Lofa", "19": "Grand Gedeh", "18": "River Cess", "09": "Nimba", "22": "River Gee", "21": "Gbarpolu", "12": "Grand Cape Mount", "17": "Margibi", "20": "Lofa", "14": "Montserrado"}, "LS": {"11": "Butha-Buthe", "10": "Berea", "13": "Mafeteng", "12": "Leribe", "15": "Mohales Hoek", "14": "Maseru", "17": "Qachas Nek", "16": "Mokhotlong", "19": "Thaba-Tseka", "18": "Quthing"}, "TH": {"58": "Chumphon", "30": "Sisaket", "77": "Amnat Charoen", "54": "Samut Songkhram", "42": "Samut Prakan", "48": "Chanthaburi", "45": "Prachin Buri", "43": "Nakhon Nayok", "60": "Surat Thani", "61": "Phangnga", "62": "Phuket", "57": "Prachuap Khiri Khan", "64": "Nakhon Si Thammarat", "49": "Trat", "66": "Phatthalung", "67": "Satun", 
"68": "Songkhla", "69": "Pattani", "80": "Sa Kaeo", "52": "Ratchaburi", "53": "Nakhon Pathom", "02": "Chiang Mai", "03": "Chiang Rai", "26": "Chaiyaphum", "01": "Mae Hong Son", "06": "Lampang", "07": "Phrae", "04": "Nan", "05": "Lamphun", "46": "Chon Buri", "47": "Rayong", "08": "Tak", "09": "Sukhothai", "28": "Buriram", "29": "Surin", "40": "Krung Thep", "41": "Phayao", "79": "Nong Bua Lamphu", "14": "Phetchabun", "59": "Ranong", "78": "Mukdahan", "51": "Suphan Buri", "24": "Maha Sarakham", "56": "Phetchaburi", "25": "Roi Et", "39": "Pathum Thani", "65": "Trang", "76": "Udon Thani", "75": "Ubon Ratchathani", "27": "Nakhon Ratchasima", "73": "Nakhon Phanom", "72": "Yasothon", "71": "Ubon Ratchathani", "70": "Yala", "20": "Sakon Nakhon", "38": "Nonthaburi", "74": "Prachin Buri", "21": "Nakhon Phanom", "11": "Kamphaeng Phet", "10": "Uttaradit", "13": "Phichit", "12": "Phitsanulok", "15": "Uthai Thani", "22": "Khon Kaen", "17": "Nong Khai", "16": "Nakhon Sawan", "33": "Sing Buri", "18": "Loei", "31": "Narathiwat", "23": "Kalasin", "37": "Saraburi", "36": "Phra Nakhon Si Ayutthaya", "35": "Ang Thong", "34": "Lop Buri", "55": "Samut Sakhon", "63": "Krabi", "32": "Chai Nat", "44": "Chachoengsao", "50": "Kanchanaburi"}, "TG": {"24": "Maritime", "25": "Plateaux", "26": "Savanes", "22": "Centrale", "23": "Kara"}, "TD": {"02": "Biltine", "03": "Borkou-Ennedi-Tibesti", "13": "Salamat", "01": "Batha", "06": "Kanem", "07": "Lac", "04": "Chari-Baguirmi", "05": "Guera", "08": "Logone Occidental", "09": "Logone Oriental", "12": "Ouaddai", "14": "Tandjile", "11": "Moyen-Chari", "10": "Mayo-Kebbi"}, "LY": {"56": "Ghadamis", "60": "Surt", "61": "Tarabulus", "62": "Yafran", "49": "Al Jabal al Akhdar", "52": "Awbari", "03": "Al Aziziyah", "48": "Al Fatih", "05": "Al Jufrah", "47": "Ajdabiya", "08": "Al Kufrah", "45": "Zlitan", "42": "Tubruq", "41": "Tarhunah", "13": "Ash Shati'", "59": "Sawfajjin", "58": "Misratah", "55": "Darnah", "54": "Banghazi", "57": "Gharyan", "30": "Murzuq", 
"51": "An Nuqat al Khams", "50": "Al Khums", "53": "Az Zawiyah", "34": "Sabha"}, "VC": {"02": "Saint Andrew", "03": "Saint David", "01": "Charlotte", "06": "Grenadines", "04": "Saint George", "05": "Saint Patrick"}, "AE": {"02": "Ajman", "03": "Dubai", "01": "Abu Dhabi", "06": "Sharjah", "07": "Umm Al Quwain", "04": "Fujairah", "05": "Ras Al Khaimah"}, "AD": {"02": "Canillo", "03": "Encamp", "06": "Sant Julia de Loria", "07": "Andorra la Vella", "04": "La Massana", "05": "Ordino", "08": "Escaldes-Engordany"}, "AG": {"03": "Saint George", "01": "Barbuda", "06": "Saint Paul", "07": "Saint Peter", "04": "Saint John", "05": "Saint Mary", "08": "Saint Philip", "09": "Redonda"}, "AF": {"42": "Panjshir", "02": "Badghis", "03": "Baghlan", "26": "Takhar", "01": "Badakhshan", "06": "Farah", "07": "Faryab", "05": "Bamian", "23": "Kandahar", "08": "Ghazni", "09": "Ghowr", "28": "Zabol", "29": "Paktika", "40": "Parvan", "41": "Daykondi", "24": "Kondoz", "39": "Oruzgan", "27": "Vardak", "11": "Herat", "10": "Helmand", "13": "Kabol", "38": "Nurestan", "14": "Kapisa", "17": "Lowgar", "19": "Nimruz", "18": "Nangarhar", "31": "Jowzjan", "30": "Balkh", "37": "Khowst", "36": "Paktia", "35": "Laghman", "34": "Konar", "33": "Sar-e Pol", "32": "Samangan"}, "IQ": {"02": "Al Basrah", "03": "Al Muthanna", "13": "At Ta'mim", "01": "Al Anbar", "06": "Babil", "07": "Baghdad", "04": "Al Qadisiyah", "05": "As Sulaymaniyah", "18": "Salah ad Din", "08": "Dahuk", "09": "Dhi Qar", "16": "Wasit", "12": "Karbala'", "17": "An Najaf", "14": "Maysan", "11": "Arbil", "15": "Ninawa", "10": "Diyala"}, "IS": {"45": "Vesturland", "42": "Suourland", "43": "Suournes", "03": "Arnessysla", "06": "Austur-Skaftafellssysla", "07": "Borgarfjardarsysla", "23": "Rangarvallasysla", "44": "Vestfiroir", "09": "Eyjafjardarsysla", "28": "Skagafjardarsysla", "29": "Snafellsnes- og Hnappadalssysla", "40": "Norourland Eystra", "41": "Norourland Vestra", "05": "Austur-Hunavatnssysla", "20": "Nordur-Mulasysla", "21": 
"Nordur-Tingeyjarsysla", "10": "Gullbringusysla", "15": "Kjosarsysla", "17": "Myrasysla", "32": "Sudur-Tingeyjarsysla", "31": "Sudur-Mulasysla", "30": "Strandasysla", "37": "Vestur-Skaftafellssysla", "36": "Vestur-Isafjardarsysla", "35": "Vestur-Hunavatnssysla", "34": "Vestur-Bardastrandarsysla"}, "IR": {"30": "Khorasan", "42": "Khorasan-e Razavi", "43": "Khorasan-e Shemali", "24": "Markazi", "03": "Chahar Mahall va Bakhtiari", "26": "Tehran", "01": "Azarbayjan-e Bakhtari", "07": "Fars", "04": "Sistan va Baluchestan", "05": "Kohkiluyeh va Buyer Ahmadi", "08": "Gilan", "09": "Hamadan", "28": "Esfahan", "29": "Kerman", "40": "Yazd", "41": "Khorasan-e Janubi", "25": "Semnan", "39": "Qom", "27": "Zanjan", "38": "Qazvin", "21": "Zanjan", "11": "Hormozgan", "10": "Ilam", "13": "Bakhtaran", "12": "Kerman", "15": "Khuzestan", "22": "Bushehr", "17": "Mazandaran", "16": "Kordestan", "19": "Markazi", "18": "Semnan Province", "31": "Yazd", "23": "Lorestan", "37": "Golestan", "36": "Zanjan", "35": "Mazandaran", "34": "Markazi", "33": "East Azarbaijan", "32": "Ardabil", "44": "Alborz"}, "AM": {"02": "Ararat", "03": "Armavir", "01": "Aragatsotn", "06": "Lorri", "07": "Shirak", "04": "Geghark'unik'", "05": "Kotayk'", "08": "Syunik'", "09": "Tavush", "11": "Yerevan", "10": "Vayots' Dzor"}, "AL": {"51": "Vlore", "48": "Lezhe", "49": "Shkoder", "46": "Korce", "47": "Kukes", "44": "Fier", "45": "Gjirokaster", "42": "Durres", "43": "Elbasan", "40": "Berat", "41": "Diber", "50": "Tirane"}, "AO": {"02": "Bie", "03": "Cabinda", "13": "Namibe", "01": "Benguela", "06": "Cuanza Sul", "07": "Cunene", "04": "Cuando Cubango", "05": "Cuanza Norte", "19": "Bengo", "18": "Lunda Sul", "08": "Huambo", "09": "Huila", "20": "Luanda", "16": "Zaire", "12": "Malanje", "17": "Lunda Norte", "15": "Uige", "14": "Moxico"}, "AR": {"02": "Catamarca", "03": "Chaco", "01": "Buenos Aires", "06": "Corrientes", "07": "Distrito Federal", "04": "Chubut", "05": "Cordoba", "08": "Entre Rios", "09": "Formosa", "14": 
"Misiones", "24": "Tucuman", "20": "Santa Cruz", "21": "Santa Fe", "11": "La Pampa", "10": "Jujuy", "13": "Mendoza", "12": "La Rioja", "15": "Neuquen", "22": "Santiago del Estero", "17": "Salta", "16": "Rio Negro", "19": "San Luis", "18": "San Juan", "23": "Tierra del Fuego"}, "AU": {"02": "New South Wales", "03": "Northern Territory", "01": "Australian Capital Territory", "06": "Tasmania", "07": "Victoria", "04": "Queensland", "05": "South Australia", "08": "Western Australia"}, "AT": {"02": "Karnten", "03": "Niederosterreich", "01": "Burgenland", "06": "Steiermark", "07": "Tirol", "04": "Oberosterreich", "05": "Salzburg", "08": "Vorarlberg", "09": "Wien"}, "IN": {"30": "Arunachal Pradesh", "02": "Andhra Pradesh", "03": "Assam", "26": "Tripura", "01": "Andaman and Nicobar Islands", "06": "Dadra and Nagar Haveli", "07": "Delhi", "22": "Puducherry", "05": "Chandigarh", "09": "Gujarat", "28": "West Bengal", "29": "Sikkim", "24": "Rajasthan", "25": "Tamil Nadu", "39": "Uttarakhand", "38": "Jharkhand", "20": "Nagaland", "21": "Orissa", "11": "Himachal Pradesh", "10": "Haryana", "13": "Kerala", "12": "Jammu and Kashmir", "14": "Lakshadweep", "17": "Manipur", "16": "Maharashtra", "19": "Karnataka", "18": "Meghalaya", "31": "Mizoram", "23": "Punjab", "37": "Chhattisgarh", "36": "Uttar Pradesh", "35": "Madhya Pradesh", "34": "Bihar", "33": "Goa", "32": "Daman and Diu"}, "TZ": {"02": "Pwani", "03": "Dodoma", "26": "Arusha", "27": "Manyara", "06": "Kilimanjaro", "07": "Lindi", "04": "Iringa", "05": "Kigoma", "08": "Mara", "09": "Mbeya", "14": "Ruvuma", "24": "Rukwa", "25": "Zanzibar Urban", "20": "Pemba South", "21": "Zanzibar Central", "11": "Mtwara", "10": "Morogoro", "13": "Pemba North", "12": "Mwanza", "15": "Shinyanga", "22": "Zanzibar North", "17": "Tabora", "16": "Singida", "19": "Kagera", "18": "Tanga", "23": "Dar es Salaam"}, "AZ": {"58": "Tovuz", "30": "Lankaran", "54": "Sumqayit", "42": "Quba", "48": "Saki", "45": "Saatli", "43": "Qubadli", "60": "Xacmaz", "61": 
"Xankandi", "62": "Xanlar", "57": "Tartar", "64": "Xocali", "49": "Salyan", "66": "Yardimli", "67": "Yevlax", "68": "Yevlax", "69": "Zangilan", "52": "Samux", "53": "Siyazan", "02": "Agcabadi", "03": "Agdam", "26": "Kalbacar", "01": "Abseron", "06": "Agsu", "07": "Ali Bayramli", "04": "Agdas", "05": "Agstafa", "46": "Sabirabad", "47": "Saki", "08": "Astara", "09": "Baki", "28": "Lacin", "29": "Lankaran", "40": "Qazax", "41": "Qobustan", "14": "Cabrayil", "59": "Ucar", "51": "Samkir", "24": "Imisli", "56": "Susa", "25": "Ismayilli", "39": "Qax", "65": "Xocavand", "27": "Kurdamir", "71": "Zardab", "70": "Zaqatala", "20": "Ganca", "38": "Qabala", "21": "Goranboy", "11": "Barda", "10": "Balakan", "13": "Bilasuvar", "12": "Beylaqan", "15": "Calilabad", "22": "Goycay", "17": "Davaci", "16": "Daskasan", "19": "Gadabay", "18": "Fuzuli", "31": "Lerik", "23": "Haciqabul", "37": "Oguz", "36": "Neftcala", "35": "Naxcivan", "34": "Naftalan", "33": "Mingacevir", "55": "Susa", "63": "Xizi", "32": "Masalli", "44": "Qusar", "50": "Samaxi"}, "IE": {"02": "Cavan", "03": "Clare", "26": "Tipperary", "01": "Carlow", "06": "Donegal", "07": "Dublin", "04": "Cork", "23": "Offaly", "29": "Westmeath", "14": "Leitrim", "24": "Roscommon", "25": "Sligo", "27": "Waterford", "20": "Mayo", "21": "Meath", "11": "Kerry", "10": "Galway", "13": "Kilkenny", "12": "Kildare", "15": "Laois", "22": "Monaghan", "16": "Limerick", "19": "Louth", "18": "Longford", "31": "Wicklow", "30": "Wexford"}, "ID": {"02": "Bali", "03": "Bengkulu", "26": "Sumatera Utara", "01": "Aceh", "07": "Jawa Tengah", "04": "Jakarta Raya", "05": "Jambi", "08": "Jawa Timur", "28": "Maluku", "29": "Maluku Utara", "40": "Kepulauan Riau", "41": "Sulawesi Barat", "14": "Kalimantan Timur", "24": "Sumatera Barat", "39": "Irian Jaya Barat", "38": "Sulawesi Selatan", "21": "Sulawesi Tengah", "11": "Kalimantan Barat", "10": "Yogyakarta", "13": "Kalimantan Tengah", "12": "Kalimantan Selatan", "15": "Lampung", "22": "Sulawesi Tenggara", "17": 
"Nusa Tenggara Barat", "33": "Banten", "18": "Nusa Tenggara Timur", "31": "Sulawesi Utara", "30": "Jawa Barat", "37": "Riau", "36": "Papua", "35": "Kepulauan Bangka Belitung", "34": "Gorontalo", "32": "Sumatera Selatan"}, "UA": {"02": "Chernihivs'ka Oblast'", "03": "Chernivets'ka Oblast'", "26": "Zaporiz'ka Oblast'", "01": "Cherkas'ka Oblast'", "06": "Ivano-Frankivs'ka Oblast'", "07": "Kharkivs'ka Oblast'", "04": "Dnipropetrovs'ka Oblast'", "05": "Donets'ka Oblast'", "08": "Khersons'ka Oblast'", "09": "Khmel'nyts'ka Oblast'", "14": "Luhans'ka Oblast'", "24": "Volyns'ka Oblast'", "25": "Zakarpats'ka Oblast'", "27": "Zhytomyrs'ka Oblast'", "20": "Sevastopol'", "21": "Sums'ka Oblast'", "11": "Krym", "10": "Kirovohrads'ka Oblast'", "13": "Kyyivs'ka Oblast'", "12": "Kyyiv", "15": "L'vivs'ka Oblast'", "22": "Ternopil's'ka Oblast'", "17": "Odes'ka Oblast'", "16": "Mykolayivs'ka Oblast'", "19": "Rivnens'ka Oblast'", "18": "Poltavs'ka Oblast'", "23": "Vinnyts'ka Oblast'"}, "QA": {"02": "Al Ghuwariyah", "03": "Al Jumaliyah", "01": "Ad Dawhah", "06": "Ar Rayyan", "04": "Al Khawr", "05": "Al Wakrah Municipality", "08": "Madinat ach Shamal", "09": "Umm Salal", "12": "Umm Sa'id", "11": "Jariyan al Batnah", "10": "Al Wakrah"}, "MZ": {"02": "Gaza", "03": "Inhambane", "01": "Cabo Delgado", "06": "Nampula", "07": "Niassa", "04": "Maputo", "05": "Sofala", "08": "Tete", "09": "Zambezia", "11": "Maputo", "10": "Manica"}} \ No newline at end of file diff --git a/disabled_stuff/data/insults.txt b/disabled_stuff/data/insults.txt deleted file mode 100644 index 0cfc316..0000000 --- a/disabled_stuff/data/insults.txt +++ /dev/null @@ -1,30 +0,0 @@ -You are the son of a motherless ogre. -Your mother was a hamster and your father smelled of elderberries. -I once owned a dog that was smarter than you. -Go climb a wall of dicks. -You fight like a dairy farmer. -I've spoken to apes more polite than you. -Go and boil your bottom! Son of a silly person! -Go away or I shall taunt you a second time. 
-Shouldn't you have a license for being that ugly? -Calling you an idiot would be an insult to all the stupid people. -Why don't you slip into something more comfortable...like a coma. -Well, they do say opposites attract...so I sincerely hope you meet somebody who is attractive, honest, intelligent, and cultured... -Are you always this stupid or are you just making a special effort today? -Yo momma so fat when she sits around the house she sits AROUND the house. -Yo momma so ugly she made an onion cry. -Is your name Maple Syrup? It should be, you sap. -Bite my shiny metal ass! -Up yours, meatbag. -Don't piss me off today, I'm running out of places to hide the bodies... -Why don't you go outside and play hide and go fuck yourself! -I'll use small words you're sure to understand, you warthog-faced buffoon. -You are a sad, strange little man, and you have my pity. -Sit your five dollar ass down before I make change. -What you've just said is one of the most insanely idiotic things I've ever heard. Everyone in this room is now dumber for having listened to it. May God have mercy on your soul. -Look up Idiot in the dictionary. Know what you'll find? The definition of the word IDIOT, which you are. -You're dumber than a bag of hammers. -Why don't you go back to your home on Whore Island? -If I had a dick this is when I'd tell you to suck it. -Go play in traffic. -The village called, they want their idiot back. 
\ No newline at end of file diff --git a/disabled_stuff/data/itemids.txt b/disabled_stuff/data/itemids.txt deleted file mode 100644 index 4f0ce1d..0000000 --- a/disabled_stuff/data/itemids.txt +++ /dev/null @@ -1,620 +0,0 @@ - 1 Stone - 1:1 Granite - 1:2 Polished Granite - 1:3 Diorite - 1:4 Polished Diorite - 1:5 Andesite - 1:6 Polished Andesite - 2 Grass - 3 Dirt - 3:1 Dirt (No Grass) - 3:2 Podzol - 4 Cobblestone - 5 Wooden Plank (Oak) - 5:1 Wooden Plank (Spruce) - 5:2 Wooden Plank (Birch) - 5:3 Wooden Plank (Jungle) - 5:4 Wooden Plank (Acacia) - 5:5 Wooden Plank (Dark Oak) - 6 Sapling (Oak) - 6:1 Sapling (Spruce) - 6:2 Sapling (Birch) - 6:3 Sapling (Jungle) - 6:4 Sapling (Acacia) - 6:5 Sapling (Dark Oak) - 7 Bedrock - 8 Water - 9 Water (No Spread) - 10 Lava - 11 Lava (No Spread) - 12 Sand - 12:1 Red Sand - 13 Gravel - 14 Gold Ore - 15 Iron Ore - 16 Coal Ore - 17 Wood (Oak) - 17:1 Wood (Spruce) - 17:2 Wood (Birch) - 17:3 Wood (Jungle) - 17:4 Wood (Oak 4) - 17:5 Wood (Oak 5) - 18 Leaves (Oak) - 18:1 Leaves (Spruce) - 18:2 Leaves (Birch) - 18:3 Leaves (Jungle) - 19 Sponge - 20 Glass - 21 Lapis Lazuli Ore - 22 Lapis Lazuli Block - 23 Dispenser - 24 Sandstone - 24:1 Sandstone (Chiseled) - 24:2 Sandstone (Smooth) - 25 Note Block - 26 Bed (Block) - 27 Rail (Powered) - 28 Rail (Detector) - 29 Sticky Piston - 30 Cobweb - 31 Tall Grass (Dead Shrub) - 31:1 Tall Grass - 31:2 Tall Grass (Fern) - 32 Dead Shrub - 33 Piston - 34 Piston (Head) - 35 Wool - 35:1 Orange Wool - 35:2 Magenta Wool - 35:3 Light Blue Wool - 35:4 Yellow Wool - 35:5 Lime Wool - 35:6 Pink Wool - 35:7 Gray Wool - 35:8 Light Gray Wool - 35:9 Cyan Wool - 35:10 Purple Wool - 35:11 Blue Wool - 35:12 Brown Wool - 35:13 Green Wool - 35:14 Red Wool - 35:15 Black Wool - 36 Piston (Moving) - 37 Dandelion - 38 Poppy - 38:1 Blue Orchid - 38:2 Allium - 38:4 Red Tulip - 38:5 Orange Tulip - 38:6 White Tulip - 38:7 Pink Tulip - 38:8 Oxeye Daisy - 39 Brown Mushroom - 40 Red Mushroom - 41 Block of Gold - 42 Block of Iron - 
43 Stone Slab (Double) - 43:1 Sandstone Slab (Double) - 43:2 Wooden Slab (Double) - 43:3 Cobblestone Slab (Double) - 43:4 Brick Slab (Double) - 43:5 Stone Brick Slab (Double) - 43:6 Nether Brick Slab (Double) - 43:7 Quartz Slab (Double) - 43:8 Smooth Stone Slab (Double) - 43:9 Smooth Sandstone Slab (Double) - 44 Stone Slab - 44:1 Sandstone Slab - 44:2 Wooden Slab - 44:3 Cobblestone Slab - 44:4 Brick Slab - 44:5 Stone Brick Slab - 44:6 Nether Brick Slab - 44:7 Quartz Slab - 45 Brick - 46 TNT - 47 Bookshelf - 48 Moss Stone - 49 Obsidian - 50 Torch - 51 Fire - 52 Mob Spawner - 53 Wooden Stairs (Oak) - 54 Chest - 55 Redstone Wire - 56 Diamond Ore - 57 Block of Diamond - 58 Workbench - 59 Wheat (Crop) - 60 Farmland - 61 Furnace - 62 Furnace (Smelting) - 63 Sign (Block) - 64 Wood Door (Block) - 65 Ladder - 66 Rail - 67 Cobblestone Stairs - 68 Sign (Wall Block) - 69 Lever - 70 Stone Pressure Plate - 71 Iron Door (Block) - 72 Wooden Pressure Plate - 73 Redstone Ore - 74 Redstone Ore (Glowing) - 75 Redstone Torch (Off) - 76 Redstone Torch - 77 Button (Stone) - 78 Snow - 79 Ice - 80 Snow Block - 81 Cactus - 82 Clay Block - 83 Sugar Cane (Block) - 84 Jukebox - 85 Fence - 86 Pumpkin - 87 Netherrack - 88 Soul Sand - 89 Glowstone - 90 Portal - 91 Jack-O-Lantern - 92 Cake (Block) - 93 Redstone Repeater (Block Off) - 94 Redstone Repeater (Block On) - 95 Stained Glass (White) - 95:1 Stained Glass (Orange) - 95:2 Stained Glass (Magenta) - 95:3 Stained Glass (Light Blue) - 95:4 Stained Glass (Yellow) - 95:5 Stained Glass (Lime) - 95:6 Stained Glass (Pink) - 95:7 Stained Glass (Gray) - 95:8 Stained Glass (Light Grey) - 95:9 Stained Glass (Cyan) - 95:10 Stained Glass (Purple) - 95:11 Stained Glass (Blue) - 95:12 Stained Glass (Brown) - 95:13 Stained Glass (Green) - 95:14 Stained Glass (Red) - 95:15 Stained Glass (Black) - 96 Trapdoor - 97 Monster Egg (Stone) - 97:1 Monster Egg (Cobblestone) - 97:2 Monster Egg (Stone Brick) - 97:3 Monster Egg (Mossy Stone Brick) - 97:4 Monster Egg 
(Cracked Stone) - 97:5 Monster Egg (Chiseled Stone) - 98 Stone Bricks - 98:1 Mossy Stone Bricks - 98:2 Cracked Stone Bricks - 98:3 Chiseled Stone Brick - 99 Brown Mushroom (Block) - 100 Red Mushroom (Block) - 101 Iron Bars - 102 Glass Pane - 103 Melon (Block) - 104 Pumpkin Vine - 105 Melon Vine - 106 Vines - 107 Fence Gate - 108 Brick Stairs - 109 Stone Brick Stairs - 110 Mycelium - 111 Lily Pad - 112 Nether Brick - 113 Nether Brick Fence - 114 Nether Brick Stairs - 115 Nether Wart - 116 Enchantment Table - 117 Brewing Stand (Block) - 118 Cauldron (Block) - 119 End Portal - 120 End Portal Frame - 121 End Stone - 122 Dragon Egg - 123 Redstone Lamp - 124 Redstone Lamp (On) - 125 Oak-Wood Slab (Double) - 125:1 Spruce-Wood Slab (Double) - 125:2 Birch-Wood Slab (Double) - 125:3 Jungle-Wood Slab (Double) - 125:4 Acacia Wood Slab (Double) - 125:5 Dark Oak Wood Slab (Double) - 126 Oak-Wood Slab - 126:1 Spruce-Wood Slab - 126:2 Birch-Wood Slab - 126:3 Jungle-Wood Slab - 126:4 Acacia Wood Slab - 126:5 Dark Oak Wood Slab - 127 Cocoa Plant - 128 Sandstone Stairs - 129 Emerald Ore - 130 Ender Chest - 131 Tripwire Hook - 132 Tripwire - 133 Block of Emerald - 134 Wooden Stairs (Spruce) - 135 Wooden Stairs (Birch) - 136 Wooden Stairs (Jungle) - 137 Command Block - 138 Beacon - 139 Cobblestone Wall - 139:1 Mossy Cobblestone Wall - 140 Flower Pot (Block) - 141 Carrot (Crop) - 142 Potatoes (Crop) - 143 Button (Wood) - 144 Head Block (Skeleton) - 144:1 Head Block (Wither) - 144:2 Head Block (Zombie) - 144:3 Head Block (Steve) - 144:4 Head Block (Creeper) - 145 Anvil - 145:1 Anvil (Slightly Damaged) - 145:2 Anvil (Very Damaged) - 146 Trapped Chest - 147 Weighted Pressure Plate (Light) - 148 Weighted Pressure Plate (Heavy) - 149 Redstone Comparator (Off) - 150 Redstone Comparator (On) - 151 Daylight Sensor - 152 Block of Redstone - 153 Nether Quartz Ore - 154 Hopper - 155 Quartz Block - 155:1 Chiseled Quartz Block - 155:2 Pillar Quartz Block - 156 Quartz Stairs - 157 Rail (Activator) - 
158 Dropper - 159 Stained Clay (White) - 159:1 Stained Clay (Orange) - 159:2 Stained Clay (Magenta) - 159:3 Stained Clay (Light Blue) - 159:4 Stained Clay (Yellow) - 159:5 Stained Clay (Lime) - 159:6 Stained Clay (Pink) - 159:7 Stained Clay (Gray) - 159:8 Stained Clay (Light Gray) - 159:9 Stained Clay (Cyan) - 159:10 Stained Clay (Purple) - 159:11 Stained Clay (Blue) - 159:12 Stained Clay (Brown) - 159:13 Stained Clay (Green) - 159:14 Stained Clay (Red) - 159:15 Stained Clay (Black) - 160 Stained Glass Pane (White) - 160:1 Stained Glass Pane (Orange) - 160:2 Stained Glass Pane (Magenta) - 160:3 Stained Glass Pane (Light Blue) - 160:4 Stained Glass Pane (Yellow) - 160:5 Stained Glass Pane (Lime) - 160:6 Stained Glass Pane (Pink) - 160:7 Stained Glass Pane (Gray) - 160:8 Stained Glass Pane (Light Gray) - 160:9 Stained Glass Pane (Cyan) - 160:10 Stained Glass Pane (Purple) - 160:11 Stained Glass Pane (Blue) - 160:12 Stained Glass Pane (Brown) - 160:13 Stained Glass Pane (Green) - 160:14 Stained Glass Pane (Red) - 160:15 Stained Glass Pane (Black) - 162 Wood (Acacia Oak) - 162:1 Wood (Dark Oak) - 163 Wooden Stairs (Acacia) - 164 Wooden Stairs (Dark Oak) - 165 Slime Block - 170 Hay Bale - 171 Carpet (White) - 171:1 Carpet (Orange) - 171:2 Carpet (Magenta) - 171:3 Carpet (Light Blue) - 171:4 Carpet (Yellow) - 171:5 Carpet (Lime) - 171:6 Carpet (Pink) - 171:7 Carpet (Grey) - 171:8 Carpet (Light Gray) - 171:9 Carpet (Cyan) - 171:10 Carpet (Purple) - 171:11 Carpet (Blue) - 171:12 Carpet (Brown) - 171:13 Carpet (Green) - 171:14 Carpet (Red) - 171:15 Carpet (Black) - 172 Hardened Clay - 173 Block of Coal - 174 Packed Ice - 175 Sunflower - 175:1 Lilac - 175:2 Double Tallgrass - 175:3 Large Fern - 175:4 Rose Bush - 175:5 Peony - 256 Iron Shovel - 257 Iron Pickaxe - 258 Iron Axe - 259 Flint and Steel - 260 Apple - 261 Bow - 262 Arrow - 263 Coal - 263:1 Charcoal - 264 Diamond Gem - 265 Iron Ingot - 266 Gold Ingot - 267 Iron Sword - 268 Wooden Sword - 269 Wooden Shovel - 270 
Wooden Pickaxe - 271 Wooden Axe - 272 Stone Sword - 273 Stone Shovel - 274 Stone Pickaxe - 275 Stone Axe - 276 Diamond Sword - 277 Diamond Shovel - 278 Diamond Pickaxe - 279 Diamond Axe - 280 Stick - 281 Bowl - 282 Mushroom Stew - 283 Gold Sword - 284 Gold Shovel - 285 Gold Pickaxe - 286 Gold Axe - 287 String - 288 Feather - 289 Gunpowder - 290 Wooden Hoe - 291 Stone Hoe - 292 Iron Hoe - 293 Diamond Hoe - 294 Gold Hoe - 295 Wheat Seeds - 296 Wheat - 297 Bread - 298 Leather Helmet - 299 Leather Chestplate - 300 Leather Leggings - 301 Leather Boots - 302 Chainmail Helmet - 303 Chainmail Chestplate - 304 Chainmail Leggings - 305 Chainmail Boots - 306 Iron Helmet - 307 Iron Chestplate - 308 Iron Leggings - 309 Iron Boots - 310 Diamond Helmet - 311 Diamond Chestplate - 312 Diamond Leggings - 313 Diamond Boots - 314 Gold Helmet - 315 Gold Chestplate - 316 Gold Leggings - 317 Gold Boots - 318 Flint - 319 Raw Porkchop - 320 Cooked Porkchop - 321 Painting - 322 Golden Apple - 322:1 Enchanted Golden Apple - 323 Sign - 324 Wooden Door - 325 Bucket - 326 Bucket (Water) - 327 Bucket (Lava) - 328 Minecart - 329 Saddle - 330 Iron Door - 331 Redstone Dust - 332 Snowball - 333 Boat - 334 Leather - 335 Bucket (Milk) - 336 Clay Brick - 337 Clay - 338 Sugar Cane - 339 Paper - 340 Book - 341 Slime Ball - 342 Minecart (Storage) - 343 Minecart (Powered) - 344 Egg - 345 Compass - 346 Fishing Rod - 347 Watch - 348 Glowstone Dust - 349 Raw Fish - 349:1 Raw Salmon - 349:2 Clownfish - 349:3 Pufferfish - 350 Cooked Fish - 350:1 Cooked Salmon - 350:2 Clownfish - 350:3 Pufferfish - 351 Ink Sack - 351:1 Rose Red Dye - 351:2 Cactus Green Dye - 351:3 Cocoa Bean - 351:4 Lapis Lazuli - 351:5 Purple Dye - 351:6 Cyan Dye - 351:7 Light Gray Dye - 351:8 Gray Dye - 351:9 Pink Dye - 351:10 Lime Dye - 351:11 Dandelion Yellow Dye - 351:12 Light Blue Dye - 351:13 Magenta Dye - 351:14 Orange Dye - 351:15 Bone Meal - 352 Bone - 353 Sugar - 354 Cake - 355 Bed - 356 Redstone Repeater - 357 Cookie - 358 Map - 359 
Shears - 360 Melon (Slice) - 361 Pumpkin Seeds - 362 Melon Seeds - 363 Raw Beef - 364 Steak - 365 Raw Chicken - 366 Cooked Chicken - 367 Rotten Flesh - 368 Ender Pearl - 369 Blaze Rod - 370 Ghast Tear - 371 Gold Nugget - 372 Nether Wart Seeds - 373 Water Bottle - 373:16 Awkward Potion - 373:32 Thick Potion - 373:64 Mundane Potion - 373:8193 Regeneration Potion (0:45) - 373:8194 Swiftness Potion (3:00) - 373:8195 Fire Resistance Potion (3:00) - 373:8196 Poison Potion (0:45) - 373:8197 Healing Potion - 373:8198 Night Vision Potion (3:00) - 373:8200 Weakness Potion (1:30) - 373:8201 Strength Potion (3:00) - 373:8202 Slowness Potion (1:30) - 373:8204 Harming Potion - 373:8205 Water Breathing Potion (3:00) - 373:8206 Invisibility Potion (3:00) - 373:8225 Regeneration Potion II (0:22) - 373:8226 Swiftness Potion II (1:30) - 373:8228 Poison Potion II (0:22) - 373:8229 Healing Potion II - 373:8233 Strength Potion II (1:30) - 373:8236 Harming Potion II - 373:8257 Regeneration Potion (2:00) - 373:8258 Swiftness Potion (8:00) - 373:8259 Fire Resistance Potion (8:00) - 373:8260 Poison Potion (2:00) - 373:8262 Night Vision Potion (8:00) - 373:8264 Weakness Potion (4:00) - 373:8265 Strength Potion (8:00) - 373:8266 Slowness Potion (4:00) - 373:8269 Water Breathing Potion (8:00) - 373:8270 Invisibility Potion (8:00) - 373:8289 Regeneration Potion II (1:00) - 373:8290 Swiftness Potion II (4:00) - 373:8292 Poison Potion II (1:00) - 373:8297 Strength Potion II (4:00) - 373:16385 Regeneration Splash (0:33) - 373:16386 Swiftness Splash (2:15) - 373:16387 Fire Resistance Splash (2:15) - 373:16388 Poison Splash (0:33) - 373:16389 Healing Splash - 373:16390 Night Vision Splash (2:15) - 373:16392 Weakness Splash (1:07) - 373:16393 Strength Splash (2:15) - 373:16394 Slowness Splash (1:07) - 373:16396 Harming Splash - 373:16397 Breathing Splash (2:15) - 373:16398 Invisibility Splash (2:15) - 373:16417 Regeneration Splash II (0:16) - 373:16418 Swiftness Splash II (1:07) - 373:16420 Poison 
Splash II (0:16) - 373:16421 Healing Splash II - 373:16425 Strength Splash II (1:07) - 373:16428 Harming Splash II - 373:16449 Regeneration Splash (1:30) - 373:16450 Swiftness Splash (6:00) - 373:16451 Fire Resistance Splash (6:00) - 373:16452 Poison Splash (1:30) - 373:16454 Night Vision Splash (6:00) - 373:16456 Weakness Splash (3:00) - 373:16457 Strength Splash (6:00) - 373:16458 Slowness Splash (3:00) - 373:16461 Breathing Splash (6:00) - 373:16462 Invisibility Splash (6:00) - 373:16481 Regeneration Splash II (0:45) - 373:16482 Swiftness Splash II (3:00) - 373:16484 Poison Splash II (0:45) - 373:16489 Strength Splash II (3:00) - 374 Glass Bottle - 375 Spider Eye - 376 Fermented Spider Eye - 377 Blaze Powder - 378 Magma Cream - 379 Brewing Stand - 380 Cauldron - 381 Eye of Ender - 382 Glistering Melon (Slice) - 383:50 Spawn Egg (Creeper) - 383:51 Spawn Egg (Skeleton) - 383:52 Spawn Egg (Spider) - 383:54 Spawn Egg (Zombie) - 383:55 Spawn Egg (Slime) - 383:56 Spawn Egg (Ghast) - 383:57 Spawn Egg (Zombie Pigmen) - 383:58 Spawn Egg (Endermen) - 383:59 Spawn Egg (Cave Spider) - 383:60 Spawn Egg (Silverfish) - 383:61 Spawn Egg (Blaze) - 383:62 Spawn Egg (Magma Cube) - 383:65 Spawn Egg (Bat) - 383:66 Spawn Egg (Witch) - 383:90 Spawn Egg (Pig) - 383:91 Spawn Egg (Sheep) - 383:92 Spawn Egg (Cow) - 383:93 Spawn Egg (Chicken) - 383:94 Spawn Egg (Squid) - 383:95 Spawn Egg (Wolf) - 383:96 Spawn Egg (Mooshroom) - 383:98 Spawn Egg (Ocelot) - 383:100 Spawn Egg (Horse) - 383:120 Spawn Egg (Villager) - 384 Bottle of Enchanting - 385 Fire Charge - 386 Book and Quill - 387 Written Book - 388 Emerald - 389 Item Frame - 390 Flower Pot - 391 Carrot - 392 Potato - 393 Baked Potato - 394 Poisonous Potato - 395 Empty Map - 396 Golden Carrot - 397 Head (Skeleton) - 397:1 Head (Wither) - 397:2 Head (Zombie) - 397:3 Head (Steve) - 397:4 Head (Creeper) - 398 Carrot on a Stick - 399 Nether Star - 400 Pumpkin Pie - 401 Firework Rocket - 402 Firework Star - 403 Enchanted Book - 404 Redstone 
Comparator - 405 Nether Brick (Item) - 406 Nether Quartz - 407 Minecart (TNT) - 408 Minecart (Hopper) - 417 Iron Horse Armor - 418 Gold Horse Armor - 419 Diamond Horse Armor - 420 Lead - 421 Name Tag - 422 Minecart (Command Block) - 2256 Music Disk (13) - 2257 Music Disk (Cat) - 2258 Music Disk (Blocks) - 2259 Music Disk (Chirp) - 2260 Music Disk (Far) - 2261 Music Disk (Mall) - 2262 Music Disk (Mellohi) - 2263 Music Disk (Stal) - 2264 Music Disk (Strad) - 2265 Music Disk (Ward) - 2266 Music Disk (11) - 2267 Music Disk (Wait) diff --git a/disabled_stuff/data/kills.json b/disabled_stuff/data/kills.json deleted file mode 100644 index 5f6d046..0000000 --- a/disabled_stuff/data/kills.json +++ /dev/null @@ -1,79 +0,0 @@ -{ - "templates": [ - "rips off {user}'s {limbs} and leaves them to die.", - "grabs {user}'s head and rips it clean off their body.", - "grabs a {gun} and riddles {user}'s body with bullets.", - "gags and ties {user} then throws them off a {tall_thing}.", - "crushes {user} with a huge spiked {spiked_thing}.", - "glares at {user} until they die of boredom.", - "stabs {user} in the heart a few times with a {weapon_stab}.", - "rams a {weapon_explosive} up {user}'s ass and lets off a few rounds.", - "crushes {user}'s skull in with a {weapon_crush}.", - "unleashes the armies of Isengard on {user}.", - "gags and ties {user} then throws them off a {tall_thing} to their death.", - "reaches out and punches right through {user}'s chest.", - "slices {user}'s limbs off with a {weapon_slice}.", - "throws {user} to Cthulu and watches them get ripped to shreds.", - "feeds {user} to an owlbear who then proceeds to maul them violently.", - "turns {user} into a snail and covers then in salt.", - "snacks on {user}'s dismembered body.", - "stuffs {bomb} up {user}'s ass and waits for it to go off.", - "puts {user} into a sack, throws the sack in the river, and hurls the river into space.", - "goes bowling with {user}'s bloody disembodied head.", - "sends {user} to 
/dev/null!", - "feeds {user} coke and mentos till they violently explode." - ], - "parts": { - "gun": [ - "AK47", - "machine gun", - "automatic pistol", - "Uzi" - ], - "limbs": [ - "legs", - "arms", - "limbs" - ], - "weapon_stab": [ - "knife", - "shard of glass", - "sword blade", - "butchers knife", - "corkscrew" - ], - "weapon_slice": [ - "sharpened katana", - "chainsaw", - "polished axe" - ], - "weapon_crush": [ - "spiked mace", - "baseball bat", - "wooden club", - "massive steel ball", - "heavy iron rod" - ], - "weapon_explosive": [ - "rocket launcher", - "grenade launcher", - "napalm launcher" - ], - "tall_thing": [ - "bridge", - "tall building", - "cliff", - "mountain" - ], - "spiked_thing": [ - "boulder", - "rock", - "barrel of rocks" - ], - "bomb": [ - "a bomb", - "some TNT", - "a bunch of C4" - ] - } -} diff --git a/disabled_stuff/data/kills.txt b/disabled_stuff/data/kills.txt deleted file mode 100644 index 115b667..0000000 --- a/disabled_stuff/data/kills.txt +++ /dev/null @@ -1,22 +0,0 @@ -rips off {user}'s legs and leaves them to die. -grabs {user}'s head and rips it clean off their body. -grabs a machine gun and riddles {user}'s body with bullets. -gags and ties {user} then throws them off a bridge. -crushes {user} with a huge spiked boulder. -glares at {user} until they die of boredom. -shivs {user} in the heart a few times. -rams a rocket launcher up {user}'s ass and lets off a few rounds. -crushes {user}'s skull in with a spiked mace. -unleashes the armies of Isengard on {user}. -gags and ties {user} then throws them off a building to their death. -reaches out and punches right through {user}'s chest. -slices {user}'s limbs off with a sharpened Katana. -throws {user} to Cthulu and watches them get ripped to shreds. -feeds {user} to an owlbear who then proceeds to maul them violently. -turns {user} into a snail and salts them. -snacks on {user}'s dismembered body. -stuffs some TNT up {user}'s ass and waits for it to go off. 
-puts {user} into a sack, throws the sack in the river, and hurls the river into space. -goes bowling with {user}'s bloody disembodied head. -sends {user} to /dev/null! -feeds {user} coke and mentos till they violently explode. diff --git a/disabled_stuff/data/larts.txt b/disabled_stuff/data/larts.txt deleted file mode 100644 index 029e3a0..0000000 --- a/disabled_stuff/data/larts.txt +++ /dev/null @@ -1,99 +0,0 @@ -smacks {user} in the face with a burlap sack full of broken glass. -swaps {user}'s shampoo with glue. -installs Windows Vista on {user}'s computer. -forces {user} to use perl for 3 weeks. -registers {user}'s name with 50 known spammers. -resizes {user}'s console to 40x24. -takes {user}'s drink. -dispenses {user}'s email address to a few hundred 'bulk mailing services'. -pokes {user} in the eye. -beats {user} senseless with a 50lb Linux manual. -cats /dev/random into {user}'s ear. -signs {user} up for AOL. -downvotes {user} on Reddit. -enrolls {user} in Visual Basic 101. -sporks {user}. -drops a truckload of support tickets on {user}. -judo chops {user}. -sets {user}'s resolution to 800x600. -formats {user}'s harddrive to fat12. -rm -rf's {user}. -stabs {user}. -makes {user} learn C++. -steals {user}'s mojo. -strangles {user} with a doohicky mouse cord. -whacks {user} with the cluebat. -sells {user} on EBay. -drops creepers on {user}'s house. -throws all of {user}'s diamond gear into lava. -uses {user} as a biological warfare study. -uses the 'Customer Appreciation Bat' on {user}. -puts {user} in the Total Perspective Vortex. -casts {user} into the fires of Mt. Doom. -gives {user} a melvin. -turns {user} over to the Fun Police. -turns over {user} to Agent Smith to be 'bugged'. -takes away {user}'s internet connection. -pushes {user} past the Shoe Event Horizon. -counts '1, 2, 5... er... 3!' and hurls the Holy Handgrenade Of Antioch at {user}. -puts {user} in a nest of camel spiders. -puts 'alias vim=emacs' in {user}'s /etc/profile. 
-uninstalls every web browser from {user}'s system. -signs {user} up for getting hit on the head lessons. -makes {user} try to set up a Lexmark printer. -fills {user}'s eyedrop bottle with lime juice. -casts {user} into the fires of Mt. Doom. -gives {user} a Flying Dutchman. -rips off {user}'s arm, and uses it to beat them to death. -pierces {user}'s nose with a rusty paper hole puncher. -pokes {user} with a rusty nail. -puts sugar between {user}'s bedsheets. -pours sand into {user}'s breakfast. -mixes epoxy into {user}'s toothpaste. -puts Icy-Hot in {user}'s lube container. -forces {user} to use a Commodore 64 for all their word processing. -puts {user} in a room with several heavily armed manic depressives. -makes {user} watch reruns of "Blue's Clues". -puts lye in {user}'s coffee. -tattoos the Windows symbol on {user}'s ass. -lets Borg have his way with {user}. -signs {user} up for line dancing classes at the local senior center. -wakes {user} out of a sound sleep with some brand new nipple piercings. -gives {user} a 2 gauge Prince Albert. -forces {user} to eat all their veggies. -covers {user}'s toilet paper with lemon-pepper. -fills {user}'s ketchup bottle with Dave's Insanity sauce. -forces {user} to stare at an incredibly frustrating and seemingly never-ending IRC political debate. -knocks two of {user}'s teeth out with a 2x4. -removes Debian from {user}'s system. -switches {user} over to CentOS. -uses {user}'s iPod for skeet shooting practice. -gives {user}'s phone number to Borg. -posts {user}'s IP, username(s), and password(s) on 4chan. -forces {user} to use words like 'irregardless' and 'administrate' (thereby sounding like a real dumbass). -tickles {user} until they wet their pants and pass out. -replaces {user}'s KY with elmer's clear wood glue. -replaces {user}'s TUMS with alka-seltzer tablets. -squeezes habanero pepper juice into {user}'s tub of vaseline. -forces {user} to learn the Win32 API. -gives {user} an atomic wedgie. 
-ties {user} to a chair and forces them to listen to 'N Sync at full blast. -forces {user} to use notepad for text editing. -frowns at {user} really, really hard. -jabs a hot lighter into {user}'s eye sockets. -forces {user} to browse the web with IE6. -takes {user} out at the knees with a broken pool cue. -forces {user} to listen to emo music. -lets a few creepers into {user}'s house. -signs {user} up for the Iowa State Ferret Legging Championship. -attempts to hotswap {user}'s RAM. -dragon punches {user}. -puts railroad spikes into {user}'s side. -replaces {user}'s lubricant with liquid weld. -replaces {user}'s stress pills with rat poison pellets. -replaces {user}'s itch cream with hair removal cream. -does the Australian Death Grip on {user}. -dances upon the grave of {user}'s ancestors. -farts loudly in {user}'s general direction. -flogs {user} with stinging nettle. -hands {user} a poison ivy joint. diff --git a/disabled_stuff/data/name_files/dragons.json b/disabled_stuff/data/name_files/dragons.json deleted file mode 100644 index c6c3e2d..0000000 --- a/disabled_stuff/data/name_files/dragons.json +++ /dev/null @@ -1,170 +0,0 @@ -{ - "name": "Dragon names", - "author": "Brett Slocum", - "templates": { - "default": "{start}{end}" - }, - "default_templates": [ - "default" - ], - "parts": { - "end": [ - "bald", - "beald", - "balt", - "balth", - "beorht", - "berct", - "berict", - "beorn", - "bern", - "brand", - "broad", - "burg", - "burh", - "cyni", - "cyn", - "degn", - "ferth", - "flaed", - "fled", - "for", - "frith", - "frit", - "frid", - "gar", - "geld", - "gifu", - "geofu", - "gisl", - "gund", - "gunn", - "gyth", - "gyd", - "haed", - "hathu", - "heard", - "hard", - "here", - "heri", - "helm", - "hild", - "hun", - "lac", - "laf", - "lid", - "lind", - "linda", - "maer", - "man", - "mon", - "mund", - "noth", - "raed", - "red", - "refu", - "ric", - "sig", - "sige", - "stan", - "swith", - "swid", - "theof", - "theow", - "thryth", - "thryd", - "wealch", - "walh", - 
"weald", - "wald", - "weard", - "ward", - "wic", - "wict", - "wiht", - "wine", - "wini", - "wiw", - "wiv", - "wuda", - "wida", - "wudu", - "wulf", - "ulf", - "wyn", - "wynn" - ], - "start": [ - "Aelf", - "Aelb", - "Aethel", - "Aedil", - "Badu", - "Beado", - "Beo", - "Blith", - "Bregu", - "Ceol", - "Ceon", - "Coin", - "Cene", - "Cuth", - "Cud", - "Cwic", - "Cuic", - "Quic", - "Dryct", - "Dryht", - "Ead", - "Ed", - "Aead", - "Eald", - "Ald", - "Ealh", - "Alh", - "Earcon", - "Ercon", - "Earn", - "Ecg", - "Ec", - "Eofor", - "Eorcon", - "Eormen", - "Yrmen", - "Folc", - "Ford", - "Fri", - "Gold", - "Grim", - "Haem", - "Haeth", - "Heah", - "Healf", - "Hreth", - "Hroth", - "Huaet", - "Hyg", - "Hugu", - "Iaru", - "Leof", - "Maegen", - "Oidil", - "Ongen", - "Os", - "Rath", - "Saex", - "Sax", - "Sex", - "Sele", - "Tat", - "Theod", - "Til", - "Torct", - "Trum", - "Tun", - "Waeg", - "Wig", - "Wil" - ] - } -} diff --git a/disabled_stuff/data/name_files/dwarves.json b/disabled_stuff/data/name_files/dwarves.json deleted file mode 100644 index 7041b22..0000000 --- a/disabled_stuff/data/name_files/dwarves.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "name": "Dwarven names", - "author": "Johan Danforth", - "templates": { - "default": "{first}{mid}{final}" - }, - "default_templates": [ - "default" - ], - "parts": { - "final": [ - "bur", - "fur", - "gan", - "gnus", - "gnar", - "li", - "lin", - "lir", - "mli", - "nar", - "nus", - "rin", - "ran", - "sin", - "sil", - "sur" - ], - "mid": [ - "a", - "e", - "i", - "o", - "oi", - "u" - ], - "first": [ - "B", - "D", - "F", - "G", - "Gl", - "H", - "K", - "L", - "M", - "N", - "R", - "S", - "T", - "V" - ] - } -} \ No newline at end of file diff --git a/disabled_stuff/data/name_files/elves_female.json b/disabled_stuff/data/name_files/elves_female.json deleted file mode 100644 index ce05689..0000000 --- a/disabled_stuff/data/name_files/elves_female.json +++ /dev/null @@ -1,85 +0,0 @@ -{ - "name": "Elven female names", - "author": "Johan Danforth", - 
"templates": { - "default": "{first}{mid}{final}" - }, - "default_templates": [ - "default" - ], - "parts": { - "final": [ - "clya", - "lindi", - "di", - "dien", - "dith", - "dia", - "lith", - "lia", - "ndra", - "ng", - "nia", - "niel", - "rith", - "thien", - "thiel", - "viel", - "wen", - "wien", - "wiel" - ], - "mid": [ - "a", - "a", - "adrie", - "ara", - "e", - "e", - "ebri", - "i", - "io", - "ithra", - "ilma", - "il-Ga", - "o", - "orfi", - "o", - "u", - "y" - ], - "first": [ - "An", - "Am", - "Bel", - "Cel", - "C", - "Cal", - "Del", - "El", - "Elr", - "Elv", - "Eow", - "Ear", - "F", - "G", - "Gal", - "Gl", - "H", - "Is", - "Leg", - "Lem", - "M", - "N", - "P", - "R", - "S", - "T", - "Thr", - "Tin", - "Ur", - "Un", - "V" - ] - } -} diff --git a/disabled_stuff/data/name_files/elves_male.json b/disabled_stuff/data/name_files/elves_male.json deleted file mode 100644 index 08ebe9e..0000000 --- a/disabled_stuff/data/name_files/elves_male.json +++ /dev/null @@ -1,86 +0,0 @@ -{ - "name": "Elven male names", - "author": "Johan Danforth", - "templates": { - "default": "{first}{mid}{final}" - }, - "default_templates": [ - "default" - ], - "parts": { - "final": [ - "l", - "las", - "lad", - "ldor", - "ldur", - "lith", - "mir", - "n", - "nd", - "ndel", - "ndil", - "ndir", - "nduil", - "ng", - "mbor", - "r", - "ril", - "riand", - "rion", - "wyn" - ], - "mid": [ - "a", - "a", - "adrie", - "ara", - "e", - "e", - "ebri", - "i", - "io", - "ithra", - "ilma", - "il-Ga", - "o", - "orfi", - "o", - "u", - "y" - ], - "first": [ - "An", - "Am", - "Bel", - "Cel", - "C", - "Cal", - "Del", - "El", - "Elr", - "Elv", - "Eow", - "Ear", - "F", - "G", - "Gal", - "Gl", - "H", - "Is", - "Leg", - "Lem", - "M", - "N", - "P", - "R", - "S", - "T", - "Thr", - "Tin", - "Ur", - "Un", - "V" - ] - } -} diff --git a/disabled_stuff/data/name_files/fantasy.json b/disabled_stuff/data/name_files/fantasy.json deleted file mode 100644 index 7128a40..0000000 --- a/disabled_stuff/data/name_files/fantasy.json +++ 
/dev/null @@ -1,554 +0,0 @@ -{ - "name": "General fantasy names", - "author": "Brett Slocum", - "templates": { - "default": "{first}{mid}{final}" - }, - "default_templates": [ - "default" - ], - "parts": { - "final": [ - "ty", - "carn", - "ar", - "acy", - "er", - "al", - "gary", - "y", - "ar", - "arny", - "alen", - "carth", - "gant", - "y", - "ber", - "art", - "dal", - "arth", - "arth", - "an", - "ere", - "geth", - "aldy", - "yn", - "valer", - "arne", - "aller", - "varn", - "ayne", - "an", - "nal", - "tyne", - "ayne", - "art", - "ont", - "ney", - "aver", - "lyn", - "iel", - "gar", - "y", - "arry", - "or", - "quine", - "astar", - "er", - "aryn", - "art", - "war", - "asty", - "zane", - "arik", - "ayne", - "an", - "oller", - "warty", - "aryne", - "chean", - "ta", - "un", - "tha", - "reth", - "ant", - "el", - "yne", - "el", - "tuny", - "wat", - "juin", - "dor", - "gayn", - "tyn", - "dar", - "car", - "gine", - "codd", - "quent", - "eas", - "ew", - "azer", - "ont", - "ly", - "stead", - "orn", - "en", - "cath", - "iera", - "que", - "air", - "la", - "art", - "erry", - "sa", - "ar", - "er", - "ern", - "arty", - "doth", - "y", - "ert", - "dy", - "orn", - "ont", - "ern", - "ayn", - "art", - "warne", - "arn", - "in", - "ian", - "el", - "ak", - "il", - "ydd", - "ime", - "yn", - "en", - "in", - "im", - "el", - "ar", - "ro", - "is", - "is", - "ro", - "era", - "ene", - "in", - "ane", - "iam", - "ain", - "ir", - "un", - "il", - "bin", - "lin", - "is", - "sene", - "bin", - "lir", - "ame", - "a", - "fyn", - "se", - "in", - "yd", - "ien", - "ain", - "yn", - "ar", - "er", - "in", - "sume", - "ras", - "on", - "mel", - "luth", - "ance", - "er", - "yn", - "an", - "ar", - "ayne", - "eth", - "nyd", - "ter", - "rik", - "nik", - "ro", - "a", - "mel", - "yn", - "ris", - "lene", - "ane", - "yr" - ], - "mid": [ - "gur", - "carn", - "az", - "acy", - "ayn", - "asc", - "gary", - "hen", - "tan", - "arny", - "alen", - "carth", - "gant", - "rath", - "cam", - "art", - "ron", - "arth", - "arth", - 
"carad", - "ere", - "geth", - "aldy", - "yn", - "valer", - "arne", - "aller", - "varn", - "ar", - "an", - "nal", - "tyne", - "ar", - "art", - "ont", - "aur", - "aver", - "lyn", - "as", - "gar", - "cuth", - "arry", - "or", - "quine", - "astar", - "mel", - "aryn", - "art", - "war", - "asty", - "zane", - "arik", - "ayne", - "loc", - "oller", - "warty", - "aryne", - "chean", - "quin", - "tar", - "dar", - "reth", - "ant", - "an", - "yne", - "ax", - "tuny", - "wat", - "juin", - "a", - "gayn", - "on", - "an", - "car", - "gine", - "codd", - "quent", - "eas", - "ew", - "azer", - "am", - "ly", - "stead", - "orn", - "ar", - "cath", - "iera", - "que", - "air", - "la", - "art", - "erry", - "end", - "om", - "ast", - "et", - "arty", - "doth", - "cath", - "ert", - "dy", - "orn", - "ont", - "tak", - "ar", - "art", - "warne", - "arn", - "in", - "ian", - "el", - "ak", - "il", - "ydd", - "ime", - "yn", - "en", - "in", - "im", - "el", - "ar", - "ro", - "is", - "is", - "ro", - "era", - "ene", - "in", - "ane", - "iam", - "ain", - "ir", - "un", - "il", - "bin", - "lin", - "is", - "sene", - "bin", - "lir", - "ame", - "a", - "fyn", - "y", - "in", - "yd", - "ien", - "ain", - "yn", - "ar", - "er", - "in", - "sume", - "ras", - "id", - "mel", - "luth", - "ance", - "er", - "yn", - "an", - "ar", - "ayne", - "eth", - "len", - "ter", - "rik", - "er", - "ro", - "tin", - "mel", - "yn", - "ris", - "lene", - "ane", - "as" - ], - "first": [ - "Ral", - "Na", - "Ard", - "Vald", - "Cal", - "Hy", - "Pan", - "Chies", - "Per", - "Er", - "Hob", - "Harg", - "Win", - "Mar", - "Quarne", - "Ba", - "Er", - "Odas", - "Ka", - "Mold", - "Syn", - "Ro", - "Jast", - "Yal", - "Nap", - "Vard", - "As", - "Binthe", - "Zald", - "Dez", - "Las", - "Uld", - "Nev", - "Haur", - "Bar", - "Das", - "Ty", - "Dar", - "Ost", - "Tral", - "Grave", - "Eth", - "Flar", - "Yal", - "Klab", - "Harab", - "Jar", - "Nor", - "Dain", - "Toc", - "Bay", - "Haith", - "Cal", - "Lar", - "Naut", - "Druc", - "Bar", - "Art", - "For", - "Mart", - "Yar", - 
"Ha", - "Ny", - "Yar", - "Verd", - "Wy", - "Plag", - "Ter", - "Haur", - "Var", - "Ar", - "Dar", - "Val", - "Mar", - "Car", - "Loc", - "Wearn", - "Dras", - "Bel", - "Har", - "Jar", - "For", - "Kil", - "Oc", - "Al", - "Skal", - "Nun", - "Az", - "Kop", - "Houl", - "Lab", - "Jar", - "Vast", - "Claune", - "Tes", - "Ob", - "Nist", - "El", - "Est", - "Zol", - "Brow", - "Pulg", - "Star", - "Kren", - "Crac", - "Scaun", - "Wal", - "Quer", - "Ry", - "Cyn", - "Rusk", - "Del", - "Lab", - "Mel", - "Sep", - "Lor", - "Ros", - "Jar", - "Daf", - "Hal", - "Kol", - "In", - "Ael", - "Sald", - "Kuv", - "Ym", - "Ca", - "Keld", - "Bar", - "Tarl", - "Shot", - "Pes", - "Quer", - "Lor", - "Geld", - "Ar", - "Har", - "Bae", - "Vad", - "Pas", - "Ur", - "Nor", - "Kir", - "Var", - "Mel", - "Ar", - "Shy", - "I", - "Rald", - "Cor", - "Sar", - "Kor", - "Rol", - "Har", - "Ash", - "Dir", - "Las", - "Vab", - "Ald", - "Par", - "Ob", - "Hor", - "Chy", - "Jar", - "Ryle", - "Char", - "Hab", - "Sar", - "Vart", - "Nist", - "Obr", - "Jar", - "Ge", - "Yas", - "Pav", - "Jes", - "Shot", - "Mar", - "Hor", - "Er", - "Ki", - "Har", - "Cal", - "And" - ] - } -} diff --git a/disabled_stuff/data/name_files/female.json b/disabled_stuff/data/name_files/female.json deleted file mode 100644 index f2d5c08..0000000 --- a/disabled_stuff/data/name_files/female.json +++ /dev/null @@ -1,190 +0,0 @@ -{ - "name": "Fantasy female names", - "author": "Johan Danforth", - "templates": { - "default": "{first}{mid}{final}" - }, - "default_templates": [ - "default" - ], - "parts": { - "final": [ - "beth", - "cia", - "cien", - "clya", - "de", - "dia", - "dda", - "dien", - "dith", - "dia", - "lind", - "lith", - "lia", - "lian", - "lla", - "llan", - "lle", - "ma", - "mma", - "mwen", - "meth", - "n", - "n", - "n", - "nna", - "ndra", - "ng", - "ni", - "nia", - "niel", - "rith", - "rien", - "ria", - "ri", - "rwen", - "sa", - "sien", - "ssa", - "ssi", - "swen", - "thien", - "thiel", - "viel", - "via", - "ven", - "veth", - "wen", - "wen", - 
"wen", - "wen", - "wia", - "weth", - "wien", - "wiel" - ], - "mid": [ - "a", - "a", - "a", - "ae", - "ae", - "au", - "ao", - "are", - "ale", - "ali", - "ay", - "ardo", - "e", - "e", - "e", - "ei", - "ea", - "ea", - "eri", - "era", - "ela", - "eli", - "enda", - "erra", - "i", - "i", - "i", - "ia", - "ie", - "ire", - "ira", - "ila", - "ili", - "ira", - "igo", - "o", - "oa", - "oi", - "oe", - "ore", - "u", - "y" - ], - "first": [ - "A", - "Ab", - "Ac", - "Ad", - "Af", - "Agr", - "Ast", - "As", - "Al", - "Adw", - "Adr", - "Ar", - "B", - "Br", - "C", - "C", - "C", - "Cr", - "Ch", - "Cad", - "D", - "Dr", - "Dw", - "Ed", - "Eth", - "Et", - "Er", - "El", - "Eow", - "F", - "Fr", - "G", - "Gr", - "Gw", - "Gw", - "Gal", - "Gl", - "H", - "Ha", - "Ib", - "Jer", - "K", - "Ka", - "Ked", - "L", - "Loth", - "Lar", - "Leg", - "M", - "Mir", - "N", - "Nyd", - "Ol", - "Oc", - "On", - "P", - "Pr", - "Q", - "R", - "Rh", - "S", - "Sev", - "T", - "Tr", - "Th", - "Th", - "Ul", - "Um", - "Un", - "V", - "Y", - "Yb", - "Z", - "W", - "W", - "Wic" - ] - } -} diff --git a/disabled_stuff/data/name_files/general.json b/disabled_stuff/data/name_files/general.json deleted file mode 100644 index c163b7a..0000000 --- a/disabled_stuff/data/name_files/general.json +++ /dev/null @@ -1,199 +0,0 @@ -{ - "name": "Generic names", - "author": "Johan Danforth", - "templates": { - "default": "{first}{mid}{final}" - }, - "default_templates": [ - "default" - ], - "parts": { - "final": [ - "a", - "and", - "b", - "bwyn", - "baen", - "bard", - "c", - "ctred", - "cred", - "ch", - "can", - "d", - "dan", - "don", - "der", - "dric", - "dfrid", - "dus", - "f", - "g", - "gord", - "gan", - "l", - "li", - "lgrin", - "lin", - "lith", - "lath", - "loth", - "ld", - "ldric", - "ldan", - "m", - "mas", - "mos", - "mar", - "mond", - "n", - "nydd", - "nidd", - "nnon", - "nwan", - "nyth", - "nad", - "nn", - "nnor", - "nd", - "p", - "r", - "ron", - "rd", - "s", - "sh", - "seth", - "sean", - "t", - "th", - "th", - "tha", - "tlan", - 
"trem", - "tram", - "v", - "vudd", - "w", - "wan", - "win", - "win", - "wyn", - "wyn", - "wyr", - "wyr", - "wyth" - ], - "mid": [ - "a", - "ae", - "ae", - "au", - "ao", - "are", - "ale", - "ali", - "ay", - "ardo", - "e", - "ei", - "ea", - "ea", - "eri", - "era", - "ela", - "eli", - "enda", - "erra", - "i", - "ia", - "ie", - "ire", - "ira", - "ila", - "ili", - "ira", - "igo", - "o", - "oa", - "oi", - "oe", - "ore", - "u", - "y" - ], - "first": [ - "A", - "Ab", - "Ac", - "Ad", - "Af", - "Agr", - "Ast", - "As", - "Al", - "Adw", - "Adr", - "Ar", - "B", - "Br", - "C", - "C", - "C", - "Cr", - "Ch", - "Cad", - "D", - "Dr", - "Dw", - "Ed", - "Eth", - "Et", - "Er", - "El", - "Eow", - "F", - "Fr", - "G", - "Gr", - "Gw", - "Gw", - "Gal", - "Gl", - "H", - "Ha", - "Ib", - "Jer", - "K", - "Ka", - "Ked", - "L", - "Loth", - "Lar", - "Leg", - "M", - "Mir", - "N", - "Nyd", - "Ol", - "Oc", - "On", - "P", - "Pr", - "R", - "Rh", - "S", - "Sev", - "T", - "Tr", - "Th", - "Th", - "V", - "Y", - "Yb", - "Z", - "W", - "W", - "Wic" - ] - } -} \ No newline at end of file diff --git a/disabled_stuff/data/name_files/hobbits.json b/disabled_stuff/data/name_files/hobbits.json deleted file mode 100644 index 0559bcf..0000000 --- a/disabled_stuff/data/name_files/hobbits.json +++ /dev/null @@ -1,38 +0,0 @@ -{ - - "name": "Tolkien hobbit names", - "author": "Johan Danforth", - "templates": { - "default": "{first}{mid}{final}" - }, - "default_templates": [ - "default" - ], - "parts": { - "final": [ - "bo", - "do", - "doc", - "go", - "grin", - "m" - ], - "mid": [ - "a", - "e", - "i", - "ia", - "o", - "oi", - "u" - ], - "first": [ - "B", - "Dr", - "Fr", - "Mer", - "Per", - "S" - ] - } -} diff --git a/disabled_stuff/data/name_files/inns.json b/disabled_stuff/data/name_files/inns.json deleted file mode 100644 index e257581..0000000 --- a/disabled_stuff/data/name_files/inns.json +++ /dev/null @@ -1,124 +0,0 @@ -{ - "name": "Inn/Tavern/Bar/Pub Names", - "author": "Kimmo \"Arkhan\" Kulovesi", - "templates": { 
- "default": "{start} {end}" - }, - "default_templates": [ - "default" - ], - "parts": { - "end": [ - "Axe", - "Barrel", - "Basilisk", - "Belly", - "Blade", - "Boar", - "Breath", - "Brew", - "Busom", - "Claw", - "Coin", - "Delight", - "Den", - "Dragon", - "Drum", - "Dwarf", - "Fist", - "Flea", - "Flower", - "Gem", - "Gryphon", - "Hand", - "Head", - "Inn", - "Lady", - "Maiden", - "Lantern", - "Lips", - "Monk", - "Mug", - "Nest", - "Orc", - "Pearl", - "Pig", - "Pit", - "Place", - "Tavern", - "Portal", - "Ranger", - "Rest", - "Sailor", - "Sleep", - "Song", - "Stool", - "Swan", - "Swords", - "Tree", - "Unicorn", - "Whale", - "Wish", - "Wizard", - "Rain" - ], - "start": [ - "Bent", - "Black", - "Blind", - "Blue", - "Bob's", - "Joe's", - "Broken", - "Buxom", - "Cat's", - "Crow's", - "Dirty", - "Dragon", - "Dragon's", - "Drunken", - "Eagle's", - "Eastern", - "Falcon's", - "Fawning", - "Fiend's", - "Flaming", - "Frosty", - "Frozen", - "Gilded", - "Genie's", - "Golden", - "Golden", - "Gray", - "Green", - "King's", - "Licked", - "Lion's", - "Mended", - "Octopus", - "Old", - "Old", - "Orc's", - "Otik's", - "Tika's", - "Pink", - "Pot", - "Puking", - "Queen's", - "Red", - "Ruby", - "Delicate", - "Sea", - "Sexy", - "Shining", - "Silver", - "Singing", - "Strange", - "Thirsty", - "Violet", - "White", - "Wild", - "Yawing " - ] - } -} diff --git a/disabled_stuff/data/name_files/items.json b/disabled_stuff/data/name_files/items.json deleted file mode 100644 index 3d6eafe..0000000 --- a/disabled_stuff/data/name_files/items.json +++ /dev/null @@ -1,166 +0,0 @@ -{ - "name": "Fantasy Item Names", - "author": "Luke Rogers", - "website": "http://www.dempltr.com/", - "templates": { - "default": "{type} of {power}", - "default_stat": "{stats} {type} of {power}", - "prefix": "{prefix} {type} of {power}", - "both": "{prefix} {material} {type}", - "both_stat": "{stats} {prefix} {material} {type}", - "material": "{material} {type} of {power}", - "animal": "{prefix} {type} of the {animal}", - 
"short": "{prefix} {type}", - "short_m": "{material} {type}", - "short_m_stat": "{stats} {material} {type}" - }, - "default_templates": ["default","short","prefix","material","both","both_stat","short_m","short_m_stat","default_stat"], - "parts": { - "type": [ - "Sword", - "Wand", - "Cloak", - "Robe", - "Stick", - "Staff", - "Ring", - "Amulet", - "Axe", - "Hammer", - "Shield", - "Greataxe", - "Halberd", - "Scythe", - "Scroll", - "Book", - "Armor", - "Dagger", - "Bow", - "Lance", - "Mace", - "Flail", - "Javelin", - "Dart", - "Spear", - "Sling", - "Rapier", - "Coin", - "Trident", - "Whip", - "Crown", - "Jewel", - "Jem", - "Hoopak", - "Orb", - "Platemail", - "Needle", - "Pin", - "Token", - "Helm", - "Battleaxe", - "Longsword" - ], - "animal": [ - "Bear", - "Horse", - "Chicken", - "Wolf", - "Eagle" - ], - "stats": [ - "+3", - "+2", - "+1", - "-2", - "-1" - ], - "prefix": [ - "Carved", - "Fragile", - "Heavy", - "Worn", - "Arcane", - "Intricate", - "Enchanted", - "Weathered", - "Damaged", - "Spiked", - "Strengthened", - "Fitted", - "Cursed", - "Charred", - "Reinforced" - ], - "material": [ - "Wooden", - "Stone", - "Glass", - "Diamond", - "Iron", - "Icy", - "Ebony", - "Steel", - "Paper", - "Adamantite", - "Obsidian", - "Mythril", - "Granite", - "Metal", - "Dwarven", - "Orcish" - ], - "power": [ - "Valor", - "Magic", - "Power", - "Light", - "Kings", - "Knights", - "Shadows", - "Chaos", - "Flame", - "Faith", - "Fire", - "Death", - "Sorcery", - "Stoning", - "Hope", - "Healing", - "Pain", - "Hurting", - "Belar", - "Slaying", - "Haste", - "Avatar", - "Virtue", - "the Way", - "Angels", - "Devils", - "Speed", - "Flying", - "Seeing", - "Blocking", - "Battle", - "Love", - "Hatred", - "Sorcery", - "Nagash", - "Sauron", - "Regeneration", - "Arthur", - "Ending", - "Torak", - "Aldur", - "Time", - "Evil", - "Notch", - "Destruction", - "Morgoth", - "Lucifer", - "Allure", - "Arkhan", - "Protection" - ] - - } -} diff --git a/disabled_stuff/data/name_files/male.json 
b/disabled_stuff/data/name_files/male.json deleted file mode 100644 index f636deb..0000000 --- a/disabled_stuff/data/name_files/male.json +++ /dev/null @@ -1,217 +0,0 @@ -{ - "name": "Male fantasy names", - "author": "Johan Danforth", - "templates": { - "default": "{first}{mid}{final}" - }, - "default_templates": [ - "default" - ], - "parts": { - "final": [ - "a", - "and", - "b", - "bwyn", - "baen", - "bard", - "c", - "ch", - "can", - "d", - "dan", - "don", - "der", - "dric", - "dus", - "f", - "g", - "gord", - "gan", - "han", - "har", - "jar", - "jan", - "k", - "kin", - "kith", - "kath", - "koth", - "kor", - "kon", - "l", - "li", - "lin", - "lith", - "lath", - "loth", - "ld", - "ldan", - "m", - "mas", - "mos", - "mar", - "mond", - "n", - "nydd", - "nidd", - "nnon", - "nwan", - "nyth", - "nad", - "nn", - "nnor", - "nd", - "p", - "r", - "red", - "ric", - "rid", - "rin", - "ron", - "rd", - "s", - "sh", - "seth", - "sean", - "t", - "th", - "th", - "tha", - "tlan", - "trem", - "tram", - "v", - "vudd", - "w", - "wan", - "win", - "win", - "wyn", - "wyn", - "wyr", - "wyr", - "wyth" - ], - "mid": [ - "a", - "ae", - "ae", - "au", - "ao", - "are", - "ale", - "ali", - "ay", - "ardo", - "e", - "edri", - "ei", - "ea", - "ea", - "eri", - "era", - "ela", - "eli", - "enda", - "erra", - "i", - "ia", - "ie", - "ire", - "ira", - "ila", - "ili", - "ira", - "igo", - "o", - "oha", - "oma", - "oa", - "oi", - "oe", - "ore", - "u", - "y" - ], - "first": [ - "A", - "Ab", - "Ac", - "Ad", - "Af", - "Agr", - "Ast", - "As", - "Al", - "Adw", - "Adr", - "Ar", - "B", - "Br", - "C", - "C", - "C", - "Cr", - "Ch", - "Cad", - "D", - "Dr", - "Dw", - "Ed", - "Eth", - "Et", - "Er", - "El", - "Eow", - "F", - "Fr", - "G", - "Gr", - "Gw", - "Gw", - "Gal", - "Gl", - "H", - "Ha", - "Ib", - "J", - "Jer", - "K", - "Ka", - "Ked", - "L", - "Loth", - "Lar", - "Leg", - "M", - "Mir", - "N", - "Nyd", - "Ol", - "Oc", - "On", - "P", - "Pr", - "Q", - "R", - "Rh", - "S", - "Sev", - "T", - "Tr", - "Th", - "Th", - "Ul", - 
"Um", - "Un", - "V", - "Y", - "Yb", - "Z", - "W", - "W", - "Wic" - ] - } -} diff --git a/disabled_stuff/data/name_files/narn.json b/disabled_stuff/data/name_files/narn.json deleted file mode 100644 index 9d55f6f..0000000 --- a/disabled_stuff/data/name_files/narn.json +++ /dev/null @@ -1,81 +0,0 @@ -{ - "name": "Babylon 5 Narn names", - "author": "Kevin G. Nunn", - "templates": { - "default": "{first}{mid}{final}" - }, - "default_templates": [ - "default" - ], - "parts": { - "final": [ - "ch", - "k", - "kk", - "l", - "n", - "r", - "th", - "s" - ], - "mid": [ - "Ba", - "Bo", - "Da", - "Do", - "Ga", - "Ge", - "Go", - "Ka", - "Ko", - "La", - "Le", - "Lo", - "Ma", - "Mo", - "Na", - "No", - "Oo", - "Pa", - "Po", - "Qua", - "Quo", - "Ra", - "Rala", - "Ro", - "Sha", - "Shali", - "Ska", - "Skali", - "Sta", - "Ste", - "Sto", - "Ta", - "Te", - "Tee", - "To", - "Tha", - "Tho", - "Va", - "Vo", - "Vy", - "Wa" - ], - "first": [ - "Ch'", - "Do'", - "G'", - "Gre'", - "Mak'", - "Na'", - "Re'", - "Sh'", - "So'", - "T'", - "Ta'", - "Th'", - "Thu'", - "Tu'" - ] - } -} \ No newline at end of file diff --git a/disabled_stuff/data/name_files/warrior_cats.json b/disabled_stuff/data/name_files/warrior_cats.json deleted file mode 100644 index dd5854a..0000000 --- a/disabled_stuff/data/name_files/warrior_cats.json +++ /dev/null @@ -1,304 +0,0 @@ -{ - "name": "Warrior Cats - Cat Names", - "author": "Kenyania", - "templates": { - "default": "{start}{end}" - }, - "default_templates": [ - "default" - ], - "parts": { - "end": [ - "tail", - "shine", - "shade", - "breeze", - "foot", - "cloud", - "petal", - "thorn", - "heart", - "streak", - "stripe", - "dapple", - "spot", - "blaze", - "blossom", - "hawk", - "step", - "gaze", - "dapple", - "berry", - "soul", - "swirl", - "scar", - "snow", - "fall", - "flight", - "whisper", - "walker", - "leaf", - "wish", - "fur", - "pelt", - "leg", - "tooth", - "whisker", - "nose", - "stump", - "scale", - "wing", - "feather", - "spark", - "flame", - "willow", - 
"leaf", - "storm", - "back", - "head", - "ear", - "shard", - "eye", - "drift", - "strike", - "wave", - "ripple", - "flare", - "scratch", - "stone", - "stream", - "shine", - "shimmer", - "beak", - "stalk", - "moon", - "dusk", - "cloud", - "spirit", - "pool", - "dawn" - ], - "start": [ - "Misty", - "Mist", - "Blossom", - "Ebony", - "Breeze", - "Wind", - "Thunder", - "River", - "Stream", - "Rat", - "Mouse", - "Hazel", - "Kestrel", - "Serval", - "Snow", - "Blue", - "Red", - "Birch", - "Willow", - "Grass", - "Maple", - "Dawn", - "Shimmer", - "Creek", - "Poppy", - "Fox", - "Badger", - "Grass", - "Shade", - "Shaded", - "Swift", - "Huge", - "Small", - "Big", - "Tiny", - "Little", - "Tall", - "Vine", - "Lion", - "Jay", - "Holly", - "Berry", - "Dove", - "Leaf", - "Squirrel", - "Bent", - "Crooked", - "Bracken", - "Long", - "Grace", - "Song", - "Melody", - "Shine", - "Moss", - "Algae", - "Beetle", - "Spotted", - "Dappled", - "Leaf", - "Yellow", - "Red", - "Bristle", - "Stem", - "Lily", - "Petal", - "Flower", - "Pip", - "Seed", - "Brown", - "Sparkle", - "Gust", - "Flight", - "Pool", - "Lake", - "Forest", - "Fawn", - "Mole", - "Vole", - "Shrew", - "Apple", - "Bark", - "Dog", - "Fallen", - "Bramble", - "Lavender", - "Lilac", - "Lynx", - "Rain", - "Patch", - "Shell", - "Vole", - "Dream", - "Flake", - "Cardinal", - "Splash", - "Puddle", - "Bee", - "Bumble", - "Fire", - "Berry", - "Water", - "Cloud", - "Green", - "Storm", - "Gale", - "Hail", - "Broken", - "Sky", - "Mystic", - "Mystical", - "Log", - "Tree", - "Branch", - "Twig", - "Icicle", - "Ruby", - "Red", - "Rose", - "Fox", - "Rat", - "Badger", - "Nettle", - "Cave", - "Shore", - "Ginger", - "Sun", - "Moon", - "Kink", - "Mink", - "Cherry", - "Weed", - "Breeze", - "Panther", - "Ocelot", - "Ocean", - "Sea", - "Tawny", - "Aqua", - "Gentle", - "Fuzzy", - "Striped", - "Heat", - "Magma", - "Lava", - "Volcano", - "Kestrel", - "Dust", - "Dusk", - "Dawn", - "Marsh", - "Swift", - "Echo", - "Frozen", - "Burrow", - "Topaz", - "Sapphire", - 
"Speckle", - "Egg", - "Shining", - "Blazing", - "Burning", - "Scorch", - "Burnt", - "Clover", - "One", - "Mud", - "Dirt", - "Blend", - "Heather", - "Daisy", - "Juniper", - "Sparrow", - "Brave", - "Murky", - "Sunny", - "Silver", - "Golden", - "Bright", - "Raven", - "Adder", - "Snake", - "Owl", - "Aspen", - "Maple", - "Feather", - "Briar", - "Loud", - "Swirl", - "Swirled", - "Thistle", - "Spiky", - "Bush", - "Blizzard", - "Coral", - "Pebble", - "Rock", - "Stone", - "Light", - "Dark", - "Lightning", - "Vine", - "Fish", - "Minnow", - "Salmon", - "Trout", - "Bubble", - "Smoke", - "Steam", - "Wave", - "Eclipse", - "Twilight", - "Meadow", - "Torn", - "Mallow", - "Faded", - "Dead", - "Half", - "Sharp", - "Skunk", - "Thorn" - ] - } -} diff --git a/disabled_stuff/data/recipes.txt b/disabled_stuff/data/recipes.txt deleted file mode 100644 index 2b0e1db..0000000 --- a/disabled_stuff/data/recipes.txt +++ /dev/null @@ -1,269 +0,0 @@ -//Minecraft Recipes List -//Created by _303 -//Obtained from https://github.com/ClouDev/CloudBot/blob/develop/plugins/data/recipes.txt -//Edited by CHCMATT for Minecraft version: 1.7.4 -// -//Summary of Use: Each column is seperated by a comma (,) and rows by a vertical bar (|). Order of Recipes & Categories taken from -//www.minecraftwiki.net/wiki/Crafting for easier updating in the future (The Future!) 
-// -//Basic Recipes -// -4x Wooden Planks: Wood -4x Stick: Wooden Planks | Wooden Planks -4x Torch: Coal | Stick -4x Torch: Charcoal | Stick -1x Crafting Table: Wooden Planks, Wooden Planks | Wooden Planks, Wooden Planks -1x Furnace: Cobblestone, Cobblestone, Cobblestone | Cobblestone, None, Cobblestone | Cobblestone, Cobblestone, Cobblestone -1x Chest: Wooden Planks, Wooden Planks, Wooden Planks | Wooden Planks, None, Wooden Planks | Wooden Planks, Wooden Planks, Wooden Planks -// -//Block Recipes -// -1x Block of Gold: Gold Ingot, Gold Ingot, Gold Ingot | Gold Ingot, Gold Ingot, Gold Ingot | Gold Ingot, Gold Ingot, Gold Ingot -1x Block of Iron: Iron Ingot, Iron Ingot, Iron Ingot | Iron Ingot, Iron Ingot, Iron Ingot | Iron Ingot, Iron Ingot, Iron Ingot -1x Block of Diamond: Diamond, Diamond, Diamond | Diamond, Diamond, Diamond | Diamond, Diamond, Diamond -1x Block of Coal: Coal, Coal, Coal | Coal, Coal, Coal | Coal, Coal, Coal -1x Block of Redstone: Redstone Dust, Redstone Dust, Redstone Dust | Redstone Dust, Redstone Dust, Redstone Dust | Redstone Dust, Redstone Dust, Redstone Dust -1x Lapis Lazuli Block: Lapis Lazuli, Lapis Lazuli, Lapis Lazuli | Lapis Lazuli, Lapis Lazuli, Lapis Lazuli | Lapis Lazuli, Lapis Lazuli, Lapis Lazuli -1x Emerald Block: Emerald, Emerald, Emerald | Emerald, Emerald, Emerald | Emerald, Emerald, Emerald -1x Glowstone: Glowstone Dust, Glowstone Dust | Glowstone Dust, Glowstone Dust -1x Wool: String, String | String, String -1x TNT: Gunpowder, Sand, Gunpowder | Sand, Gunpowder, Sand | Gunpowder, Sand, Gunpowder -3x Cobblestone Slab: Cobblestone, Cobblestone, Cobblestone -3x Stone Slab: Stone, Stone, Stone -3x Sandstone Slab: Sandstone, Sandstone, Sandstone -3x Wooden Slab: Wooden Planks, Wooden Planks, Wooden Planks -3x Stone Bricks Slab: Stone Bricks, Stone Bricks, Stone Bricks -3x Bricks Slab: Bricks, Bricks, Bricks -4x Wooden Stairs: Wooden Planks, None, None | Wooden Planks, Wooden Planks, None | Wooden Planks, Wooden Planks, Wooden 
Planks -4x Stone Stairs: Cobblestone, None, None | Cobblestone, Cobblestone, None | Cobblestone, Cobblestone, Cobblestone -4x Brick Stairs: Bricks, None, None | Bricks, Bricks, None | Bricks, Bricks, Bricks -4x Nether Brick Stairs: Nether Bricks, None, None | Nether Bricks, Nether Bricks, None | Nether Bricks, Nether Bricks, Nether Bricks -4x Stone Brick Stairs: Stone Bricks, None, None | Stone Bricks, Stone Bricks, None | Stone Bricks, Stone Bricks, Stone Bricks -1x Snow: Snowball, Snowball | Snowball, Snowball -1x Clay Block: Clay, Clay | Clay, Clay -1x Brick Block: Brick, Brick | Brick, Brick -4x Stone Bricks: Stone, Stone | Stone, Stone -1x Bookshelf: Wooden Planks, Wooden Planks, Wooden Planks | Book, Book, Book | Wooden Planks, Wooden Planks, Wooden Planks -1x Sandstone: Sand, Sand | Sand, Sand -1x Jack 'o' Lantern: Pumpkin | Torch -// -//Tool Recipes -// -1x Wooden Pickaxe: Wooden Planks, Wooden Planks, Wooden Planks | None, Stick, None | None, Stick, None -1x Wooden Axe: Wooden Planks, Wooden Planks | Wooden Planks, Stick | None, Stick -1x Wooden Hoe: Wooden Planks, Wooden Planks | None, Stick | None, Stick -1x Wooden Shovel: Wooden Planks | Stick | Stick -1x Stone Pickaxe: Cobblestone, Cobblestone, Cobblestone | None, Stick, None | None, Stick, None -1x Stone Axe: Cobblestone, Cobblestone | Cobblestone, Stick | None, Stick -1x Stone Hoe: Cobblestone, Cobblestone | None, Stick | None, Stick -1x Stone Shovel: Cobblestone | Stick | Stick -1x Iron Pickaxe: Iron Ingot, Iron Ingot, Iron Ingot | None, Stick, None | None, Stick, None -1x Iron Axe: Iron Ingot, Iron Ingot | Iron Ingot, Stick | None, Stick -1x Iron Hoe: Iron Ingot, Iron Ingot | None, Stick | None, Stick -1x Iron Shovel: Iron Ingot | Stick | Stick -1x Diamond Pickaxe: Diamond, Diamond, Diamond | None, Stick, None | None, Stick, None -1x Diamond Axe: Diamond, Diamond | Diamond, Stick | None, Stick -1x Diamond Hoe: Diamond, Diamond | None, Stick | None, Stick -1x Diamond Shovel: Diamond | Stick | Stick 
-1x Golden Pickaxe: Gold Ingot, Gold Ingot, Gold Ingot | None, Stick, None | None, Stick, None -1x Golden Axe: Gold Ingot, Gold Ingot | Gold Ingot, Stick | None, Stick -1x Golden Hoe: Gold Ingot, Gold Ingot | None, Stick | None, Stick -1x Golden Shovel: Gold Ingot | Stick | Stick -1x Flint and Steel: Iron Ingot, None | None, Flint -1x Bucket: Iron Ingot, None, Iron Ingot | None, Iron Ingot, None -1x Compass: None, Iron Ingot, None | Iron Ingot, Redstone, Iron Ingot | None, Iron Ingot, None -1x Map: Paper, Paper, Paper | Paper, Compass, Paper | Paper, Paper, Paper -1x Clock: None, Gold Ingot, None | Gold Ingot, Redstone, Gold Ingot | None, Gold Ingot, None -1x Fishing Rod: None, None, Stick | None, Stick, String | Stick, None, String -1x Shears: None, Iron Ingot | Iron Ingot, None -3x Fire Charge: Gunpowder, None, None | Blaze Powder, Coal/Charcoal, None -// -//Weapon Recipes -// -1x Wooden Sword: Wooden Planks | Wooden Planks | Stick -1x Stone Sword: Cobblestone | Cobblestone | Stick -1x Iron Sword: Iron Ingot | Iron Ingot | Stick -1x Diamond Sword: Diamond | Diamond | Stick -1x Golden Sword: Gold Ingot | Gold Ingot | Stick -1x Bow: None, Stick, String | Stick, None, String | None, Stick, String -4x Arrow: Flint | Stick | Feather -// -//Armor Recipes -// -1x Leather Tunic: Leather, None, Leather | Leather, Leather, Leather | Leather, Leather, Leather -1x Leather Pants: Leather, Leather, Leather | Leather, None, Leather | Leather, None, Leather -1x Leather Cap: Leather, Leather, Leather | Leather, None, Leather -1x Leather Boots: Leather, None, Leather | Leather, None, Leather -1x Chain Chestplate: Fire, None, Fire | Fire, Fire, Fire | Fire, Fire, Fire -1x Chain Leggings: Fire, Fire, Fire | Fire, None, Fire | Fire, None, Fire -1x Chain Helmet: Fire, Fire, Fire | Fire, None, Fire -1x Chain Boots: Fire, None, Fire | Fire, None, Fire -1x Iron Chestplate: Iron Ingot, None, Iron Ingot | Iron Ingot, Iron Ingot, Iron Ingot | Iron Ingot, Iron Ingot, Iron Ingot -1x Iron 
Leggings: Iron Ingot, Iron Ingot, Iron Ingot | Iron Ingot, None, Iron Ingot | Iron Ingot, None, Iron Ingot -1x Iron Helmet: Iron Ingot, Iron Ingot, Iron Ingot | Iron Ingot, None, Iron Ingot -1x Iron Boots: Iron Ingot, None, Iron Ingot | Iron Ingot, None, Iron Ingot -1x Diamond Chestplate: Diamond, None, Diamond | Diamond, Diamond, Diamond | Diamond, Diamond, Diamond -1x Diamond Leggings: Diamond, Diamond, Diamond | Diamond, None, Diamond | Diamond, None, Diamond -1x Diamond Helmet: Diamond, Diamond, Diamond | Diamond, None, Diamond -1x Diamond Boots: Diamond, None, Diamond | Diamond, None, Diamond -1x Golden Chestplate: Gold Ingot, None, Gold Ingot | Gold Ingot, Gold Ingot, Gold Ingot | Gold Ingot, Gold Ingot, Gold Ingot -1x Golden Leggings: Gold Ingot, Gold Ingot, Gold Ingot | Gold Ingot, None, Gold Ingot | Gold Ingot, None, Gold Ingot -1x Golden Helmet: Gold Ingot, Gold Ingot, Gold Ingot | Gold Ingot, None, Gold Ingot -1x Golden Boots: Gold Ingot, None, Gold Ingot | Gold Ingot, None, Gold Ingot -// -//Transportation Recipes -// -1x Minecart: Iron Ingot, None, Iron Ingot | Iron Ingot, Iron Ingot, Iron Ingot -1x Minecart with Chest: Chest | Minecart -1x Minecart with Furnace: Furnace | Minecart -16x Rail: Iron Ingot, None, Iron Ingot | Iron Ingot, Stick, Iron Ingot | Iron Ingot, None, Iron Ingot -6x Powered Rail: Gold Ingot, None, Gold Ingot | Gold Ingot, Stick, Gold Ingot | Gold Ingot, Redstone, Gold Ingot -6x Detector Rail: Iron Ingot, None, Iron Ingot | Iron Ingot, Pressure Plate, Iron Ingot | Iron Ingot, Redstone, Iron Ingot -1x Boat: Wooden Planks, None, Wooden Planks | Wooden Planks, Wooden Planks, Wooden Planks -1x Carrot On A Stick: Fishing Rod | None, Carrot -// -//Mechanism Recipes -// -1x Wooden Door: Wooden Planks, Wooden Planks | Wooden Planks, Wooden Planks | Wooden Planks, Wooden Planks -1x Iron Door: Iron Ingot, Iron Ingot | Iron Ingot, Iron Ingot | Iron Ingot, Iron Ingot -2x Trapdoor: Wooden Planks, Wooden Planks, Wooden Planks | Wooden Planks, 
Wooden Planks, Wooden Planks -1x Stone Pressure Plate: Stone, Stone -1x Wooden Pressure Plate: Wooden Planks, Wooden Planks -1x Stone Button: Stone -1x Wooden Button: Wooden Planks -1x Redstone Torch: Redstone | Stick -1x Lever: Stick | Cobblestone -1x Note Block: Wooden Planks, Wooden Planks, Wooden Planks | Wooden Planks, Redstone, Wooden Planks | Wooden Planks, Wooden Planks, Wooden Planks -1x Jukebox: Wooden Planks, Wooden Planks, Wooden Planks | Wooden Planks, Diamond, Wooden Planks | Wooden Planks, Wooden Planks, Wooden Planks -1x Dispenser: Cobblestone, Cobblestone, Cobblestone | Cobblestone, Bow, Cobblestone | Cobblestone, Redstone, Cobblestone -1x Redstone Repeater: Redstone Torch, Redstone, Redstone Torch | Stone, Stone, Stone -1x Piston: Wooden Planks, Wooden Planks, Wooden Planks | Cobblestone, Iron Ingot, Cobblestone | Cobblestone, Redstone, Cobblestone -1x Sticky Piston: Slime Ball | Piston -1x Redstone Lamp: None, Redstone Dust, None | Redstone Dust, Glowstone Block, Redstone Dust | None, Redstone Dust, None -1x Trapped Chest: Chest, Tripwire Hook -1x Dropper: Cobblestone, Cobblestone, Cobblestone | Cobblestone, None, Cobblestone | Cobblestone, Redstone Dust, Cobblestone -1x Weighted Pressure Plate (Heavy): Iron Ingot, Iron Ingot -1x Weighted Pressure Plate (Light): Gold Ingot, Gold Ingot -2x Tripwire Hook: Iron Ingot | Stick | Wooden Planks -// -//Food Recipes -// -4x Bowl: Wooden Planks, None, Wooden Planks | None, Wooden Planks, None -1x Mushroom Stew: Brown Mushroom, Red Mushroom | Bowl -1x Bread: Wheat, Wheat, Wheat -1x Sugar: Sugar Canes -1x Cake: Milk, Milk, Milk | Sugar, Egg, Sugar | Wheat, Wheat, Wheat -8x Cookie: Wheat, Cocoa Beans, Wheat -1x Golden Apple: Gold Nugget, Gold Nugget, Gold Nugget | Gold Nugget, Apple, Gold Nugget | Gold Nugget, Gold Nugget, Gold Nugget -1x Melon Block: Melon, Melon, Melon | Melon, Melon, Melon | Melon, Melon, Melon -1x Melon Seeds: Melon Slice -4x Pumpkin Seeds: Pumpkin -// -//Miscellaneous Recipes -// -9x 
Gold Ingot: Block of Gold -9x Iron Ingot: Block of Iron -9x Diamond: Block of Diamond -9x Lapis Lazuli: Lapis Lazuli Block -2x Ladder: Stick, None, Stick | Stick, Stick, Stick | Stick, None, Stick -1x Sign: Wooden Planks, Wooden Planks, Wooden Planks | Wooden Planks, Wooden Planks, Wooden Planks | None, Stick, None -1x Painting: Stick, Stick, Stick | Stick, Black Wool, Stick | Stick, Stick, Stick -16x Iron Bars: Iron Ingot, Iron Ingot, Iron Ingot | Iron Ingot, Iron Ingot, Iron Ingot -16x Glass Pane: Glass, Glass, Glass | Glass, Glass, Glass -3x Paper: Sugar Canes, Sugar Canes, Sugar Canes -1x Book: Paper | Paper | Paper -2x Fence: Stick, Stick, Stick | Stick, Stick, Stick -2x Nether Brick Fence: Nether Brick, Nether Brick, Nether Brick | Nether Brick, Nether Brick, Nether Brick -1x Fence Gate: Stick, Wooden Planks, Stick | Stick, Wooden Planks, Stick -1x Bed: Wool, Wool, Wool | Wooden Planks, Wooden Planks, Wooden Planks -9x Gold Nugget: Gold Ingot -1x Gold Ingot: Gold Nugget, Gold Nugget, Gold Nugget | Gold Nugget, Gold Nugget, Gold Nugget | Gold Nugget, Gold Nugget, Gold Nugget -1x Eye of Ender: Ender Pearl | Blaze Powder -1x Item Frame: Stick, Stick, Stick | Stick, Leather, Stick | Stick, Stick, Stick -1x Anvil: Block of Iron, Block of Iron, Block of Iron | None, Iron Ingot, None | Iron Ingot, Iron Ingot, Iron Ingot -1x Ender Chest: Obsidian, Obsidian, Obsidian | Osbidian, Eye of Ender, Obsidian | Obsidian, Obsidian, Obsidian -1x Flower Pot: Brick, None, Brick | None, Brick, None -2x Lead: None, String, String | None, Slime Ball, String | String, None, None -// -//Dye Recipes -// -3x Bone Meal: Bone -2x Light Gray Dye: Gray Dye, Bone Meal -2x Gray Dye: Ink Sac, Bone Meal -2x Rose Red: Rose -2x Orange Dye: Rose Red, Dandelion Yellow -2x Dandelion Yellow: Flower -2x Lime Dye: Cactus Green, Bone Meal -2x Light Blue Dye: Lapis Lazuli, Bone Meal -2x Cyan Dye: Lapis Lazuli, Cactus Green -2x Purple Dye: Lapis Lazuli, Rose Red -4x Magenta Dye: Lapis Lazuli, Rose Red, 
Rose Red, Bone Meal -2x Pink Dye: Rose Red, Bone Meal -// -//Wool Recipes -// -1x Light Gray Wool: Light Gray Dye, Wool -1x Gray Wool: Gray Dye, Wool -1x Black Wool: Ink Sac, Wool -1x Red Wool: Rose Red, Wool -1x Orange Wool: Orange Dye, Wool -1x Yellow Wool: Dandelion Yellow, Wool -1x Lime Wool: Lime Dye, Wool -1x Green Wool: Cactus Green, Wool -1x Light Blue Wool: Light Blue Dye, Wool -1x Cyan Wool: Cyan Dye, Wool -1x Blue Wool: Lapis Lazuli, Wool -1x Purple Wool: Purple Dye, Wool -1x Magenta Wool: Magenta Dye, Wool -1x Pink Wool: Pink Dye, Wool -1x Brown Wool: Cocoa Beans, Wool -1x Wool: Bone Meal, Wool -// -//Enchancement & Brewing Recipes -// -3x Glass Bottle: Glass, None, Glass | None, Glass, None -1x Cauldron: Iron Ingot, None, Iron Ingot | Iron Ingot, None, Iron Ingot | Iron Ingot, Iron Ingot, Iron Ingot -1x Brewing Stand: None, Blaze Rod, None | Cobblestone, Cobblestone, Cobblestone -2x Blaze Powder: Blaze Rod -1x Magma Cream: Slimeball | Blaze Powder -1x Fermented Spider Eye: Spider Eye | Brown Mushroom, Sugar -1x Glistering Melon: Melon Slice, Gold Nugget -9x Gold Nugget: Gold Ingot -1x Enchantment Table: None, Book, None | Diamond, Obsidian, Diamond | Obsidian, Obsidian, Obsidian -// -//Stained Glass Recipes -// -8x White Stained Glass: Glass, Glass, Glass | Glass, Bone Meal, Glass | Glass, Glass, Glass -8x Orange Stained Glass: Glass, Glass, Glass | Glass, Orange Dye, Glass | Glass, Glass, Glass -8x Magenta Stained Glass: Glass, Glass, Glass | Glass, Magenta Dye, Glass | Glass, Glass, Glass -8x Light Blue Stained Glass: Glass, Glass, Glass | Glass, Light Blue Dye, Glass | Glass, Glass, Glass -8x Yellow Stained Glass: Glass, Glass, Glass | Glass, Dandelion Yellow, Glass | Glass, Glass, Glass -8x Lime Stained Glass: Glass, Glass, Glass | Glass, Lime Dye, Glass | Glass, Glass, Glass -8x Pink Stained Glass: Glass, Glass, Glass | Glass, Pink Dye, Glass | Glass, Glass, Glass -8x Gray Stained Glass: Glass, Glass, Glass | Glass, Gray Dye, Glass | Glass, Glass, 
Glass -8x Light Gray Stained Glass: Glass, Glass, Glass | Glass, Light Gray Dye, Glass | Glass, Glass, Glass -8x Cyan Stained Glass: Glass, Glass, Glass | Glass, Cyan Dye, Glass | Glass, Glass, Glass -8x Purple Stained Glass: Glass, Glass, Glass | Glass, Purple Dye, Glass | Glass, Glass, Glass -8x Blue Stained Glass: Glass, Glass, Glass | Glass, Lapis Lazuli, Glass | Glass, Glass, Glass -8x Brown Stained Glass: Glass, Glass, Glass | Glass, Cocoa Beans, Glass | Glass, Glass, Glass -8x Green Stained Glass: Glass, Glass, Glass | Glass, Cactus Green, Glass | Glass, Glass, Glass -8x Red Stained Glass: Glass, Glass, Glass | Glass, Rose Red, Glass | Glass, Glass, Glass -8x Black Stained Glass: Glass, Glass, Glass | Glass, Inc Sac, Glass | Glass, Glass, Glass -// -//Stained Glass Panes -// -16x White Stained Glass Panes: White Stained Glass, White Stained Glass, White Stained Glass | White Stained Glass, White Stained Glass, White Stained Glass -16x Orange Stained Glass Panes: Orange Stained Glass, Orange Stained Glass, Orange Stained Glass | Orange Stained Glass, Orange Stained Glass, Orange Stained Glass -16x Magenta Stained Glass Panes: Magenta Stained Glass, Magenta Stained Glass, Magenta Stained Glass | Magenta Stained Glass, Magenta Stained Glass, Magenta Stained Glass -16x Light Blue Stained Glass Panes: Light Blue Stained Glass, Light Blue Stained Glass, Light Blue Stained Glass | Light Blue Stained Glass, Light Blue Stained Glass, Light Blue Stained Glass -16x Yellow Stained Glass Panes: Yellow Stained Glass, Yellow Stained Glass, Yellow Stained Glass | Yellow Stained Glass, Yellow Stained Glass, Yellow Stained Glass -16x Lime Stained Glass Panes: Lime Stained Glass, Lime Stained Glass, Lime Stained Glass | Lime Stained Glass, Lime Stained Glass, Lime Stained Glass -16x Pink Stained Glass Panes: Pink Stained Glass, Pink Stained Glass, Pink Stained Glass | Pink Stained Glass, Pink Stained Glass, Pink Stained Glass -16x Gray Stained Glass Panes: Gray Stained Glass, 
Gray Stained Glass, Gray Stained Glass | Gray Stained Glass, Gray Stained Glass, Gray Stained Glass -16x Light Gray Stained Glass Panes: Light Gray Stained Glass, Light Gray Stained Glass, Light Gray Stained Glass | Light Gray Stained Glass, Light Gray Stained Glass, Light Gray Stained Glass -16x Cyan Stained Glass Panes: Cyan Stained Glass, Cyan Stained Glass, Cyan Stained Glass | Cyan Stained Glass, Cyan Stained Glass, Cyan Stained Glass -16x Purple Stained Glass Panes: Purple Stained Glass, Purple Stained Glass, Purple Stained Glass | Purple Stained Glass, Purple Stained Glass, Purple Stained Glass -16x Blue Stained Glass Panes: Blue Stained Glass, Blue Stained Glass, Blue Stained Glass | Blue Stained Glass, Blue Stained Glass, Blue Stained Glass -16x Brown Stained Glass Panes: Brown Stained Glass, Brown Stained Glass, Brown Stained Glass | Brown Stained Glass, Brown Stained Glass, Brown Stained Glass -16x Green Stained Glass Panes: Green Stained Glass, Green Stained Glass, Green Stained Glass | Green Stained Glass, Green Stained Glass, Green Stained Glass -16x Black Stained Glass Panes: Black Stained Glass, Black Stained Glass, Black Stained Glass | Black Stained Glass, Black Stained Glass, Black Stained Glass diff --git a/disabled_stuff/data/slaps.json b/disabled_stuff/data/slaps.json deleted file mode 100644 index 6ec0166..0000000 --- a/disabled_stuff/data/slaps.json +++ /dev/null @@ -1,69 +0,0 @@ -{ - "templates":[ - "{hits} {user} with a {item}.", - "{hits} {user} around a bit with a {item}.", - "{throws} a {item} at {user}.", - "{throws} a few {item}s at {user}.", - "grabs a {item} and {throws} it in {user}'s face.", - "launches a {item} in {user}'s general direction.", - "sits on {user}'s face while slamming a {item} into their crotch.", - "starts slapping {user} silly with a {item}.", - "holds {user} down and repeatedly {hits} them with a {item}.", - "prods {user} with a {item}.", - "picks up a {item} and {hits} {user} with it.", - "ties {user} to a 
chair and {throws} a {item} at them.", - "{hits} {user} {where} with a {item}.", - "ties {user} to a pole and whips them with a {item}." - ], - "parts": { - "item":[ - "cast iron skillet", - "large trout", - "baseball bat", - "wooden cane", - "nail", - "printer", - "shovel", - "pair of trousers", - "CRT monitor", - "diamond sword", - "baguette", - "physics textbook", - "toaster", - "portrait of Richard Stallman", - "television", - "mau5head", - "five ton truck", - "roll of duct tape", - "book", - "laptop", - "old television", - "sack of rocks", - "rainbow trout", - "cobblestone block", - "lava bucket", - "rubber chicken", - "spiked bat", - "gold block", - "fire extinguisher", - "heavy rock", - "chunk of dirt" - ], - "throws": [ - "throws", - "flings", - "chucks" - ], - "hits": [ - "hits", - "whacks", - "slaps", - "smacks" - ], - "where": [ - "in the chest", - "on the head", - "on the bum" - ] - } -} diff --git a/disabled_stuff/data/slogans.txt b/disabled_stuff/data/slogans.txt deleted file mode 100644 index 39b07f7..0000000 --- a/disabled_stuff/data/slogans.txt +++ /dev/null @@ -1,197 +0,0 @@ - - get ready. -Everyone should believe in . -, where success is at home. -, your way! -, this is it! -And on the eighth day, god created . - innovate your world. -Are you ready for ? -See you at . -'s got it all! - makes your day. -rific. -The queen buys . -Where's your ? - groove. -There's lots of fun in . -, you'll love it! -I'd do anything for . -Go to heaven with . - on the outside, tasty on the inside. - - a safe place in an unsafe world! -World's finest . - is your friend. -, it's as simple as that! -Free . - - Just do it. -The age of . -The spirit. -Let's talk about . -Do it with . - brings out the best. -Take what you want, but leave alone! - for your health. - is my passion. -The best in the world. -Follow your . -A day with . -The American Way of . -Enjoy . -The Power of . -Every has a story. - - it's like heaven! -Endless possibilities with . -Go farther with . 
- is my world. - evolution. - - now! -For the love of . -When you say you've said it all. -, pure lust. -Who is ? - empowers you. -Don't worry, takes care. -My beats everything. -Share moments, share . - it's a kind of magic. - - living innovation - - a class of it's own -. We build smiles. -I believe in . - - Think different. -Let your flow. -Heal the world with . -I'd sleep with . -Bigger. Better. . -You can't beat . -Say it with . -, there's no better way. -, stay in touch. - never lies. -Play , start living. -Don't forget your . -The effect. - - what more could you want? -Kick ass with ! -You know when it's . -Good to know . -My and me. - - be prepared. -Oh my gods, it's a . -There is no life without . - - You see this name, you think dirty. - is my sport. -, one for all. - is a never ending story. - for you! -Everyone loves . -, better than sex. - only. -Inspired by . -. Making people sucessful in a changing world. -Who wouldn't fight for ? -Lucky . -Think. Feel. . - rocks. -Think . - is the sound of the future. -I want and I want it now. -, the real thing. -The gods made . -With a name like , it has to be good. -Discover the world of . -Live . -You don't want as your enemy! - - enjoy the difference. -Buy now! -Don't mess with . -Made by . -Be alive with . - values. -High life with . -, whiter than the whitest! - - play it! -Can you feel ? -Simply ! -? You bet. - - The Revolution. - - your game. - is your safe place in an unsafe world! -Go far with . -God made . - keeps going, and going, and going... -The universe. -I can't believe it's . - moments. -I lost weight with . -There's only one true ! -The Future of . - - If you love . - beat. -My way is . -Think different, think . -Nonstop . -All you need is . - is what the world was waiting for. - for the masses. -, the smart choice. - forever. - - Your personal entertainer. - makes me hot. - kicks ass. -There's only one thing in the world I want and that is . - will be for you what you want it to be. 
- for everyone. - - once you have it, you love it. -Break through with . -The original . -3... 2... 1... . -The goddess made . -Halleluja, it's a . - is rolling, the others are stoned. -My , your , for all! - for a professional image. - for president. -Make yourself at home with . -, just the best. -You can't stop . - extra dry. -Call a friend, call . -Don't get in the way of . -, your family will love you. - is a female force. -Feel good with . -You wouldn't want to miss . - Dreamteam. -I wish i was a . -Where's ? -Jesus loves . -The Queen of . -Life's beautiful with . -Swing your . -The one and only . -? Yes please. -, your specialist. - is good for you. -Feel the magic of . - rules. -It's time to think about . -, so what! - inside you. -The Spirit of . -Up, up and away with . - - first class! -It's my ! -The secret of . -Easy . -Just . - never die. - - be ready. -Say . -Feel it - ! -I trust . -, to hell with the rest. -, the original. - is the only way to be happy. - - One name. One legend. -The ideal . 
diff --git a/disabled_stuff/dice.py b/disabled_stuff/dice.py deleted file mode 100644 index a89f3d5..0000000 --- a/disabled_stuff/dice.py +++ /dev/null @@ -1,90 +0,0 @@ -# Written by Scaevolus, updated by Lukeroge - -import re -import random - -from util import hook - - -whitespace_re = re.compile(r'\s+') -valid_diceroll = r'^([+-]?(?:\d+|\d*d(?:\d+|F))(?:[+-](?:\d+|\d*d(?:\d+|' \ - 'F)))*)( .+)?$' -valid_diceroll_re = re.compile(valid_diceroll, re.I) -sign_re = re.compile(r'[+-]?(?:\d*d)?(?:\d+|F)', re.I) -split_re = re.compile(r'([\d+-]*)d?(F|\d*)', re.I) - - -def n_rolls(count, n): - """roll an n-sided die count times""" - if n == "F": - return [random.randint(-1, 1) for x in xrange(min(count, 100))] - if n < 2: # it's a coin - if count < 100: - return [random.randint(0, 1) for x in xrange(count)] - else: # fake it - return [int(random.normalvariate(.5 * count, (.75 * count) ** .5))] - else: - if count < 100: - return [random.randint(1, n) for x in xrange(count)] - else: # fake it - return [int(random.normalvariate(.5 * (1 + n) * count, - (((n + 1) * (2 * n + 1) / 6. - - (.5 * (1 + n)) ** 2) * count) ** .5))] - - -@hook.command('roll') -#@hook.regex(valid_diceroll, re.I) -@hook.command -def dice(inp): - """dice -- Simulates dice rolls. Example of : - 'dice 2d20-d5+4 roll 2'. D20s, subtract 1D5, add 4""" - - try: # if inp is a re.match object... 
- (inp, desc) = inp.groups() - except AttributeError: - (inp, desc) = valid_diceroll_re.match(inp).groups() - - if "d" not in inp: - return - - spec = whitespace_re.sub('', inp) - if not valid_diceroll_re.match(spec): - return "Invalid dice roll" - groups = sign_re.findall(spec) - - total = 0 - rolls = [] - - for roll in groups: - count, side = split_re.match(roll).groups() - count = int(count) if count not in " +-" else 1 - if side.upper() == "F": # fudge dice are basically 1d3-2 - for fudge in n_rolls(count, "F"): - if fudge == 1: - rolls.append("\x033+\x0F") - elif fudge == -1: - rolls.append("\x034-\x0F") - else: - rolls.append("0") - total += fudge - elif side == "": - total += count - else: - side = int(side) - try: - if count > 0: - d = n_rolls(count, side) - rolls += map(str, d) - total += sum(d) - else: - d = n_rolls(-count, side) - rolls += [str(-x) for x in d] - total -= sum(d) - except OverflowError: - # I have never seen this happen. If you make this happen, you win a cookie - return "Thanks for overflowing a float, jerk >:[" - - if desc: - return "{}: {} ({})".format(desc.strip(), total, ", ".join(rolls)) - else: - return "{} ({})".format(total, ", ".join(rolls)) diff --git a/disabled_stuff/dictionary.py b/disabled_stuff/dictionary.py deleted file mode 100644 index 5b4123b..0000000 --- a/disabled_stuff/dictionary.py +++ /dev/null @@ -1,89 +0,0 @@ -# Plugin by GhettoWizard and Scaevolus -import re - -from util import hook -from util import http - - -@hook.command('dictionary') -@hook.command -def define(inp): - """define -- Fetches definition of .""" - - url = 'http://ninjawords.com/' - - h = http.get_html(url + http.quote_plus(inp)) - - definition = h.xpath('//dd[@class="article"] | ' - '//div[@class="definition"] |' - '//div[@class="example"]') - - if not definition: - return u'No results for {} :('.format(inp) - - def format_output(show_examples): - result = u'{}: '.format(h.xpath('//dt[@class="title-word"]/a/text()')[0]) - - correction = 
h.xpath('//span[@class="correct-word"]/text()') - if correction: - result = 'Definition for "{}": '.format(correction[0]) - - sections = [] - for section in definition: - if section.attrib['class'] == 'article': - sections += [[section.text_content() + ': ']] - elif section.attrib['class'] == 'example': - if show_examples: - sections[-1][-1] += ' ' + section.text_content() - else: - sections[-1] += [section.text_content()] - - for article in sections: - result += article[0] - if len(article) > 2: - result += u' '.join(u'{}. {}'.format(n + 1, section) - for n, section in enumerate(article[1:])) - else: - result += article[1] + ' ' - - synonyms = h.xpath('//dd[@class="synonyms"]') - if synonyms: - result += synonyms[0].text_content() - - result = re.sub(r'\s+', ' ', result) - result = re.sub('\xb0', '', result) - return result - - result = format_output(True) - if len(result) > 450: - result = format_output(False) - - if len(result) > 450: - result = result[:result.rfind(' ', 0, 450)] - result = re.sub(r'[^A-Za-z]+\.?$', '', result) + ' ...' - - return result - - -@hook.command('e') -@hook.command -def etymology(inp): - """etymology -- Retrieves the etymology of .""" - - url = 'http://www.etymonline.com/index.php' - - h = http.get_html(url, term=inp) - - etym = h.xpath('//dl') - - if not etym: - return u'No etymology found for {} :('.format(inp) - - etym = etym[0].text_content() - - etym = ' '.join(etym.split()) - - if len(etym) > 400: - etym = etym[:etym.rfind(' ', 0, 400)] + ' ...' 
- - return etym diff --git a/disabled_stuff/domainr.py b/disabled_stuff/domainr.py deleted file mode 100644 index e853bfa..0000000 --- a/disabled_stuff/domainr.py +++ /dev/null @@ -1,18 +0,0 @@ -from util import hook, http - - -@hook.command -def domainr(inp): - """domainr - Use domain.nr's API to search for a domain, and similar domains.""" - try: - data = http.get_json('http://domai.nr/api/json/search?q=' + inp) - except (http.URLError, http.HTTPError) as e: - return "Unable to get data for some reason. Try again later." - if data['query'] == "": - return "An error occurred: {status} - {message}".format(**data['error']) - domains = "" - for domain in data['results']: - domains += ("\x034" if domain['availability'] == "taken" else ( - "\x033" if domain['availability'] == "available" else "\x031")) + domain['domain'] + "\x0f" + domain[ - 'path'] + ", " - return "Domains: " + domains diff --git a/disabled_stuff/down.py b/disabled_stuff/down.py deleted file mode 100644 index f03c078..0000000 --- a/disabled_stuff/down.py +++ /dev/null @@ -1,20 +0,0 @@ -import urlparse - -from util import hook, http - - -@hook.command -def down(inp): - """down -- Checks if the site at is up or down.""" - - if 'http://' not in inp: - inp = 'http://' + inp - - inp = 'http://' + urlparse.urlparse(inp).netloc - - # http://mail.python.org/pipermail/python-list/2006-December/589854.html - try: - http.get(inp, get_method='HEAD') - return '{} seems to be up'.format(inp) - except http.URLError: - return '{} seems to be down'.format(inp) diff --git a/disabled_stuff/drama.py b/disabled_stuff/drama.py deleted file mode 100644 index d348cba..0000000 --- a/disabled_stuff/drama.py +++ /dev/null @@ -1,31 +0,0 @@ -import re - -from util import hook, http, text - - -api_url = "http://encyclopediadramatica.se/api.php?action=opensearch" -ed_url = "http://encyclopediadramatica.se/" - - -@hook.command -def drama(inp): - """drama -- Gets the first paragraph of - the Encyclopedia Dramatica article on .""" - - 
j = http.get_json(api_url, search=inp) - - if not j[1]: - return "No results found." - article_name = j[1][0].replace(' ', '_').encode('utf8') - - url = ed_url + http.quote(article_name, '') - page = http.get_html(url) - - for p in page.xpath('//div[@id="bodyContent"]/p'): - if p.text_content(): - summary = " ".join(p.text_content().splitlines()) - summary = re.sub("\[\d+\]", "", summary) - summary = text.truncate_str(summary, 220) - return "{} :: {}".format(summary, url) - - return "Unknown Error." diff --git a/disabled_stuff/eightball.py b/disabled_stuff/eightball.py deleted file mode 100644 index 8d91303..0000000 --- a/disabled_stuff/eightball.py +++ /dev/null @@ -1,23 +0,0 @@ -import random - -from util import hook, text - - -color_codes = { - "": "\x02\x0305", - "": "\x02\x0303", - "": "\x02" -} - -with open("plugins/data/8ball_responses.txt") as f: - responses = [line.strip() for line in - f.readlines() if not line.startswith("//")] - - -@hook.command('8ball') -def eightball(inp, action=None): - """8ball -- The all knowing magic eight ball, - in electronic form. Ask and it shall be answered!""" - - magic = text.multiword_replace(random.choice(responses), color_codes) - action("shakes the magic 8 ball... 
{}".format(magic)) diff --git a/disabled_stuff/encrypt.py b/disabled_stuff/encrypt.py deleted file mode 100644 index e391a04..0000000 --- a/disabled_stuff/encrypt.py +++ /dev/null @@ -1,105 +0,0 @@ -import os -import base64 -import json -import hashlib - -from Crypto import Random -from Crypto.Cipher import AES -from Crypto.Protocol.KDF import PBKDF2 - -from util import hook - - -# helper functions to pad and unpad a string to a specified block size -# -BS = AES.block_size -pad = lambda s: s + (BS - len(s) % BS) * chr(BS - len(s) % BS) -unpad = lambda s: s[0:-ord(s[-1])] - -# helper functions to encrypt and encode a string with AES and base64 -encode_aes = lambda c, s: base64.b64encode(c.encrypt(pad(s))) -decode_aes = lambda c, s: unpad(c.decrypt(base64.b64decode(s))) - -db_ready = False - - -def db_init(db): - """check to see that our db has the the encryption table.""" - global db_ready - if not db_ready: - db.execute("create table if not exists encryption(encrypted, iv, " - "primary key(encrypted))") - db.commit() - db_ready = True - - -def get_salt(bot): - """generate an encryption salt if none exists, then returns the salt""" - if not bot.config.get("random_salt", False): - bot.config["random_salt"] = hashlib.md5(os.urandom(16)).hexdigest() - json.dump(bot.config, open('config', 'w'), sort_keys=True, indent=2) - return bot.config["random_salt"] - - -@hook.command -def encrypt(inp, bot=None, db=None, notice=None): - """encrypt -- Encrypts with . 
( can only be decrypted using this bot)""" - db_init(db) - - split = inp.split(" ") - - # if there is only one argument, return the help message - if len(split) == 1: - notice(encrypt.__doc__) - return - - # generate the key from the password and salt - password = split[0] - salt = get_salt(bot) - key = PBKDF2(password, salt) - - # generate the IV and encode it to store in the database - iv = Random.new().read(AES.block_size) - iv_encoded = base64.b64encode(iv) - - # create the AES cipher and encrypt/encode the text with it - text = " ".join(split[1:]) - cipher = AES.new(key, AES.MODE_CBC, iv) - encoded = encode_aes(cipher, text) - - # store the encoded text and IV in the DB for decoding later - db.execute("insert or replace into encryption(encrypted, iv)" - "values(?,?)", (encoded, iv_encoded)) - db.commit() - - return encoded - - -@hook.command -def decrypt(inp, bot=None, db=None, notice=None): - """decrypt -- Decrypts with . (can only decrypt strings encrypted on this bot)""" - if not db_ready: - db_init(db) - - split = inp.split(" ") - - # if there is only one argument, return the help message - if len(split) == 1: - notice(decrypt.__doc__) - return - - # generate the key from the password and salt - password = split[0] - salt = get_salt(bot) - key = PBKDF2(password, salt) - - text = " ".join(split[1:]) - - # get the encoded IV from the database and decode it - iv_encoded = db.execute("select iv from encryption where" - " encrypted=?", (text,)).fetchone()[0] - iv = base64.b64decode(iv_encoded) - - # create AES cipher, decode text, decrypt text, and unpad it - cipher = AES.new(key, AES.MODE_CBC, iv) - return decode_aes(cipher, text) diff --git a/disabled_stuff/fact.py b/disabled_stuff/fact.py deleted file mode 100644 index 1d48ae7..0000000 --- a/disabled_stuff/fact.py +++ /dev/null @@ -1,37 +0,0 @@ -from util import hook, http, web - - -@hook.command(autohelp=False) -def fact(inp): - """fact -- Gets a random fact from OMGFACTS.""" - - attempts = 0 - - # all of 
this is because omgfacts is fail - while True: - try: - soup = http.get_soup('http://www.omg-facts.com/random') - except: - if attempts > 2: - return "Could not find a fact!" - else: - attempts += 1 - continue - - response = soup.find('a', {'class': 'surprise'}) - link = response['href'] - fact_data = ''.join(response.find(text=True)) - - if fact_data: - fact_data = fact_data.strip() - break - else: - if attempts > 2: - return "Could not find a fact!" - else: - attempts += 1 - continue - - url = web.try_isgd(link) - - return "{} - {}".format(fact_data, url) diff --git a/disabled_stuff/factoids.py b/disabled_stuff/factoids.py deleted file mode 100644 index 403e6f5..0000000 --- a/disabled_stuff/factoids.py +++ /dev/null @@ -1,162 +0,0 @@ -# Written by Scaevolus 2010 -import string -import re - -from util import hook, http, text, pyexec - - -re_lineends = re.compile(r'[\r\n]*') - -db_ready = False - -# some simple "shortcodes" for formatting purposes -shortcodes = { - '[b]': '\x02', - '[/b]': '\x02', - '[u]': '\x1F', - '[/u]': '\x1F', - '[i]': '\x16', - '[/i]': '\x16'} - - -def db_init(db): - global db_ready - if not db_ready: - db.execute("create table if not exists mem(word, data, nick," - " primary key(word))") - db.commit() - db_ready = True - - -def get_memory(db, word): - row = db.execute("select data from mem where word=lower(?)", - [word]).fetchone() - if row: - return row[0] - else: - return None - - -@hook.command("r", permissions=["addfactoid"]) -@hook.command(permissions=["addfactoid"]) -def remember(inp, nick='', db=None, notice=None): - """remember [+] -- Remembers with . 
Add + - to to append.""" - db_init(db) - - append = False - - try: - word, data = inp.split(None, 1) - except ValueError: - return remember.__doc__ - - old_data = get_memory(db, word) - - if data.startswith('+') and old_data: - append = True - # remove + symbol - new_data = data[1:] - # append new_data to the old_data - if len(new_data) > 1 and new_data[1] in (string.punctuation + ' '): - data = old_data + new_data - else: - data = old_data + ' ' + new_data - - db.execute("replace into mem(word, data, nick) values" - " (lower(?),?,?)", (word, data, nick)) - db.commit() - - if old_data: - if append: - notice("Appending \x02{}\x02 to \x02{}\x02".format(new_data, old_data)) - else: - notice('Remembering \x02{}\x02 for \x02{}\x02. Type ?{} to see it.'.format(data, word, word)) - notice('Previous data was \x02{}\x02'.format(old_data)) - else: - notice('Remembering \x02{}\x02 for \x02{}\x02. Type ?{} to see it.'.format(data, word, word)) - - -@hook.command("f", permissions=["delfactoid"]) -@hook.command(permissions=["delfactoid"]) -def forget(inp, db=None, notice=None): - """forget -- Forgets a remembered .""" - - db_init(db) - data = get_memory(db, inp) - - if data: - db.execute("delete from mem where word=lower(?)", - [inp]) - db.commit() - notice('"%s" has been forgotten.' % data.replace('`', "'")) - return - else: - notice("I don't know about that.") - return - - -@hook.command -def info(inp, notice=None, db=None): - """info -- Shows the source of a factoid.""" - - db_init(db) - - # attempt to get the factoid from the database - data = get_memory(db, inp.strip()) - - if data: - notice(data) - else: - notice("Unknown Factoid.") - - -@hook.regex(r'^\? ?(.+)') -def factoid(inp, message=None, db=None, bot=None, action=None, conn=None, input=None): - """? 
-- Shows what data is associated with .""" - try: - prefix_on = bot.config["plugins"]["factoids"].get("prefix", False) - except KeyError: - prefix_on = False - - db_init(db) - - # split up the input - split = inp.group(1).strip().split(" ") - factoid_id = split[0] - - if len(split) >= 1: - arguments = " ".join(split[1:]) - else: - arguments = "" - - data = get_memory(db, factoid_id) - - if data: - # factoid preprocessors - if data.startswith(""): - code = data[4:].strip() - variables = 'input="""{}"""; nick="{}"; chan="{}"; bot_nick="{}";'.format(arguments.replace('"', '\\"'), - input.nick, input.chan, - input.conn.nick) - result = pyexec.eval_py(variables + code) - else: - result = data - - # factoid postprocessors - result = text.multiword_replace(result, shortcodes) - - if result.startswith(""): - result = result[5:].strip() - action(result) - elif result.startswith(""): - url = result[5:].strip() - try: - message(http.get(url)) - except http.HttpError: - message("Could not fetch URL.") - else: - if prefix_on: - message("\x02[{}]:\x02 {}".format(factoid_id, result)) - else: - message(result) diff --git a/disabled_stuff/fishbans.py b/disabled_stuff/fishbans.py deleted file mode 100644 index aa76676..0000000 --- a/disabled_stuff/fishbans.py +++ /dev/null @@ -1,57 +0,0 @@ -from urllib import quote_plus - -from util import hook, http - - -api_url = "http://api.fishbans.com/stats/{}/" - - -@hook.command("bans") -@hook.command -def fishbans(inp): - """fishbans -- Gets information on s minecraft bans from fishbans""" - user = inp.strip() - - try: - request = http.get_json(api_url.format(quote_plus(user))) - except (http.HTTPError, http.URLError) as e: - return "Could not fetch ban data from the Fishbans API: {}".format(e) - - if not request["success"]: - return "Could not fetch ban data for {}.".format(user) - - user_url = "http://fishbans.com/u/{}/".format(user) - ban_count = request["stats"]["totalbans"] - - return "The user \x02{}\x02 has \x02{}\x02 ban(s). 
See detailed info " \ - "at {}".format(user, ban_count, user_url) - - -@hook.command -def bancount(inp): - """bancount -- Gets a count of s minecraft bans from fishbans""" - user = inp.strip() - - try: - request = http.get_json(api_url.format(quote_plus(user))) - except (http.HTTPError, http.URLError) as e: - return "Could not fetch ban data from the Fishbans API: {}".format(e) - - if not request["success"]: - return "Could not fetch ban data for {}.".format(user) - - user_url = "http://fishbans.com/u/{}/".format(user) - services = request["stats"]["service"] - - out = [] - for service, ban_count in services.items(): - if ban_count != 0: - out.append("{}: \x02{}\x02".format(service, ban_count)) - else: - pass - - if not out: - return "The user \x02{}\x02 has no bans.".format(user) - else: - return "Bans for \x02{}\x02: ".format(user) + ", ".join(out) + ". More info " \ - "at {}".format(user_url) diff --git a/disabled_stuff/fmylife.py b/disabled_stuff/fmylife.py deleted file mode 100644 index 1d8c0fa..0000000 --- a/disabled_stuff/fmylife.py +++ /dev/null @@ -1,29 +0,0 @@ -from util import hook, http - -fml_cache = [] - - -def refresh_cache(): - """ gets a page of random FMLs and puts them into a dictionary """ - soup = http.get_soup('http://www.fmylife.com/random/') - - for e in soup.find_all('div', {'class': 'post article'}): - fml_id = int(e['id']) - text = ''.join(e.find('p').find_all(text=True)) - fml_cache.append((fml_id, text)) - -# do an initial refresh of the cache -refresh_cache() - - -@hook.command(autohelp=False) -def fml(inp, reply=None): - """fml -- Gets a random quote from fmyfife.com.""" - - # grab the last item in the fml cache and remove it - fml_id, text = fml_cache.pop() - # reply with the fml we grabbed - reply('(#{}) {}'.format(fml_id, text)) - # refresh fml cache if its getting empty - if len(fml_cache) < 3: - refresh_cache() diff --git a/disabled_stuff/fortune.py b/disabled_stuff/fortune.py deleted file mode 100644 index 5f1c478..0000000 --- 
a/disabled_stuff/fortune.py +++ /dev/null @@ -1,14 +0,0 @@ -import random - -from util import hook - - -with open("plugins/data/fortunes.txt") as f: - fortunes = [line.strip() for line in f.readlines() - if not line.startswith("//")] - - -@hook.command(autohelp=False) -def fortune(inp): - """fortune -- Fortune cookies on demand.""" - return random.choice(fortunes) diff --git a/disabled_stuff/freddy.py b/disabled_stuff/freddy.py deleted file mode 100644 index c77fa5a..0000000 --- a/disabled_stuff/freddy.py +++ /dev/null @@ -1,13 +0,0 @@ -from util import hook, http, web -from subprocess import check_output, CalledProcessError - -@hook.command -def freddycode(inp): - """freddycode - Check if the Freddy Fresh code is correct.""" - - try: - return "Freddy: '%s' ist %s" % (inp, \ - check_output(["/bin/freddycheck", inp])) - except CalledProcessError as err: - return "Freddy: Skript returned %s" % (str(err)) - diff --git a/disabled_stuff/geoip.py b/disabled_stuff/geoip.py deleted file mode 100644 index b7ca61d..0000000 --- a/disabled_stuff/geoip.py +++ /dev/null @@ -1,54 +0,0 @@ -import os.path -import json -import gzip -from StringIO import StringIO - -import pygeoip - -from util import hook, http - - -# load region database -with open("./plugins/data/geoip_regions.json", "rb") as f: - regions = json.loads(f.read()) - -if os.path.isfile(os.path.abspath("./plugins/data/GeoLiteCity.dat")): - # initialise geolocation database - geo = pygeoip.GeoIP(os.path.abspath("./plugins/data/GeoLiteCity.dat")) -else: - download = http.get("http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz") - string_io = StringIO(download) - geoip_file = gzip.GzipFile(fileobj=string_io, mode='rb') - - output = open(os.path.abspath("./plugins/data/GeoLiteCity.dat"), 'wb') - output.write(geoip_file.read()) - output.close() - - geo = pygeoip.GeoIP(os.path.abspath("./plugins/data/GeoLiteCity.dat")) - - -@hook.command -def geoip(inp): - """geoip -- Gets the location of """ - - try: - 
record = geo.record_by_name(inp) - except: - return "Sorry, I can't locate that in my database." - - data = {} - - if "region_name" in record: - # we try catching an exception here because the region DB is missing a few areas - # it's a lazy patch, but it should do the job - try: - data["region"] = ", " + regions[record["country_code"]][record["region_name"]] - except: - data["region"] = "" - else: - data["region"] = "" - - data["cc"] = record["country_code"] or "N/A" - data["country"] = record["country_name"] or "Unknown" - data["city"] = record["city"] or "Unknown" - return u"\x02Country:\x02 {country} ({cc}), \x02City:\x02 {city}{region}".format(**data) diff --git a/disabled_stuff/github.py b/disabled_stuff/github.py deleted file mode 100644 index 18033ef..0000000 --- a/disabled_stuff/github.py +++ /dev/null @@ -1,120 +0,0 @@ -import json -import urllib2 - -from util import hook, http - - -shortcuts = {"cloudbot": "ClouDev/CloudBot"} - - -def truncate(msg): - nmsg = msg.split() - out = None - x = 0 - for i in nmsg: - if x <= 7: - if out: - out = out + " " + nmsg[x] - else: - out = nmsg[x] - x += 1 - if x <= 7: - return out - else: - return out + "..." - - -@hook.command -def ghissues(inp): - """ghissues username/repo [number] - Get specified issue summary, or open issue count """ - args = inp.split(" ") - try: - if args[0] in shortcuts: - repo = shortcuts[args[0]] - else: - repo = args[0] - url = "https://api.github.com/repos/{}/issues".format(repo) - except IndexError: - return "Invalid syntax. .github issues username/repo [number]" - try: - url += "/%s" % args[1] - number = True - except IndexError: - number = False - try: - data = json.loads(http.open(url).read()) - print url - if not number: - try: - data = data[0] - except IndexError: - print data - return "Repo has no open issues" - except ValueError: - return "Invalid data returned. 
Check arguments (.github issues username/repo [number]" - fmt = "Issue: #%s (%s) by %s: %s | %s %s" # (number, state, user.login, title, truncate(body), gitio.gitio(data.url)) - fmt1 = "Issue: #%s (%s) by %s: %s %s" # (number, state, user.login, title, gitio.gitio(data.url)) - number = data["number"] - if data["state"] == "open": - state = u"\x033\x02OPEN\x02\x0f" - else: - state = u"\x034\x02CLOSED\x02\x0f by {}".format(data["closed_by"]["login"]) - user = data["user"]["login"] - title = data["title"] - summary = truncate(data["body"]) - gitiourl = gitio(data["html_url"]) - if "Failed to get URL" in gitiourl: - gitiourl = gitio(data["html_url"] + " " + repo.split("/")[1] + number) - if summary == "": - return fmt1 % (number, state, user, title, gitiourl) - else: - return fmt % (number, state, user, title, summary, gitiourl) - - -@hook.command -def gitio(inp): - """gitio [code] -- Shorten Github URLs with git.io. [code] is - a optional custom short code.""" - split = inp.split(" ") - url = split[0] - - try: - code = split[1] - except: - code = None - - # if the first 8 chars of "url" are not "https://" then append - # "https://" to the url, also convert "http://" to "https://" - if url[:8] != "https://": - if url[:7] != "http://": - url = "https://" + url - else: - url = "https://" + url[7:] - url = 'url=' + str(url) - if code: - url = url + '&code=' + str(code) - req = urllib2.Request(url='http://git.io', data=url) - - # try getting url, catch http error - try: - f = urllib2.urlopen(req) - except urllib2.HTTPError: - return "Failed to get URL!" - urlinfo = str(f.info()) - - # loop over the rows in urlinfo and pick out location and - # status (this is pretty odd code, but urllib2.Request is weird) - for row in urlinfo.split("\n"): - if row.find("Status") != -1: - status = row - if row.find("Location") != -1: - location = row - - print status - if not "201" in status: - return "Failed to get URL!" 
- - # this wont work for some reason, so lets ignore it ^ - - # return location, minus the first 10 chars - return location[10:] diff --git a/disabled_stuff/google.py b/disabled_stuff/google.py deleted file mode 100644 index fe9e288..0000000 --- a/disabled_stuff/google.py +++ /dev/null @@ -1,51 +0,0 @@ -import random - -from util import hook, http, text - - -def api_get(kind, query): - """Use the RESTful Google Search API""" - url = 'http://ajax.googleapis.com/ajax/services/search/%s?' \ - 'v=1.0&safe=moderate' - return http.get_json(url % kind, q=query) - - -@hook.command('image') -@hook.command('gis') -@hook.command -def googleimage(inp): - """gis -- Returns first Google Image result for .""" - - parsed = api_get('images', inp) - if not 200 <= parsed['responseStatus'] < 300: - raise IOError('error searching for images: {}: {}'.format(parsed['responseStatus'], '')) - if not parsed['responseData']['results']: - return 'no images found' - return random.choice(parsed['responseData']['results'][:10])['unescapedUrl'] - - -@hook.command('search') -@hook.command('g') -@hook.command -def google(inp): - """google -- Returns first google search result for .""" - - parsed = api_get('web', inp) - if not 200 <= parsed['responseStatus'] < 300: - raise IOError('error searching for pages: {}: {}'.format(parsed['responseStatus'], '')) - if not parsed['responseData']['results']: - return 'No results found.' - - result = parsed['responseData']['results'][0] - - title = http.unescape(result['titleNoFormatting']) - title = text.truncate_str(title, 60) - content = http.unescape(result['content']) - - if not content: - content = "No description available." 
- else: - content = http.html.fromstring(content).text_content() - content = text.truncate_str(content, 150) - - return u'{} -- \x02{}\x02: "{}"'.format(result['unescapedUrl'], title, content) diff --git a/disabled_stuff/google_translate.py b/disabled_stuff/google_translate.py deleted file mode 100644 index a9d4ea3..0000000 --- a/disabled_stuff/google_translate.py +++ /dev/null @@ -1,168 +0,0 @@ -""" -A Google API key is required and retrieved from the bot config file. -Since December 1, 2011, the Google Translate API is a paid service only. -""" - -import htmlentitydefs -import re - -from util import hook, http - - -max_length = 100 - - -########### from http://effbot.org/zone/re-sub.htm#unescape-html ############# - - -def unescape(text): - def fixup(m): - text = m.group(0) - if text[:2] == "&#": - # character reference - try: - if text[:3] == "&#x": - return unichr(int(text[3:-1], 16)) - else: - return unichr(int(text[2:-1])) - except ValueError: - pass - else: - # named entity - try: - text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]) - except KeyError: - pass - return text # leave as is - - return re.sub("&#?\w+;", fixup, text) - -############################################################################## - - -def goog_trans(api_key, text, slang, tlang): - url = 'https://www.googleapis.com/language/translate/v2' - - if len(text) > max_length: - return "This command only supports input of less then 100 characters." 
- - if slang: - parsed = http.get_json(url, key=api_key, q=text, source=slang, target=tlang, format="text") - else: - parsed = http.get_json(url, key=api_key, q=text, target=tlang, format="text") - - #if not 200 <= parsed['responseStatus'] < 300: - # raise IOError('error with the translation server: %d: %s' % ( - # parsed['responseStatus'], parsed['responseDetails'])) - if not slang: - return unescape('(%(detectedSourceLanguage)s) %(translatedText)s' % - (parsed['data']['translations'][0])) - return unescape('%(translatedText)s' % parsed['data']['translations'][0]) - - -def match_language(fragment): - fragment = fragment.lower() - for short, _ in lang_pairs: - if fragment in short.lower().split(): - return short.split()[0] - - for short, full in lang_pairs: - if fragment in full.lower(): - return short.split()[0] - - return None - - -@hook.command -def translate(inp, bot=None): - """translate [source language [target language]] -- translates - from source language (default autodetect) to target - language (default English) using Google Translate""" - - api_key = bot.config.get("api_keys", {}).get("googletranslate", None) - if not api_key: - return "This command requires a paid API key." 
- - args = inp.split(u' ', 2) - - try: - if len(args) >= 2: - sl = match_language(args[0]) - if not sl: - return goog_trans(api_key, inp, '', 'en') - if len(args) == 2: - return goog_trans(api_key, args[1], sl, 'en') - if len(args) >= 3: - tl = match_language(args[1]) - if not tl: - if sl == 'en': - return 'unable to determine desired target language' - return goog_trans(api_key, args[1] + ' ' + args[2], sl, 'en') - return goog_trans(api_key, args[2], sl, tl) - return goog_trans(api_key, inp, '', 'en') - except IOError, e: - return e - - -lang_pairs = [ - ("no", "Norwegian"), - ("it", "Italian"), - ("ht", "Haitian Creole"), - ("af", "Afrikaans"), - ("sq", "Albanian"), - ("ar", "Arabic"), - ("hy", "Armenian"), - ("az", "Azerbaijani"), - ("eu", "Basque"), - ("be", "Belarusian"), - ("bg", "Bulgarian"), - ("ca", "Catalan"), - ("zh-CN zh", "Chinese"), - ("hr", "Croatian"), - ("cs", "Czech"), - ("da", "Danish"), - ("nl", "Dutch"), - ("en", "English"), - ("et", "Estonian"), - ("tl", "Filipino"), - ("fi", "Finnish"), - ("fr", "French"), - ("gl", "Galician"), - ("ka", "Georgian"), - ("de", "German"), - ("el", "Greek"), - ("ht", "Haitian Creole"), - ("iw", "Hebrew"), - ("hi", "Hindi"), - ("hu", "Hungarian"), - ("is", "Icelandic"), - ("id", "Indonesian"), - ("ga", "Irish"), - ("it", "Italian"), - ("ja jp jpn", "Japanese"), - ("ko", "Korean"), - ("lv", "Latvian"), - ("lt", "Lithuanian"), - ("mk", "Macedonian"), - ("ms", "Malay"), - ("mt", "Maltese"), - ("no", "Norwegian"), - ("fa", "Persian"), - ("pl", "Polish"), - ("pt", "Portuguese"), - ("ro", "Romanian"), - ("ru", "Russian"), - ("sr", "Serbian"), - ("sk", "Slovak"), - ("sl", "Slovenian"), - ("es", "Spanish"), - ("sw", "Swahili"), - ("sv", "Swedish"), - ("th", "Thai"), - ("tr", "Turkish"), - ("uk", "Ukrainian"), - ("ur", "Urdu"), - ("vi", "Vietnamese"), - ("cy", "Welsh"), - ("yi", "Yiddish") -] diff --git a/disabled_stuff/googleurlparse.py b/disabled_stuff/googleurlparse.py deleted file mode 100644 index cbea897..0000000 --- 
a/disabled_stuff/googleurlparse.py +++ /dev/null @@ -1,22 +0,0 @@ -from util import hook -from urllib import unquote - -@hook.command(autohelp=False) -def googleurl(inp, db=None, nick=None): - """googleurl [nickname] - Converts Google urls (google.com/url) to normal urls - where possible, in the specified nickname's last message. If nickname isn't provided, - action will be performed on user's last message""" - if not inp: - inp = nick - last_message = db.execute("select name, quote from seen_user where name" - " like ? and chan = ?", (inp.lower(), input.chan.lower())).fetchone() - if last_message: - msg = last_message[1] - out = ", ".join([(unquote(a[4:]) if a[:4] == "url=" else "") for a in msg.split("&")])\ - .replace(", ,", "").strip() - return out if out else "No matches in your last message." - else: - if inp == nick: - return "You haven't said anything in this channel yet!" - else: - return "That user hasn't said anything in this channel yet!" diff --git a/disabled_stuff/history.py b/disabled_stuff/history.py deleted file mode 100644 index c703bcf..0000000 --- a/disabled_stuff/history.py +++ /dev/null @@ -1,89 +0,0 @@ -from collections import deque -from util import hook, timesince -import time -import re - -db_ready = [] - - -def db_init(db, conn_name): - """check to see that our db has the the seen table (connection name is for caching the result per connection)""" - global db_ready - if db_ready.count(conn_name) < 1: - db.execute("create table if not exists seen_user(name, time, quote, chan, host, " - "primary key(name, chan))") - db.commit() - db_ready.append(conn_name) - - -def track_seen(input, message_time, db, conn): - """ Tracks messages for the .seen command """ - db_init(db, conn) - # keep private messages private - if input.chan[:1] == "#" and not re.findall('^s/.*/.*/$', input.msg.lower()): - db.execute("insert or replace into seen_user(name, time, quote, chan, host)" - "values(?,?,?,?,?)", (input.nick.lower(), message_time, input.msg, - 
input.chan, input.mask)) - db.commit() - - -def track_history(input, message_time, conn): - try: - history = conn.history[input.chan] - except KeyError: - conn.history[input.chan] = deque(maxlen=100) - history = conn.history[input.chan] - - data = (input.nick, message_time, input.msg) - history.append(data) - - -@hook.singlethread -@hook.event('PRIVMSG', ignorebots=False) -def chat_tracker(paraml, input=None, db=None, conn=None): - message_time = time.time() - track_seen(input, message_time, db, conn) - track_history(input, message_time, conn) - - -@hook.command(autohelp=False) -def resethistory(inp, input=None, conn=None): - """resethistory - Resets chat history for the current channel""" - try: - conn.history[input.chan].clear() - return "Reset chat history for current channel." - except KeyError: - # wat - return "There is no history for this channel." - -"""seen.py: written by sklnd in about two beers July 2009""" - -@hook.command -def seen(inp, nick='', chan='', db=None, input=None, conn=None): - """seen -- Tell when a nickname was last in active in one of this bot's channels.""" - - if input.conn.nick.lower() == inp.lower(): - return "You need to get your eyes checked." - - if inp.lower() == nick.lower(): - return "Have you looked in a mirror lately?" - - if not re.match("^[A-Za-z0-9_|.\-\]\[]*$", inp.lower()): - return "I can't look up that name, its impossible to use!" - - db_init(db, conn.name) - - last_seen = db.execute("select name, time, quote from seen_user where name" - " like ? 
and chan = ?", (inp, chan)).fetchone() - - if last_seen: - reltime = timesince.timesince(last_seen[1]) - if last_seen[0] != inp.lower(): # for glob matching - inp = last_seen[0] - if last_seen[2][0:1] == "\x01": - return '{} was last seen {} ago: * {} {}'.format(inp, reltime, inp, - last_seen[2][8:-1]) - else: - return '{} was last seen {} ago saying: {}'.format(inp, reltime, last_seen[2]) - else: - return "I've never seen {} talking in this channel.".format(inp) diff --git a/disabled_stuff/horoscope.py b/disabled_stuff/horoscope.py deleted file mode 100644 index e4404cf..0000000 --- a/disabled_stuff/horoscope.py +++ /dev/null @@ -1,56 +0,0 @@ -# Plugin by Infinity - - -from util import hook, http, text - -db_ready = False - - -def db_init(db): - """check to see that our db has the horoscope table and return a connection.""" - global db_ready - if not db_ready: - db.execute("create table if not exists horoscope(nick primary key, sign)") - db.commit() - db_ready = True - - -@hook.command(autohelp=False) -def horoscope(inp, db=None, notice=None, nick=None): - """horoscope -- Get your horoscope.""" - db_init(db) - - # check if the user asked us not to save his details - dontsave = inp.endswith(" dontsave") - if dontsave: - sign = inp[:-9].strip().lower() - else: - sign = inp - - db.execute("create table if not exists horoscope(nick primary key, sign)") - - if not sign: - sign = db.execute("select sign from horoscope where nick=lower(?)", - (nick,)).fetchone() - if not sign: - notice("horoscope -- Get your horoscope") - return - sign = sign[0] - - url = "http://my.horoscope.com/astrology/free-daily-horoscope-{}.html".format(sign) - soup = http.get_soup(url) - - title = soup.find_all('h1', {'class': 'h1b'})[1] - horoscope_text = soup.find('div', {'class': 'fontdef1'}) - result = u"\x02%s\x02 %s" % (title, horoscope_text) - result = text.strip_html(result) - #result = unicode(result, "utf8").replace('flight ','') - - if not title: - return "Could not get the horoscope 
for {}.".format(inp) - - if inp and not dontsave: - db.execute("insert or replace into horoscope(nick, sign) values (?,?)", - (nick.lower(), sign)) - db.commit() - - return result diff --git a/disabled_stuff/hulu.py b/disabled_stuff/hulu.py deleted file mode 100644 index 74e6b00..0000000 --- a/disabled_stuff/hulu.py +++ /dev/null @@ -1,30 +0,0 @@ -from urllib import urlencode -import re - -from util import hook, http, timeformat - - -hulu_re = (r'(.*://)(www.hulu.com|hulu.com)(.*)', re.I) - - -@hook.regex(*hulu_re) -def hulu_url(match): - data = http.get_json("http://www.hulu.com/api/oembed.json?url=http://www.hulu.com" + match.group(3)) - showname = data['title'].split("(")[-1].split(")")[0] - title = data['title'].split(" (")[0] - return "{}: {} - {}".format(showname, title, timeformat.format_time(int(data['duration']))) - - -@hook.command('hulu') -def hulu_search(inp): - """hulu - Search Hulu""" - result = http.get_soup( - "http://m.hulu.com/search?dp_identifier=hulu&{}&items_per_page=1&page=1".format(urlencode({'query': inp}))) - data = result.find('results').find('videos').find('video') - showname = data.find('show').find('name').text - title = data.find('title').text - duration = timeformat.format_time(int(float(data.find('duration').text))) - description = data.find('description').text - rating = data.find('content-rating').text - return "{}: {} - {} - {} ({}) {}".format(showname, title, description, duration, rating, - "http://www.hulu.com/watch/" + str(data.find('id').text)) diff --git a/disabled_stuff/imdb.py b/disabled_stuff/imdb.py deleted file mode 100644 index 0272248..0000000 --- a/disabled_stuff/imdb.py +++ /dev/null @@ -1,59 +0,0 @@ -# IMDb lookup plugin by Ghetto Wizard (2011) and blha303 (2013) - -import re - -from util import hook, http, text - - -id_re = re.compile("tt\d+") -imdb_re = (r'(.*:)//(imdb.com|www.imdb.com)(:[0-9]+)?(.*)', re.I) - - -@hook.command -def imdb(inp): - """imdb -- Gets information about from IMDb.""" - - strip = 
inp.strip() - - if id_re.match(strip): - content = http.get_json("http://www.omdbapi.com/", i=strip) - else: - content = http.get_json("http://www.omdbapi.com/", t=strip) - - if content.get('Error', None) == 'Movie not found!': - return 'Movie not found!' - elif content['Response'] == 'True': - content['URL'] = 'http://www.imdb.com/title/{}'.format(content['imdbID']) - - out = '\x02%(Title)s\x02 (%(Year)s) (%(Genre)s): %(Plot)s' - if content['Runtime'] != 'N/A': - out += ' \x02%(Runtime)s\x02.' - if content['imdbRating'] != 'N/A' and content['imdbVotes'] != 'N/A': - out += ' \x02%(imdbRating)s/10\x02 with \x02%(imdbVotes)s\x02' \ - ' votes.' - out += ' %(URL)s' - return out % content - else: - return 'Unknown error.' - - -@hook.regex(*imdb_re) -def imdb_url(match): - imdb_id = match.group(4).split('/')[-1] - if imdb_id == "": - imdb_id = match.group(4).split('/')[-2] - content = http.get_json("http://www.omdbapi.com/", i=imdb_id) - if content.get('Error', None) == 'Movie not found!': - return 'Movie not found!' - elif content['Response'] == 'True': - content['URL'] = 'http://www.imdb.com/title/%(imdbID)s' % content - content['Plot'] = text.truncate_str(content['Plot'], 50) - out = '\x02%(Title)s\x02 (%(Year)s) (%(Genre)s): %(Plot)s' - if content['Runtime'] != 'N/A': - out += ' \x02%(Runtime)s\x02.' - if content['imdbRating'] != 'N/A' and content['imdbVotes'] != 'N/A': - out += ' \x02%(imdbRating)s/10\x02 with \x02%(imdbVotes)s\x02' \ - ' votes.' - return out % content - else: - return 'Unknown error.' 
diff --git a/disabled_stuff/imgur.py b/disabled_stuff/imgur.py deleted file mode 100644 index 320bc6e..0000000 --- a/disabled_stuff/imgur.py +++ /dev/null @@ -1,82 +0,0 @@ -import re -import random - -from util import hook, http, web - - -base_url = "http://reddit.com/r/{}/.json" -imgur_re = re.compile(r'http://(?:i\.)?imgur\.com/(a/)?(\w+\b(?!/))\.?\w?') - -album_api = "https://api.imgur.com/3/album/{}/images.json" - - -def is_valid(data): - if data["domain"] in ["i.imgur.com", "imgur.com"]: - return True - else: - return False - - -@hook.command(autohelp=False) -def imgur(inp): - """imgur [subreddit] -- Gets the first page of imgur images from [subreddit] and returns a link to them. - If [subreddit] is undefined, return any imgur images""" - if inp: - # see if the input ends with "nsfw" - show_nsfw = inp.endswith(" nsfw") - - # remove "nsfw" from the input string after checking for it - if show_nsfw: - inp = inp[:-5].strip().lower() - - url = base_url.format(inp.strip()) - else: - url = "http://www.reddit.com/domain/imgur.com/.json" - show_nsfw = False - - try: - data = http.get_json(url, user_agent=http.ua_chrome) - except Exception as e: - return "Error: " + str(e) - - data = data["data"]["children"] - random.shuffle(data) - - # filter list to only have imgur links - filtered_posts = [i["data"] for i in data if is_valid(i["data"])] - - if not filtered_posts: - return "No images found." 
- - items = [] - - headers = { - "Authorization": "Client-ID b5d127e6941b07a" - } - - # loop over the list of posts - for post in filtered_posts: - if post["over_18"] and not show_nsfw: - continue - - match = imgur_re.search(post["url"]) - if match.group(1) == 'a/': - # post is an album - url = album_api.format(match.group(2)) - images = http.get_json(url, headers=headers)["data"] - - # loop over the images in the album and add to the list - for image in images: - items.append(image["id"]) - - elif match.group(2) is not None: - # post is an image - items.append(match.group(2)) - - if not items: - return "No images found (use .imgur nsfw to show explicit content)" - - if show_nsfw: - return "{} \x02NSFW\x02".format(web.isgd("http://imgur.com/" + ','.join(items))) - else: - return web.isgd("http://imgur.com/" + ','.join(items)) diff --git a/disabled_stuff/isup.py b/disabled_stuff/isup.py deleted file mode 100644 index 5fc95d6..0000000 --- a/disabled_stuff/isup.py +++ /dev/null @@ -1,28 +0,0 @@ -import urlparse - -from util import hook, http, urlnorm - - -@hook.command -def isup(inp): - """isup -- uses isup.me to see if a site is up or not""" - - # slightly overcomplicated, esoteric URL parsing - scheme, auth, path, query, fragment = urlparse.urlsplit(inp.strip()) - - domain = auth.encode('utf-8') or path.encode('utf-8') - url = urlnorm.normalize(domain, assume_scheme="http") - - try: - soup = http.get_soup('http://isup.me/' + domain) - except http.HTTPError, http.URLError: - return "Could not get status." - - content = soup.find('div').text.strip() - - if "not just you" in content: - return "It's not just you. {} looks \x02\x034down\x02\x0f from here!".format(url) - elif "is up" in content: - return "It's just you. {} is \x02\x033up\x02\x0f.".format(url) - else: - return "Huh? That doesn't look like a site on the interweb." 
diff --git a/disabled_stuff/kernel.py b/disabled_stuff/kernel.py deleted file mode 100644 index 90cbed5..0000000 --- a/disabled_stuff/kernel.py +++ /dev/null @@ -1,15 +0,0 @@ -import re - -from util import hook, http - - -@hook.command(autohelp=False) -def kernel(inp, reply=None): - contents = http.get("https://www.kernel.org/finger_banner") - contents = re.sub(r'The latest(\s*)', '', contents) - contents = re.sub(r'version of the Linux kernel is:(\s*)', '- ', contents) - lines = contents.split("\n") - - message = "Linux kernel versions: " - message += ", ".join(line for line in lines[:-1]) - reply(message) diff --git a/disabled_stuff/kill.py b/disabled_stuff/kill.py deleted file mode 100644 index d25228e..0000000 --- a/disabled_stuff/kill.py +++ /dev/null @@ -1,33 +0,0 @@ -import json - -from util import hook, textgen - - -def get_generator(_json, variables): - data = json.loads(_json) - return textgen.TextGenerator(data["templates"], - data["parts"], variables=variables) - - -@hook.command -def kill(inp, action=None, nick=None, conn=None, notice=None): - """kill -- Makes the bot kill .""" - target = inp.strip() - - if " " in target: - notice("Invalid username!") - return - - # if the user is trying to make the bot kill itself, kill them - if target.lower() == conn.nick.lower() or target.lower() == "itself": - target = nick - - variables = { - "user": target - } - - with open("plugins/data/kills.json") as f: - generator = get_generator(f.read(), variables) - - # act out the message - action(generator.generate_string()) diff --git a/disabled_stuff/lastfm.py b/disabled_stuff/lastfm.py deleted file mode 100644 index b928b1e..0000000 --- a/disabled_stuff/lastfm.py +++ /dev/null @@ -1,83 +0,0 @@ -from datetime import datetime - -from util import hook, http, timesince - - -api_url = "http://ws.audioscrobbler.com/2.0/?format=json" - - -@hook.command('l', autohelp=False) -@hook.command(autohelp=False) -def lastfm(inp, nick='', db=None, bot=None, notice=None): - """lastfm 
[user] [dontsave] -- Displays the now playing (or last played) - track of LastFM user [user].""" - api_key = bot.config.get("api_keys", {}).get("lastfm") - if not api_key: - return "error: no api key set" - - # check if the user asked us not to save his details - dontsave = inp.endswith(" dontsave") - if dontsave: - user = inp[:-9].strip().lower() - else: - user = inp - - db.execute("create table if not exists lastfm(nick primary key, acc)") - - if not user: - user = db.execute("select acc from lastfm where nick=lower(?)", - (nick,)).fetchone() - if not user: - notice(lastfm.__doc__) - return - user = user[0] - - response = http.get_json(api_url, method="user.getrecenttracks", - api_key=api_key, user=user, limit=1) - - if 'error' in response: - return u"Error: {}.".format(response["message"]) - - if not "track" in response["recenttracks"] or len(response["recenttracks"]["track"]) == 0: - return u'No recent tracks for user "{}" found.'.format(user) - - tracks = response["recenttracks"]["track"] - - if type(tracks) == list: - # if the user is listening to something, the tracks entry is a list - # the first item is the current track - track = tracks[0] - status = 'is listening to' - ending = '.' 
- elif type(tracks) == dict: - # otherwise, they aren't listening to anything right now, and - # the tracks entry is a dict representing the most recent track - track = tracks - status = 'last listened to' - # lets see how long ago they listened to it - time_listened = datetime.fromtimestamp(int(track["date"]["uts"])) - time_since = timesince.timesince(time_listened) - ending = ' ({} ago)'.format(time_since) - - else: - return "error: could not parse track listing" - - title = track["name"] - album = track["album"]["#text"] - artist = track["artist"]["#text"] - - out = u'{} {} "{}"'.format(user, status, title) - if artist: - out += u" by \x02{}\x0f".format(artist) - if album: - out += u" from the album \x02{}\x0f".format(album) - - # append ending based on what type it was - out += ending - - if inp and not dontsave: - db.execute("insert or replace into lastfm(nick, acc) values (?,?)", - (nick.lower(), user)) - db.commit() - - return out diff --git a/disabled_stuff/lmgtfy.py b/disabled_stuff/lmgtfy.py deleted file mode 100644 index 768075f..0000000 --- a/disabled_stuff/lmgtfy.py +++ /dev/null @@ -1,14 +0,0 @@ -from util import hook, web, http - - -@hook.command('gfy') -@hook.command -def lmgtfy(inp): - """lmgtfy [phrase] - Posts a google link for the specified phrase""" - - link = u"http://lmgtfy.com/?q={}".format(http.quote_plus(inp)) - - try: - return web.isgd(link) - except (web.ShortenError, http.HTTPError): - return link diff --git a/disabled_stuff/log.py b/disabled_stuff/log.py deleted file mode 100644 index d72dc1a..0000000 --- a/disabled_stuff/log.py +++ /dev/null @@ -1,113 +0,0 @@ -""" -log.py: written by Scaevolus 2009 -""" - -import os -import codecs -import time -import re - -from util import hook - - -log_fds = {} # '%(net)s %(chan)s': (filename, fd) - -timestamp_format = '%H:%M:%S' - -formats = { - 'PRIVMSG': '<%(nick)s> %(msg)s', - 'PART': '-!- %(nick)s [%(user)s@%(host)s] has left %(chan)s', - 'JOIN': '-!- %(nick)s [%(user)s@%(host)s] has joined 
%(param0)s', - 'MODE': '-!- mode/%(chan)s [%(param_tail)s] by %(nick)s', - 'KICK': '-!- %(param1)s was kicked from %(chan)s by %(nick)s [%(msg)s]', - 'TOPIC': '-!- %(nick)s changed the topic of %(chan)s to: %(msg)s', - 'QUIT': '-!- %(nick)s has quit [%(msg)s]', - 'PING': '', - 'NOTICE': '-%(nick)s- %(msg)s' -} - -ctcp_formats = { - 'ACTION': '* %(nick)s %(ctcpmsg)s', - 'VERSION': '%(nick)s has requested CTCP %(ctcpcmd)s from %(chan)s: %(ctcpmsg)s', - 'PING': '%(nick)s has requested CTCP %(ctcpcmd)s from %(chan)s: %(ctcpmsg)s', - 'TIME': '%(nick)s has requested CTCP %(ctcpcmd)s from %(chan)s: %(ctcpmsg)s', - 'FINGER': '%(nick)s has requested CTCP %(ctcpcmd)s from %(chan)s: %(ctcpmsg)s' -} - -irc_color_re = re.compile(r'(\x03(\d+,\d+|\d)|[\x0f\x02\x16\x1f])') - - -def get_log_filename(dir, server, chan): - return os.path.join(dir, 'log', gmtime('%Y'), server, chan, - (gmtime('%%s.%m-%d.log') % chan).lower()) - - -def gmtime(format): - return time.strftime(format, time.gmtime()) - - -def beautify(input): - format = formats.get(input.command, '%(raw)s') - args = dict(input) - - leng = len(args['paraml']) - for n, p in enumerate(args['paraml']): - args['param' + str(n)] = p - args['param_' + str(abs(n - leng))] = p - - args['param_tail'] = ' '.join(args['paraml'][1:]) - args['msg'] = irc_color_re.sub('', args['msg']) - - if input.command == 'PRIVMSG' and input.msg.count('\x01') >= 2: - ctcp = input.msg.split('\x01', 2)[1].split(' ', 1) - if len(ctcp) == 1: - ctcp += [''] - args['ctcpcmd'], args['ctcpmsg'] = ctcp - format = ctcp_formats.get(args['ctcpcmd'], - '%(nick)s [%(user)s@%(host)s] requested unknown CTCP ' - '%(ctcpcmd)s from %(chan)s: %(ctcpmsg)s') - - return format % args - - -def get_log_fd(dir, server, chan): - fn = get_log_filename(dir, server, chan) - cache_key = '%s %s' % (server, chan) - filename, fd = log_fds.get(cache_key, ('', 0)) - - if fn != filename: # we need to open a file for writing - if fd != 0: # is a valid fd - fd.flush() - fd.close() - dir = 
os.path.split(fn)[0] - if not os.path.exists(dir): - os.makedirs(dir) - fd = codecs.open(fn, 'a', 'utf-8') - log_fds[cache_key] = (fn, fd) - - return fd - - -@hook.singlethread -@hook.event('*') -def log(paraml, input=None, bot=None): - timestamp = gmtime(timestamp_format) - - fd = get_log_fd(bot.persist_dir, input.server, 'raw') - fd.write(timestamp + ' ' + input.raw + '\n') - - if input.command == 'QUIT': # these are temporary fixes until proper - input.chan = 'quit' # presence tracking is implemented - if input.command == 'NICK': - input.chan = 'nick' - - beau = beautify(input) - - if beau == '': # don't log this - return - - if input.chan: - fd = get_log_fd(bot.persist_dir, input.server, input.chan) - fd.write(timestamp + ' ' + beau + '\n') - - print timestamp, input.chan, beau.encode('utf8', 'ignore') diff --git a/disabled_stuff/lyrics.py b/disabled_stuff/lyrics.py deleted file mode 100644 index eabb84a..0000000 --- a/disabled_stuff/lyrics.py +++ /dev/null @@ -1,43 +0,0 @@ -from util import hook, http, web - -url = "http://search.azlyrics.com/search.php?q=" - - -@hook.command -def lyrics(inp): - """lyrics - Search AZLyrics.com for song lyrics""" - if "pastelyrics" in inp: - dopaste = True - inp = inp.replace("pastelyrics", "").strip() - else: - dopaste = False - soup = http.get_soup(url + inp.replace(" ", "+")) - if "Try to compose less restrictive search query" in soup.find('div', {'id': 'inn'}).text: - return "No results. Check spelling." 
- div = None - for i in soup.findAll('div', {'class': 'sen'}): - if "/lyrics/" in i.find('a')['href']: - div = i - break - if div: - title = div.find('a').text - link = div.find('a')['href'] - if dopaste: - newsoup = http.get_soup(link) - try: - lyrics = newsoup.find('div', {'style': 'margin-left:10px;margin-right:10px;'}).text.strip() - pasteurl = " " + web.haste(lyrics) - except Exception as e: - pasteurl = " (\x02Unable to paste lyrics\x02 [{}])".format(str(e)) - else: - pasteurl = "" - artist = div.find('b').text.title() - lyricsum = div.find('div').text - if "\r\n" in lyricsum.strip(): - lyricsum = " / ".join(lyricsum.strip().split("\r\n")[0:4]) # truncate, format - else: - lyricsum = " / ".join(lyricsum.strip().split("\n")[0:4]) # truncate, format - return "\x02{}\x02 by \x02{}\x02 {}{} - {}".format(title, artist, web.try_isgd(link), pasteurl, - lyricsum[:-3]) - else: - return "No song results. " + url + inp.replace(" ", "+") diff --git a/disabled_stuff/metacritic.py b/disabled_stuff/metacritic.py deleted file mode 100644 index 92d0933..0000000 --- a/disabled_stuff/metacritic.py +++ /dev/null @@ -1,104 +0,0 @@ -# metacritic.com scraper - -import re -from urllib2 import HTTPError - -from util import hook, http - - -@hook.command('mc') -@hook.command -def metacritic(inp): - """mc [all|movie|tv|album|x360|ps3|pc|gba|ds|3ds|wii|vita|wiiu|xone|ps4] - Gets rating for <title> from metacritic on the specified medium.""" - - args = inp.strip() - - game_platforms = ('x360', 'ps3', 'pc', 'gba', 'ds', '3ds', 'wii', - 'vita', 'wiiu', 'xone', 'ps4') - - all_platforms = game_platforms + ('all', 'movie', 'tv', 'album') - - try: - plat, title = args.split(' ', 1) - if plat not in all_platforms: - # raise the ValueError so that the except block catches it - # in this case, or in the case of the .split above raising the - # ValueError, we want the same thing to happen - raise ValueError - except ValueError: - plat = 'all' - title = args - - cat = 'game' if plat in 
game_platforms else plat - - title_safe = http.quote_plus(title) - - url = 'http://www.metacritic.com/search/{}/{}/results'.format(cat, title_safe) - - try: - doc = http.get_html(url) - except HTTPError: - return 'error fetching results' - - # get the proper result element we want to pull data from - result = None - - if not doc.find_class('query_results'): - return 'No results found.' - - # if they specified an invalid search term, the input box will be empty - if doc.get_element_by_id('search_term').value == '': - return 'Invalid search term.' - - if plat not in game_platforms: - # for [all] results, or non-game platforms, get the first result - result = doc.find_class('result first_result')[0] - - # find the platform, if it exists - result_type = result.find_class('result_type') - if result_type: - - # if the result_type div has a platform div, get that one - platform_div = result_type[0].find_class('platform') - if platform_div: - plat = platform_div[0].text_content().strip() - else: - # otherwise, use the result_type text_content - plat = result_type[0].text_content().strip() - - else: - # for games, we want to pull the first result with the correct - # platform - results = doc.find_class('result') - for res in results: - result_plat = res.find_class('platform')[0].text_content().strip() - if result_plat == plat.upper(): - result = res - break - - if not result: - return 'No results found.' - - # get the name, release date, and score from the result - product_title = result.find_class('product_title')[0] - name = product_title.text_content() - link = 'http://metacritic.com' + product_title.find('a').attrib['href'] - - try: - release = result.find_class('release_date')[0]. 
\ - find_class('data')[0].text_content() - - # strip extra spaces out of the release date - release = re.sub(r'\s{2,}', ' ', release) - except IndexError: - release = None - - try: - score = result.find_class('metascore_w')[0].text_content() - except IndexError: - score = None - - return '[{}] {} - \x02{}/100\x02, {} - {}'.format(plat.upper(), name, score or 'no score', - 'release: \x02%s\x02' % release if release else 'unreleased', - link) diff --git a/disabled_stuff/minecraft_bukget.py b/disabled_stuff/minecraft_bukget.py deleted file mode 100644 index 496f169..0000000 --- a/disabled_stuff/minecraft_bukget.py +++ /dev/null @@ -1,154 +0,0 @@ -import time -import random - -from util import hook, http, web, text - - -## CONSTANTS - -base_url = "http://api.bukget.org/3/" - -search_url = base_url + "search/plugin_name/like/{}" -random_url = base_url + "plugins/bukkit/?start={}&size=1" -details_url = base_url + "plugins/bukkit/{}" - -categories = http.get_json("http://api.bukget.org/3/categories") - -count_total = sum([cat["count"] for cat in categories]) -count_categories = {cat["name"].lower(): int(cat["count"]) for cat in categories} # dict comps! 
class BukgetError(Exception):
    """Error raised for BukGet API failures.

    `code` is an HTTP-style status (404 when a search finds nothing,
    500 for transport errors) and `text` is the user-facing message;
    str() of the exception yields that message.
    """

    def __init__(self, code, text):
        self.code = code
        self.text = text

    def __str__(self):
        return self.text
@hook.command('plugin')
@hook.command
def bukget(inp, reply=None, message=None):
    """bukget <slug/name> - Look up a plugin on dev.bukkit.org"""
    # Resolve the query to a plugin slug, then fetch that plugin's details.
    # Either step may raise BukgetError; we hand the error back as the reply,
    # exactly like the two separate try blocks used to.
    try:
        slug = plugin_search(inp)
        data = plugin_details(slug)
    except BukgetError as e:
        return e

    # Two-line summary: first line via reply(), second via message().
    line_a, line_b = format_output(data)
    reply(line_a)
    message(line_b)
-""" - -import re - -from util import hook - - -pattern = re.compile(r'^(?P<count>\d+)x (?P<name>.+?): (?P<ingredients>.*)$') - -recipelist = [] - - -class Recipe(object): - __slots__ = 'output', 'count', 'ingredients', 'line' - - def __init__(self, output, count, ingredients, line): - self.output = output - self.count = count - self.ingredients = ingredients - self.line = line - - def __str__(self): - return self.line - - -with open("plugins/data/recipes.txt") as f: - for line in f.readlines(): - if line.startswith("//"): - continue - line = line.strip() - match = pattern.match(line) - if not match: - continue - recipelist.append(Recipe(line=line, - output=match.group("name").lower(), - ingredients=match.group("ingredients"), - count=match.group("count"))) - -ids = [] - -with open("plugins/data/itemids.txt") as f: - for line in f.readlines(): - if line.startswith("//"): - continue - parts = line.strip().split() - itemid = parts[0] - name = " ".join(parts[1:]) - ids.append((itemid, name)) - - -@hook.command("mcid") -@hook.command -def mcitem(inp, reply=None): - """mcitem <item/id> -- gets the id from an item or vice versa""" - inp = inp.lower().strip() - - if inp == "": - reply("error: no input.") - return - - results = [] - - for item_id, item_name in ids: - if inp == item_id: - results = ["\x02[{}]\x02 {}".format(item_id, item_name)] - break - elif inp in item_name.lower(): - results.append("\x02[{}]\x02 {}".format(item_id, item_name)) - - if not results: - return "No matches found." - - if len(results) > 12: - reply("There are too many options, please narrow your search. ({})".format(str(len(results)))) - return - - out = ", ".join(results) - - return out - - -@hook.command("mccraft") -@hook.command -def mcrecipe(inp, reply=None): - """mcrecipe <item> -- gets the crafting recipe for an item""" - inp = inp.lower().strip() - - results = [recipe.line for recipe in recipelist - if inp in recipe.output] - - if not results: - return "No matches found." 
def unpack_varint(s):
    """Read a Minecraft-protocol VarInt from socket-like object *s*.

    The value arrives little-endian in 7-bit groups; the high bit of each
    byte signals that another byte follows. Returns the decoded integer.
    """
    value = 0
    shift = 0
    while True:
        byte = ord(s.recv(1))
        value |= (byte & 0x7F) << shift
        if not byte & 0x80:  # continuation bit clear -> this was the last byte
            return value
        shift += 7
s.send(pack_data("\x00\x00" + pack_data(host.encode('utf8')) + pack_port(port) + "\x01")) - s.send(pack_data("\x00")) - - # read response - unpack_varint(s) # Packet length - unpack_varint(s) # Packet ID - l = unpack_varint(s) # String length - - if not l > 1: - raise PingError("Invalid response") - - d = "" - while len(d) < l: - d += s.recv(1024) - - # Close our socket - s.close() - except socket.error: - raise PingError("Socket Error") - - # Load json and return - data = json.loads(d.decode('utf8')) - try: - version = data["version"]["name"] - try: - desc = u" ".join(data["description"]["text"].split()) - except TypeError: - desc = u" ".join(data["description"].split()) - max_players = data["players"]["max"] - online = data["players"]["online"] - except Exception as e: - # TODO: except Exception is bad - traceback.print_exc(e) - raise PingError("Unknown Error: {}".format(e)) - - output = { - "motd": format_colors(desc), - "motd_raw": desc, - "version": version, - "players": online, - "players_max": max_players - } - return output - - -def mcping_legacy(host, port): - """ pings a server using the legacy (1.6 and older) protocol and returns data """ - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - - try: - sock.connect((host, port)) - sock.send('\xfe\x01') - response = sock.recv(1) - except socket.gaierror: - raise PingError("Invalid hostname") - except socket.timeout: - raise PingError("Request timed out") - - if response[0] != '\xff': - raise PingError("Invalid response") - - length = struct.unpack('!h', sock.recv(2))[0] - values = sock.recv(length * 2).decode('utf-16be') - data = values.split(u'\x00') # try to decode data using new format - if len(data) == 1: - # failed to decode data, server is using old format - data = values.split(u'\xa7') - output = { - "motd": format_colors(" ".join(data[0].split())), - "motd_raw": data[0], - "version": None, - "players": data[1], - "players_max": data[2] - } - else: - # decoded data, server is using new format 
def parse_input(inp):
    """Parse mcping input into a (host, port) tuple.

    Accepts "host:port" directly; otherwise consults a Minecraft SRV
    record when PyDNS is available, falling back to the default port.
    Raises ParseError for an unparseable or out-of-range port.
    """
    # Only the first whitespace-separated token matters.
    target = inp.strip().split(" ")[0]

    if ":" in target:
        # Explicit "host:port" form.
        host, port = target.split(":", 1)
        try:
            port = int(port)
        except ValueError:
            # Message intentionally shows the raw (non-numeric) port text.
            raise ParseError("The port '{}' is invalid.".format(port))
        if not 0 <= port <= 65535:
            raise ParseError("The port '{}' is invalid.".format(port))
        return host, port

    if has_dns:
        # No explicit port: look for a _minecraft._tcp SRV record.
        srv_data = check_srv(target)
        if srv_data:
            return str(srv_data[1]), int(srv_data[0])

    # Fall back to the default Minecraft port.
    return target, 25565
@hook.command(autohelp=False)
def mcstatus(inp):
    """mcstatus -- Checks the status of various Mojang (the creators of Minecraft) servers."""
    try:
        request = http.get("http://status.mojang.com/check")
    except (http.URLError, http.HTTPError) as e:
        return "Unable to get Minecraft server status: {}".format(e)

    # The check endpoint returns a JSON *list* of single-key objects,
    # e.g. [{"minecraft.net": "green"}, ...]; rewriting the brackets turns
    # it into one flat object so it parses as a single dict.
    data = json.loads(request.replace("}", "").replace("{", "")
                      .replace("]", "}").replace("[", "{"))

    # Bucket servers by status colour. Use a loop so we don't have to
    # update this if Mojang adds more servers.
    green, yellow, red = [], [], []
    for server, status in data.items():
        if status == "green":
            green.append(server)
        elif status == "yellow":
            yellow.append(server)
        else:
            red.append(server)

    # BUG FIX: the previous version initialised `out` as a list and only
    # rebound it to a string when `green` was non-empty, so an all-degraded
    # or all-offline response crashed with a TypeError. Building a parts
    # list and joining is safe for every combination.
    parts = []
    if green:
        parts.append("\x033\x02Online\x02\x0f: " + ", ".join(green))
    if yellow:
        parts.append("\x02Issues\x02: " + ", ".join(yellow))
    if red:
        parts.append("\x034\x02Offline\x02\x0f: " + ", ".join(red))
    out = " ".join(parts)

    # Abbreviate the hostnames to keep the IRC line compact.
    return "\x0f" + out.replace(".mojang.com", ".mj") \
        .replace(".minecraft.net", ".mc")
status """ - try: - name_encoded = http.quote_plus(name) - response = http.get(NAME_URL.format(name_encoded)) - except (http.URLError, http.HTTPError) as e: - raise McuError("Could not get name status: {}".format(e)) - - if "OK" in response: - return "free" - elif "TAKEN" in response: - return "taken" - elif "invalid characters" in response: - return "invalid" - - -def get_profile(name): - profile = {} - - # form the profile request - request = { - "name": name, - "agent": "minecraft" - } - - # submit the profile request - try: - headers = {"Content-Type": "application/json"} - r = http.get_json( - 'https://api.mojang.com/profiles/page/1', - post_data=json.dumps(request), - headers=headers - ) - except (http.URLError, http.HTTPError) as e: - raise McuError("Could not get profile status: {}".format(e)) - - user = r["profiles"][0] - profile["name"] = user["name"] - profile["id"] = user["id"] - - profile["legacy"] = user.get("legacy", False) - - try: - response = http.get(PAID_URL, user=name) - except (http.URLError, http.HTTPError) as e: - raise McuError("Could not get payment status: {}".format(e)) - - if "true" in response: - profile["paid"] = True - else: - profile["paid"] = False - - return profile - - -@hook.command("haspaid") -@hook.command("mcpaid") -@hook.command -def mcuser(inp): - """mcpaid <username> -- Gets information about the Minecraft user <account>.""" - user = inp.strip() - - try: - # get status of name (does it exist?) - name_status = get_status(user) - except McuError as e: - return e - - if name_status == "taken": - try: - # get information about user - profile = get_profile(user) - except McuError as e: - return "Error: {}".format(e) - - profile["lt"] = ", legacy" if profile["legacy"] else "" - - if profile["paid"]: - return u"The account \x02{name}\x02 ({id}{lt}) exists. It is a \x02paid\x02" \ - u" account.".format(**profile) - else: - return u"The account \x02{name}\x02 ({id}{lt}) exists. 
It \x034\x02is NOT\x02\x0f a paid" \ - u" account.".format(**profile) - elif name_status == "free": - return u"The account \x02{}\x02 does not exist.".format(user) - elif name_status == "invalid": - return u"The name \x02{}\x02 contains invalid characters.".format(user) - else: - # if you see this, panic - return "Unknown Error." \ No newline at end of file diff --git a/disabled_stuff/minecraft_wiki.py b/disabled_stuff/minecraft_wiki.py deleted file mode 100644 index 072a8ac..0000000 --- a/disabled_stuff/minecraft_wiki.py +++ /dev/null @@ -1,51 +0,0 @@ -import re - -from util import hook, http, text - - -api_url = "http://minecraft.gamepedia.com/api.php?action=opensearch" -mc_url = "http://minecraft.gamepedia.com/" - - -@hook.command -def mcwiki(inp): - """mcwiki <phrase> -- Gets the first paragraph of - the Minecraft Wiki article on <phrase>.""" - - try: - j = http.get_json(api_url, search=inp) - except (http.HTTPError, http.URLError) as e: - return "Error fetching search results: {}".format(e) - except ValueError as e: - return "Error reading search results: {}".format(e) - - if not j[1]: - return "No results found." 
def refresh_cache():
    """Fetch one random page of MLIA stories and append them to mlia_cache."""
    # Pick a random story page; the site paginates up to roughly 11000.
    page = random.randint(1, 11000)
    soup = http.get_soup('http://mylifeisaverage.com/{}'.format(page))

    # Each story lives in a <div class="story "> (note the trailing space
    # in the class name, which is how the site marks story containers).
    for story in soup.find_all('div', {'class': 'story '}):
        story_id = story.find('span', {'class': 'left'}).a.text
        body = story.find('div', {'class': 'sc'}).text.strip()
        mlia_cache.append((story_id, body))
@hook.command
def mtg(inp):
    ".mtg <name> -- Gets information about Magic the Gathering card <name>."
    # Query magiccards.info's card-name search and scrape the first result.
    url = 'http://magiccards.info/query?v=card&s=cname'
    h = http.get_html(url, q=inp)

    # First result link; None means the search listed no cards at all.
    name = h.find('body/table/tr/td/span/a')
    if name is None:
        return "No cards found :("
    # Walk up to the enclosing table row, which holds all of the card data.
    card = name.getparent().getparent().getparent()

    # NOTE(review): `type` and `text` shadow builtins; kept as-is here.
    type = card.find('td/p').text.replace('\n', '')

    # this is ugly
    # Serialize the rules-text node back to HTML so <br> tags can be turned
    # into '$' placeholders, then strip the markup away again.
    text = http.html.tostring(card.xpath("//p[@class='ctext']/b")[0])
    text = text.replace('<br>', '$')
    text = http.html.fromstring(text).text_content()
    # Turn a '$' (former line break) between words into a sentence break.
    text = re.sub(r'(\w+\s*)\$+(\s*\w+)', r'\1. \2', text)
    text = text.replace('$', ' ')
    text = re.sub(r'\(.*?\)', '', text)  # strip parenthetical explanations
    text = re.sub(r'\.(\S)', r'. \1', text)  # fix spacing

    # The <small> cell lists printings between "Editions:" and "Languages:".
    printings = card.find('td/small').text_content()
    printings = re.search(r'Editions:(.*)Languages:', printings).group(1)
    # Extract (set name, rarity) pairs, e.g. "Mirrodin (Rare)".
    printings = re.findall(r'\s*(.+?(?: \([^)]+\))*) \((.*?)\)',
                           ' '.join(printings.split()))

    # Abbreviate set and rarity names where we have a known abbreviation.
    printing_out = ', '.join('%s (%s)' % (set_abbrevs.get(x[0], x[0]),
                                          rarity_abbrevs.get(x[1], x[1]))
                             for x in printings)

    # Make the card link absolute before pulling it out of the anchor.
    name.make_links_absolute(base_url=url)
    link = name.attrib['href']
    name = name.text_content().strip()
    type = type.strip()
    text = ' '.join(text.split())

    return ' | '.join((name, type, text, printing_out, link))
Chandra': 'JVC', - 'Eighth Edition': '8ED', - 'Eighth Edition Box Set': '8EB', - 'European Land Program': 'EURO', - 'Eventide': 'EVE', - 'Exodus': 'EX', - 'Fallen Empires': 'FE', - 'Fifth Dawn': '5DN', - 'Fifth Edition': '5E', - 'Fourth Edition': '4E', - 'Friday Night Magic': 'FNMP', - 'From the Vault: Dragons': 'FVD', - 'From the Vault: Exiled': 'FVE', - 'Future Sight': 'FUT', - 'Gateway': 'GRC', - 'Grand Prix': 'GPX', - 'Guildpact': 'GP', - 'Guru': 'GURU', - 'Happy Holidays': 'HHO', - 'Homelands': 'HL', - 'Ice Age': 'IA', - 'Introductory Two-Player Set': 'ITP', - 'Invasion': 'IN', - 'Judge Gift Program': 'JR', - 'Judgment': 'JU', - 'Junior Series': 'JSR', - 'Legend Membership': 'DCILM', - 'Legends': 'LG', - 'Legions': 'LE', - 'Limited Edition (Alpha)': 'LEA', - 'Limited Edition (Beta)': 'LEB', - 'Limited Edition Alpha': 'LEA', - 'Limited Edition Beta': 'LEB', - 'Lorwyn': 'LW', - 'MTGO Masters Edition': 'MED', - 'MTGO Masters Edition II': 'ME2', - 'MTGO Masters Edition III': 'ME3', - 'Magic 2010': 'M10', - 'Magic Game Day Cards': 'MGDC', - 'Magic Player Rewards': 'MPRP', - 'Magic Scholarship Series': 'MSS', - 'Magic: The Gathering Launch Parties': 'MLP', - 'Media Inserts': 'MBP', - 'Mercadian Masques': 'MM', - 'Mirage': 'MR', - 'Mirrodin': 'MI', - 'Morningtide': 'MT', - 'Multiverse Gift Box Cards': 'MGBC', - 'Nemesis': 'NE', - 'Ninth Edition Box Set': '9EB', - 'Odyssey': 'OD', - 'Onslaught': 'ON', - 'Planar Chaos': 'PC', - 'Planechase': 'PCH', - 'Planeshift': 'PS', - 'Portal': 'PO', - 'Portal Demogame': 'POT', - 'Portal Second Age': 'PO2', - 'Portal Three Kingdoms': 'P3K', - 'Premium Deck Series: Slivers': 'PDS', - 'Prerelease Events': 'PTC', - 'Pro Tour': 'PRO', - 'Prophecy': 'PR', - 'Ravnica: City of Guilds': 'RAV', - 'Release Events': 'REP', - 'Revised Edition': 'RV', - 'Saviors of Kamigawa': 'SOK', - 'Scourge': 'SC', - 'Seventh Edition': '7E', - 'Shadowmoor': 'SHM', - 'Shards of Alara': 'ALA', - 'Starter': 'ST', - 'Starter 1999': 'S99', - 'Starter 2000 Box 
def match_language(fragment):
    """Resolve a user-typed language fragment to a language code.

    Exact matches against the space-separated code aliases (e.g. "jp" in
    "ja jp jpn") win over substring matches against the full language name.
    Returns the primary code, or None when nothing matches.
    """
    needle = fragment.lower()

    # Pass 1: exact match against any of the short code aliases.
    for codes, _ in lang_pairs:
        if needle in codes.lower().split():
            return codes.split()[0]

    # Pass 2: substring match against the human-readable language name.
    for codes, full_name in lang_pairs:
        if needle in full_name.lower():
            return codes.split()[0]

    return None
-@hook.command -def translate(inp): - ".translate <source language> <target language> <sentence> -- Translates <sentence> from <source language> to <target language> using MyGengo." - args = inp.split(' ') - sl = match_language(args[0]) - tl = match_language(args[1]) - txt = unicode(" ".join(args[2:])) - if sl and tl: - return unicode(gengo_translate(txt, sl, tl)) - else: - return "error: translate could not reliably determine one or both languages" - -languages = 'ja fr de ko ru zh'.split() -language_pairs = zip(languages[:-1], languages[1:]) -lang_pairs = [ - ("no", "Norwegian"), - ("it", "Italian"), - ("ht", "Haitian Creole"), - ("af", "Afrikaans"), - ("sq", "Albanian"), - ("ar", "Arabic"), - ("hy", "Armenian"), - ("az", "Azerbaijani"), - ("eu", "Basque"), - ("be", "Belarusian"), - ("bg", "Bulgarian"), - ("ca", "Catalan"), - ("zh-CN zh", "Chinese"), - ("hr", "Croatian"), - ("cs cz", "Czech"), - ("da dk", "Danish"), - ("nl", "Dutch"), - ("en", "English"), - ("et", "Estonian"), - ("tl", "Filipino"), - ("fi", "Finnish"), - ("fr", "French"), - ("gl", "Galician"), - ("ka", "Georgian"), - ("de", "German"), - ("el", "Greek"), - ("ht", "Haitian Creole"), - ("iw", "Hebrew"), - ("hi", "Hindi"), - ("hu", "Hungarian"), - ("is", "Icelandic"), - ("id", "Indonesian"), - ("ga", "Irish"), - ("it", "Italian"), - ("ja jp jpn", "Japanese"), - ("ko", "Korean"), - ("lv", "Latvian"), - ("lt", "Lithuanian"), - ("mk", "Macedonian"), - ("ms", "Malay"), - ("mt", "Maltese"), - ("no", "Norwegian"), - ("fa", "Persian"), - ("pl", "Polish"), - ("pt", "Portuguese"), - ("ro", "Romanian"), - ("ru", "Russian"), - ("sr", "Serbian"), - ("sk", "Slovak"), - ("sl", "Slovenian"), - ("es", "Spanish"), - ("sw", "Swahili"), - ("sv", "Swedish"), - ("th", "Thai"), - ("tr", "Turkish"), - ("uk", "Ukrainian"), - ("ur", "Urdu"), - ("vi", "Vietnamese"), - ("cy", "Welsh"), - ("yi", "Yiddish") -] diff --git a/disabled_stuff/namegen.py b/disabled_stuff/namegen.py deleted file mode 100644 index 7a1f0e6..0000000 --- 
@hook.command(autohelp=False)
def namegen(inp, notice=None):
    """namegen [generator] -- Generates some names using the chosen generator.
    'namegen list' will display a list of all generators."""

    # clean up the input
    inp = inp.strip().lower()

    # discover available generators: every *.json file in GEN_DIR
    # (comprehension + sorted() replaces the manual append loop)
    all_modules = sorted(os.path.splitext(i)[0] for i in os.listdir(GEN_DIR)
                         if os.path.splitext(i)[1] == ".json")

    # 'namegen list' just reports the available generators via notice()
    if inp == "list":
        notice("Available generators: " + text.get_text_list(all_modules, 'and'))
        return

    # first word selects the generator; default to generic fantasy names
    selected_module = inp.split()[0] if inp else "fantasy"

    # check if the selected module is valid ("not in", per PEP 8 idiom)
    if selected_module not in all_modules:
        return "Invalid name generator :("

    # load the generator definition and build a TextGenerator from it
    with open(os.path.join(GEN_DIR, "{}.json".format(selected_module))) as f:
        try:
            generator = get_generator(f.read())
        except ValueError as error:
            return "Unable to read name file: {}".format(error)

    # time to generate some names
    name_list = generator.generate_strings(10)

    # and finally return the final message :D
    return "Some names to ponder: {}.".format(text.get_text_list(name_list, 'and'))
def format_item(item, show_url=True):
    """Build a one-line IRC description of a Newegg API item dict.

    When show_url is true, a shortened product link is appended.
    """
    title = text.truncate_str(item["Title"], 50)

    # TotalReviews is the literal string "[]" when there are no reviews;
    # otherwise it is a bracketed count like "[123]".
    reviews = item["ReviewSummary"]["TotalReviews"]
    if reviews == "[]":
        rating = "No Ratings"
    else:
        rating = "Rated {}/5 ({} ratings)".format(item["ReviewSummary"]["Rating"],
                                                  reviews[1:-1])

    # Show the original price too when the item is discounted.
    if item["FinalPrice"] == item["OriginalPrice"]:
        price = item["FinalPrice"]
    else:
        price = "{FinalPrice}, was {OriginalPrice}".format(**item)

    # Collect status flags as bold IRC tags.
    tags = ["\x02Stock Available\x02" if item["Instock"] else "\x02Out Of Stock\x02"]
    if item["FreeShippingFlag"]:
        tags.append("\x02Free Shipping\x02")
    if item["IsFeaturedItem"]:
        tags.append("\x02Featured\x02")
    if item["IsShellShockerItem"]:
        tags.append(u"\x02SHELL SHOCKER\u00AE\x02")

    # join all the tags together in a comma separated string
    tag_text = u", ".join(tags)

    if not show_url:
        return u"\x02{}\x02 ({}) - {} - {}".format(title, price, rating,
                                                   tag_text)

    # create the item URL and shorten it
    url = web.try_isgd(ITEM_URL.format(item["NeweggItemNumber"]))
    return u"\x02{}\x02 ({}) - {} - {} - {}".format(title, price, rating,
                                                    tag_text, url)
"Sort": "FEATURED" - } - - # submit the search request - r = http.get_json( - 'http://www.ows.newegg.com/Search.egg/Advanced', - post_data=json.dumps(request) - ) - - # get the first result - if r["ProductListItems"]: - return format_item(r["ProductListItems"][0]) - else: - return "No results found." - - diff --git a/disabled_stuff/newgrounds.py b/disabled_stuff/newgrounds.py deleted file mode 100644 index b26ffe4..0000000 --- a/disabled_stuff/newgrounds.py +++ /dev/null @@ -1,59 +0,0 @@ -import re - -from util import hook, http - - -newgrounds_re = (r'(.*:)//(www.newgrounds.com|newgrounds.com)(:[0-9]+)?(.*)', re.I) -valid = set('0123456789') - - -def test(s): - return set(s) <= valid - - -@hook.regex(*newgrounds_re) -def newgrounds_url(match): - location = match.group(4).split("/")[-1] - if not test(location): - print "Not a valid Newgrounds portal ID. Example: http://www.newgrounds.com/portal/view/593993" - return None - soup = http.get_soup("http://www.newgrounds.com/portal/view/" + location) - - title = "\x02{}\x02".format(soup.find('title').text) - - # get author - try: - author_info = soup.find('ul', {'class': 'authorlinks'}).find('img')['alt'] - author = " - \x02{}\x02".format(author_info) - except: - author = "" - - # get rating - try: - rating_info = soup.find('dd', {'class': 'star-variable'})['title'].split("Stars –")[0].strip() - rating = u" - rated \x02{}\x02/\x025.0\x02".format(rating_info) - except: - rating = "" - - # get amount of ratings - try: - ratings_info = soup.find('dd', {'class': 'star-variable'})['title'].split("Stars –")[1].replace("Votes", - "").strip() - numofratings = " ({})".format(ratings_info) - except: - numofratings = "" - - # get amount of views - try: - views_info = soup.find('dl', {'class': 'contentdata'}).findAll('dd')[1].find('strong').text - views = " - \x02{}\x02 views".format(views_info) - except: - views = "" - - # get upload data - try: - date = "on \x02{}\x02".format(soup.find('dl', {'class': 
'sidestats'}).find('dd').text) - except: - date = "" - - return title + rating + numofratings + views + author + date diff --git a/disabled_stuff/notes.py b/disabled_stuff/notes.py deleted file mode 100644 index 070a9fb..0000000 --- a/disabled_stuff/notes.py +++ /dev/null @@ -1,191 +0,0 @@ -import re - -from util import hook - - -db_ready = False - - -def clean_sql(sql): - return re.sub(r'\s+', " ", sql).strip() - - -def db_init(db): - global db_ready - if db_ready: - return - - exists = db.execute(""" - select exists ( - select * from sqlite_master where type = "table" and name = "todos" - ) - """).fetchone()[0] == 1 - - if not exists: - db.execute(clean_sql(""" - create virtual table todos using fts4( - user, - text, - added, - tokenize=porter - )""")) - - db.commit() - - db_ready = True - - -def db_getall(db, nick, limit=-1): - return db.execute(""" - select added, text - from todos - where lower(user) = lower(?) - order by added desc - limit ? - - """, (nick, limit)) - - -def db_get(db, nick, note_id): - return db.execute(""" - select added, text from todos - where lower(user) = lower(?) - order by added desc - limit 1 - offset ? - """, (nick, note_id)).fetchone() - - -def db_del(db, nick, limit='all'): - row = db.execute(""" - delete from todos - where rowid in ( - select rowid from todos - where lower(user) = lower(?) - order by added desc - limit ? - offset ?) - """, (nick, - -1 if limit == 'all' else 1, - 0 if limit == 'all' else limit)) - db.commit() - return row - - -def db_add(db, nick, text): - db.execute(""" - insert into todos (user, text, added) - values (?, ?, CURRENT_TIMESTAMP) - """, (nick, text)) - db.commit() - - -def db_search(db, nick, query): - return db.execute(""" - select added, text - from todos - where todos match ? - and lower(user) = lower(?) 
- order by added desc - """, (query, nick)) - - -@hook.command("notes") -@hook.command -def note(inp, nick='', chan='', db=None, notice=None, bot=None): - """note(s) <add|del|list|search> args -- Manipulates your list of notes.""" - - db_init(db) - - parts = inp.split() - cmd = parts[0].lower() - - args = parts[1:] - - # code to allow users to access each others factoids and a copy of help - # ".note (add|del|list|search) [@user] args -- Manipulates your list of todos." - #if len(args) and args[0].startswith("@"): - # nick = args[0][1:] - # args = args[1:] - - if cmd == 'add': - if not len(args): - return "no text" - - text = " ".join(args) - - db_add(db, nick, text) - - notice("Note added!") - return - elif cmd == 'get': - if len(args): - try: - index = int(args[0]) - except ValueError: - notice("Invalid number format.") - return - else: - index = 0 - - row = db_get(db, nick, index) - - if not row: - notice("No such entry.") - return - notice("[{}]: {}: {}".format(index, row[0], row[1])) - elif cmd == 'del' or cmd == 'delete' or cmd == 'remove': - if not len(args): - return "error" - - if args[0] == 'all': - index = 'all' - else: - try: - index = int(args[0]) - except ValueError: - notice("Invalid number.") - return - - rows = db_del(db, nick, index) - - notice("Deleted {} entries".format(rows.rowcount)) - elif cmd == 'list': - limit = -1 - - if len(args): - try: - limit = int(args[0]) - limit = max(-1, limit) - except ValueError: - notice("Invalid number.") - return - - rows = db_getall(db, nick, limit) - - found = False - - for (index, row) in enumerate(rows): - notice("[{}]: {}: {}".format(index, row[0], row[1])) - found = True - - if not found: - notice("{} has no entries.".format(nick)) - elif cmd == 'search': - if not len(args): - notice("No search query given!") - return - query = " ".join(args) - rows = db_search(db, nick, query) - - found = False - - for (index, row) in enumerate(rows): - notice("[{}]: {}: {}".format(index, row[0], row[1])) - found = True 
- - if not found: - notice("{} has no matching entries for: {}".format(nick, query)) - - else: - notice("Unknown command: {}".format(cmd)) diff --git a/disabled_stuff/osrc.py b/disabled_stuff/osrc.py deleted file mode 100644 index 99cba1c..0000000 --- a/disabled_stuff/osrc.py +++ /dev/null @@ -1,29 +0,0 @@ -from bs4 import BeautifulSoup - -from util import hook, http, web - - -user_url = "http://osrc.dfm.io/{}" - - -@hook.command -def osrc(inp): - """osrc <github user> -- Gets an Open Source Report Card for <github user>""" - - user_nick = inp.strip() - url = user_url.format(user_nick) - - try: - soup = http.get_soup(url) - except (http.HTTPError, http.URLError): - return "Couldn't find any stats for this user." - - report = soup.find("div", {"id": "description"}).find("p").get_text() - - # Split and join to remove all the excess whitespace, slice the - # string to remove the trailing full stop. - report = " ".join(report.split())[:-1] - - short_url = web.try_isgd(url) - - return "{} - {}".format(report, short_url) diff --git a/disabled_stuff/password.py b/disabled_stuff/password.py deleted file mode 100644 index 34a379b..0000000 --- a/disabled_stuff/password.py +++ /dev/null @@ -1,50 +0,0 @@ -# TODO: Add some kind of pronounceable password generation -# TODO: Improve randomness -import string -import random - -from util import hook - - -@hook.command -def password(inp, notice=None): - """password <length> [types] -- Generates a password of <length> (default 10). - [types] can include 'alpha', 'no caps', 'numeric', 'symbols' or any combination of the inp, eg. 
'numbers symbols'""" - okay = [] - - # find the length needed for the password - numb = inp.split(" ") - - try: - length = int(numb[0]) - except ValueError: - length = 10 - - # add alpha characters - if "alpha" in inp or "letter" in inp: - okay = okay + list(string.ascii_lowercase) - #adds capital characters if not told not to - if "no caps" not in inp: - okay = okay + list(string.ascii_uppercase) - - # add numbers - if "numeric" in inp or "number" in inp: - okay = okay + [str(x) for x in xrange(0, 10)] - - # add symbols - if "symbol" in inp: - sym = ['!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '-', '=', '_', '+', '[', ']', '{', '}', '\\', '|', ';', - ':', "'", '.', '>', ',', '<', '/', '?', '`', '~', '"'] - okay += okay + sym - - # defaults to lowercase alpha password if the okay list is empty - if not okay: - okay = okay + list(string.ascii_lowercase) - - pw = "" - - # generates password - for x in range(length): - pw = pw + random.choice(okay) - - notice(pw) diff --git a/disabled_stuff/plpaste.py b/disabled_stuff/plpaste.py deleted file mode 100644 index 238037d..0000000 --- a/disabled_stuff/plpaste.py +++ /dev/null @@ -1,12 +0,0 @@ -from util import hook, web - - -@hook.command(adminonly=True) -def plpaste(inp): - if "/" in inp and inp.split("/")[0] != "util": - return "Invalid input" - try: - with open("plugins/%s.py" % inp) as f: - return web.haste(f.read(), ext='py') - except IOError: - return "Plugin not found (must be in plugins folder)" diff --git a/disabled_stuff/potato.py b/disabled_stuff/potato.py deleted file mode 100644 index 9987e18..0000000 --- a/disabled_stuff/potato.py +++ /dev/null @@ -1,56 +0,0 @@ -# coding=utf-8 -import re -import random - -from util import hook - - -potatoes = ['AC Belmont', 'AC Blue Pride', 'AC Brador', 'AC Chaleur', 'AC Domino', 'AC Dubuc', 'AC Glacier Chip', - 'AC Maple Gold', 'AC Novachip', 'AC Peregrine Red', 'AC Ptarmigan', 'AC Red Island', 'AC Saguenor', - 'AC Stampede Russet', 'AC Sunbury', 'Abeille', 'Abnaki', 
'Acadia', 'Acadia Russet', 'Accent', - 'Adirondack Blue', 'Adirondack Red', 'Adora', 'Agria', 'All Blue', 'All Red', 'Alpha', 'Alta Russet', - 'Alturas Russet', 'Amandine', 'Amisk', 'Andover', 'Anoka', 'Anson', 'Aquilon', 'Arran Consul', 'Asterix', - 'Atlantic', 'Austrian Crescent', 'Avalanche', 'Banana', 'Bannock Russet', 'Batoche', 'BeRus', - 'Belle De Fonteney', 'Belleisle', 'Bintje', 'Blossom', 'Blue Christie', 'Blue Mac', 'Brigus', - 'Brise du Nord', 'Butte', 'Butterfinger', 'Caesar', 'CalWhite', 'CalRed', 'Caribe', 'Carlingford', - 'Carlton', 'Carola', 'Cascade', 'Castile', 'Centennial Russet', 'Century Russet', 'Charlotte', 'Cherie', - 'Cherokee', 'Cherry Red', 'Chieftain', 'Chipeta', 'Coastal Russet', 'Colorado Rose', 'Concurrent', - 'Conestoga', 'Cowhorn', 'Crestone Russet', 'Crispin', 'Cupids', 'Daisy Gold', 'Dakota Pearl', 'Defender', - 'Delikat', 'Denali', 'Desiree', 'Divina', 'Dundrod', 'Durango Red', 'Early Rose', 'Elba', 'Envol', - 'Epicure', 'Eramosa', 'Estima', 'Eva', 'Fabula', 'Fambo', 'Fremont Russet', 'French Fingerling', - 'Frontier Russet', 'Fundy', 'Garnet Chile', 'Gem Russet', 'GemStar Russet', 'Gemchip', 'German Butterball', - 'Gigant', 'Goldrush', 'Granola', 'Green Mountain', 'Haida', 'Hertha', 'Hilite Russet', 'Huckleberry', - 'Hunter', 'Huron', 'IdaRose', 'Innovator', 'Irish Cobbler', 'Island Sunshine', 'Ivory Crisp', - 'Jacqueline Lee', 'Jemseg', 'Kanona', 'Katahdin', 'Kennebec', "Kerr's Pink", 'Keswick', 'Keuka Gold', - 'Keystone Russet', 'King Edward VII', 'Kipfel', 'Klamath Russet', 'Krantz', 'LaRatte', 'Lady Rosetta', - 'Latona', 'Lemhi Russet', 'Liberator', 'Lili', 'MaineChip', 'Marfona', 'Maris Bard', 'Maris Piper', - 'Matilda', 'Mazama', 'McIntyre', 'Michigan Purple', 'Millenium Russet', 'Mirton Pearl', 'Modoc', 'Mondial', - 'Monona', 'Morene', 'Morning Gold', 'Mouraska', 'Navan', 'Nicola', 'Nipigon', 'Niska', 'Nooksack', - 'NorValley', 'Norchip', 'Nordonna', 'Norgold Russet', 'Norking Russet', 'Norland', 'Norwis', 'Obelix', - 
'Ozette', 'Peanut', 'Penta', 'Peribonka', 'Peruvian Purple', 'Pike', 'Pink Pearl', 'Prospect', 'Pungo', - 'Purple Majesty', 'Purple Viking', 'Ranger Russet', 'Reba', 'Red Cloud', 'Red Gold', 'Red La Soda', - 'Red Pontiac', 'Red Ruby', 'Red Thumb', 'Redsen', 'Rocket', 'Rose Finn Apple', 'Rose Gold', 'Roselys', - 'Rote Erstling', 'Ruby Crescent', 'Russet Burbank', 'Russet Legend', 'Russet Norkotah', 'Russet Nugget', - 'Russian Banana', 'Saginaw Gold', 'Sangre', 'Sant�', 'Satina', 'Saxon', 'Sebago', 'Shepody', 'Sierra', - 'Silverton Russet', 'Simcoe', 'Snowden', 'Spunta', "St. John's", 'Summit Russet', 'Sunrise', 'Superior', - 'Symfonia', 'Tolaas', 'Trent', 'True Blue', 'Ulla', 'Umatilla Russet', 'Valisa', 'Van Gogh', 'Viking', - 'Wallowa Russet', 'Warba', 'Western Russet', 'White Rose', 'Willamette', 'Winema', 'Yellow Finn', - 'Yukon Gold'] - - -@hook.command -def potato(inp, action=None): - """potato <user> - Makes <user> a tasty little potato.""" - inp = inp.strip() - - if not re.match("^[A-Za-z0-9_|.-\]\[]*$", inp.lower()): - return "I cant make a tasty potato for that user!" 
- - potato_type = random.choice(potatoes) - size = random.choice(['small', 'little', 'mid-sized', 'medium-sized', 'large', 'gigantic']) - flavor = random.choice(['tasty', 'delectable', 'delicious', 'yummy', 'toothsome', 'scrumptious', 'luscious']) - method = random.choice(['bakes', 'fries', 'boils', 'roasts']) - side_dish = random.choice(['side salad', 'dollop of sour cream', 'piece of chicken', 'bowl of shredded bacon']) - - action("{} a {} {} {} potato for {} and serves it with a small {}!".format(method, flavor, size, potato_type, inp, - side_dish)) diff --git a/disabled_stuff/pre.py b/disabled_stuff/pre.py deleted file mode 100644 index f4e61a3..0000000 --- a/disabled_stuff/pre.py +++ /dev/null @@ -1,38 +0,0 @@ -import datetime - -from util import hook, http, timesince - - -@hook.command("scene") -@hook.command -def pre(inp): - """pre <query> -- searches scene releases using orlydb.com""" - - try: - h = http.get_html("http://orlydb.com/", q=inp) - except http.HTTPError as e: - return 'Unable to fetch results: {}'.format(e) - - results = h.xpath("//div[@id='releases']/div/span[@class='release']/..") - - if not results: - return "No results found." 
- - result = results[0] - - date = result.xpath("span[@class='timestamp']/text()")[0] - section = result.xpath("span[@class='section']//text()")[0] - name = result.xpath("span[@class='release']/text()")[0] - - # parse date/time - date = datetime.datetime.strptime(date, "%Y-%m-%d %H:%M:%S") - date_string = date.strftime("%d %b %Y") - since = timesince.timesince(date) - - size = result.xpath("span[@class='inforight']//text()") - if size: - size = ' - ' + size[0].split()[0] - else: - size = '' - - return '{} - {}{} - {} ({} ago)'.format(section, name, size, date_string, since) diff --git a/disabled_stuff/python.py b/disabled_stuff/python.py deleted file mode 100644 index 5cdc524..0000000 --- a/disabled_stuff/python.py +++ /dev/null @@ -1,9 +0,0 @@ -from util import hook -from util.pyexec import eval_py - - -@hook.command -def python(inp): - """python <prog> -- Executes <prog> as Python code.""" - - return eval_py(inp) diff --git a/disabled_stuff/qrcode.py b/disabled_stuff/qrcode.py deleted file mode 100644 index 9d481e0..0000000 --- a/disabled_stuff/qrcode.py +++ /dev/null @@ -1,18 +0,0 @@ -# Plugin by https://github.com/Mu5tank05 -from util import hook, web, http - - -@hook.command('qr') -@hook.command -def qrcode(inp): - """qrcode [link] returns a link for a QR code.""" - - args = { - "cht": "qr", # chart type (QR) - "chs": "200x200", # dimensions - "chl": inp # data - } - - link = http.prepare_url("http://chart.googleapis.com/chart", args) - - return web.try_isgd(link) diff --git a/disabled_stuff/quote.py b/disabled_stuff/quote.py deleted file mode 100644 index 6beefc5..0000000 --- a/disabled_stuff/quote.py +++ /dev/null @@ -1,149 +0,0 @@ -import random -import re -import time - -from util import hook - - -def format_quote(q, num, n_quotes): - """Returns a formatted string of a quote""" - ctime, nick, msg = q - return "[{}/{}] <{}> {}".format(num, n_quotes, - nick, msg) - - -def create_table_if_not_exists(db): - """Creates an empty quote table if one does not 
already exist""" - db.execute("create table if not exists quote" - "(chan, nick, add_nick, msg, time real, deleted default 0, " - "primary key (chan, nick, msg))") - db.commit() - - -def add_quote(db, chan, nick, add_nick, msg): - """Adds a quote to a nick, returns message string""" - try: - db.execute('''INSERT OR FAIL INTO quote - (chan, nick, add_nick, msg, time) - VALUES(?,?,?,?,?)''', - (chan, nick, add_nick, msg, time.time())) - db.commit() - except db.IntegrityError: - return "Message already stored, doing nothing." - return "Quote added." - - -def del_quote(db, chan, nick, add_nick, msg): - """Deletes a quote from a nick""" - db.execute('''UPDATE quote SET deleted = 1 WHERE - chan=? AND lower(nick)=lower(?) AND msg=msg''') - db.commit() - - -def get_quote_num(num, count, name): - """Returns the quote number to fetch from the DB""" - if num: # Make sure num is a number if it isn't false - num = int(num) - if count == 0: # Error on no quotes - raise Exception("No quotes found for {}.".format(name)) - if num and num < 0: # Count back if possible - num = count + num + 1 if num + count > -1 else count + 1 - if num and num > count: # If there are not enough quotes, raise an error - raise Exception("I only have {} quote{} for {}.".format(count, ('s', '')[count == 1], name)) - if num and num == 0: # If the number is zero, set it to one - num = 1 - if not num: # If a number is not given, select a random one - num = random.randint(1, count) - return num - - -def get_quote_by_nick(db, nick, num=False): - """Returns a formatted quote from a nick, random or selected by number""" - count = db.execute('''SELECT COUNT(*) FROM quote WHERE deleted != 1 - AND lower(nick) = lower(?)''', [nick]).fetchall()[0][0] - - try: - num = get_quote_num(num, count, nick) - except Exception as error_message: - return error_message - - quote = db.execute('''SELECT time, nick, msg - FROM quote - WHERE deleted != 1 - AND lower(nick) = lower(?) 
- ORDER BY time - LIMIT ?, 1''', (nick, (num - 1))).fetchall()[0] - return format_quote(quote, num, count) - - -def get_quote_by_nick_chan(db, chan, nick, num=False): - """Returns a formatted quote from a nick in a channel, random or selected by number""" - count = db.execute('''SELECT COUNT(*) - FROM quote - WHERE deleted != 1 - AND chan = ? - AND lower(nick) = lower(?)''', (chan, nick)).fetchall()[0][0] - - try: - num = get_quote_num(num, count, nick) - except Exception as error_message: - return error_message - - quote = db.execute('''SELECT time, nick, msg - FROM quote - WHERE deleted != 1 - AND chan = ? - AND lower(nick) = lower(?) - ORDER BY time - LIMIT ?, 1''', (chan, nick, (num - 1))).fetchall()[0] - return format_quote(quote, num, count) - - -def get_quote_by_chan(db, chan, num=False): - """Returns a formatted quote from a channel, random or selected by number""" - count = db.execute('''SELECT COUNT(*) - FROM quote - WHERE deleted != 1 - AND chan = ?''', (chan,)).fetchall()[0][0] - - try: - num = get_quote_num(num, count, chan) - except Exception as error_message: - return error_message - - quote = db.execute('''SELECT time, nick, msg - FROM quote - WHERE deleted != 1 - AND chan = ? 
- ORDER BY time - LIMIT ?, 1''', (chan, (num - 1))).fetchall()[0] - return format_quote(quote, num, count) - - -@hook.command('q') -@hook.command -def quote(inp, nick='', chan='', db=None, notice=None): - """quote [#chan] [nick] [#n]/.quote add <nick> <msg> - Gets random or [#n]th quote by <nick> or from <#chan>/adds quote.""" - create_table_if_not_exists(db) - - add = re.match(r"add[^\w@]+(\S+?)>?\s+(.*)", inp, re.I) - retrieve = re.match(r"(\S+)(?:\s+#?(-?\d+))?$", inp) - retrieve_chan = re.match(r"(#\S+)\s+(\S+)(?:\s+#?(-?\d+))?$", inp) - - if add: - quoted_nick, msg = add.groups() - notice(add_quote(db, chan, quoted_nick, nick, msg)) - return - elif retrieve: - select, num = retrieve.groups() - by_chan = True if select.startswith('#') else False - if by_chan: - return get_quote_by_chan(db, select, num) - else: - return get_quote_by_nick(db, select, num) - elif retrieve_chan: - chan, nick, num = retrieve_chan.groups() - return get_quote_by_nick_chan(db, chan, nick, num) - - notice(quote.__doc__) diff --git a/disabled_stuff/rdio.py b/disabled_stuff/rdio.py deleted file mode 100644 index 2677090..0000000 --- a/disabled_stuff/rdio.py +++ /dev/null @@ -1,131 +0,0 @@ -import urllib -import json -import re - -import oauth2 as oauth - -from util import hook - - -def getdata(inp, types, api_key, api_secret): - consumer = oauth.Consumer(api_key, api_secret) - client = oauth.Client(consumer) - response = client.request('http://api.rdio.com/1/', 'POST', - urllib.urlencode({'method': 'search', 'query': inp, 'types': types, 'count': '1'})) - data = json.loads(response[1]) - return data - - -@hook.command -def rdio(inp, bot=None): - """ rdio <search term> - alternatives: .rdiot (track), .rdioar (artist), .rdioal (album) """ - api_key = bot.config.get("api_keys", {}).get("rdio_key") - api_secret = bot.config.get("api_keys", {}).get("rdio_secret") - if not api_key: - return "error: no api key set" - data = getdata(inp, "Track,Album,Artist", api_key, api_secret) - try: - info = 
data['result']['results'][0] - except IndexError: - return "No results." - if 'name' in info: - if 'artist' in info and 'album' in info: # Track - name = info['name'] - artist = info['artist'] - album = info['album'] - url = info['shortUrl'] - return u"\x02{}\x02 by \x02{}\x02 - {} {}".format(name, artist, album, url) - elif 'artist' in info and not 'album' in info: # Album - name = info['name'] - artist = info['artist'] - url = info['shortUrl'] - return u"\x02{}\x02 by \x02{}\x02 - {}".format(name, artist, url) - else: # Artist - name = info['name'] - url = info['shortUrl'] - return u"\x02{}\x02 - {}".format(name, url) - - -@hook.command -def rdiot(inp, bot=None): - """ rdiot <search term> - Search for tracks on rdio """ - api_key = bot.config.get("api_keys", {}).get("rdio_key") - api_secret = bot.config.get("api_keys", {}).get("rdio_secret") - if not api_key: - return "error: no api key set" - data = getdata(inp, "Track", api_key, api_secret) - try: - info = data['result']['results'][0] - except IndexError: - return "No results." - name = info['name'] - artist = info['artist'] - album = info['album'] - url = info['shortUrl'] - return u"\x02{}\x02 by \x02{}\x02 - {} - {}".format(name, artist, album, url) - - -@hook.command -def rdioar(inp, bot=None): - """ rdioar <search term> - Search for artists on rdio """ - api_key = bot.config.get("api_keys", {}).get("rdio_key") - api_secret = bot.config.get("api_keys", {}).get("rdio_secret") - if not api_key: - return "error: no api key set" - data = getdata(inp, "Artist", api_key, api_secret) - try: - info = data['result']['results'][0] - except IndexError: - return "No results." 
- name = info['name'] - url = info['shortUrl'] - return u"\x02{}\x02 - {}".format(name, url) - - -@hook.command -def rdioal(inp, bot=None): - """ rdioal <search term> - Search for albums on rdio """ - api_key = bot.config.get("api_keys", {}).get("rdio_key") - api_secret = bot.config.get("api_keys", {}).get("rdio_secret") - if not api_key: - return "error: no api key set" - data = getdata(inp, "Album", api_key, api_secret) - try: - info = data['result']['results'][0] - except IndexError: - return "No results." - name = info['name'] - artist = info['artist'] - url = info['shortUrl'] - return u"\x02{}\x02 by \x02{}\x02 - {}".format(name, artist, url) - - -rdio_re = (r'(.*:)//(rd.io|www.rdio.com|rdio.com)(:[0-9]+)?(.*)', re.I) - - -@hook.regex(*rdio_re) -def rdio_url(match, bot=None): - api_key = bot.config.get("api_keys", {}).get("rdio_key") - api_secret = bot.config.get("api_keys", {}).get("rdio_secret") - if not api_key: - return None - url = match.group(1) + "//" + match.group(2) + match.group(4) - consumer = oauth.Consumer(api_key, api_secret) - client = oauth.Client(consumer) - response = client.request('http://api.rdio.com/1/', 'POST', - urllib.urlencode({'method': 'getObjectFromUrl', 'url': url})) - data = json.loads(response[1]) - info = data['result'] - if 'name' in info: - if 'artist' in info and 'album' in info: # Track - name = info['name'] - artist = info['artist'] - album = info['album'] - return u"Rdio track: \x02{}\x02 by \x02{}\x02 - {}".format(name, artist, album) - elif 'artist' in info and not 'album' in info: # Album - name = info['name'] - artist = info['artist'] - return u"Rdio album: \x02{}\x02 by \x02{}\x02".format(name, artist) - else: # Artist - name = info['name'] - return u"Rdio artist: \x02{}\x02".format(name) diff --git a/disabled_stuff/recipe.py b/disabled_stuff/recipe.py deleted file mode 100644 index 0e04572..0000000 --- a/disabled_stuff/recipe.py +++ /dev/null @@ -1,106 +0,0 @@ -import random - -from util import hook, http, web - 
-metadata_url = "http://omnidator.appspot.com/microdata/json/?url={}" - -base_url = "http://www.cookstr.com" -search_url = base_url + "/searches" -random_url = search_url + "/surprise" - -# set this to true to censor this plugin! -censor = True -phrases = [ - u"EAT SOME FUCKING \x02{}\x02", - u"YOU WON'T NOT MAKE SOME FUCKING \x02{}\x02", - u"HOW ABOUT SOME FUCKING \x02{}?\x02", - u"WHY DON'T YOU EAT SOME FUCKING \x02{}?\x02", - u"MAKE SOME FUCKING \x02{}\x02", - u"INDUCE FOOD COMA WITH SOME FUCKING \x02{}\x02" -] - -clean_key = lambda i: i.split("#")[1] - - -class ParseError(Exception): - pass - - -def get_data(url): - """ Uses the omnidator API to parse the metadata from the provided URL """ - try: - omni = http.get_json(metadata_url.format(url)) - except (http.HTTPError, http.URLError) as e: - raise ParseError(e) - schemas = omni["@"] - for d in schemas: - if d["a"] == "<http://schema.org/Recipe>": - data = {clean_key(key): value for (key, value) in d.iteritems() - if key.startswith("http://schema.org/Recipe")} - return data - raise ParseError("No recipe data found") - - -@hook.command(autohelp=False) -def recipe(inp): - """recipe [term] - Gets a recipe for [term], or ets a random recipe if [term] is not provided""" - if inp: - # get the recipe URL by searching - try: - search = http.get_soup(search_url, query=inp.strip()) - except (http.HTTPError, http.URLError) as e: - return "Could not get recipe: {}".format(e) - - # find the list of results - result_list = search.find('div', {'class': 'found_results'}) - - if result_list: - results = result_list.find_all('div', {'class': 'recipe_result'}) - else: - return "No results" - - # pick a random front page result - result = random.choice(results) - - # extract the URL from the result - url = base_url + result.find('div', {'class': 'image-wrapper'}).find('a')['href'] - - else: - # get a random recipe URL - try: - page = http.open(random_url) - except (http.HTTPError, http.URLError) as e: - return "Could not get 
recipe: {}".format(e) - url = page.geturl() - - # use get_data() to get the recipe info from the URL - try: - data = get_data(url) - except ParseError as e: - return "Could not parse recipe: {}".format(e) - - name = data["name"].strip() - return u"Try eating \x02{}!\x02 - {}".format(name, web.try_isgd(url)) - - -@hook.command(autohelp=False) -def dinner(inp): - """dinner - WTF IS FOR DINNER""" - try: - page = http.open(random_url) - except (http.HTTPError, http.URLError) as e: - return "Could not get recipe: {}".format(e) - url = page.geturl() - - try: - data = get_data(url) - except ParseError as e: - return "Could not parse recipe: {}".format(e) - - name = data["name"].strip().upper() - text = random.choice(phrases).format(name) - - if censor: - text = text.replace("FUCK", "F**K") - - return u"{} - {}".format(text, web.try_isgd(url)) diff --git a/disabled_stuff/reddit.py b/disabled_stuff/reddit.py deleted file mode 100644 index 80fcb76..0000000 --- a/disabled_stuff/reddit.py +++ /dev/null @@ -1,79 +0,0 @@ -from datetime import datetime -import re -import random - -from util import hook, http, text, timesince - - -reddit_re = (r'.*(((www\.)?reddit\.com/r|redd\.it)[^ ]+)', re.I) - -base_url = "http://reddit.com/r/{}/.json" -short_url = "http://redd.it/{}" - - -@hook.regex(*reddit_re) -def reddit_url(match): - thread = http.get_html(match.group(0)) - - title = thread.xpath('//title/text()')[0] - upvotes = thread.xpath("//span[@class='upvotes']/span[@class='number']/text()")[0] - downvotes = thread.xpath("//span[@class='downvotes']/span[@class='number']/text()")[0] - author = thread.xpath("//div[@id='siteTable']//a[contains(@class,'author')]/text()")[0] - timeago = thread.xpath("//div[@id='siteTable']//p[@class='tagline']/time/text()")[0] - comments = thread.xpath("//div[@id='siteTable']//a[@class='comments']/text()")[0] - - return u'\x02{}\x02 - posted by \x02{}\x02 {} ago - {} upvotes, {} downvotes - {}'.format( - title, author, timeago, upvotes, downvotes, 
comments) - - -@hook.command(autohelp=False) -def reddit(inp): - """reddit <subreddit> [n] -- Gets a random post from <subreddit>, or gets the [n]th post in the subreddit.""" - id_num = None - - if inp: - # clean and split the input - parts = inp.lower().strip().split() - - # find the requested post number (if any) - if len(parts) > 1: - url = base_url.format(parts[0].strip()) - try: - id_num = int(parts[1]) - 1 - except ValueError: - return "Invalid post number." - else: - url = base_url.format(parts[0].strip()) - else: - url = "http://reddit.com/.json" - - try: - data = http.get_json(url, user_agent=http.ua_chrome) - except Exception as e: - return "Error: " + str(e) - data = data["data"]["children"] - - # get the requested/random post - if id_num is not None: - try: - item = data[id_num]["data"] - except IndexError: - length = len(data) - return "Invalid post number. Number must be between 1 and {}.".format(length) - else: - item = random.choice(data)["data"] - - item["title"] = text.truncate_str(item["title"], 50) - item["link"] = short_url.format(item["id"]) - - raw_time = datetime.fromtimestamp(int(item["created_utc"])) - item["timesince"] = timesince.timesince(raw_time) - - if item["over_18"]: - item["warning"] = " \x02NSFW\x02" - else: - item["warning"] = "" - - return u"\x02{title} : {subreddit}\x02 - posted by \x02{author}\x02" \ - " {timesince} ago - {ups} upvotes, {downs} downvotes -" \ - " {link}{warning}".format(**item) diff --git a/disabled_stuff/regex_chans.py b/disabled_stuff/regex_chans.py deleted file mode 100644 index c16c250..0000000 --- a/disabled_stuff/regex_chans.py +++ /dev/null @@ -1,128 +0,0 @@ -from util import hook - - -# Default value. 
-# If True, all channels without a setting will have regex enabled -# If False, all channels without a setting will have regex disabled -default_enabled = True - -db_ready = False - - -def db_init(db): - global db_ready - if not db_ready: - db.execute("CREATE TABLE IF NOT EXISTS regexchans(channel PRIMARY KEY, status)") - db.commit() - db_ready = True - - -def get_status(db, channel): - row = db.execute("SELECT status FROM regexchans WHERE channel = ?", [channel]).fetchone() - if row: - return row[0] - else: - return None - - -def set_status(db, channel, status): - row = db.execute("REPLACE INTO regexchans (channel, status) VALUES(?, ?)", [channel, status]) - db.commit() - - -def delete_status(db, channel): - row = db.execute("DELETE FROM regexchans WHERE channel = ?", [channel]) - db.commit() - - -def list_status(db): - row = db.execute("SELECT * FROM regexchans").fetchall() - result = None - for values in row: - if result: - result += u", {}: {}".format(values[0], values[1]) - else: - result = u"{}: {}".format(values[0], values[1]) - return result - - -@hook.sieve -def sieve_regex(bot, inp, func, kind, args): - db = bot.get_db_connection(inp.conn) - db_init(db) - if kind == 'regex' and inp.chan.startswith("#") and func.__name__ != 'factoid': - chanstatus = get_status(db, inp.chan) - if chanstatus != "ENABLED" and (chanstatus == "DISABLED" or not default_enabled): - print u"Denying input.raw={}, kind={}, args={} from {}".format(inp.raw, kind, args, inp.chan) - return None - print u"Allowing input.raw={}, kind={}, args={} from {}".format(inp.raw, kind, args, inp.chan) - - return inp - - -@hook.command(permissions=["botcontrol"]) -def enableregex(inp, db=None, message=None, notice=None, chan=None, nick=None): - db_init(db) - inp = inp.strip().lower() - if not inp: - channel = chan - elif inp.startswith("#"): - channel = inp - else: - channel = u"#{}".format(inp) - - message(u"Enabling regex matching (youtube, etc) (issued by {})".format(nick), target=channel) - 
notice(u"Enabling regex matching (youtube, etc) in channel {}".format(channel)) - set_status(db, channel, "ENABLED") - - -@hook.command(permissions=["botcontrol"]) -def disableregex(inp, db=None, message=None, notice=None, chan=None, nick=None): - db_init(db) - inp = inp.strip().lower() - if not inp: - channel = chan - elif inp.startswith("#"): - channel = inp - else: - channel = u"#{}".format(inp) - - message(u"Disabling regex matching (youtube, etc) (issued by {})".format(nick), target=channel) - notice(u"Disabling regex matching (youtube, etc) in channel {}".format(channel)) - set_status(db, channel, "DISABLED") - - -@hook.command(permissions=["botcontrol"]) -def resetregex(inp, db=None, message=None, notice=None, chan=None, nick=None): - db_init(db) - inp = inp.strip().lower() - if not inp: - channel = chan - elif inp.startswith("#"): - channel = inp - else: - channel = u"#{}".format(inp) - - message(u"Resetting regex matching setting (youtube, etc) (issued by {})".format(nick), target=channel) - notice(u"Resetting regex matching setting (youtube, etc) in channel {}".format(channel)) - delete_status(db, channel) - - -@hook.command(permissions=["botcontrol"]) -def regexstatus(inp, db=None, chan=None): - db_init(db) - inp = inp.strip().lower() - if not inp: - channel = chan - elif inp.startswith("#"): - channel = inp - else: - channel = u"#{}".format(inp) - - return u"Regex status for {}: {}".format(channel, get_status(db, channel)) - - -@hook.command(permissions=["botcontrol"]) -def listregex(inp, db=None): - db_init(db) - return list_status(db) diff --git a/disabled_stuff/religion.py b/disabled_stuff/religion.py deleted file mode 100644 index 552b23f..0000000 --- a/disabled_stuff/religion.py +++ /dev/null @@ -1,38 +0,0 @@ -from util import hook, http - - -@hook.command('god') -@hook.command -def bible(inp): - """.bible <passage> -- gets <passage> from the Bible (ESV)""" - - base_url = ('http://www.esvapi.org/v2/rest/passageQuery?key=IP&' - 
'output-format=plain-text&include-heading-horizontal-lines&' - 'include-headings=false&include-passage-horizontal-lines=false&' - 'include-passage-references=false&include-short-copyright=false&' - 'include-footnotes=false&line-length=0&' - 'include-heading-horizontal-lines=false') - - text = http.get(base_url, passage=inp) - - text = ' '.join(text.split()) - - if len(text) > 400: - text = text[:text.rfind(' ', 0, 400)] + '...' - - return text - - -@hook.command('allah') -@hook.command -def koran(inp): # Koran look-up plugin by Ghetto Wizard - """.koran <chapter.verse> -- gets <chapter.verse> from the Koran""" - - url = 'http://quod.lib.umich.edu/cgi/k/koran/koran-idx?type=simple' - - results = http.get_html(url, q1=inp).xpath('//li') - - if not results: - return 'No results for ' + inp - - return results[0].text_content() diff --git a/disabled_stuff/repaste.py b/disabled_stuff/repaste.py deleted file mode 100644 index 1443345..0000000 --- a/disabled_stuff/repaste.py +++ /dev/null @@ -1,180 +0,0 @@ -from util import hook, http - -import urllib -import random -import urllib2 -import htmlentitydefs -import re - -re_htmlent = re.compile("&(" + "|".join(htmlentitydefs.name2codepoint.keys()) + ");") -re_numeric = re.compile(r'&#(x?)([a-fA-F0-9]+);') - - -def db_init(db): - db.execute("create table if not exists repaste(chan, manual, primary key(chan))") - db.commit() - - -def decode_html(text): - text = re.sub(re_htmlent, - lambda m: unichr(htmlentitydefs.name2codepoint[m.group(1)]), - text) - - text = re.sub(re_numeric, - lambda m: unichr(int(m.group(2), 16 if m.group(1) else 10)), - text) - return text - - -def scrape_mibpaste(url): - if not url.startswith("http"): - url = "http://" + url - pagesource = http.get(url) - rawpaste = re.search(r'(?s)(?<=<body>\n).+(?=<hr>)', pagesource).group(0) - filterbr = rawpaste.replace("<br />", "") - unescaped = decode_html(filterbr) - stripped = unescaped.strip() - - return stripped - - -def scrape_pastebin(url): - id = 
re.search(r'(?:www\.)?pastebin.com/([a-zA-Z0-9]+)$', url).group(1) - rawurl = "http://pastebin.com/raw.php?i=" + id - text = http.get(rawurl) - - return text - - -autorepastes = {} - - -#@hook.regex('(pastebin\.com)(/[^ ]+)') -@hook.regex('(mibpaste\.com)(/[^ ]+)') -def autorepaste(inp, input=None, notice=None, db=None, chan=None, nick=None): - db_init(db) - manual = db.execute("select manual from repaste where chan=?", (chan, )).fetchone() - if manual and len(manual) and manual[0]: - return - url = inp.group(1) + inp.group(2) - urllib.unquote(url) - if url in autorepastes: - out = autorepastes[url] - notice("In the future, please use a less awful pastebin (e.g. pastebin.com)") - else: - out = repaste("http://" + url, input, db, False) - autorepastes[url] = out - notice("In the future, please use a less awful pastebin (e.g. pastebin.com) instead of %s." % inp.group(1)) - input.say("%s (repasted for %s)" % (out, nick)) - - -scrapers = { - r'mibpaste\.com': scrape_mibpaste, - r'pastebin\.com': scrape_pastebin -} - - -def scrape(url): - for pat, scraper in scrapers.iteritems(): - print "matching " + repr(pat) + " " + url - if re.search(pat, url): - break - else: - return None - - return scraper(url) - - -def paste_sprunge(text, syntax=None, user=None): - data = urllib.urlencode({"sprunge": text}) - url = urllib2.urlopen("http://sprunge.us/", data).read().strip() - - if syntax: - url += "?" + syntax - - return url - - -def paste_ubuntu(text, user=None, syntax='text'): - data = urllib.urlencode({"poster": user, - "syntax": syntax, - "content": text}) - - return urllib2.urlopen("http://paste.ubuntu.com/", data).url - - -def paste_gist(text, user=None, syntax=None, description=None): - data = { - 'file_contents[gistfile1]': text, - 'action_button': "private" - } - - if description: - data['description'] = description - - if syntax: - data['file_ext[gistfile1]'] = "." 
+ syntax - - req = urllib2.urlopen('https://gist.github.com/gists', urllib.urlencode(data).encode('utf8')) - return req.url - - -def paste_strictfp(text, user=None, syntax="plain"): - data = urllib.urlencode(dict( - language=syntax, - paste=text, - private="private", - submit="Paste")) - req = urllib2.urlopen("http://paste.strictfp.com/", data) - return req.url - - -pasters = dict( - ubuntu=paste_ubuntu, - sprunge=paste_sprunge, - gist=paste_gist, - strictfp=paste_strictfp -) - - -@hook.command -def repaste(inp, input=None, db=None, isManual=True): - ".repaste mode|list|[provider] [syntax] <pastebinurl> -- Reuploads mibpaste to [provider]." - - parts = inp.split() - db_init(db) - if parts[0] == 'list': - return " ".join(pasters.keys()) - - paster = paste_gist - args = {} - - if not parts[0].startswith("http"): - p = parts[0].lower() - - if p in pasters: - paster = pasters[p] - parts = parts[1:] - - if not parts[0].startswith("http"): - p = parts[0].lower() - parts = parts[1:] - - args["syntax"] = p - - if len(parts) > 1: - return "PEBKAC" - - args["user"] = input.user - - url = parts[0] - - scraped = scrape(url) - - if not scraped: - return "No scraper for given url" - - args["text"] = scraped - pasted = paster(**args) - - return pasted diff --git a/disabled_stuff/rottentomatoes.py b/disabled_stuff/rottentomatoes.py deleted file mode 100644 index 2d7af38..0000000 --- a/disabled_stuff/rottentomatoes.py +++ /dev/null @@ -1,39 +0,0 @@ -from util import http, hook - -api_root = 'http://api.rottentomatoes.com/api/public/v1.0/' -movie_search_url = api_root + 'movies.json' -movie_reviews_url = api_root + 'movies/%s/reviews.json' - - -@hook.command('rt') -def rottentomatoes(inp, bot=None): - """rt <title> -- gets ratings for <title> from Rotten Tomatoes""" - - api_key = bot.config.get("api_keys", {}).get("rottentomatoes", None) - if not api_key: - return "error: no api key set" - - title = inp.strip() - - results = http.get_json(movie_search_url, q=title, apikey=api_key) - 
if results['total'] == 0: - return 'No results.' - - movie = results['movies'][0] - title = movie['title'] - movie_id = movie['id'] - critics_score = movie['ratings']['critics_score'] - audience_score = movie['ratings']['audience_score'] - url = movie['links']['alternate'] - - if critics_score == -1: - return - - reviews = http.get_json(movie_reviews_url % movie_id, apikey=api_key, review_type='all') - review_count = reviews['total'] - - fresh = critics_score * review_count / 100 - rotten = review_count - fresh - - return u"{} - Critics Rating: \x02{}%\x02 ({} liked, {} disliked) " \ - "Audience Rating: \x02{}%\x02 - {}".format(title, critics_score, fresh, rotten, audience_score, url) diff --git a/disabled_stuff/rss.py b/disabled_stuff/rss.py deleted file mode 100644 index f7ed1c4..0000000 --- a/disabled_stuff/rss.py +++ /dev/null @@ -1,40 +0,0 @@ -from util import hook, http, web, text - - -@hook.command("feed") -@hook.command -def rss(inp, message=None): - """rss <feed> -- Gets the first three items from the RSS feed <feed>.""" - limit = 3 - - # preset news feeds - strip = inp.lower().strip() - if strip == "bukkit": - feed = "http://dl.bukkit.org/downloads/craftbukkit/feeds/latest-rb.rss" - limit = 1 - elif strip == "xkcd": - feed = "http://xkcd.com/rss.xml" - elif strip == "ars": - feed = "http://feeds.arstechnica.com/arstechnica/index" - else: - feed = inp - - query = "SELECT title, link FROM rss WHERE url=@feed LIMIT @limit" - result = web.query(query, {"feed": feed, "limit": limit}) - - if not result.rows: - return "Could not find/read RSS feed." 
- - for row in result.rows: - title = text.truncate_str(row["title"], 100) - try: - link = web.isgd(row["link"]) - except (web.ShortenError, http.HTTPError, http.URLError): - link = row["link"] - message(u"{} - {}".format(title, link)) - - -@hook.command(autohelp=False) -def rb(inp, message=None): - """rb -- Shows the latest Craftbukkit recommended build""" - rss("bukkit", message) diff --git a/disabled_stuff/shorten.py b/disabled_stuff/shorten.py deleted file mode 100644 index 39d993b..0000000 --- a/disabled_stuff/shorten.py +++ /dev/null @@ -1,11 +0,0 @@ -from util import hook, http, web - - -@hook.command -def shorten(inp): - """shorten <url> - Makes an is.gd shortlink to the url provided.""" - - try: - return web.isgd(inp) - except (web.ShortenError, http.HTTPError) as error: - return error diff --git a/disabled_stuff/slap.py b/disabled_stuff/slap.py deleted file mode 100644 index 37dfbbd..0000000 --- a/disabled_stuff/slap.py +++ /dev/null @@ -1,33 +0,0 @@ -import json - -from util import hook, textgen - - -def get_generator(_json, variables): - data = json.loads(_json) - return textgen.TextGenerator(data["templates"], - data["parts"], variables=variables) - - -@hook.command -def slap(inp, action=None, nick=None, conn=None, notice=None): - """slap <user> -- Makes the bot slap <user>.""" - target = inp.strip() - - if " " in target: - notice("Invalid username!") - return - - # if the user is trying to make the bot slap itself, slap them - if target.lower() == conn.nick.lower() or target.lower() == "itself": - target = nick - - variables = { - "user": target - } - - with open("plugins/data/slaps.json") as f: - generator = get_generator(f.read(), variables) - - # act out the message - action(generator.generate_string()) diff --git a/disabled_stuff/slogan.py b/disabled_stuff/slogan.py deleted file mode 100644 index 279c41d..0000000 --- a/disabled_stuff/slogan.py +++ /dev/null @@ -1,18 +0,0 @@ -import random - -from util import hook, text - - -with 
open("plugins/data/slogans.txt") as f: - slogans = [line.strip() for line in f.readlines() - if not line.startswith("//")] - - -@hook.command -def slogan(inp): - """slogan <word> -- Makes a slogan for <word>.""" - out = random.choice(slogans) - if inp.lower() and out.startswith("<text>"): - inp = text.capitalize_first(inp) - - return out.replace('<text>', inp) diff --git a/disabled_stuff/snopes.py b/disabled_stuff/snopes.py deleted file mode 100644 index 9850a68..0000000 --- a/disabled_stuff/snopes.py +++ /dev/null @@ -1,34 +0,0 @@ -import re - -from util import hook, http - - -search_url = "http://search.atomz.com/search/?sp_a=00062d45-sp00000000" - - -@hook.command -def snopes(inp): - """snopes <topic> -- Searches snopes for an urban legend about <topic>.""" - - search_page = http.get_html(search_url, sp_q=inp, sp_c="1") - result_urls = search_page.xpath("//a[@target='_self']/@href") - - if not result_urls: - return "no matching pages found" - - snopes_page = http.get_html(result_urls[0]) - snopes_text = snopes_page.text_content() - - claim = re.search(r"Claim: .*", snopes_text).group(0).strip() - status = re.search(r"Status: .*", snopes_text) - - if status is not None: - status = status.group(0).strip() - else: # new-style statuses - status = "Status: %s." 
% re.search(r"FALSE|TRUE|MIXTURE|UNDETERMINED", - snopes_text).group(0).title() - - claim = re.sub(r"[\s\xa0]+", " ", claim) # compress whitespace - status = re.sub(r"[\s\xa0]+", " ", status) - - return "{} {} {}".format(claim, status, result_urls[0]) diff --git a/disabled_stuff/soundcloud.py b/disabled_stuff/soundcloud.py deleted file mode 100644 index d31f103..0000000 --- a/disabled_stuff/soundcloud.py +++ /dev/null @@ -1,50 +0,0 @@ -from urllib import urlencode -import re - -from util import hook, http, web, text - - -sc_re = (r'(.*:)//(www.)?(soundcloud.com)(.*)', re.I) -api_url = "http://api.soundcloud.com" -sndsc_re = (r'(.*:)//(www.)?(snd.sc)(.*)', re.I) - - -def soundcloud(url, api_key): - data = http.get_json(api_url + '/resolve.json?' + urlencode({'url': url, 'client_id': api_key})) - - if data['description']: - desc = u": {} ".format(text.truncate_str(data['description'], 50)) - else: - desc = "" - if data['genre']: - genre = u"- Genre: \x02{}\x02 ".format(data['genre']) - else: - genre = "" - - url = web.try_isgd(data['permalink_url']) - - return u"SoundCloud track: \x02{}\x02 by \x02{}\x02 {}{}- {} plays, {} downloads, {} comments - {}".format( - data['title'], data['user']['username'], desc, genre, data['playback_count'], data['download_count'], - data['comment_count'], url) - - -@hook.regex(*sc_re) -def soundcloud_url(match, bot=None): - api_key = bot.config.get("api_keys", {}).get("soundcloud") - if not api_key: - print "Error: no api key set" - return None - url = match.group(1).split(' ')[-1] + "//" + (match.group(2) if match.group(2) else "") + match.group(3) + \ - match.group(4).split(' ')[0] - return soundcloud(url, api_key) - - -@hook.regex(*sndsc_re) -def sndsc_url(match, bot=None): - api_key = bot.config.get("api_keys", {}).get("soundcloud") - if not api_key: - print "Error: no api key set" - return None - url = match.group(1).split(' ')[-1] + "//" + (match.group(2) if match.group(2) else "") + match.group(3) + \ - match.group(4).split(' 
')[0] - return soundcloud(http.open(url).url, api_key) diff --git a/disabled_stuff/spellcheck.py b/disabled_stuff/spellcheck.py deleted file mode 100644 index 1630a0d..0000000 --- a/disabled_stuff/spellcheck.py +++ /dev/null @@ -1,47 +0,0 @@ -from enchant.checker import SpellChecker -import enchant - -from util import hook - - -locale = "en_US" - - -@hook.command -def spell(inp): - """spell <word/sentence> -- Check spelling of a word or sentence.""" - - if not enchant.dict_exists(locale): - return "Could not find dictionary: {}".format(locale) - - if len(inp.split(" ")) > 1: - # input is a sentence - checker = SpellChecker(locale) - checker.set_text(inp) - - offset = 0 - for err in checker: - # find the location of the incorrect word - start = err.wordpos + offset - finish = start + len(err.word) - # get some suggestions for it - suggestions = err.suggest() - s_string = '/'.join(suggestions[:3]) - s_string = "\x02{}\x02".format(s_string) - # calculate the offset for the next word - offset = (offset + len(s_string)) - len(err.word) - # replace the word with the suggestions - inp = inp[:start] + s_string + inp[finish:] - return inp - else: - # input is a word - dictionary = enchant.Dict(locale) - is_correct = dictionary.check(inp) - suggestions = dictionary.suggest(inp) - s_string = ', '.join(suggestions[:10]) - if is_correct: - return '"{}" appears to be \x02valid\x02! ' \ - '(suggestions: {})'.format(inp, s_string) - else: - return '"{}" appears to be \x02invalid\x02! 
' \ - '(suggestions: {})'.format(inp, s_string) diff --git a/disabled_stuff/spotify.py b/disabled_stuff/spotify.py deleted file mode 100644 index 9897235..0000000 --- a/disabled_stuff/spotify.py +++ /dev/null @@ -1,106 +0,0 @@ -import re -from urllib import urlencode - -from util import hook, http, web - -gateway = 'http://open.spotify.com/{}/{}' # http spotify gw address -spuri = 'spotify:{}:{}' - -spotify_re = (r'(spotify:(track|album|artist|user):([a-zA-Z0-9]+))', re.I) -http_re = (r'(open\.spotify\.com\/(track|album|artist|user)\/' - '([a-zA-Z0-9]+))', re.I) - - -def sptfy(inp, sptfy=False): - if sptfy: - shortenurl = "http://sptfy.com/index.php" - data = urlencode({'longUrl': inp, 'shortUrlDomain': 1, 'submitted': 1, "shortUrlFolder": 6, "customUrl": "", - "shortUrlPassword": "", "shortUrlExpiryDate": "", "shortUrlUses": 0, "shortUrlType": 0}) - try: - soup = http.get_soup(shortenurl, post_data=data, cookies=True) - except: - return inp - try: - link = soup.find('div', {'class': 'resultLink'}).text.strip() - return link - except: - message = "Unable to shorten URL: %s" % \ - soup.find('div', {'class': 'messagebox_text'}).find('p').text.split("<br/>")[0] - return message - else: - return web.try_isgd(inp) - - -@hook.command('sptrack') -@hook.command -def spotify(inp): - """spotify <song> -- Search Spotify for <song>""" - try: - data = http.get_json("http://ws.spotify.com/search/1/track.json", q=inp.strip()) - except Exception as e: - return "Could not get track information: {}".format(e) - - try: - type, id = data["tracks"][0]["href"].split(":")[1:] - except IndexError: - return "Could not find track." 
- url = sptfy(gateway.format(type, id)) - return u"\x02{}\x02 by \x02{}\x02 - {}".format(data["tracks"][0]["name"], - data["tracks"][0]["artists"][0]["name"], url) - - -@hook.command -def spalbum(inp): - """spalbum <album> -- Search Spotify for <album>""" - try: - data = http.get_json("http://ws.spotify.com/search/1/album.json", q=inp.strip()) - except Exception as e: - return "Could not get album information: {}".format(e) - - try: - type, id = data["albums"][0]["href"].split(":")[1:] - except IndexError: - return "Could not find album." - url = sptfy(gateway.format(type, id)) - return u"\x02{}\x02 by \x02{}\x02 - {}".format(data["albums"][0]["name"], - data["albums"][0]["artists"][0]["name"], url) - - -@hook.command -def spartist(inp): - """spartist <artist> -- Search Spotify for <artist>""" - try: - data = http.get_json("http://ws.spotify.com/search/1/artist.json", q=inp.strip()) - except Exception as e: - return "Could not get artist information: {}".format(e) - - try: - type, id = data["artists"][0]["href"].split(":")[1:] - except IndexError: - return "Could not find artist." 
- url = sptfy(gateway.format(type, id)) - return u"\x02{}\x02 - {}".format(data["artists"][0]["name"], url) - - -@hook.regex(*http_re) -@hook.regex(*spotify_re) -def spotify_url(match): - type = match.group(2) - spotify_id = match.group(3) - url = spuri.format(type, spotify_id) - # no error catching here, if the API is down fail silently - data = http.get_json("http://ws.spotify.com/lookup/1/.json", uri=url) - if type == "track": - name = data["track"]["name"] - artist = data["track"]["artists"][0]["name"] - album = data["track"]["album"]["name"] - return u"Spotify Track: \x02{}\x02 by \x02{}\x02 from the album \x02{}\x02 - {}".format(name, artist, - album, sptfy( - gateway.format(type, spotify_id))) - elif type == "artist": - return u"Spotify Artist: \x02{}\x02 - {}".format(data["artist"]["name"], - sptfy(gateway.format(type, spotify_id))) - elif type == "album": - return u"Spotify Album: \x02{}\x02 - \x02{}\x02 - {}".format(data["album"]["artist"], - data["album"]["name"], - sptfy(gateway.format(type, spotify_id))) diff --git a/disabled_stuff/status.py b/disabled_stuff/status.py deleted file mode 100644 index 977ac8e..0000000 --- a/disabled_stuff/status.py +++ /dev/null @@ -1,53 +0,0 @@ -from util import hook -import re -import time -from subprocess import check_output - -def getstatus(): - try: - return check_output("sudo /bin/chch-status", shell=True).strip("\n").decode("utf-8") - except: - return "unbekannt" - -@hook.command("status", autohelp=False) -def cmd_status(inp, reply=None): - """status - Return the door status""" - reply("Chaostreff Status: %s" % (getstatus())) - -@hook.event("TOPIC") -def topic_update(info, conn=None, chan=None): - """topic_update -- Update the topic on TOPIC command""" - status = getstatus() - - topic = info[-1] - - sstr = "Status: %s" % (status) - if sstr in topic: - return - - if 'Status: ' in topic: - new_topic = re.sub("Status: [^ ]*", sstr, topic) - else: - new_topic = "%s | %s" % (topic.rstrip(' |'), sstr) - - if new_topic != 
topic: - conn.send("TOPIC %s :%s" % (chan, new_topic)) - -@hook.event("332") -def e332_update(info, conn=None, chan=None): - """e332_update -- run after current topic was requested""" - chan = info[1] - topic_update(info, conn=conn, chan=chan) - -@hook.singlethread -@hook.event("353") -def e353_update(info, conn=None, chan=None): - """e353_update -- runs after a channel was joined""" - chan = info[2] - if chan.lower() == "#chaoschemnitz": - conn.send("PRIVMSG Chanserv :op #chaoschemnitz") - - while True: - conn.send("TOPIC %s" % (chan)) - time.sleep(60) - diff --git a/disabled_stuff/steam.py b/disabled_stuff/steam.py deleted file mode 100644 index f3814db..0000000 --- a/disabled_stuff/steam.py +++ /dev/null @@ -1,75 +0,0 @@ -import re - -from bs4 import BeautifulSoup, NavigableString, Tag - -from util import hook, http, web -from util.text import truncate_str - - -steam_re = (r'(.*:)//(store.steampowered.com)(:[0-9]+)?(.*)', re.I) - - -def get_steam_info(url): - page = http.get(url) - soup = BeautifulSoup(page, 'lxml', from_encoding="utf-8") - - data = {} - - data["name"] = soup.find('div', {'class': 'apphub_AppName'}).text - data["desc"] = truncate_str(soup.find('meta', {'name': 'description'})['content'].strip(), 80) - - # get the element details_block - details = soup.find('div', {'class': 'details_block'}) - - # loop over every <b></b> tag in details_block - for b in details.findAll('b'): - # get the contents of the <b></b> tag, which is our title - title = b.text.lower().replace(":", "") - if title == "languages": - # we have all we need! 
- break - - # find the next element directly after the <b></b> tag - next_element = b.nextSibling - if next_element: - # if the element is some text - if isinstance(next_element, NavigableString): - text = next_element.string.strip() - if text: - # we found valid text, save it and continue the loop - data[title] = text - continue - else: - # the text is blank - sometimes this means there are - # useless spaces or tabs between the <b> and <a> tags. - # so we find the next <a> tag and carry on to the next - # bit of code below - next_element = next_element.find_next('a', href=True) - - # if the element is an <a></a> tag - if isinstance(next_element, Tag) and next_element.name == 'a': - text = next_element.string.strip() - if text: - # we found valid text (in the <a></a> tag), - # save it and continue the loop - data[title] = text - continue - - data["price"] = soup.find('div', {'class': 'game_purchase_price price'}).text.strip() - - return u"\x02{name}\x02: {desc}, \x02Genre\x02: {genre}, \x02Release Date\x02: {release date}," \ - u" \x02Price\x02: {price}".format(**data) - - -@hook.regex(*steam_re) -def steam_url(match): - return get_steam_info("http://store.steampowered.com" + match.group(4)) - - -@hook.command -def steam(inp): - """steam [search] - Search for specified game/trailer/DLC""" - page = http.get("http://store.steampowered.com/search/?term=" + inp) - soup = BeautifulSoup(page, 'lxml', from_encoding="utf-8") - result = soup.find('a', {'class': 'search_result_row'}) - return get_steam_info(result['href']) + " - " + web.isgd(result['href']) diff --git a/disabled_stuff/steam_calc.py b/disabled_stuff/steam_calc.py deleted file mode 100644 index 6684eba..0000000 --- a/disabled_stuff/steam_calc.py +++ /dev/null @@ -1,120 +0,0 @@ -import csv -import StringIO - -from util import hook, http, text - - -gauge_url = "http://www.mysteamgauge.com/search?username={}" - -api_url = "http://mysteamgauge.com/user/{}.csv" -steam_api_url = 
"http://steamcommunity.com/id/{}/?xml=1" - - -def refresh_data(name): - http.get(gauge_url.format(name), timeout=25, get_method='HEAD') - - -def get_data(name): - return http.get(api_url.format(name)) - - -def is_number(s): - try: - float(s) - return True - except ValueError: - return False - - -def unicode_dictreader(utf8_data, **kwargs): - csv_reader = csv.DictReader(utf8_data, **kwargs) - for row in csv_reader: - yield dict([(key.lower(), unicode(value, 'utf-8')) for key, value in row.iteritems()]) - - -@hook.command('sc') -@hook.command -def steamcalc(inp, reply=None): - """steamcalc <username> [currency] - Gets value of steam account and - total hours played. Uses steamcommunity.com/id/<nickname>. """ - - # check if the user asked us to force reload - force_reload = inp.endswith(" forcereload") - if force_reload: - name = inp[:-12].strip().lower() - else: - name = inp.strip() - - if force_reload: - try: - reply("Collecting data, this may take a while.") - refresh_data(name) - request = get_data(name) - do_refresh = False - except (http.HTTPError, http.URLError): - return "Could not get data for this user." - else: - try: - request = get_data(name) - do_refresh = True - except (http.HTTPError, http.URLError): - try: - reply("Collecting data, this may take a while.") - refresh_data(name) - request = get_data(name) - do_refresh = False - except (http.HTTPError, http.URLError): - return "Could not get data for this user." - - csv_data = StringIO.StringIO(request) # we use StringIO because CSV can't read a string - reader = unicode_dictreader(csv_data) - - # put the games in a list - games = [] - for row in reader: - games.append(row) - - data = {} - - # basic information - steam_profile = http.get_xml(steam_api_url.format(name)) - try: - data["name"] = steam_profile.find('steamID').text - online_state = steam_profile.find('stateMessage').text - except AttributeError: - return "Could not get data for this user." 
- - online_state = online_state.replace("<br/>", ": ") # will make this pretty later - data["state"] = text.strip_html(online_state) - - # work out the average metascore for all games - ms = [float(game["metascore"]) for game in games if is_number(game["metascore"])] - metascore = float(sum(ms)) / len(ms) if len(ms) > 0 else float('nan') - data["average_metascore"] = "{0:.1f}".format(metascore) - - # work out the totals - data["games"] = len(games) - - total_value = sum([float(game["value"]) for game in games if is_number(game["value"])]) - data["value"] = str(int(round(total_value))) - - # work out the total size - total_size = 0.0 - - for game in games: - if not is_number(game["size"]): - continue - - if game["unit"] == "GB": - total_size += float(game["size"]) - else: - total_size += float(game["size"]) / 1024 - - data["size"] = "{0:.1f}".format(total_size) - - reply("{name} ({state}) has {games} games with a total value of ${value}" - " and a total size of {size}GB! The average metascore for these" - " games is {average_metascore}.".format(**data)) - - if do_refresh: - refresh_data(name) diff --git a/disabled_stuff/stock.py b/disabled_stuff/stock.py deleted file mode 100644 index aedf051..0000000 --- a/disabled_stuff/stock.py +++ /dev/null @@ -1,30 +0,0 @@ -from util import hook, web - - -@hook.command -def stock(inp): - """stock <symbol> -- gets stock information""" - sym = inp.strip().lower() - - query = "SELECT * FROM yahoo.finance.quote WHERE symbol=@symbol LIMIT 1" - quote = web.query(query, {"symbol": sym}).one() - - # if we don't get a company name back, the symbol doesn't match a company - if quote['Change'] is None: - return "Unknown ticker symbol: {}".format(sym) - - change = float(quote['Change']) - price = float(quote['LastTradePriceOnly']) - - if change < 0: - quote['color'] = "5" - else: - quote['color'] = "3" - - quote['PercentChange'] = 100 * change / (price - change) - print quote - - return u"\x02{Name}\x02 (\x02{symbol}\x02) - 
{LastTradePriceOnly} " \ - "\x03{color}{Change} ({PercentChange:.2f}%)\x03 " \ - "Day Range: {DaysRange} " \ - "MCAP: {MarketCapitalization}".format(**quote) diff --git a/disabled_stuff/suggest.py b/disabled_stuff/suggest.py deleted file mode 100644 index ec66144..0000000 --- a/disabled_stuff/suggest.py +++ /dev/null @@ -1,19 +0,0 @@ -from util import hook, http, text -from bs4 import BeautifulSoup - - -@hook.command -def suggest(inp): - """suggest <phrase> -- Gets suggested phrases for a google search""" - suggestions = http.get_json('http://suggestqueries.google.com/complete/search', client='firefox', q=inp)[1] - - if not suggestions: - return 'no suggestions found' - - out = u", ".join(suggestions) - - # defuckify text (might not be needed now, but I'll keep it) - soup = BeautifulSoup(out) - out = soup.get_text() - - return text.truncate_str(out, 200) diff --git a/disabled_stuff/system.py b/disabled_stuff/system.py deleted file mode 100644 index 08891fd..0000000 --- a/disabled_stuff/system.py +++ /dev/null @@ -1,76 +0,0 @@ -import os -import re -import time -import platform -from datetime import timedelta - -from util import hook - - -def convert_kilobytes(kilobytes): - if kilobytes >= 1024: - megabytes = kilobytes / 1024 - size = '%.2f MB' % megabytes - else: - size = '%.2f KB' % kilobytes - return size - - -@hook.command(autohelp=False) -def system(inp): - """system -- Retrieves information about the host system.""" - hostname = platform.node() - os = platform.platform() - python_imp = platform.python_implementation() - python_ver = platform.python_version() - architecture = '-'.join(platform.architecture()) - cpu = platform.machine() - return "Hostname: \x02{}\x02, Operating System: \x02{}\x02, Python " \ - "Version: \x02{} {}\x02, Architecture: \x02{}\x02, CPU: \x02{}" \ - "\x02".format(hostname, os, python_imp, python_ver, architecture, cpu) - - -@hook.command(autohelp=False) -def memory(inp): - """memory -- Displays the bot's current memory usage.""" - if 
os.name == "posix": - # get process info - status_file = open('/proc/self/status').read() - s = dict(re.findall(r'^(\w+):\s*(.*)\s*$', status_file, re.M)) - # get the data we need and process it - data = s['VmRSS'], s['VmSize'], s['VmPeak'], s['VmStk'], s['VmData'] - data = [float(i.replace(' kB', '')) for i in data] - strings = [convert_kilobytes(i) for i in data] - # prepare the output - out = "Threads: \x02{}\x02, Real Memory: \x02{}\x02, Allocated Memory: \x02{}\x02, Peak " \ - "Allocated Memory: \x02{}\x02, Stack Size: \x02{}\x02, Heap " \ - "Size: \x02{}\x02".format(s['Threads'], strings[0], strings[1], strings[2], - strings[3], strings[4]) - # return output - return out - - elif os.name == "nt": - cmd = 'tasklist /FI "PID eq %s" /FO CSV /NH' % os.getpid() - out = os.popen(cmd).read() - memory = 0 - for amount in re.findall(r'([,0-9]+) K', out): - memory += float(amount.replace(',', '')) - memory = convert_kilobytes(memory) - return "Memory Usage: \x02{}\x02".format(memory) - - else: - return "Sorry, this command is not supported on your OS." 
- - -@hook.command(autohelp=False) -def uptime(inp, bot=None): - """uptime -- Shows the bot's uptime.""" - uptime_raw = round(time.time() - bot.start_time) - uptime = timedelta(seconds=uptime_raw) - return "Uptime: \x02{}\x02".format(uptime) - - -@hook.command(autohelp=False) -def pid(inp): - """pid -- Prints the bot's PID.""" - return "PID: \x02{}\x02".format(os.getpid()) diff --git a/disabled_stuff/tell.py b/disabled_stuff/tell.py deleted file mode 100644 index 52a0aa1..0000000 --- a/disabled_stuff/tell.py +++ /dev/null @@ -1,121 +0,0 @@ -""" tell.py: written by sklnd in July 2009 - 2010.01.25 - modified by Scaevolus""" - -import time -import re - -from util import hook, timesince - -db_ready = [] - - -def db_init(db, conn): - """Check that our db has the tell table, create it if not.""" - global db_ready - if not conn.name in db_ready: - db.execute("create table if not exists tell" - "(user_to, user_from, message, chan, time," - "primary key(user_to, message))") - db.commit() - db_ready.append(conn.name) - - -def get_tells(db, user_to): - return db.execute("select user_from, message, time, chan from tell where" - " user_to=lower(?) order by time", - (user_to.lower(),)).fetchall() - - -@hook.singlethread -@hook.event('PRIVMSG') -def tellinput(inp, input=None, notice=None, db=None, nick=None, conn=None): - if 'showtells' in input.msg.lower(): - return - - db_init(db, conn) - - tells = get_tells(db, nick) - - if tells: - user_from, message, time, chan = tells[0] - reltime = timesince.timesince(time) - - reply = "{} sent you a message {} ago from {}: {}".format(user_from, reltime, chan, - message) - if len(tells) > 1: - reply += " (+{} more, {}showtells to view)".format(len(tells) - 1, conn.conf["command_prefix"]) - - db.execute("delete from tell where user_to=lower(?) 
and message=?", - (nick, message)) - db.commit() - notice(reply) - - -@hook.command(autohelp=False) -def showtells(inp, nick='', chan='', notice=None, db=None, conn=None): - """showtells -- View all pending tell messages (sent in a notice).""" - - db_init(db, conn) - - tells = get_tells(db, nick) - - if not tells: - notice("You have no pending tells.") - return - - for tell in tells: - user_from, message, time, chan = tell - past = timesince.timesince(time) - notice("{} sent you a message {} ago from {}: {}".format(user_from, past, chan, message)) - - db.execute("delete from tell where user_to=lower(?)", - (nick,)) - db.commit() - - -@hook.command -def tell(inp, nick='', chan='', db=None, input=None, notice=None, conn=None): - """tell <nick> <message> -- Relay <message> to <nick> when <nick> is around.""" - query = inp.split(' ', 1) - - if len(query) != 2: - notice(tell.__doc__) - return - - user_to = query[0].lower() - message = query[1].strip() - user_from = nick - - if chan.lower() == user_from.lower(): - chan = 'a pm' - - if user_to == user_from.lower(): - notice("Have you looked in a mirror lately?") - return - - if user_to.lower() == input.conn.nick.lower(): - # user is looking for us, being a smart-ass - notice("Thanks for the message, {}!".format(user_from)) - return - - if not re.match("^[A-Za-z0-9_|.\-\]\[]*$", user_to.lower()): - notice("I can't send a message to that user!") - return - - db_init(db, conn) - - if db.execute("select count() from tell where user_to=?", - (user_to,)).fetchone()[0] >= 10: - notice("That person has too many messages queued.") - return - - try: - db.execute("insert into tell(user_to, user_from, message, chan," - "time) values(?,?,?,?,?)", (user_to, user_from, message, - chan, time.time())) - db.commit() - except db.IntegrityError: - notice("Message has already been queued.") - return - - notice("Your message has been sent!") diff --git a/disabled_stuff/time_plugin.py b/disabled_stuff/time_plugin.py deleted file mode 100644 
index 885208b..0000000 --- a/disabled_stuff/time_plugin.py +++ /dev/null @@ -1,62 +0,0 @@ -import time - -from util import hook, http -from util.text import capitalize_first - - -api_url = 'http://api.wolframalpha.com/v2/query?format=plaintext' - - -@hook.command("time") -def time_command(inp, bot=None): - """time <area> -- Gets the time in <area>""" - - query = "current time in {}".format(inp) - - api_key = bot.config.get("api_keys", {}).get("wolframalpha", None) - if not api_key: - return "error: no wolfram alpha api key set" - - request = http.get_xml(api_url, input=query, appid=api_key) - current_time = " ".join(request.xpath("//pod[@title='Result']/subpod/plaintext/text()")) - current_time = current_time.replace(" | ", ", ") - - if current_time: - # nice place name for UNIX time - if inp.lower() == "unix": - place = "Unix Epoch" - else: - place = capitalize_first(" ".join(request.xpath("//pod[@" - "title='Input interpretation']/subpod/plaintext/text()"))[ - 16:]) - return "{} - \x02{}\x02".format(current_time, place) - else: - return "Could not get the time for '{}'.".format(inp) - - -@hook.command(autohelp=False) -def beats(inp): - """beats -- Gets the current time in .beats (Swatch Internet Time). """ - - if inp.lower() == "wut": - return "Instead of hours and minutes, the mean solar day is divided " \ - "up into 1000 parts called \".beats\". Each .beat lasts 1 minute and" \ - " 26.4 seconds. Times are notated as a 3-digit number out of 1000 af" \ - "ter midnight. So, @248 would indicate a time 248 .beats after midni" \ - "ght representing 248/1000 of a day, just over 5 hours and 57 minute" \ - "s. There are no timezones." 
- elif inp.lower() == "guide": - return "1 day = 1000 .beats, 1 hour = 41.666 .beats, 1 min = 0.6944 .beats, 1 second = 0.01157 .beats" - - t = time.gmtime() - h, m, s = t.tm_hour, t.tm_min, t.tm_sec - - utc = 3600 * h + 60 * m + s - bmt = utc + 3600 # Biel Mean Time (BMT) - - beat = bmt / 86.4 - - if beat > 1000: - beat -= 1000 - - return "Swatch Internet Time: @%06.2f" % beat diff --git a/disabled_stuff/title.py b/disabled_stuff/title.py deleted file mode 100644 index 4264188..0000000 --- a/disabled_stuff/title.py +++ /dev/null @@ -1,23 +0,0 @@ -from bs4 import BeautifulSoup - -from util import hook, http, urlnorm - - -@hook.command -def title(inp): - """title <url> -- gets the title of a web page""" - url = urlnorm.normalize(inp.encode('utf-8'), assume_scheme="http") - - try: - page = http.open(url) - real_url = page.geturl() - soup = BeautifulSoup(page.read()) - except (http.HTTPError, http.URLError): - return "Could not fetch page." - - page_title = soup.find('title').contents[0] - - if not page_title: - return "Could not find title." - - return u"{} [{}]".format(page_title, real_url) diff --git a/disabled_stuff/tvdb.py b/disabled_stuff/tvdb.py deleted file mode 100644 index b5fa12f..0000000 --- a/disabled_stuff/tvdb.py +++ /dev/null @@ -1,154 +0,0 @@ -import datetime - -from util import hook, http - - -base_url = "http://thetvdb.com/api/" -api_key = "469B73127CA0C411" - - -def get_episodes_for_series(series_name, api_key): - res = {"error": None, "ended": False, "episodes": None, "name": None} - # http://thetvdb.com/wiki/index.php/API:GetSeries - try: - query = http.get_xml(base_url + 'GetSeries.php', seriesname=series_name) - except http.URLError: - res["error"] = "error contacting thetvdb.com" - return res - - series_id = query.xpath('//seriesid/text()') - - if not series_id: - res["error"] = "Unknown TV series. 
(using www.thetvdb.com)" - return res - - series_id = series_id[0] - - try: - series = http.get_xml(base_url + '%s/series/%s/all/en.xml' % (api_key, series_id)) - except http.URLError: - res["error"] = "Error contacting thetvdb.com." - return res - - series_name = series.xpath('//SeriesName/text()')[0] - - if series.xpath('//Status/text()')[0] == 'Ended': - res["ended"] = True - - res["episodes"] = series.xpath('//Episode') - res["name"] = series_name - return res - - -def get_episode_info(episode, api_key): - first_aired = episode.findtext("FirstAired") - - try: - air_date = datetime.date(*map(int, first_aired.split('-'))) - except (ValueError, TypeError): - return None - - episode_num = "S%02dE%02d" % (int(episode.findtext("SeasonNumber")), - int(episode.findtext("EpisodeNumber"))) - - episode_name = episode.findtext("EpisodeName") - # in the event of an unannounced episode title, users either leave the - # field out (None) or fill it with TBA - if episode_name == "TBA": - episode_name = None - - episode_desc = '{}'.format(episode_num) - if episode_name: - episode_desc += ' - {}'.format(episode_name) - return first_aired, air_date, episode_desc - - -@hook.command -@hook.command('tv') -def tv_next(inp, bot=None): - """tv <series> -- Get the next episode of <series>.""" - - api_key = bot.config.get("api_keys", {}).get("tvdb", None) - if api_key is None: - return "error: no api key set" - episodes = get_episodes_for_series(inp, api_key) - - if episodes["error"]: - return episodes["error"] - - series_name = episodes["name"] - ended = episodes["ended"] - episodes = episodes["episodes"] - - if ended: - return "{} has ended.".format(series_name) - - next_eps = [] - today = datetime.date.today() - - for episode in reversed(episodes): - ep_info = get_episode_info(episode, api_key) - - if ep_info is None: - continue - - (first_aired, air_date, episode_desc) = ep_info - - if air_date > today: - next_eps = ['{} ({})'.format(first_aired, episode_desc)] - elif air_date == 
today: - next_eps = ['Today ({})'.format(episode_desc)] + next_eps - else: - # we're iterating in reverse order with newest episodes last - # so, as soon as we're past today, break out of loop - break - - if not next_eps: - return "There are no new episodes scheduled for {}.".format(series_name) - - if len(next_eps) == 1: - return "The next episode of {} airs {}".format(series_name, next_eps[0]) - else: - next_eps = ', '.join(next_eps) - return "The next episodes of {}: {}".format(series_name, next_eps) - - -@hook.command -@hook.command('tv_prev') -def tv_last(inp, bot=None): - """tv_last <series> -- Gets the most recently aired episode of <series>.""" - - api_key = bot.config.get("api_keys", {}).get("tvdb", None) - if api_key is None: - return "error: no api key set" - episodes = get_episodes_for_series(inp, api_key) - - if episodes["error"]: - return episodes["error"] - - series_name = episodes["name"] - ended = episodes["ended"] - episodes = episodes["episodes"] - - prev_ep = None - today = datetime.date.today() - - for episode in reversed(episodes): - ep_info = get_episode_info(episode, api_key) - - if ep_info is None: - continue - - (first_aired, air_date, episode_desc) = ep_info - - if air_date < today: - #iterating in reverse order, so the first episode encountered - #before today was the most recently aired - prev_ep = '{} ({})'.format(first_aired, episode_desc) - break - - if not prev_ep: - return "There are no previously aired episodes for {}.".format(series_name) - if ended: - return '{} ended. 
The last episode aired {}.'.format(series_name, prev_ep) - return "The last episode of {} aired {}.".format(series_name, prev_ep) diff --git a/disabled_stuff/twitch.py b/disabled_stuff/twitch.py deleted file mode 100644 index 7e1a56a..0000000 --- a/disabled_stuff/twitch.py +++ /dev/null @@ -1,115 +0,0 @@ -import re -from HTMLParser import HTMLParser - -from util import hook, http - - -twitch_re = (r'(.*:)//(twitch.tv|www.twitch.tv)(:[0-9]+)?(.*)', re.I) -multitwitch_re = (r'(.*:)//(www.multitwitch.tv|multitwitch.tv)/(.*)', re.I) - - -def test(s): - valid = set('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_/') - return set(s) <= valid - - -def truncate(msg): - nmsg = msg.split(" ") - out = None - x = 0 - for i in nmsg: - if x <= 7: - if out: - out = out + " " + nmsg[x] - else: - out = nmsg[x] - x += 1 - if x <= 7: - return out - else: - return out + "..." - - -@hook.regex(*multitwitch_re) -def multitwitch_url(match): - usernames = match.group(3).split("/") - out = "" - for i in usernames: - if not test(i): - print "Not a valid username" - return None - if out == "": - out = twitch_lookup(i) - else: - out = out + " \x02|\x02 " + twitch_lookup(i) - return out - - -@hook.regex(*twitch_re) -def twitch_url(match): - bit = match.group(4).split("#")[0] - location = "/".join(bit.split("/")[1:]) - if not test(location): - print "Not a valid username" - return None - return twitch_lookup(location) - - -@hook.command('twitchviewers') -@hook.command -def twviewers(inp): - inp = inp.split("/")[-1] - if test(inp): - location = inp - else: - return "Not a valid channel name." - return twitch_lookup(location).split("(")[-1].split(")")[0].replace("Online now! 
", "") - - -def twitch_lookup(location): - locsplit = location.split("/") - if len(locsplit) > 1 and len(locsplit) == 3: - channel = locsplit[0] - type = locsplit[1] # should be b or c - id = locsplit[2] - else: - channel = locsplit[0] - type = None - id = None - h = HTMLParser() - fmt = "{}: {} playing {} ({})" # Title: nickname playing Game (x views) - if type and id: - if type == "b": # I haven't found an API to retrieve broadcast info - soup = http.get_soup("http://twitch.tv/" + location) - title = soup.find('span', {'class': 'real_title js-title'}).text - playing = soup.find('a', {'class': 'game js-game'}).text - views = soup.find('span', {'id': 'views-count'}).text + " view" - views = views + "s" if not views[0:2] == "1 " else views - return h.unescape(fmt.format(title, channel, playing, views)) - elif type == "c": - data = http.get_json("https://api.twitch.tv/kraken/videos/" + type + id) - title = data['title'] - playing = data['game'] - views = str(data['views']) + " view" - views = views + "s" if not views[0:2] == "1 " else views - return h.unescape(fmt.format(title, channel, playing, views)) - else: - data = http.get_json("http://api.justin.tv/api/stream/list.json?channel=" + channel) - if data and len(data) >= 1: - data = data[0] - title = data['title'] - playing = data['meta_game'] - viewers = "\x033\x02Online now!\x02\x0f " + str(data["channel_count"]) + " viewer" - print viewers - viewers = viewers + "s" if not " 1 view" in viewers else viewers - print viewers - return h.unescape(fmt.format(title, channel, playing, viewers)) - else: - try: - data = http.get_json("https://api.twitch.tv/kraken/channels/" + channel) - except: - return - title = data['status'] - playing = data['game'] - viewers = "\x034\x02Offline\x02\x0f" - return h.unescape(fmt.format(title, channel, playing, viewers)) diff --git a/disabled_stuff/twitter.py b/disabled_stuff/twitter.py deleted file mode 100644 index c83ea67..0000000 --- a/disabled_stuff/twitter.py +++ /dev/null @@ -1,178 
+0,0 @@ -import re -import random -from datetime import datetime - -import tweepy - -from util import hook, timesince - - -TWITTER_RE = (r"(?:(?:www.twitter.com|twitter.com)/(?:[-_a-zA-Z0-9]+)/status/)([0-9]+)", re.I) - - -def get_api(bot): - consumer_key = bot.config.get("api_keys", {}).get("twitter_consumer_key") - consumer_secret = bot.config.get("api_keys", {}).get("twitter_consumer_secret") - - oauth_token = bot.config.get("api_keys", {}).get("twitter_access_token") - oauth_secret = bot.config.get("api_keys", {}).get("twitter_access_secret") - - if not consumer_key: - return False - - auth = tweepy.OAuthHandler(consumer_key, consumer_secret) - auth.set_access_token(oauth_token, oauth_secret) - - return tweepy.API(auth) - - -@hook.regex(*TWITTER_RE) -def twitter_url(match, bot=None): - # Find the tweet ID from the URL - tweet_id = match.group(1) - - # Get the tweet using the tweepy API - api = get_api(bot) - if not api: - return - try: - tweet = api.get_status(tweet_id) - user = tweet.user - except tweepy.error.TweepError: - return - - # Format the return the text of the tweet - text = " ".join(tweet.text.split()) - - if user.verified: - prefix = u"\u2713" - else: - prefix = "" - - time = timesince.timesince(tweet.created_at, datetime.utcnow()) - - return u"{}@\x02{}\x02 ({}): {} ({} ago)".format(prefix, user.screen_name, user.name, text, time) - - -@hook.command("tw") -@hook.command("twatter") -@hook.command -def twitter(inp, bot=None): - """twitter <user> [n] -- Gets last/[n]th tweet from <user>""" - - api = get_api(bot) - if not api: - return "Error: No Twitter API details." - - if re.match(r'^\d+$', inp): - # user is getting a tweet by id - - try: - # get tweet by id - tweet = api.get_status(inp) - except tweepy.error.TweepError as e: - if e[0][0]['code'] == 34: - return "Could not find tweet." 
- else: - return u"Error {}: {}".format(e[0][0]['code'], e[0][0]['message']) - - user = tweet.user - - elif re.match(r'^\w{1,15}$', inp) or re.match(r'^\w{1,15}\s+\d+$', inp): - # user is getting a tweet by name - - if inp.find(' ') == -1: - username = inp - tweet_number = 0 - else: - username, tweet_number = inp.split() - tweet_number = int(tweet_number) - 1 - - if tweet_number > 300: - return "This command can only find the last \x02300\x02 tweets." - - try: - # try to get user by username - user = api.get_user(username) - except tweepy.error.TweepError as e: - if e[0][0]['code'] == 34: - return "Could not find user." - else: - return u"Error {}: {}".format(e[0][0]['code'], e[0][0]['message']) - - # get the users tweets - user_timeline = api.user_timeline(id=user.id, count=tweet_number + 1) - - # if the timeline is empty, return an error - if not user_timeline: - return u"The user \x02{}\x02 has no tweets.".format(user.screen_name) - - # grab the newest tweet from the users timeline - try: - tweet = user_timeline[tweet_number] - except IndexError: - tweet_count = len(user_timeline) - return u"The user \x02{}\x02 only has \x02{}\x02 tweets.".format(user.screen_name, tweet_count) - - elif re.match(r'^#\w+$', inp): - # user is searching by hashtag - search = api.search(inp) - - if not search: - return "No tweets found." - - tweet = random.choice(search) - user = tweet.user - else: - # ??? - return "Invalid Input" - - # Format the return the text of the tweet - text = " ".join(tweet.text.split()) - - if user.verified: - prefix = u"\u2713" - else: - prefix = "" - - time = timesince.timesince(tweet.created_at, datetime.utcnow()) - - return u"{}@\x02{}\x02 ({}): {} ({} ago)".format(prefix, user.screen_name, user.name, text, time) - - -@hook.command("twinfo") -@hook.command -def twuser(inp, bot=None): - """twuser <user> -- Get info on the Twitter user <user>""" - - api = get_api(bot) - if not api: - return "Error: No Twitter API details." 
- - try: - # try to get user by username - user = api.get_user(inp) - except tweepy.error.TweepError as e: - if e[0][0]['code'] == 34: - return "Could not find user." - else: - return "Unknown error" - - if user.verified: - prefix = u"\u2713" - else: - prefix = "" - - if user.location: - loc_str = u" is located in \x02{}\x02 and".format(user.location) - else: - loc_str = "" - - if user.description: - desc_str = u" The users description is \"{}\"".format(user.description) - else: - desc_str = "" - - return u"{}@\x02{}\x02 ({}){} has \x02{:,}\x02 tweets and \x02{:,}\x02 followers.{}" \ - "".format(prefix, user.screen_name, user.name, loc_str, user.statuses_count, user.followers_count, - desc_str) diff --git a/disabled_stuff/update.py b/disabled_stuff/update.py deleted file mode 100644 index 67e55f2..0000000 --- a/disabled_stuff/update.py +++ /dev/null @@ -1,43 +0,0 @@ -from git import Repo - - -from util import hook, web - -@hook.command -def update(inp, bot=None): - repo = Repo() - git = repo.git - try: - pull = git.pull() - except Exception as e: - return e - if "\n" in pull: - return web.haste(pull) - else: - return pull - - -@hook.command -def version(inp, bot=None): - repo = Repo() - - # get origin and fetch it - origin = repo.remotes.origin - info = origin.fetch() - - # get objects - head = repo.head - origin_head = info[0] - current_commit = head.commit - remote_commit = origin_head.commit - - if current_commit == remote_commit: - in_sync = True - else: - in_sync = False - - # output - return "Local \x02{}\x02 is at commit \x02{}\x02, remote \x02{}\x02 is at commit \x02{}\x02." 
\ - " You {} running the latest version.".format(head, current_commit.name_rev[:7], - origin_head, remote_commit.name_rev[:7], - "are" if in_sync else "are not") diff --git a/disabled_stuff/urban.py b/disabled_stuff/urban.py deleted file mode 100644 index 48da433..0000000 --- a/disabled_stuff/urban.py +++ /dev/null @@ -1,66 +0,0 @@ -import re -import random - -from util import hook, http, text - - -base_url = 'http://api.urbandictionary.com/v0' -define_url = base_url + "/define" -random_url = base_url + "/random" - -@hook.command('u', autohelp=False) -@hook.command(autohelp=False) -def urban(inp): - """urban <phrase> [id] -- Looks up <phrase> on urbandictionary.com.""" - - if inp: - # clean and split the input - inp = inp.lower().strip() - parts = inp.split() - - # if the last word is a number, set the ID to that number - if parts[-1].isdigit(): - id_num = int(parts[-1]) - # remove the ID from the input string - del parts[-1] - inp = " ".join(parts) - else: - id_num = 1 - - # fetch the definitions - page = http.get_json(define_url, term=inp, referer="http://m.urbandictionary.com") - - if page['result_type'] == 'no_results': - return 'Not found.' - else: - # get a random definition! - page = http.get_json(random_url, referer="http://m.urbandictionary.com") - id_num = None - - definitions = page['list'] - - if id_num: - # try getting the requested definition - try: - definition = definitions[id_num - 1] - - def_text = " ".join(definition['definition'].split()) # remove excess spaces - def_text = text.truncate_str(def_text, 200) - except IndexError: - return 'Not found.' 
- - url = definition['permalink'] - output = u"[%i/%i] %s :: %s" % \ - (id_num, len(definitions), def_text, url) - - else: - definition = random.choice(definitions) - - def_text = " ".join(definition['definition'].split()) # remove excess spaces - def_text = text.truncate_str(def_text, 200) - - name = definition['word'] - url = definition['permalink'] - output = u"\x02{}\x02: {} :: {}".format(name, def_text, url) - - return output diff --git a/disabled_stuff/urlhistory.py b/disabled_stuff/urlhistory.py deleted file mode 100644 index c5e344e..0000000 --- a/disabled_stuff/urlhistory.py +++ /dev/null @@ -1,80 +0,0 @@ -import math -import re -import time - -from util import hook, urlnorm, timesince - - -expiration_period = 60 * 60 * 24 # 1 day - -ignored_urls = [urlnorm.normalize("http://google.com"),] - - -def db_init(db): - db.execute("create table if not exists urlhistory" - "(chan, url, nick, time)") - db.commit() - - -def insert_history(db, chan, url, nick): - now = time.time() - db.execute("insert into urlhistory(chan, url, nick, time) " - "values(?,?,?,?)", (chan, url, nick, time.time())) - db.commit() - - -def get_history(db, chan, url): - db.execute("delete from urlhistory where time < ?", - (time.time() - expiration_period,)) - return db.execute("select nick, time from urlhistory where " - "chan=? and url=? order by time desc", (chan, url)).fetchall() - - -def nicklist(nicks): - nicks = sorted(dict(nicks), key=unicode.lower) - if len(nicks) <= 2: - return ' and '.join(nicks) - else: - return ', and '.join((', '.join(nicks[:-1]), nicks[-1])) - - -def format_reply(history): - if not history: - return - - last_nick, recent_time = history[0] - last_time = timesince.timesince(recent_time) - - if len(history) == 1: - return #"%s linked that %s ago." 
% (last_nick, last_time) - - hour_span = math.ceil((time.time() - history[-1][1]) / 3600) - hour_span = '%.0f hours' % hour_span if hour_span > 1 else 'hour' - - hlen = len(history) - ordinal = ["once", "twice", "%d times" % hlen][min(hlen, 3) - 1] - - if len(dict(history)) == 1: - last = "last linked %s ago" % last_time - else: - last = "last linked by %s %s ago" % (last_nick, last_time) - - return #"that url has been posted %s in the past %s by %s (%s)." % (ordinal, - -@hook.command -def url(inp, nick='', chan='', db=None, bot=None): - db_init(db) - url = urlnorm.normalize(inp.group().encode('utf-8')) - if url not in ignored_urls: - url = url.decode('utf-8') - history = get_history(db, chan, url) - insert_history(db, chan, url, nick) - - inp = match.string.lower() - - for name in dict(history): - if name.lower() in inp: # person was probably quoting a line - return # that had a link. don't remind them. - - if nick not in dict(history): - return format_reply(history) diff --git a/disabled_stuff/utility.py b/disabled_stuff/utility.py deleted file mode 100644 index b0afa5b..0000000 --- a/disabled_stuff/utility.py +++ /dev/null @@ -1,197 +0,0 @@ -import hashlib -import collections -import re - -from util import hook, text - - -# variables - -colors = collections.OrderedDict([ - ('red', '\x0304'), - ('orange', '\x0307'), - ('yellow', '\x0308'), - ('green', '\x0309'), - ('cyan', '\x0303'), - ('ltblue', '\x0310'), - ('rylblue', '\x0312'), - ('blue', '\x0302'), - ('magenta', '\x0306'), - ('pink', '\x0313'), - ('maroon', '\x0305') -]) - -# helper functions - -strip_re = re.compile("(\x03|\x02|\x1f)(?:,?\d{1,2}(?:,\d{1,2})?)?", re.UNICODE) - - -def strip(string): - return strip_re.sub('', string) - - -# basic text tools - - -## TODO: make this capitalize sentences correctly -@hook.command("capitalise") -@hook.command -def capitalize(inp): - """capitalize <string> -- Capitalizes <string>.""" - return inp.capitalize() - - -@hook.command -def upper(inp): - """upper <string> 
-- Convert string to uppercase.""" - return inp.upper() - - -@hook.command -def lower(inp): - """lower <string> -- Convert string to lowercase.""" - return inp.lower() - - -@hook.command -def titlecase(inp): - """title <string> -- Convert string to title case.""" - return inp.title() - - -@hook.command -def swapcase(inp): - """swapcase <string> -- Swaps the capitalization of <string>.""" - return inp.swapcase() - - -# encoding - - -@hook.command -def rot13(inp): - """rot13 <string> -- Encode <string> with rot13.""" - return inp.encode('rot13') - - -@hook.command -def base64(inp): - """base64 <string> -- Encode <string> with base64.""" - return inp.encode('base64') - - -@hook.command -def unbase64(inp): - """unbase64 <string> -- Decode <string> with base64.""" - return inp.decode('base64') - - -@hook.command -def checkbase64(inp): - try: - decoded = inp.decode('base64') - recoded = decoded.encode('base64').strip() - is_base64 = recoded == inp - except: - return '"{}" is not base64 encoded'.format(inp) - - if is_base64: - return '"{}" is base64 encoded'.format(recoded) - else: - return '"{}" is not base64 encoded'.format(inp) - - -@hook.command -def unescape(inp): - """unescape <string> -- Unescapes <string>.""" - try: - return inp.decode('unicode-escape') - except Exception as e: - return "Error: {}".format(e) - - -@hook.command -def escape(inp): - """escape <string> -- Escapes <string>.""" - try: - return inp.encode('unicode-escape') - except Exception as e: - return "Error: {}".format(e) - - -# length - - -@hook.command -def length(inp): - """length <string> -- gets the length of <string>""" - return "The length of that string is {} characters.".format(len(inp)) - - -# reverse - - -@hook.command -def reverse(inp): - """reverse <string> -- reverses <string>.""" - return inp[::-1] - - -# hashing - - -@hook.command("hash") -def hash_command(inp): - """hash <string> -- Returns hashes of <string>.""" - return ', '.join(x + ": " + getattr(hashlib, x)(inp).hexdigest() - 
for x in ['md5', 'sha1', 'sha256']) - - -# novelty - - -@hook.command -def munge(inp): - """munge <text> -- Munges up <text>.""" - return text.munge(inp) - - -# colors - based on code by Reece Selwood - <https://github.com/hitzler/homero> - - -@hook.command -def rainbow(inp): - inp = unicode(inp) - inp = strip(inp) - col = colors.items() - out = "" - l = len(colors) - for i, t in enumerate(inp): - if t == " ": - out += t - else: - out += col[i % l][1] + t - return out - - -@hook.command -def wrainbow(inp): - inp = unicode(inp) - col = colors.items() - inp = strip(inp).split(' ') - out = [] - l = len(colors) - for i, t in enumerate(inp): - out.append(col[i % l][1] + t) - return ' '.join(out) - - -@hook.command -def usa(inp): - inp = strip(inp) - c = [colors['red'], '\x0300', colors['blue']] - l = len(c) - out = '' - for i, t in enumerate(inp): - out += c[i % l] + t - return out diff --git a/disabled_stuff/validate.py b/disabled_stuff/validate.py deleted file mode 100644 index 88022b7..0000000 --- a/disabled_stuff/validate.py +++ /dev/null @@ -1,26 +0,0 @@ -""" -Runs a given url through the w3c validator - -by Vladi -""" - -from util import hook, http - - -@hook.command('w3c') -@hook.command -def validate(inp): - """validate <url> -- Runs url through the w3c markup validator.""" - - if not inp.startswith('http://'): - inp = 'http://' + inp - - url = 'http://validator.w3.org/check?uri=' + http.quote_plus(inp) - info = dict(http.open(url).info()) - - status = info['x-w3c-validator-status'].lower() - if status in ("valid", "invalid"): - error_count = info['x-w3c-validator-errors'] - warning_count = info['x-w3c-validator-warnings'] - return "{} was found to be {} with {} errors and {} warnings." 
\ - " see: {}".format(inp, status, error_count, warning_count, url) diff --git a/disabled_stuff/valvesounds.py b/disabled_stuff/valvesounds.py deleted file mode 100644 index 88bc8ce..0000000 --- a/disabled_stuff/valvesounds.py +++ /dev/null @@ -1,92 +0,0 @@ -import json -import urllib2 - -from util import hook, http, web - - -def get_sound_info(game, search): - search = search.replace(" ", "+") - try: - data = http.get_json("http://p2sounds.blha303.com.au/search/%s/%s?format=json" % (game, search)) - except urllib2.HTTPError as e: - return "Error: " + json.loads(e.read())["error"] - items = [] - for item in data["items"]: - if "music" in game: - textsplit = item["text"].split('"') - text = "" - for i in xrange(len(textsplit)): - if i % 2 != 0 and i < 6: - if text: - text += " / " + textsplit[i] - else: - text = textsplit[i] - else: - text = item["text"] - items.append("{} - {} {}".format(item["who"], - text if len(text) < 325 else text[:325] + "...", - item["listen"])) - if len(items) == 1: - return items[0] - else: - return "{} (and {} others: {})".format(items[0], len(items) - 1, web.haste("\n".join(items))) - - -@hook.command -def portal2(inp): - """portal2 <quote> - Look up Portal 2 quote. - Example: .portal2 demand to see life's manager""" - return get_sound_info("portal2", inp) - - -@hook.command -def portal2dlc(inp): - """portal2dlc <quote> - Look up Portal 2 DLC quote. - Example: .portal2dlc1 these exhibits are interactive""" - return get_sound_info("portal2dlc1", inp) - - -@hook.command("portal2pti") -@hook.command -def portal2dlc2(inp): - """portal2dlc2 <quote> - Look up Portal 2 Perpetual Testing Inititive quote. - Example: .portal2 Cave here.""" - return get_sound_info("portal2dlc2", inp) - - -@hook.command -def portal2music(inp): - """portal2music <title> - Look up Portal 2 music. 
- Example: .portal2music turret opera""" - return get_sound_info("portal2music", inp) - - -@hook.command('portal1') -@hook.command -def portal(inp): - """portal <quote> - Look up Portal quote. - Example: .portal The last thing you want to do is hurt me""" - return get_sound_info("portal1", inp) - - -@hook.command('portal1music') -@hook.command -def portalmusic(inp): - """portalmusic <title> - Look up Portal music. - Example: .portalmusic still alive""" - return get_sound_info("portal1music", inp) - - -@hook.command('tf2sound') -@hook.command -def tf2(inp): - """tf2 [who - ]<quote> - Look up TF2 quote. - Example: .tf2 may i borrow your earpiece""" - return get_sound_info("tf2", inp) - - -@hook.command -def tf2music(inp): - """tf2music title - Look up TF2 music lyrics. - Example: .tf2music rocket jump waltz""" - return get_sound_info("tf2music", inp) diff --git a/disabled_stuff/vimeo.py b/disabled_stuff/vimeo.py deleted file mode 100644 index 0a55549..0000000 --- a/disabled_stuff/vimeo.py +++ /dev/null @@ -1,20 +0,0 @@ -from util import hook, http, timeformat - - -@hook.regex(r'vimeo.com/([0-9]+)') -def vimeo_url(match): - """vimeo <url> -- returns information on the Vimeo video at <url>""" - info = http.get_json('http://vimeo.com/api/v2/video/%s.json' - % match.group(1)) - - if info: - info[0]["duration"] = timeformat.format_time(info[0]["duration"]) - info[0]["stats_number_of_likes"] = format( - info[0]["stats_number_of_likes"], ",d") - info[0]["stats_number_of_plays"] = format( - info[0]["stats_number_of_plays"], ",d") - return ("\x02%(title)s\x02 - length \x02%(duration)s\x02 - " - "\x02%(stats_number_of_likes)s\x02 likes - " - "\x02%(stats_number_of_plays)s\x02 plays - " - "\x02%(user_name)s\x02 on \x02%(upload_date)s\x02" - % info[0]) diff --git a/disabled_stuff/weather.py b/disabled_stuff/weather.py deleted file mode 100644 index 8a56046..0000000 --- a/disabled_stuff/weather.py +++ /dev/null @@ -1,99 +0,0 @@ -from util import hook, http, web - -base_url = 
"http://api.wunderground.com/api/{}/{}/q/{}.json" - - -@hook.command(autohelp=None) -def weather(inp, reply=None, db=None, nick=None, bot=None, notice=None): - """weather <location> [dontsave] -- Gets weather data - for <location> from Wunderground.""" - - api_key = bot.config.get("api_keys", {}).get("wunderground") - - if not api_key: - return "Error: No wunderground API details." - - # initialise weather DB - db.execute("create table if not exists weather(nick primary key, loc)") - - # if there is no input, try getting the users last location from the DB - if not inp: - location = db.execute("select loc from weather where nick=lower(?)", - [nick]).fetchone() - if not location: - # no location saved in the database, send the user help text - notice(weather.__doc__) - return - loc = location[0] - - # no need to save a location, we already have it - dontsave = True - else: - # see if the input ends with "dontsave" - dontsave = inp.endswith(" dontsave") - - # remove "dontsave" from the input string after checking for it - if dontsave: - loc = inp[:-9].strip().lower() - else: - loc = inp - - location = http.quote_plus(loc) - - request_url = base_url.format(api_key, "geolookup/forecast/conditions", location) - response = http.get_json(request_url) - - if 'location' not in response: - try: - location_id = response['response']['results'][0]['zmw'] - except KeyError: - return "Could not get weather for that location." 
- - # get the weather again, using the closest match - request_url = base_url.format(api_key, "geolookup/forecast/conditions", "zmw:" + location_id) - response = http.get_json(request_url) - - if response['location']['state']: - place_name = "\x02{}\x02, \x02{}\x02 (\x02{}\x02)".format(response['location']['city'], - response['location']['state'], - response['location']['country']) - else: - place_name = "\x02{}\x02 (\x02{}\x02)".format(response['location']['city'], - response['location']['country']) - - forecast_today = response["forecast"]["simpleforecast"]["forecastday"][0] - forecast_tomorrow = response["forecast"]["simpleforecast"]["forecastday"][1] - - # put all the stuff we want to use in a dictionary for easy formatting of the output - weather_data = { - "place": place_name, - "conditions": response['current_observation']['weather'], - "temp_f": response['current_observation']['temp_f'], - "temp_c": response['current_observation']['temp_c'], - "humidity": response['current_observation']['relative_humidity'], - "wind_kph": response['current_observation']['wind_kph'], - "wind_mph": response['current_observation']['wind_mph'], - "wind_direction": response['current_observation']['wind_dir'], - "today_conditions": forecast_today['conditions'], - "today_high_f": forecast_today['high']['fahrenheit'], - "today_high_c": forecast_today['high']['celsius'], - "today_low_f": forecast_today['low']['fahrenheit'], - "today_low_c": forecast_today['low']['celsius'], - "tomorrow_conditions": forecast_tomorrow['conditions'], - "tomorrow_high_f": forecast_tomorrow['high']['fahrenheit'], - "tomorrow_high_c": forecast_tomorrow['high']['celsius'], - "tomorrow_low_f": forecast_tomorrow['low']['fahrenheit'], - "tomorrow_low_c": forecast_tomorrow['low']['celsius'], - "url": web.isgd(response["current_observation"]['forecast_url'] + "?apiref=e535207ff4757b18") - } - - reply("{place} - \x02Current:\x02 {conditions}, {temp_f}F/{temp_c}C, {humidity}, " - "Wind: 
{wind_kph}KPH/{wind_mph}MPH {wind_direction}, \x02Today:\x02 {today_conditions}, " - "High: {today_high_f}F/{today_high_c}C, Low: {today_low_f}F/{today_low_c}C. " - "\x02Tomorrow:\x02 {tomorrow_conditions}, High: {tomorrow_high_f}F/{tomorrow_high_c}C, " - "Low: {tomorrow_low_f}F/{tomorrow_low_c}C - {url}".format(**weather_data)) - - if location and not dontsave: - db.execute("insert or replace into weather(nick, loc) values (?,?)", - (nick.lower(), location)) - db.commit() diff --git a/disabled_stuff/wikipedia.py b/disabled_stuff/wikipedia.py deleted file mode 100644 index 90461f4..0000000 --- a/disabled_stuff/wikipedia.py +++ /dev/null @@ -1,49 +0,0 @@ -"""Searches wikipedia and returns first sentence of article -Scaevolus 2009""" - -import re - -from util import hook, http, text - - -api_prefix = "http://en.wikipedia.org/w/api.php" -search_url = api_prefix + "?action=opensearch&format=xml" - -paren_re = re.compile('\s*\(.*\)$') - - -@hook.command('w') -@hook.command -def wiki(inp): - """wiki <phrase> -- Gets first sentence of Wikipedia article on <phrase>.""" - - x = http.get_xml(search_url, search=inp) - - ns = '{http://opensearch.org/searchsuggest2}' - items = x.findall(ns + 'Section/' + ns + 'Item') - - if not items: - if x.find('error') is not None: - return 'error: %(code)s: %(info)s' % x.find('error').attrib - else: - return 'No results found.' 
- - def extract(item): - return [item.find(ns + x).text for x in - ('Text', 'Description', 'Url')] - - title, desc, url = extract(items[0]) - - if 'may refer to' in desc: - title, desc, url = extract(items[1]) - - title = paren_re.sub('', title) - - if title.lower() not in desc.lower(): - desc = title + desc - - desc = u' '.join(desc.split()) # remove excess spaces - - desc = text.truncate_str(desc, 200) - - return u'{} :: {}'.format(desc, http.quote(url, ':/')) diff --git a/disabled_stuff/wolframalpha.py b/disabled_stuff/wolframalpha.py deleted file mode 100644 index b20ffed..0000000 --- a/disabled_stuff/wolframalpha.py +++ /dev/null @@ -1,58 +0,0 @@ -import re - -from util import hook, http, text, web - - -@hook.command('math') -@hook.command('calc') -@hook.command('wa') -@hook.command -def wolframalpha(inp, bot=None): - """wa <query> -- Computes <query> using Wolfram Alpha.""" - api_key = bot.config.get("api_keys", {}).get("wolframalpha", None) - - if not api_key: - return "error: missing api key" - - url = 'http://api.wolframalpha.com/v2/query?format=plaintext' - - result = http.get_xml(url, input=inp, appid=api_key) - - # get the URL for a user to view this query in a browser - query_url = "http://www.wolframalpha.com/input/?i=" + \ - http.quote_plus(inp.encode('utf-8')) - short_url = web.try_isgd(query_url) - - pod_texts = [] - for pod in result.xpath("//pod[@primary='true']"): - title = pod.attrib['title'] - if pod.attrib['id'] == 'Input': - continue - - results = [] - for subpod in pod.xpath('subpod/plaintext/text()'): - subpod = subpod.strip().replace('\\n', '; ') - subpod = re.sub(r'\s+', ' ', subpod) - if subpod: - results.append(subpod) - if results: - pod_texts.append(title + u': ' + u', '.join(results)) - - ret = u' - '.join(pod_texts) - - if not pod_texts: - return 'No results.' 
- - ret = re.sub(r'\\(.)', r'\1', ret) - - def unicode_sub(match): - return unichr(int(match.group(1), 16)) - - ret = re.sub(r'\\:([0-9a-z]{4})', unicode_sub, ret) - - ret = text.truncate_str(ret, 250) - - if not ret: - return 'No results.' - - return u"{} - {}".format(ret, short_url) diff --git a/disabled_stuff/wordoftheday.py b/disabled_stuff/wordoftheday.py deleted file mode 100644 index 7b7a19b..0000000 --- a/disabled_stuff/wordoftheday.py +++ /dev/null @@ -1,20 +0,0 @@ -import re -from util import hook, http, misc -from BeautifulSoup import BeautifulSoup - - -@hook.command(autohelp=False) -def word(inp, say=False, nick=False): - "word -- Gets the word of the day." - page = http.get('http://merriam-webster.com/word-of-the-day') - - soup = BeautifulSoup(page) - - word = soup.find('strong', {'class': 'main_entry_word'}).renderContents() - function = soup.find('p', {'class': 'word_function'}).renderContents() - - #definitions = re.findall(r'<span class="ssens"><strong>:</strong>' - # r' *([^<]+)</span>', content) - - say("(%s) The word of the day is:"\ - " \x02%s\x02 (%s)" % (nick, word, function)) diff --git a/disabled_stuff/wrapper.old b/disabled_stuff/wrapper.old deleted file mode 100644 index d2f2cda..0000000 --- a/disabled_stuff/wrapper.old +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/env python -# Bot Wrapper by neersighted - -# Import required modules -import os -import sys -import subprocess -import json -import re - -# Files -configfile = os.path.isfile("./config") -botfile = os.path.isfile("./bot.py") - -# Colors -nocol = "\033[1;m" -red = "\033[1;31m" -green = "\033[1;32m" - -# Messages -firstrun = "Welclome to your first run of: " -usage = "usage: ./cloudbot {start|stop|restart|status}" -iusage = "{1|start} {2|stop} {3|restart} {4|status} {5|exit}" -quit = "Thanks for using CloudBot!" - -error1 = red + "Neither screen nor daemon is installed! "\ - "This program cannot run! {ERROR 1}" + nocol -error2 = red + "Could not find bot.py! 
Are you in the wrong folder? "\ - "{ERROR 2}" + nocol -error3 = red + "Invalid choice, exiting! {ERROR 3}" + nocol -error4 = red + "Program killed by user! {ERROR 4}" + nocol -error5 = red + "Invalid backend in config! (Or, backend not installed)"\ - " {ERROR 5}" + nocol -error6 = red + "Author error! We be derpin'! {ERROR 6}" + nocol - - -# Commands -pwd = os.getcwd() -clearlog = ": > ./bot.log" - -start = "echo " + "'" + error1 + "'" -stop = "echo " + "'" + error1 + "'" -restart = "echo " + "'" + error1 + "'" -pid = "echo 'Cannot get pid'" - -daemonstart = "daemon -r -n cloudbot -O " + pwd + \ - "/bot.log python " + pwd + "/bot.py" -daemonstop = "daemon -n cloudbot --stop" -daemonrestart = "./cloudbot stop > /dev/null 2>&1 && ./cloudbot start > /dev/null 2>&1" -daemonpid = "pidof /usr/bin/daemon" - -screenstart = "screen -d -m -S cloudbot -t cloudbot python " + pwd +\ - "/bot.py > " + pwd + "/bot.log 2>&1" -screenstop = "kill `pidof /usr/bin/screen`" -screenrestart = "./cloudbot stop > /dev/null 2>&1 && ./cloudbot start > /dev/null 2>&1" -screenpid = "pidof /usr/bin/screen" - -# Checks -if configfile: - try: - config = json.load(open('config')) - command = ":" - except ValueError, e: - print 'error: malformed config', e -else: - config = False - command = "python bot.py" - -daemoncheck = subprocess.check_output("locate /usr/bin/daemon", shell=True) -daemon = re.match(r'^/usr/bin/daemon$', daemoncheck) - -screencheck = subprocess.check_output("locate /usr/bin/screen", shell=True) -screen = re.match(r'^/usr/bin/screen$', screencheck) - -if configfile: - backend = config.get("wrapper", {}).get("backend", "daemon") - daemonloc = config.get("wrapper", {}).get("daemonloc", "/usr/bin/daemon") - screenloc = config.get("wrapper", {}).get("screenloc", "/usr/bin/screen") -else: - backend = False - daemonloc = "/usr/bin/daemon" - screenloc = "/usr/bin/screen" - -try: - runningcheck = subprocess.check_output("ps ax|grep cloudbot|"\ - "grep -v grep|grep -v ./cloudbot", 
shell=True) - running = re.match(r'^[1-9]+', runningcheck) -except (subprocess.CalledProcessError): - running = False - -# Set commands -if (backend == "daemon"): - if daemon: - start = daemonstart - stop = daemonstop - restart = daemonrestart - pid = daemonpid - else: - print error5 - exit -elif (backend == "screen"): - if screen: - start = screenstart - stop = screenstop - restart = screenrestart - pid = screenpid - else: - print error5 - exit -elif (backend == False): - print firstrun -else: - print error5 - exit - -# Fancy banner -print " ______ __ ______ __ __ "\ -" _______ .______ ______ .___________." -print " / || | / __ \ | | | | "\ -"| \ | _ \ / __ \ | |" -print "| ,----'| | | | | | | | | | "\ -"| .--. || |_) | | | | | `---| |----`" -print "| | | | | | | | | | | | "\ -"| | | || _ < | | | | | | " -print "| `----.| `----.| `--' | | `--' | "\ -"| '--' || |_) | | `--' | | | " -print " \______||_______| \______/ \______/ "\ -"|_______/ |______/ \______/ |__| " -print "http://git.io/cloudbot "\ -" by lukeroge" - -# Read arguments/turn interactive -try: - if (len(sys.argv) > 1): - read = 0 - else: - sys.argv = "interactive" - print iusage - read = int(raw_input("Please choose a option: ")) - - if (sys.argv[1] == "start") or (read == 1): - if running: - print "Bot is already running, cannot start!" - else: - command = start - print "Starting... (" + backend + ")" - elif (sys.argv[1] == "stop") or (read == 2): - if running: - command = stop - print "Stopping... (" + backend + ")" - else: - print "Bot is not running, cannot stop!" - elif (sys.argv[1] == "restart") or (read == 3): - if running: - command = restart - print "Restarting... (" + backend + ")" - else: - print "Bot is not running, cannot restart!" - elif (sys.argv[1] == "status") or (read == 4): - if running: - command = pid - print green + "Bot is running! " + nocol - else: - print red + "Bot is not running! 
" + nocol - elif (sys.argv[1] == "clear"): - command = clearlog - elif (sys.argv[1] == "exit") or (read == 5): - exit - elif (sys.argv[1] == "interactive"): - pass - else: - print usage - exit - -# Pretify errors -except (TypeError, ValueError), e: - print error3 - exit -except (KeyboardInterrupt), e: - print error4 - exit -except (NameError, SyntaxError), e: - print error6 - exit - -# Check for bot files -if botfile: - pass -else: - print error2 - exit - -# Call command -subprocess.call(command, shell=True) -print quit -exit diff --git a/disabled_stuff/xkcd.py b/disabled_stuff/xkcd.py deleted file mode 100644 index d7fad59..0000000 --- a/disabled_stuff/xkcd.py +++ /dev/null @@ -1,43 +0,0 @@ -import re - -from util import hook, http - - -xkcd_re = (r'(.*:)//(www.xkcd.com|xkcd.com)(.*)', re.I) -months = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', - 9: 'September', 10: 'October', 11: 'November', 12: 'December'} - - -def xkcd_info(xkcd_id, url=False): - """ takes an XKCD entry ID and returns a formatted string """ - data = http.get_json("http://www.xkcd.com/" + xkcd_id + "/info.0.json") - date = "%s %s %s" % (data['day'], months[int(data['month'])], data['year']) - if url: - url = " | http://xkcd.com/" + xkcd_id.replace("/", "") - return "xkcd: \x02%s\x02 (%s)%s" % (data['title'], date, url if url else "") - - -def xkcd_search(term): - search_term = http.quote_plus(term) - soup = http.get_soup("http://www.ohnorobot.com/index.pl?s={}&Search=Search&" - "comic=56&e=0&n=0&b=0&m=0&d=0&t=0".format(search_term)) - result = soup.find('li') - if result: - url = result.find('div', {'class': 'tinylink'}).text - xkcd_id = url[:-1].split("/")[-1] - print xkcd_id - return xkcd_info(xkcd_id, url=True) - else: - return "No results found!" 
- - -@hook.regex(*xkcd_re) -def xkcd_url(match): - xkcd_id = match.group(3).split(" ")[0].split("/")[1] - return xkcd_info(xkcd_id) - - -@hook.command -def xkcd(inp): - """xkcd <search term> - Search for xkcd comic matching <search term>""" - return xkcd_search(inp) diff --git a/disabled_stuff/yahooanswers.py b/disabled_stuff/yahooanswers.py deleted file mode 100644 index e28ed63..0000000 --- a/disabled_stuff/yahooanswers.py +++ /dev/null @@ -1,16 +0,0 @@ -from util import hook, web, text - - -@hook.command -def answer(inp): - """answer <query> -- find the answer to a question on Yahoo! Answers""" - - query = "SELECT Subject, ChosenAnswer, Link FROM answers.search WHERE query=@query LIMIT 1" - result = web.query(query, {"query": inp.strip()}).one() - - short_url = web.try_isgd(result["Link"]) - - # we split the answer and .join() it to remove newlines/extra spaces - answer_text = text.truncate_str(' '.join(result["ChosenAnswer"].split()), 80) - - return u'\x02{}\x02 "{}" - {}'.format(result["Subject"], answer_text, short_url) diff --git a/disabled_stuff/youtube.py b/disabled_stuff/youtube.py deleted file mode 100644 index e63bca3..0000000 --- a/disabled_stuff/youtube.py +++ /dev/null @@ -1,136 +0,0 @@ -import re -import time - -from util import hook, http, timeformat - - -youtube_re = (r'(?:youtube.*?(?:v=|/v/)|youtu\.be/|yooouuutuuube.*?id=)' - '([-_a-zA-Z0-9]+)', re.I) - -base_url = 'http://gdata.youtube.com/feeds/api/' -api_url = base_url + 'videos/{}?v=2&alt=jsonc' -search_api_url = base_url + 'videos?v=2&alt=jsonc&max-results=1' -video_url = "http://youtu.be/%s" - - -def plural(num=0, text=''): - return "{:,} {}{}".format(num, text, "s"[num == 1:]) - - -def get_video_description(video_id): - request = http.get_json(api_url.format(video_id)) - - if request.get('error'): - return - - data = request['data'] - - out = u'\x02{}\x02'.format(data['title']) - - if not data.get('duration'): - return out - - length = data['duration'] - out += u' - length 
\x02{}\x02'.format(timeformat.format_time(length, simple=True)) - - if 'ratingCount' in data: - likes = plural(int(data['likeCount']), "like") - dislikes = plural(data['ratingCount'] - int(data['likeCount']), "dislike") - - percent = 100 * float(data['likeCount']) / float(data['ratingCount']) - out += u' - {}, {} (\x02{:.1f}\x02%)'.format(likes, - dislikes, percent) - - if 'viewCount' in data: - views = data['viewCount'] - out += u' - \x02{:,}\x02 view{}'.format(views, "s"[views == 1:]) - - try: - uploader = http.get_json(base_url + "users/{}?alt=json".format(data["uploader"]))["entry"]["author"][0]["name"][ - "$t"] - except: - uploader = data["uploader"] - - upload_time = time.strptime(data['uploaded'], "%Y-%m-%dT%H:%M:%S.000Z") - out += u' - \x02{}\x02 on \x02{}\x02'.format(uploader, - time.strftime("%Y.%m.%d", upload_time)) - - if 'contentRating' in data: - out += u' - \x034NSFW\x02' - - return out - - -@hook.regex(*youtube_re) -def youtube_url(match): - return get_video_description(match.group(1)) - - -@hook.command('you') -@hook.command('yt') -@hook.command('y') -@hook.command -def youtube(inp): - """youtube <query> -- Returns the first YouTube search result for <query>.""" - request = http.get_json(search_api_url, q=inp) - - if 'error' in request: - return 'error performing search' - - if request['data']['totalItems'] == 0: - return 'no results found' - - video_id = request['data']['items'][0]['id'] - - return get_video_description(video_id) + u" - " + video_url % video_id - - -@hook.command('ytime') -@hook.command -def youtime(inp): - """youtime <query> -- Gets the total run time of the first YouTube search result for <query>.""" - request = http.get_json(search_api_url, q=inp) - - if 'error' in request: - return 'error performing search' - - if request['data']['totalItems'] == 0: - return 'no results found' - - video_id = request['data']['items'][0]['id'] - request = http.get_json(api_url.format(video_id)) - - if request.get('error'): - return - data = 
request['data'] - - if not data.get('duration'): - return - - length = data['duration'] - views = data['viewCount'] - total = int(length * views) - - length_text = timeformat.format_time(length, simple=True) - total_text = timeformat.format_time(total, accuracy=8) - - return u'The video \x02{}\x02 has a length of {} and has been viewed {:,} times for ' \ - u'a total run time of {}!'.format(data['title'], length_text, views, - total_text) - - -ytpl_re = (r'(.*:)//(www.youtube.com/playlist|youtube.com/playlist)(:[0-9]+)?(.*)', re.I) - - -@hook.regex(*ytpl_re) -def ytplaylist_url(match): - location = match.group(4).split("=")[-1] - try: - soup = http.get_soup("https://www.youtube.com/playlist?list=" + location) - except Exception: - return "\x034\x02Invalid response." - title = soup.find('title').text.split('-')[0].strip() - author = soup.find('img', {'class': 'channel-header-profile-image'})['title'] - num_videos = soup.find('ul', {'class': 'header-stats'}).findAll('li')[0].text.split(' ')[0] - views = soup.find('ul', {'class': 'header-stats'}).findAll('li')[1].text.split(' ')[0] - return u"\x02%s\x02 - \x02%s\x02 views - \x02%s\x02 videos - \x02%s\x02" % (title, views, num_videos, author) diff --git a/images/body-bg.jpg b/images/body-bg.jpg new file mode 100644 index 0000000..0e0f861 Binary files /dev/null and b/images/body-bg.jpg differ diff --git a/images/download-button.png b/images/download-button.png new file mode 100644 index 0000000..df3f09a Binary files /dev/null and b/images/download-button.png differ diff --git a/images/github-button.png b/images/github-button.png new file mode 100644 index 0000000..efe07f9 Binary files /dev/null and b/images/github-button.png differ diff --git a/images/header-bg.jpg b/images/header-bg.jpg new file mode 100644 index 0000000..960bff7 Binary files /dev/null and b/images/header-bg.jpg differ diff --git a/images/highlight-bg.jpg b/images/highlight-bg.jpg new file mode 100644 index 0000000..4c4a78e Binary files /dev/null and 
b/images/highlight-bg.jpg differ diff --git a/images/sidebar-bg.jpg b/images/sidebar-bg.jpg new file mode 100644 index 0000000..42890fe Binary files /dev/null and b/images/sidebar-bg.jpg differ diff --git a/index.html b/index.html new file mode 100644 index 0000000..e256a15 --- /dev/null +++ b/index.html @@ -0,0 +1,164 @@ +<!DOCTYPE html> +<html> + <head> + <meta charset='utf-8'> + <meta http-equiv="X-UA-Compatible" content="chrome=1"> + <meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1"> + <link href='https://fonts.googleapis.com/css?family=Architects+Daughter' rel='stylesheet' type='text/css'> + <link rel="stylesheet" type="text/css" href="stylesheets/stylesheet.css" media="screen" /> + <link rel="stylesheet" type="text/css" href="stylesheets/pygment_trac.css" media="screen" /> + <link rel="stylesheet" type="text/css" href="stylesheets/print.css" media="print" /> + + <!--[if lt IE 9]> + <script src="//html5shiv.googlecode.com/svn/trunk/html5.js"></script> + <![endif]--> + + <title>CloudBot by ClouDev + + + +
+
+

CloudBot

+

The easy to use, open source Python IRC Bot!

+ View project onGitHub +
+
+ +
+
+
+

+CloudBot

+ +

+About

+ +

CloudBot is a Python IRC bot based on Skybot by rmmh.

+ +

+Getting and using CloudBot

+ +

+Download

+ +

Get CloudBot at https://github.com/ClouDev/CloudBot/zipball/develop.

+ +

Unzip the resulting file, and continue to read this document.

+ +

+Install

+ +

Before you can run the bot, you need to install a few Python dependencies. LXML is required while Enchant and PyDNS are needed for several plugins.

+ +

These can be installed with pip (The Python package manager):

+ +
[sudo] pip install -r requirements.txt
+
+ +

If you use pip, you will also need the following packages on linux or pip will fail to install the requirements. + python, python-dev, libenchant-dev, libenchant1c2a, libxslt-dev, libxml2-dev.

+ +

+How to install pip +

+ +
curl -O http://python-distribute.org/distribute_setup.py # or download with your browser on windows
+python distribute_setup.py
+easy_install pip
+
+ +

If you are unable to use pip, there are Windows installers for LXML available for 64 bit and 32 bit versions of Python.

+ +

+Run

+ +

Before you run the bot, rename config.default to config and edit it with your preferred settings.

+ +

Once you have installed the required dependencies and renamed the config file, you can run the bot! Make sure you are in the correct folder and run the following command:

+ +

python bot.py

+ +

On Windows you can usually just double-click bot.py to start the bot, as long as you have Python installed correctly.

+ +

+Getting help with CloudBot

+ +

+Documentation

+ +

To configure your CloudBot, visit the Config Wiki Page.

+ +

To write your own plugins, visit the Plugin Wiki Page.

+ +

More at the Wiki Main Page.

+ +

(some of the information on the wiki is outdated and needs to be rewritten)

+ +

+Support

+ +

The developers reside in #CloudBot on EsperNet and would be glad to help you.

+ +

If you think you have found a bug/have a idea/suggestion, please open a issue here on Github.

+ +

+Requirements

+ +

CloudBot runs on Python 2.7.x. It is currently developed on Windows 8 with Python 2.7.5.

+ +

It requires the Python module lXML. +The module Enchant is needed for the spellcheck plugin. +The module PyDNS is needed for SRV record lookup in the mcping plugin.

+ +

Windows users: Windows compatibility some plugins is broken (such as ping), but we do intend to add it. Eventually.

+ +

+Example CloudBots

+ +

You can find a number of example bots in #CloudBot.

+ +

+License

+ +

CloudBot is licensed under the GPL v3 license. The terms are as follows.

+ +
CloudBot
+
+Copyright © 2011-2013 Luke Rogers
+
+CloudBot is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+CloudBot is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with CloudBot.  If not, see <http://www.gnu.org/licenses/>.
+
+
+ + +
+
+ + + + \ No newline at end of file diff --git a/javascripts/main.js b/javascripts/main.js new file mode 100644 index 0000000..d8135d3 --- /dev/null +++ b/javascripts/main.js @@ -0,0 +1 @@ +console.log('This would be the main JS file.'); diff --git a/javascripts/scale.fix.js b/javascripts/scale.fix.js new file mode 100644 index 0000000..87a40ca --- /dev/null +++ b/javascripts/scale.fix.js @@ -0,0 +1,17 @@ +var metas = document.getElementsByTagName('meta'); +var i; +if (navigator.userAgent.match(/iPhone/i)) { + for (i=0; i tag), call handle_starttag and then - handle_endtag. - """ - ROOT_TAG_NAME = u'[document]' - - # If the end-user gives no indication which tree builder they - # want, look for one with these features. - DEFAULT_BUILDER_FEATURES = ['html', 'fast'] - - # Used when determining whether a text node is all whitespace and - # can be replaced with a single space. A text node that contains - # fancy Unicode spaces (usually non-breaking) should be left - # alone. - STRIP_ASCII_SPACES = {9: None, 10: None, 12: None, 13: None, 32: None, } - - def __init__(self, markup="", features=None, builder=None, - parse_only=None, from_encoding=None, **kwargs): - """The Soup object is initialized as the 'root tag', and the - provided markup (which can be a string or a file-like object) - is fed into the underlying parser.""" - - if 'convertEntities' in kwargs: - warnings.warn( - "BS4 does not respect the convertEntities argument to the " - "BeautifulSoup constructor. Entities are always converted " - "to Unicode characters.") - - if 'markupMassage' in kwargs: - del kwargs['markupMassage'] - warnings.warn( - "BS4 does not respect the markupMassage argument to the " - "BeautifulSoup constructor. The tree builder is responsible " - "for any necessary markup massage.") - - if 'smartQuotesTo' in kwargs: - del kwargs['smartQuotesTo'] - warnings.warn( - "BS4 does not respect the smartQuotesTo argument to the " - "BeautifulSoup constructor. 
Smart quotes are always converted " - "to Unicode characters.") - - if 'selfClosingTags' in kwargs: - del kwargs['selfClosingTags'] - warnings.warn( - "BS4 does not respect the selfClosingTags argument to the " - "BeautifulSoup constructor. The tree builder is responsible " - "for understanding self-closing tags.") - - if 'isHTML' in kwargs: - del kwargs['isHTML'] - warnings.warn( - "BS4 does not respect the isHTML argument to the " - "BeautifulSoup constructor. You can pass in features='html' " - "or features='xml' to get a builder capable of handling " - "one or the other.") - - def deprecated_argument(old_name, new_name): - if old_name in kwargs: - warnings.warn( - 'The "%s" argument to the BeautifulSoup constructor ' - 'has been renamed to "%s."' % (old_name, new_name)) - value = kwargs[old_name] - del kwargs[old_name] - return value - return None - - parse_only = parse_only or deprecated_argument( - "parseOnlyThese", "parse_only") - - from_encoding = from_encoding or deprecated_argument( - "fromEncoding", "from_encoding") - - if len(kwargs) > 0: - arg = kwargs.keys().pop() - raise TypeError( - "__init__() got an unexpected keyword argument '%s'" % arg) - - if builder is None: - if isinstance(features, basestring): - features = [features] - if features is None or len(features) == 0: - features = self.DEFAULT_BUILDER_FEATURES - builder_class = builder_registry.lookup(*features) - if builder_class is None: - raise FeatureNotFound( - "Couldn't find a tree builder with the features you " - "requested: %s. Do you need to install a parser library?" - % ",".join(features)) - builder = builder_class() - self.builder = builder - self.is_xml = builder.is_xml - self.builder.soup = self - - self.parse_only = parse_only - - self.reset() - - if hasattr(markup, 'read'): # It's a file-type object. 
- markup = markup.read() - (self.markup, self.original_encoding, self.declared_html_encoding, - self.contains_replacement_characters) = ( - self.builder.prepare_markup(markup, from_encoding)) - - try: - self._feed() - except StopParsing: - pass - - # Clear out the markup and remove the builder's circular - # reference to this object. - self.markup = None - self.builder.soup = None - - def _feed(self): - # Convert the document to Unicode. - self.builder.reset() - - self.builder.feed(self.markup) - # Close out any unfinished strings and close all the open tags. - self.endData() - while self.currentTag.name != self.ROOT_TAG_NAME: - self.popTag() - - def reset(self): - Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME) - self.hidden = 1 - self.builder.reset() - self.currentData = [] - self.currentTag = None - self.tagStack = [] - self.pushTag(self) - - def new_tag(self, name, namespace=None, nsprefix=None, **attrs): - """Create a new tag associated with this soup.""" - return Tag(None, self.builder, name, namespace, nsprefix, attrs) - - def new_string(self, s, subclass=NavigableString): - """Create a new NavigableString associated with this soup.""" - navigable = subclass(s) - navigable.setup() - return navigable - - def insert_before(self, successor): - raise NotImplementedError("BeautifulSoup objects don't support insert_before().") - - def insert_after(self, successor): - raise NotImplementedError("BeautifulSoup objects don't support insert_after().") - - def popTag(self): - tag = self.tagStack.pop() - #print "Pop", tag.name - if self.tagStack: - self.currentTag = self.tagStack[-1] - return self.currentTag - - def pushTag(self, tag): - #print "Push", tag.name - if self.currentTag: - self.currentTag.contents.append(tag) - self.tagStack.append(tag) - self.currentTag = self.tagStack[-1] - - def endData(self, containerClass=NavigableString): - if self.currentData: - currentData = u''.join(self.currentData) - if (currentData.translate(self.STRIP_ASCII_SPACES) == 
'' and - not set([tag.name for tag in self.tagStack]).intersection( - self.builder.preserve_whitespace_tags)): - if '\n' in currentData: - currentData = '\n' - else: - currentData = ' ' - self.currentData = [] - if self.parse_only and len(self.tagStack) <= 1 and \ - (not self.parse_only.text or \ - not self.parse_only.search(currentData)): - return - o = containerClass(currentData) - self.object_was_parsed(o) - - def object_was_parsed(self, o, parent=None, most_recent_element=None): - """Add an object to the parse tree.""" - parent = parent or self.currentTag - most_recent_element = most_recent_element or self._most_recent_element - o.setup(parent, most_recent_element) - if most_recent_element is not None: - most_recent_element.next_element = o - self._most_recent_element = o - parent.contents.append(o) - - def _popToTag(self, name, nsprefix=None, inclusivePop=True): - """Pops the tag stack up to and including the most recent - instance of the given tag. If inclusivePop is false, pops the tag - stack up to but *not* including the most recent instqance of - the given tag.""" - #print "Popping to %s" % name - if name == self.ROOT_TAG_NAME: - return - - numPops = 0 - mostRecentTag = None - - for i in range(len(self.tagStack) - 1, 0, -1): - if (name == self.tagStack[i].name - and nsprefix == self.tagStack[i].prefix): - numPops = len(self.tagStack) - i - break - if not inclusivePop: - numPops = numPops - 1 - - for i in range(0, numPops): - mostRecentTag = self.popTag() - return mostRecentTag - - def handle_starttag(self, name, namespace, nsprefix, attrs): - """Push a start tag on to the stack. - - If this method returns None, the tag was rejected by the - SoupStrainer. You should proceed as if the tag had not occured - in the document. For instance, if this was a self-closing tag, - don't call handle_endtag. 
- """ - - # print "Start tag %s: %s" % (name, attrs) - self.endData() - - if (self.parse_only and len(self.tagStack) <= 1 - and (self.parse_only.text - or not self.parse_only.search_tag(name, attrs))): - return None - - tag = Tag(self, self.builder, name, namespace, nsprefix, attrs, - self.currentTag, self._most_recent_element) - if tag is None: - return tag - if self._most_recent_element: - self._most_recent_element.next_element = tag - self._most_recent_element = tag - self.pushTag(tag) - return tag - - def handle_endtag(self, name, nsprefix=None): - #print "End tag: " + name - self.endData() - self._popToTag(name, nsprefix) - - def handle_data(self, data): - self.currentData.append(data) - - def decode(self, pretty_print=False, - eventual_encoding=DEFAULT_OUTPUT_ENCODING, - formatter="minimal"): - """Returns a string or Unicode representation of this document. - To get Unicode, pass None for encoding.""" - - if self.is_xml: - # Print the XML declaration - encoding_part = '' - if eventual_encoding != None: - encoding_part = ' encoding="%s"' % eventual_encoding - prefix = u'\n' % encoding_part - else: - prefix = u'' - if not pretty_print: - indent_level = None - else: - indent_level = 0 - return prefix + super(BeautifulSoup, self).decode( - indent_level, eventual_encoding, formatter) - -# Alias to make it easier to type import: 'from bs4 import _soup' -_s = BeautifulSoup -_soup = BeautifulSoup - -class BeautifulStoneSoup(BeautifulSoup): - """Deprecated interface to an XML parser.""" - - def __init__(self, *args, **kwargs): - kwargs['features'] = 'xml' - warnings.warn( - 'The BeautifulStoneSoup class is deprecated. Instead of using ' - 'it, pass features="xml" into the BeautifulSoup constructor.') - super(BeautifulStoneSoup, self).__init__(*args, **kwargs) - - -class StopParsing(Exception): - pass - - -class FeatureNotFound(ValueError): - pass - - -#By default, act as an HTML pretty-printer. 
-if __name__ == '__main__': - import sys - soup = BeautifulSoup(sys.stdin) - print soup.prettify() diff --git a/lib/bs4/builder/__init__.py b/lib/bs4/builder/__init__.py deleted file mode 100644 index bae453e..0000000 --- a/lib/bs4/builder/__init__.py +++ /dev/null @@ -1,316 +0,0 @@ -from collections import defaultdict -import itertools -import sys -from bs4.element import ( - CharsetMetaAttributeValue, - ContentMetaAttributeValue, - whitespace_re - ) - -__all__ = [ - 'HTMLTreeBuilder', - 'SAXTreeBuilder', - 'TreeBuilder', - 'TreeBuilderRegistry', - ] - -# Some useful features for a TreeBuilder to have. -FAST = 'fast' -PERMISSIVE = 'permissive' -STRICT = 'strict' -XML = 'xml' -HTML = 'html' -HTML_5 = 'html5' - - -class TreeBuilderRegistry(object): - - def __init__(self): - self.builders_for_feature = defaultdict(list) - self.builders = [] - - def register(self, treebuilder_class): - """Register a treebuilder based on its advertised features.""" - for feature in treebuilder_class.features: - self.builders_for_feature[feature].insert(0, treebuilder_class) - self.builders.insert(0, treebuilder_class) - - def lookup(self, *features): - if len(self.builders) == 0: - # There are no builders at all. - return None - - if len(features) == 0: - # They didn't ask for any features. Give them the most - # recently registered builder. - return self.builders[0] - - # Go down the list of features in order, and eliminate any builders - # that don't match every feature. - features = list(features) - features.reverse() - candidates = None - candidate_set = None - while len(features) > 0: - feature = features.pop() - we_have_the_feature = self.builders_for_feature.get(feature, []) - if len(we_have_the_feature) > 0: - if candidates is None: - candidates = we_have_the_feature - candidate_set = set(candidates) - else: - # Eliminate any candidates that don't have this feature. 
- candidate_set = candidate_set.intersection( - set(we_have_the_feature)) - - # The only valid candidates are the ones in candidate_set. - # Go through the original list of candidates and pick the first one - # that's in candidate_set. - if candidate_set is None: - return None - for candidate in candidates: - if candidate in candidate_set: - return candidate - return None - -# The BeautifulSoup class will take feature lists from developers and use them -# to look up builders in this registry. -builder_registry = TreeBuilderRegistry() - -class TreeBuilder(object): - """Turn a document into a Beautiful Soup object tree.""" - - features = [] - - is_xml = False - preserve_whitespace_tags = set() - empty_element_tags = None # A tag will be considered an empty-element - # tag when and only when it has no contents. - - # A value for these tag/attribute combinations is a space- or - # comma-separated list of CDATA, rather than a single CDATA. - cdata_list_attributes = {} - - - def __init__(self): - self.soup = None - - def reset(self): - pass - - def can_be_empty_element(self, tag_name): - """Might a tag with this name be an empty-element tag? - - The final markup may or may not actually present this tag as - self-closing. - - For instance: an HTMLBuilder does not consider a

tag to be - an empty-element tag (it's not in - HTMLBuilder.empty_element_tags). This means an empty

tag - will be presented as "

", not "

". - - The default implementation has no opinion about which tags are - empty-element tags, so a tag will be presented as an - empty-element tag if and only if it has no contents. - "" will become "", and "bar" will - be left alone. - """ - if self.empty_element_tags is None: - return True - return tag_name in self.empty_element_tags - - def feed(self, markup): - raise NotImplementedError() - - def prepare_markup(self, markup, user_specified_encoding=None, - document_declared_encoding=None): - return markup, None, None, False - - def test_fragment_to_document(self, fragment): - """Wrap an HTML fragment to make it look like a document. - - Different parsers do this differently. For instance, lxml - introduces an empty tag, and html5lib - doesn't. Abstracting this away lets us write simple tests - which run HTML fragments through the parser and compare the - results against other HTML fragments. - - This method should not be used outside of tests. - """ - return fragment - - def set_up_substitutions(self, tag): - return False - - def _replace_cdata_list_attribute_values(self, tag_name, attrs): - """Replaces class="foo bar" with class=["foo", "bar"] - - Modifies its input in place. - """ - if self.cdata_list_attributes: - universal = self.cdata_list_attributes.get('*', []) - tag_specific = self.cdata_list_attributes.get( - tag_name.lower(), []) - for cdata_list_attr in itertools.chain(universal, tag_specific): - if cdata_list_attr in attrs: - # Basically, we have a "class" attribute whose - # value is a whitespace-separated list of CSS - # classes. Split it into a list. - value = attrs[cdata_list_attr] - if isinstance(value, basestring): - values = whitespace_re.split(value) - else: - # html5lib sometimes calls setAttributes twice - # for the same tag when rearranging the parse - # tree. On the second call the attribute value - # here is already a list. If this happens, - # leave the value alone rather than trying to - # split it again. 
- values = value - attrs[cdata_list_attr] = values - return attrs - -class SAXTreeBuilder(TreeBuilder): - """A Beautiful Soup treebuilder that listens for SAX events.""" - - def feed(self, markup): - raise NotImplementedError() - - def close(self): - pass - - def startElement(self, name, attrs): - attrs = dict((key[1], value) for key, value in list(attrs.items())) - #print "Start %s, %r" % (name, attrs) - self.soup.handle_starttag(name, attrs) - - def endElement(self, name): - #print "End %s" % name - self.soup.handle_endtag(name) - - def startElementNS(self, nsTuple, nodeName, attrs): - # Throw away (ns, nodeName) for now. - self.startElement(nodeName, attrs) - - def endElementNS(self, nsTuple, nodeName): - # Throw away (ns, nodeName) for now. - self.endElement(nodeName) - #handler.endElementNS((ns, node.nodeName), node.nodeName) - - def startPrefixMapping(self, prefix, nodeValue): - # Ignore the prefix for now. - pass - - def endPrefixMapping(self, prefix): - # Ignore the prefix for now. - # handler.endPrefixMapping(prefix) - pass - - def characters(self, content): - self.soup.handle_data(content) - - def startDocument(self): - pass - - def endDocument(self): - pass - - -class HTMLTreeBuilder(TreeBuilder): - """This TreeBuilder knows facts about HTML. - - Such as which tags are empty-element tags. - """ - - preserve_whitespace_tags = set(['pre', 'textarea']) - empty_element_tags = set(['br' , 'hr', 'input', 'img', 'meta', - 'spacer', 'link', 'frame', 'base']) - - # The HTML standard defines these attributes as containing a - # space-separated list of values, not a single value. That is, - # class="foo bar" means that the 'class' attribute has two values, - # 'foo' and 'bar', not the single value 'foo bar'. When we - # encounter one of these attributes, we will parse its value into - # a list of values if possible. Upon output, the list will be - # converted back into a string. 
- cdata_list_attributes = { - "*" : ['class', 'accesskey', 'dropzone'], - "a" : ['rel', 'rev'], - "link" : ['rel', 'rev'], - "td" : ["headers"], - "th" : ["headers"], - "td" : ["headers"], - "form" : ["accept-charset"], - "object" : ["archive"], - - # These are HTML5 specific, as are *.accesskey and *.dropzone above. - "area" : ["rel"], - "icon" : ["sizes"], - "iframe" : ["sandbox"], - "output" : ["for"], - } - - def set_up_substitutions(self, tag): - # We are only interested in tags - if tag.name != 'meta': - return False - - http_equiv = tag.get('http-equiv') - content = tag.get('content') - charset = tag.get('charset') - - # We are interested in tags that say what encoding the - # document was originally in. This means HTML 5-style - # tags that provide the "charset" attribute. It also means - # HTML 4-style tags that provide the "content" - # attribute and have "http-equiv" set to "content-type". - # - # In both cases we will replace the value of the appropriate - # attribute with a standin object that can take on any - # encoding. - meta_encoding = None - if charset is not None: - # HTML 5 style: - # - meta_encoding = charset - tag['charset'] = CharsetMetaAttributeValue(charset) - - elif (content is not None and http_equiv is not None - and http_equiv.lower() == 'content-type'): - # HTML 4 style: - # - tag['content'] = ContentMetaAttributeValue(content) - - return (meta_encoding is not None) - -def register_treebuilders_from(module): - """Copy TreeBuilders from the given module into this module.""" - # I'm fairly sure this is not the best way to do this. - this_module = sys.modules['bs4.builder'] - for name in module.__all__: - obj = getattr(module, name) - - if issubclass(obj, TreeBuilder): - setattr(this_module, name, obj) - this_module.__all__.append(name) - # Register the builder while we're at it. 
- this_module.builder_registry.register(obj) - -# Builders are registered in reverse order of priority, so that custom -# builder registrations will take precedence. In general, we want lxml -# to take precedence over html5lib, because it's faster. And we only -# want to use HTMLParser as a last result. -from . import _htmlparser -register_treebuilders_from(_htmlparser) -try: - from . import _html5lib - register_treebuilders_from(_html5lib) -except ImportError: - # They don't have html5lib installed. - pass -try: - from . import _lxml - register_treebuilders_from(_lxml) -except ImportError: - # They don't have lxml installed. - pass diff --git a/lib/bs4/builder/_html5lib.py b/lib/bs4/builder/_html5lib.py deleted file mode 100644 index e439ac8..0000000 --- a/lib/bs4/builder/_html5lib.py +++ /dev/null @@ -1,222 +0,0 @@ -__all__ = [ - 'HTML5TreeBuilder', - ] - -import warnings -from bs4.builder import ( - PERMISSIVE, - HTML, - HTML_5, - HTMLTreeBuilder, - ) -from bs4.element import NamespacedAttribute -import html5lib -from html5lib.constants import namespaces -from bs4.element import ( - Comment, - Doctype, - NavigableString, - Tag, - ) - -class HTML5TreeBuilder(HTMLTreeBuilder): - """Use html5lib to build a tree.""" - - features = ['html5lib', PERMISSIVE, HTML_5, HTML] - - def prepare_markup(self, markup, user_specified_encoding): - # Store the user-specified encoding for use later on. - self.user_specified_encoding = user_specified_encoding - return markup, None, None, False - - # These methods are defined by Beautiful Soup. - def feed(self, markup): - if self.soup.parse_only is not None: - warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.") - parser = html5lib.HTMLParser(tree=self.create_treebuilder) - doc = parser.parse(markup, encoding=self.user_specified_encoding) - - # Set the character encoding detected by the tokenizer. 
- if isinstance(markup, unicode): - # We need to special-case this because html5lib sets - # charEncoding to UTF-8 if it gets Unicode input. - doc.original_encoding = None - else: - doc.original_encoding = parser.tokenizer.stream.charEncoding[0] - - def create_treebuilder(self, namespaceHTMLElements): - self.underlying_builder = TreeBuilderForHtml5lib( - self.soup, namespaceHTMLElements) - return self.underlying_builder - - def test_fragment_to_document(self, fragment): - """See `TreeBuilder`.""" - return u'%s' % fragment - - -class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder): - - def __init__(self, soup, namespaceHTMLElements): - self.soup = soup - super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements) - - def documentClass(self): - self.soup.reset() - return Element(self.soup, self.soup, None) - - def insertDoctype(self, token): - name = token["name"] - publicId = token["publicId"] - systemId = token["systemId"] - - doctype = Doctype.for_name_and_ids(name, publicId, systemId) - self.soup.object_was_parsed(doctype) - - def elementClass(self, name, namespace): - tag = self.soup.new_tag(name, namespace) - return Element(tag, self.soup, namespace) - - def commentClass(self, data): - return TextNode(Comment(data), self.soup) - - def fragmentClass(self): - self.soup = BeautifulSoup("") - self.soup.name = "[document_fragment]" - return Element(self.soup, self.soup, None) - - def appendChild(self, node): - # XXX This code is not covered by the BS4 tests. 
- self.soup.append(node.element) - - def getDocument(self): - return self.soup - - def getFragment(self): - return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element - -class AttrList(object): - def __init__(self, element): - self.element = element - self.attrs = dict(self.element.attrs) - def __iter__(self): - return list(self.attrs.items()).__iter__() - def __setitem__(self, name, value): - "set attr", name, value - self.element[name] = value - def items(self): - return list(self.attrs.items()) - def keys(self): - return list(self.attrs.keys()) - def __len__(self): - return len(self.attrs) - def __getitem__(self, name): - return self.attrs[name] - def __contains__(self, name): - return name in list(self.attrs.keys()) - - -class Element(html5lib.treebuilders._base.Node): - def __init__(self, element, soup, namespace): - html5lib.treebuilders._base.Node.__init__(self, element.name) - self.element = element - self.soup = soup - self.namespace = namespace - - def appendChild(self, node): - if (node.element.__class__ == NavigableString and self.element.contents - and self.element.contents[-1].__class__ == NavigableString): - # Concatenate new text onto old text node - # XXX This has O(n^2) performance, for input like - # "aaa..." 
- old_element = self.element.contents[-1] - new_element = self.soup.new_string(old_element + node.element) - old_element.replace_with(new_element) - self.soup._most_recent_element = new_element - else: - self.soup.object_was_parsed(node.element, parent=self.element) - - def getAttributes(self): - return AttrList(self.element) - - def setAttributes(self, attributes): - if attributes is not None and len(attributes) > 0: - - converted_attributes = [] - for name, value in list(attributes.items()): - if isinstance(name, tuple): - new_name = NamespacedAttribute(*name) - del attributes[name] - attributes[new_name] = value - - self.soup.builder._replace_cdata_list_attribute_values( - self.name, attributes) - for name, value in attributes.items(): - self.element[name] = value - - # The attributes may contain variables that need substitution. - # Call set_up_substitutions manually. - # - # The Tag constructor called this method when the Tag was created, - # but we just set/changed the attributes, so call it again. 
- self.soup.builder.set_up_substitutions(self.element) - attributes = property(getAttributes, setAttributes) - - def insertText(self, data, insertBefore=None): - text = TextNode(self.soup.new_string(data), self.soup) - if insertBefore: - self.insertBefore(text, insertBefore) - else: - self.appendChild(text) - - def insertBefore(self, node, refNode): - index = self.element.index(refNode.element) - if (node.element.__class__ == NavigableString and self.element.contents - and self.element.contents[index-1].__class__ == NavigableString): - # (See comments in appendChild) - old_node = self.element.contents[index-1] - new_str = self.soup.new_string(old_node + node.element) - old_node.replace_with(new_str) - else: - self.element.insert(index, node.element) - node.parent = self - - def removeChild(self, node): - node.element.extract() - - def reparentChildren(self, newParent): - while self.element.contents: - child = self.element.contents[0] - child.extract() - if isinstance(child, Tag): - newParent.appendChild( - Element(child, self.soup, namespaces["html"])) - else: - newParent.appendChild( - TextNode(child, self.soup)) - - def cloneNode(self): - tag = self.soup.new_tag(self.element.name, self.namespace) - node = Element(tag, self.soup, self.namespace) - for key,value in self.attributes: - node.attributes[key] = value - return node - - def hasContent(self): - return self.element.contents - - def getNameTuple(self): - if self.namespace == None: - return namespaces["html"], self.name - else: - return self.namespace, self.name - - nameTuple = property(getNameTuple) - -class TextNode(Element): - def __init__(self, element, soup): - html5lib.treebuilders._base.Node.__init__(self, None) - self.element = element - self.soup = soup - - def cloneNode(self): - raise NotImplementedError diff --git a/lib/bs4/builder/_htmlparser.py b/lib/bs4/builder/_htmlparser.py deleted file mode 100644 index 65ee618..0000000 --- a/lib/bs4/builder/_htmlparser.py +++ /dev/null @@ -1,249 +0,0 @@ 
-"""Use the HTMLParser library to parse HTML files that aren't too bad.""" - -__all__ = [ - 'HTMLParserTreeBuilder', - ] - -from HTMLParser import ( - HTMLParser, - HTMLParseError, - ) -import sys -import warnings - -# Starting in Python 3.2, the HTMLParser constructor takes a 'strict' -# argument, which we'd like to set to False. Unfortunately, -# http://bugs.python.org/issue13273 makes strict=True a better bet -# before Python 3.2.3. -# -# At the end of this file, we monkeypatch HTMLParser so that -# strict=True works well on Python 3.2.2. -major, minor, release = sys.version_info[:3] -CONSTRUCTOR_TAKES_STRICT = ( - major > 3 - or (major == 3 and minor > 2) - or (major == 3 and minor == 2 and release >= 3)) - -from bs4.element import ( - CData, - Comment, - Declaration, - Doctype, - ProcessingInstruction, - ) -from bs4.dammit import EntitySubstitution, UnicodeDammit - -from bs4.builder import ( - HTML, - HTMLTreeBuilder, - STRICT, - ) - - -HTMLPARSER = 'html.parser' - -class BeautifulSoupHTMLParser(HTMLParser): - def handle_starttag(self, name, attrs): - # XXX namespace - self.soup.handle_starttag(name, None, None, dict(attrs)) - - def handle_endtag(self, name): - self.soup.handle_endtag(name) - - def handle_data(self, data): - self.soup.handle_data(data) - - def handle_charref(self, name): - # XXX workaround for a bug in HTMLParser. Remove this once - # it's fixed. 
- if name.startswith('x'): - real_name = int(name.lstrip('x'), 16) - elif name.startswith('X'): - real_name = int(name.lstrip('X'), 16) - else: - real_name = int(name) - - try: - data = unichr(real_name) - except (ValueError, OverflowError), e: - data = u"\N{REPLACEMENT CHARACTER}" - - self.handle_data(data) - - def handle_entityref(self, name): - character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name) - if character is not None: - data = character - else: - data = "&%s;" % name - self.handle_data(data) - - def handle_comment(self, data): - self.soup.endData() - self.soup.handle_data(data) - self.soup.endData(Comment) - - def handle_decl(self, data): - self.soup.endData() - if data.startswith("DOCTYPE "): - data = data[len("DOCTYPE "):] - elif data == 'DOCTYPE': - # i.e. "" - data = '' - self.soup.handle_data(data) - self.soup.endData(Doctype) - - def unknown_decl(self, data): - if data.upper().startswith('CDATA['): - cls = CData - data = data[len('CDATA['):] - else: - cls = Declaration - self.soup.endData() - self.soup.handle_data(data) - self.soup.endData(cls) - - def handle_pi(self, data): - self.soup.endData() - if data.endswith("?") and data.lower().startswith("xml"): - # "An XHTML processing instruction using the trailing '?' - # will cause the '?' to be included in data." - HTMLParser - # docs. - # - # Strip the question mark so we don't end up with two - # question marks. 
- data = data[:-1] - self.soup.handle_data(data) - self.soup.endData(ProcessingInstruction) - - -class HTMLParserTreeBuilder(HTMLTreeBuilder): - - is_xml = False - features = [HTML, STRICT, HTMLPARSER] - - def __init__(self, *args, **kwargs): - if CONSTRUCTOR_TAKES_STRICT: - kwargs['strict'] = False - self.parser_args = (args, kwargs) - - def prepare_markup(self, markup, user_specified_encoding=None, - document_declared_encoding=None): - """ - :return: A 4-tuple (markup, original encoding, encoding - declared within markup, whether any characters had to be - replaced with REPLACEMENT CHARACTER). - """ - if isinstance(markup, unicode): - return markup, None, None, False - - try_encodings = [user_specified_encoding, document_declared_encoding] - dammit = UnicodeDammit(markup, try_encodings, is_html=True) - return (dammit.markup, dammit.original_encoding, - dammit.declared_html_encoding, - dammit.contains_replacement_characters) - - def feed(self, markup): - args, kwargs = self.parser_args - parser = BeautifulSoupHTMLParser(*args, **kwargs) - parser.soup = self.soup - try: - parser.feed(markup) - except HTMLParseError, e: - warnings.warn(RuntimeWarning( - "Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help.")) - raise e - -# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some -# 3.2.3 code. This ensures they don't treat markup like

as a -# string. -# -# XXX This code can be removed once most Python 3 users are on 3.2.3. -if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT: - import re - attrfind_tolerant = re.compile( - r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*' - r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?') - HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant - - locatestarttagend = re.compile(r""" - <[a-zA-Z][-.a-zA-Z0-9:_]* # tag name - (?:\s+ # whitespace before attribute name - (?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name - (?:\s*=\s* # value indicator - (?:'[^']*' # LITA-enclosed value - |\"[^\"]*\" # LIT-enclosed value - |[^'\">\s]+ # bare value - ) - )? - ) - )* - \s* # trailing whitespace -""", re.VERBOSE) - BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend - - from html.parser import tagfind, attrfind - - def parse_starttag(self, i): - self.__starttag_text = None - endpos = self.check_for_whole_start_tag(i) - if endpos < 0: - return endpos - rawdata = self.rawdata - self.__starttag_text = rawdata[i:endpos] - - # Now parse the data between i+1 and j into a tag and attrs - attrs = [] - match = tagfind.match(rawdata, i+1) - assert match, 'unexpected call to parse_starttag()' - k = match.end() - self.lasttag = tag = rawdata[i+1:k].lower() - while k < endpos: - if self.strict: - m = attrfind.match(rawdata, k) - else: - m = attrfind_tolerant.match(rawdata, k) - if not m: - break - attrname, rest, attrvalue = m.group(1, 2, 3) - if not rest: - attrvalue = None - elif attrvalue[:1] == '\'' == attrvalue[-1:] or \ - attrvalue[:1] == '"' == attrvalue[-1:]: - attrvalue = attrvalue[1:-1] - if attrvalue: - attrvalue = self.unescape(attrvalue) - attrs.append((attrname.lower(), attrvalue)) - k = m.end() - - end = rawdata[k:endpos].strip() - if end not in (">", "/>"): - lineno, offset = self.getpos() - if "\n" in self.__starttag_text: - lineno = lineno + self.__starttag_text.count("\n") - offset = len(self.__starttag_text) \ - - self.__starttag_text.rfind("\n") - 
else: - offset = offset + len(self.__starttag_text) - if self.strict: - self.error("junk characters in start tag: %r" - % (rawdata[k:endpos][:20],)) - self.handle_data(rawdata[i:endpos]) - return endpos - if end.endswith('/>'): - # XHTML-style empty tag: - self.handle_startendtag(tag, attrs) - else: - self.handle_starttag(tag, attrs) - if tag in self.CDATA_CONTENT_ELEMENTS: - self.set_cdata_mode(tag) - return endpos - - def set_cdata_mode(self, elem): - self.cdata_elem = elem.lower() - self.interesting = re.compile(r'' % self.cdata_elem, re.I) - - BeautifulSoupHTMLParser.parse_starttag = parse_starttag - BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode - - CONSTRUCTOR_TAKES_STRICT = True diff --git a/lib/bs4/builder/_lxml.py b/lib/bs4/builder/_lxml.py deleted file mode 100644 index be35d70..0000000 --- a/lib/bs4/builder/_lxml.py +++ /dev/null @@ -1,199 +0,0 @@ -__all__ = [ - 'LXMLTreeBuilderForXML', - 'LXMLTreeBuilder', - ] - -from io import BytesIO -from StringIO import StringIO -import collections -from lxml import etree -from bs4.element import Comment, Doctype, NamespacedAttribute -from bs4.builder import ( - FAST, - HTML, - HTMLTreeBuilder, - PERMISSIVE, - TreeBuilder, - XML) -from bs4.dammit import UnicodeDammit - -LXML = 'lxml' - -class LXMLTreeBuilderForXML(TreeBuilder): - DEFAULT_PARSER_CLASS = etree.XMLParser - - is_xml = True - - # Well, it's permissive by XML parser standards. - features = [LXML, XML, FAST, PERMISSIVE] - - CHUNK_SIZE = 512 - - # This namespace mapping is specified in the XML Namespace - # standard. - DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"} - - @property - def default_parser(self): - # This can either return a parser object or a class, which - # will be instantiated with default arguments. 
- return etree.XMLParser(target=self, strip_cdata=False, recover=True) - - def __init__(self, parser=None, empty_element_tags=None): - if empty_element_tags is not None: - self.empty_element_tags = set(empty_element_tags) - if parser is None: - # Use the default parser. - parser = self.default_parser - if isinstance(parser, collections.Callable): - # Instantiate the parser with default arguments - parser = parser(target=self, strip_cdata=False) - self.parser = parser - self.soup = None - self.nsmaps = [self.DEFAULT_NSMAPS] - - def _getNsTag(self, tag): - # Split the namespace URL out of a fully-qualified lxml tag - # name. Copied from lxml's src/lxml/sax.py. - if tag[0] == '{': - return tuple(tag[1:].split('}', 1)) - else: - return (None, tag) - - def prepare_markup(self, markup, user_specified_encoding=None, - document_declared_encoding=None): - """ - :return: A 3-tuple (markup, original encoding, encoding - declared within markup). - """ - if isinstance(markup, unicode): - return markup, None, None, False - - try_encodings = [user_specified_encoding, document_declared_encoding] - dammit = UnicodeDammit(markup, try_encodings, is_html=True) - return (dammit.markup, dammit.original_encoding, - dammit.declared_html_encoding, - dammit.contains_replacement_characters) - - def feed(self, markup): - if isinstance(markup, bytes): - markup = BytesIO(markup) - elif isinstance(markup, unicode): - markup = StringIO(markup) - # Call feed() at least once, even if the markup is empty, - # or the parser won't be initialized. - data = markup.read(self.CHUNK_SIZE) - self.parser.feed(data) - while data != '': - # Now call feed() on the rest of the data, chunk by chunk. - data = markup.read(self.CHUNK_SIZE) - if data != '': - self.parser.feed(data) - self.parser.close() - - def close(self): - self.nsmaps = [self.DEFAULT_NSMAPS] - - def start(self, name, attrs, nsmap={}): - # Make sure attrs is a mutable dict--lxml may send an immutable dictproxy. 
- attrs = dict(attrs) - nsprefix = None - # Invert each namespace map as it comes in. - if len(self.nsmaps) > 1: - # There are no new namespaces for this tag, but - # non-default namespaces are in play, so we need a - # separate tag stack to know when they end. - self.nsmaps.append(None) - elif len(nsmap) > 0: - # A new namespace mapping has come into play. - inverted_nsmap = dict((value, key) for key, value in nsmap.items()) - self.nsmaps.append(inverted_nsmap) - # Also treat the namespace mapping as a set of attributes on the - # tag, so we can recreate it later. - attrs = attrs.copy() - for prefix, namespace in nsmap.items(): - attribute = NamespacedAttribute( - "xmlns", prefix, "http://www.w3.org/2000/xmlns/") - attrs[attribute] = namespace - - # Namespaces are in play. Find any attributes that came in - # from lxml with namespaces attached to their names, and - # turn then into NamespacedAttribute objects. - new_attrs = {} - for attr, value in attrs.items(): - namespace, attr = self._getNsTag(attr) - if namespace is None: - new_attrs[attr] = value - else: - nsprefix = self._prefix_for_namespace(namespace) - attr = NamespacedAttribute(nsprefix, attr, namespace) - new_attrs[attr] = value - attrs = new_attrs - - namespace, name = self._getNsTag(name) - nsprefix = self._prefix_for_namespace(namespace) - self.soup.handle_starttag(name, namespace, nsprefix, attrs) - - def _prefix_for_namespace(self, namespace): - """Find the currently active prefix for the given namespace.""" - if namespace is None: - return None - for inverted_nsmap in reversed(self.nsmaps): - if inverted_nsmap is not None and namespace in inverted_nsmap: - return inverted_nsmap[namespace] - return None - - def end(self, name): - self.soup.endData() - completed_tag = self.soup.tagStack[-1] - namespace, name = self._getNsTag(name) - nsprefix = None - if namespace is not None: - for inverted_nsmap in reversed(self.nsmaps): - if inverted_nsmap is not None and namespace in inverted_nsmap: - nsprefix = 
inverted_nsmap[namespace] - break - self.soup.handle_endtag(name, nsprefix) - if len(self.nsmaps) > 1: - # This tag, or one of its parents, introduced a namespace - # mapping, so pop it off the stack. - self.nsmaps.pop() - - def pi(self, target, data): - pass - - def data(self, content): - self.soup.handle_data(content) - - def doctype(self, name, pubid, system): - self.soup.endData() - doctype = Doctype.for_name_and_ids(name, pubid, system) - self.soup.object_was_parsed(doctype) - - def comment(self, content): - "Handle comments as Comment objects." - self.soup.endData() - self.soup.handle_data(content) - self.soup.endData(Comment) - - def test_fragment_to_document(self, fragment): - """See `TreeBuilder`.""" - return u'\n%s' % fragment - - -class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML): - - features = [LXML, HTML, FAST, PERMISSIVE] - is_xml = False - - @property - def default_parser(self): - return etree.HTMLParser - - def feed(self, markup): - self.parser.feed(markup) - self.parser.close() - - def test_fragment_to_document(self, fragment): - """See `TreeBuilder`.""" - return u'%s' % fragment diff --git a/lib/bs4/dammit.py b/lib/bs4/dammit.py deleted file mode 100644 index a733cad..0000000 --- a/lib/bs4/dammit.py +++ /dev/null @@ -1,827 +0,0 @@ -# -*- coding: utf-8 -*- -"""Beautiful Soup bonus library: Unicode, Dammit - -This class forces XML data into a standard format (usually to UTF-8 or -Unicode). It is heavily based on code from Mark Pilgrim's Universal -Feed Parser. It does not rewrite the XML or HTML to reflect a new -encoding; that's the tree builder's job. -""" - -import codecs -from htmlentitydefs import codepoint2name -import re -import logging - -# Import a library to autodetect character encodings. -chardet_type = None -try: - # First try the fast C implementation. 
- # PyPI package: cchardet - import cchardet - def chardet_dammit(s): - return cchardet.detect(s)['encoding'] -except ImportError: - try: - # Fall back to the pure Python implementation - # Debian package: python-chardet - # PyPI package: chardet - import chardet - def chardet_dammit(s): - return chardet.detect(s)['encoding'] - #import chardet.constants - #chardet.constants._debug = 1 - except ImportError: - # No chardet available. - def chardet_dammit(s): - return None - -# Available from http://cjkpython.i18n.org/. -try: - import iconv_codec -except ImportError: - pass - -xml_encoding_re = re.compile( - '^<\?.*encoding=[\'"](.*?)[\'"].*\?>'.encode(), re.I) -html_meta_re = re.compile( - '<\s*meta[^>]+charset\s*=\s*["\']?([^>]*?)[ /;\'">]'.encode(), re.I) - -class EntitySubstitution(object): - - """Substitute XML or HTML entities for the corresponding characters.""" - - def _populate_class_variables(): - lookup = {} - reverse_lookup = {} - characters_for_re = [] - for codepoint, name in list(codepoint2name.items()): - character = unichr(codepoint) - if codepoint != 34: - # There's no point in turning the quotation mark into - # ", unless it happens within an attribute value, which - # is handled elsewhere. - characters_for_re.append(character) - lookup[character] = name - # But we do want to turn " into the quotation mark. 
- reverse_lookup[name] = character - re_definition = "[%s]" % "".join(characters_for_re) - return lookup, reverse_lookup, re.compile(re_definition) - (CHARACTER_TO_HTML_ENTITY, HTML_ENTITY_TO_CHARACTER, - CHARACTER_TO_HTML_ENTITY_RE) = _populate_class_variables() - - CHARACTER_TO_XML_ENTITY = { - "'": "apos", - '"': "quot", - "&": "amp", - "<": "lt", - ">": "gt", - } - - BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|" - "&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)" - ")") - - AMPERSAND_OR_BRACKET = re.compile("([<>&])") - - @classmethod - def _substitute_html_entity(cls, matchobj): - entity = cls.CHARACTER_TO_HTML_ENTITY.get(matchobj.group(0)) - return "&%s;" % entity - - @classmethod - def _substitute_xml_entity(cls, matchobj): - """Used with a regular expression to substitute the - appropriate XML entity for an XML special character.""" - entity = cls.CHARACTER_TO_XML_ENTITY[matchobj.group(0)] - return "&%s;" % entity - - @classmethod - def quoted_attribute_value(self, value): - """Make a value into a quoted XML attribute, possibly escaping it. - - Most strings will be quoted using double quotes. - - Bob's Bar -> "Bob's Bar" - - If a string contains double quotes, it will be quoted using - single quotes. - - Welcome to "my bar" -> 'Welcome to "my bar"' - - If a string contains both single and double quotes, the - double quotes will be escaped, and the string will be quoted - using double quotes. - - Welcome to "Bob's Bar" -> "Welcome to "Bob's bar" - """ - quote_with = '"' - if '"' in value: - if "'" in value: - # The string contains both single and double - # quotes. Turn the double quotes into - # entities. We quote the double quotes rather than - # the single quotes because the entity name is - # """ whether this is HTML or XML. If we - # quoted the single quotes, we'd have to decide - # between ' and &squot;. - replace_with = """ - value = value.replace('"', replace_with) - else: - # There are double quotes but no single quotes. 
- # We can use single quotes to quote the attribute. - quote_with = "'" - return quote_with + value + quote_with - - @classmethod - def substitute_xml(cls, value, make_quoted_attribute=False): - """Substitute XML entities for special XML characters. - - :param value: A string to be substituted. The less-than sign - will become <, the greater-than sign will become >, - and any ampersands will become &. If you want ampersands - that appear to be part of an entity definition to be left - alone, use substitute_xml_containing_entities() instead. - - :param make_quoted_attribute: If True, then the string will be - quoted, as befits an attribute value. - """ - # Escape angle brackets and ampersands. - value = cls.AMPERSAND_OR_BRACKET.sub( - cls._substitute_xml_entity, value) - - if make_quoted_attribute: - value = cls.quoted_attribute_value(value) - return value - - @classmethod - def substitute_xml_containing_entities( - cls, value, make_quoted_attribute=False): - """Substitute XML entities for special XML characters. - - :param value: A string to be substituted. The less-than sign will - become <, the greater-than sign will become >, and any - ampersands that are not part of an entity defition will - become &. - - :param make_quoted_attribute: If True, then the string will be - quoted, as befits an attribute value. - """ - # Escape angle brackets, and ampersands that aren't part of - # entities. - value = cls.BARE_AMPERSAND_OR_BRACKET.sub( - cls._substitute_xml_entity, value) - - if make_quoted_attribute: - value = cls.quoted_attribute_value(value) - return value - - - @classmethod - def substitute_html(cls, s): - """Replace certain Unicode characters with named HTML entities. - - This differs from data.encode(encoding, 'xmlcharrefreplace') - in that the goal is to make the result more readable (to those - with ASCII displays) rather than to recover from - errors. 
There's absolutely nothing wrong with a UTF-8 string - containg a LATIN SMALL LETTER E WITH ACUTE, but replacing that - character with "é" will make it more readable to some - people. - """ - return cls.CHARACTER_TO_HTML_ENTITY_RE.sub( - cls._substitute_html_entity, s) - - -class UnicodeDammit: - """A class for detecting the encoding of a *ML document and - converting it to a Unicode string. If the source encoding is - windows-1252, can replace MS smart quotes with their HTML or XML - equivalents.""" - - # This dictionary maps commonly seen values for "charset" in HTML - # meta tags to the corresponding Python codec names. It only covers - # values that aren't in Python's aliases and can't be determined - # by the heuristics in find_codec. - CHARSET_ALIASES = {"macintosh": "mac-roman", - "x-sjis": "shift-jis"} - - ENCODINGS_WITH_SMART_QUOTES = [ - "windows-1252", - "iso-8859-1", - "iso-8859-2", - ] - - def __init__(self, markup, override_encodings=[], - smart_quotes_to=None, is_html=False): - self.declared_html_encoding = None - self.smart_quotes_to = smart_quotes_to - self.tried_encodings = [] - self.contains_replacement_characters = False - - if markup == '' or isinstance(markup, unicode): - self.markup = markup - self.unicode_markup = unicode(markup) - self.original_encoding = None - return - - new_markup, document_encoding, sniffed_encoding = \ - self._detectEncoding(markup, is_html) - self.markup = new_markup - - u = None - if new_markup != markup: - # _detectEncoding modified the markup, then converted it to - # Unicode and then to UTF-8. So convert it from UTF-8. 
- u = self._convert_from("utf8") - self.original_encoding = sniffed_encoding - - if not u: - for proposed_encoding in ( - override_encodings + [document_encoding, sniffed_encoding]): - if proposed_encoding is not None: - u = self._convert_from(proposed_encoding) - if u: - break - - # If no luck and we have auto-detection library, try that: - if not u and not isinstance(self.markup, unicode): - u = self._convert_from(chardet_dammit(self.markup)) - - # As a last resort, try utf-8 and windows-1252: - if not u: - for proposed_encoding in ("utf-8", "windows-1252"): - u = self._convert_from(proposed_encoding) - if u: - break - - # As an absolute last resort, try the encodings again with - # character replacement. - if not u: - for proposed_encoding in ( - override_encodings + [ - document_encoding, sniffed_encoding, "utf-8", "windows-1252"]): - if proposed_encoding != "ascii": - u = self._convert_from(proposed_encoding, "replace") - if u is not None: - logging.warning( - "Some characters could not be decoded, and were " - "replaced with REPLACEMENT CHARACTER.") - self.contains_replacement_characters = True - break - - # We could at this point force it to ASCII, but that would - # destroy so much data that I think giving up is better - self.unicode_markup = u - if not u: - self.original_encoding = None - - def _sub_ms_char(self, match): - """Changes a MS smart quote character to an XML or HTML - entity, or an ASCII character.""" - orig = match.group(1) - if self.smart_quotes_to == 'ascii': - sub = self.MS_CHARS_TO_ASCII.get(orig).encode() - else: - sub = self.MS_CHARS.get(orig) - if type(sub) == tuple: - if self.smart_quotes_to == 'xml': - sub = '&#x'.encode() + sub[1].encode() + ';'.encode() - else: - sub = '&'.encode() + sub[0].encode() + ';'.encode() - else: - sub = sub.encode() - return sub - - def _convert_from(self, proposed, errors="strict"): - proposed = self.find_codec(proposed) - if not proposed or (proposed, errors) in self.tried_encodings: - return None - 
self.tried_encodings.append((proposed, errors)) - markup = self.markup - # Convert smart quotes to HTML if coming from an encoding - # that might have them. - if (self.smart_quotes_to is not None - and proposed.lower() in self.ENCODINGS_WITH_SMART_QUOTES): - smart_quotes_re = b"([\x80-\x9f])" - smart_quotes_compiled = re.compile(smart_quotes_re) - markup = smart_quotes_compiled.sub(self._sub_ms_char, markup) - - try: - #print "Trying to convert document to %s (errors=%s)" % ( - # proposed, errors) - u = self._to_unicode(markup, proposed, errors) - self.markup = u - self.original_encoding = proposed - except Exception as e: - #print "That didn't work!" - #print e - return None - #print "Correct encoding: %s" % proposed - return self.markup - - def _to_unicode(self, data, encoding, errors="strict"): - '''Given a string and its encoding, decodes the string into Unicode. - %encoding is a string recognized by encodings.aliases''' - - # strip Byte Order Mark (if present) - if (len(data) >= 4) and (data[:2] == '\xfe\xff') \ - and (data[2:4] != '\x00\x00'): - encoding = 'utf-16be' - data = data[2:] - elif (len(data) >= 4) and (data[:2] == '\xff\xfe') \ - and (data[2:4] != '\x00\x00'): - encoding = 'utf-16le' - data = data[2:] - elif data[:3] == '\xef\xbb\xbf': - encoding = 'utf-8' - data = data[3:] - elif data[:4] == '\x00\x00\xfe\xff': - encoding = 'utf-32be' - data = data[4:] - elif data[:4] == '\xff\xfe\x00\x00': - encoding = 'utf-32le' - data = data[4:] - newdata = unicode(data, encoding, errors) - return newdata - - def _detectEncoding(self, xml_data, is_html=False): - """Given a document, tries to detect its XML encoding.""" - xml_encoding = sniffed_xml_encoding = None - try: - if xml_data[:4] == b'\x4c\x6f\xa7\x94': - # EBCDIC - xml_data = self._ebcdic_to_ascii(xml_data) - elif xml_data[:4] == b'\x00\x3c\x00\x3f': - # UTF-16BE - sniffed_xml_encoding = 'utf-16be' - xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') - elif (len(xml_data) >= 4) and (xml_data[:2] 
== b'\xfe\xff') \ - and (xml_data[2:4] != b'\x00\x00'): - # UTF-16BE with BOM - sniffed_xml_encoding = 'utf-16be' - xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') - elif xml_data[:4] == b'\x3c\x00\x3f\x00': - # UTF-16LE - sniffed_xml_encoding = 'utf-16le' - xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') - elif (len(xml_data) >= 4) and (xml_data[:2] == b'\xff\xfe') and \ - (xml_data[2:4] != b'\x00\x00'): - # UTF-16LE with BOM - sniffed_xml_encoding = 'utf-16le' - xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') - elif xml_data[:4] == b'\x00\x00\x00\x3c': - # UTF-32BE - sniffed_xml_encoding = 'utf-32be' - xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') - elif xml_data[:4] == b'\x3c\x00\x00\x00': - # UTF-32LE - sniffed_xml_encoding = 'utf-32le' - xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') - elif xml_data[:4] == b'\x00\x00\xfe\xff': - # UTF-32BE with BOM - sniffed_xml_encoding = 'utf-32be' - xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') - elif xml_data[:4] == b'\xff\xfe\x00\x00': - # UTF-32LE with BOM - sniffed_xml_encoding = 'utf-32le' - xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') - elif xml_data[:3] == b'\xef\xbb\xbf': - # UTF-8 with BOM - sniffed_xml_encoding = 'utf-8' - xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') - else: - sniffed_xml_encoding = 'ascii' - pass - except: - xml_encoding_match = None - xml_encoding_match = xml_encoding_re.match(xml_data) - if not xml_encoding_match and is_html: - xml_encoding_match = html_meta_re.search(xml_data) - if xml_encoding_match is not None: - xml_encoding = xml_encoding_match.groups()[0].decode( - 'ascii').lower() - if is_html: - self.declared_html_encoding = xml_encoding - if sniffed_xml_encoding and \ - (xml_encoding in ('iso-10646-ucs-2', 'ucs-2', 'csunicode', - 'iso-10646-ucs-4', 'ucs-4', 'csucs4', - 'utf-16', 'utf-32', 'utf_16', 'utf_32', - 'utf16', 'u16')): - xml_encoding = sniffed_xml_encoding - return xml_data, 
xml_encoding, sniffed_xml_encoding - - def find_codec(self, charset): - return self._codec(self.CHARSET_ALIASES.get(charset, charset)) \ - or (charset and self._codec(charset.replace("-", ""))) \ - or (charset and self._codec(charset.replace("-", "_"))) \ - or charset - - def _codec(self, charset): - if not charset: - return charset - codec = None - try: - codecs.lookup(charset) - codec = charset - except (LookupError, ValueError): - pass - return codec - - EBCDIC_TO_ASCII_MAP = None - - def _ebcdic_to_ascii(self, s): - c = self.__class__ - if not c.EBCDIC_TO_ASCII_MAP: - emap = (0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, - 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, - 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, - 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, - 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, - 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, - 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, - 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, - 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200, - 201,202,106,107,108,109,110,111,112,113,114,203,204,205, - 206,207,208,209,126,115,116,117,118,119,120,121,122,210, - 211,212,213,214,215,216,217,218,219,220,221,222,223,224, - 225,226,227,228,229,230,231,123,65,66,67,68,69,70,71,72, - 73,232,233,234,235,236,237,125,74,75,76,77,78,79,80,81, - 82,238,239,240,241,242,243,92,159,83,84,85,86,87,88,89, - 90,244,245,246,247,248,249,48,49,50,51,52,53,54,55,56,57, - 250,251,252,253,254,255) - import string - c.EBCDIC_TO_ASCII_MAP = string.maketrans( - ''.join(map(chr, list(range(256)))), ''.join(map(chr, emap))) - return s.translate(c.EBCDIC_TO_ASCII_MAP) - - # A partial mapping of ISO-Latin-1 to HTML entities/XML numeric entities. 
- MS_CHARS = {b'\x80': ('euro', '20AC'), - b'\x81': ' ', - b'\x82': ('sbquo', '201A'), - b'\x83': ('fnof', '192'), - b'\x84': ('bdquo', '201E'), - b'\x85': ('hellip', '2026'), - b'\x86': ('dagger', '2020'), - b'\x87': ('Dagger', '2021'), - b'\x88': ('circ', '2C6'), - b'\x89': ('permil', '2030'), - b'\x8A': ('Scaron', '160'), - b'\x8B': ('lsaquo', '2039'), - b'\x8C': ('OElig', '152'), - b'\x8D': '?', - b'\x8E': ('#x17D', '17D'), - b'\x8F': '?', - b'\x90': '?', - b'\x91': ('lsquo', '2018'), - b'\x92': ('rsquo', '2019'), - b'\x93': ('ldquo', '201C'), - b'\x94': ('rdquo', '201D'), - b'\x95': ('bull', '2022'), - b'\x96': ('ndash', '2013'), - b'\x97': ('mdash', '2014'), - b'\x98': ('tilde', '2DC'), - b'\x99': ('trade', '2122'), - b'\x9a': ('scaron', '161'), - b'\x9b': ('rsaquo', '203A'), - b'\x9c': ('oelig', '153'), - b'\x9d': '?', - b'\x9e': ('#x17E', '17E'), - b'\x9f': ('Yuml', ''),} - - # A parochial partial mapping of ISO-Latin-1 to ASCII. Contains - # horrors like stripping diacritical marks to turn á into a, but also - # contains non-horrors like turning “ into ". - MS_CHARS_TO_ASCII = { - b'\x80' : 'EUR', - b'\x81' : ' ', - b'\x82' : ',', - b'\x83' : 'f', - b'\x84' : ',,', - b'\x85' : '...', - b'\x86' : '+', - b'\x87' : '++', - b'\x88' : '^', - b'\x89' : '%', - b'\x8a' : 'S', - b'\x8b' : '<', - b'\x8c' : 'OE', - b'\x8d' : '?', - b'\x8e' : 'Z', - b'\x8f' : '?', - b'\x90' : '?', - b'\x91' : "'", - b'\x92' : "'", - b'\x93' : '"', - b'\x94' : '"', - b'\x95' : '*', - b'\x96' : '-', - b'\x97' : '--', - b'\x98' : '~', - b'\x99' : '(TM)', - b'\x9a' : 's', - b'\x9b' : '>', - b'\x9c' : 'oe', - b'\x9d' : '?', - b'\x9e' : 'z', - b'\x9f' : 'Y', - b'\xa0' : ' ', - b'\xa1' : '!', - b'\xa2' : 'c', - b'\xa3' : 'GBP', - b'\xa4' : '$', #This approximation is especially parochial--this is the - #generic currency symbol. 
- b'\xa5' : 'YEN', - b'\xa6' : '|', - b'\xa7' : 'S', - b'\xa8' : '..', - b'\xa9' : '', - b'\xaa' : '(th)', - b'\xab' : '<<', - b'\xac' : '!', - b'\xad' : ' ', - b'\xae' : '(R)', - b'\xaf' : '-', - b'\xb0' : 'o', - b'\xb1' : '+-', - b'\xb2' : '2', - b'\xb3' : '3', - b'\xb4' : ("'", 'acute'), - b'\xb5' : 'u', - b'\xb6' : 'P', - b'\xb7' : '*', - b'\xb8' : ',', - b'\xb9' : '1', - b'\xba' : '(th)', - b'\xbb' : '>>', - b'\xbc' : '1/4', - b'\xbd' : '1/2', - b'\xbe' : '3/4', - b'\xbf' : '?', - b'\xc0' : 'A', - b'\xc1' : 'A', - b'\xc2' : 'A', - b'\xc3' : 'A', - b'\xc4' : 'A', - b'\xc5' : 'A', - b'\xc6' : 'AE', - b'\xc7' : 'C', - b'\xc8' : 'E', - b'\xc9' : 'E', - b'\xca' : 'E', - b'\xcb' : 'E', - b'\xcc' : 'I', - b'\xcd' : 'I', - b'\xce' : 'I', - b'\xcf' : 'I', - b'\xd0' : 'D', - b'\xd1' : 'N', - b'\xd2' : 'O', - b'\xd3' : 'O', - b'\xd4' : 'O', - b'\xd5' : 'O', - b'\xd6' : 'O', - b'\xd7' : '*', - b'\xd8' : 'O', - b'\xd9' : 'U', - b'\xda' : 'U', - b'\xdb' : 'U', - b'\xdc' : 'U', - b'\xdd' : 'Y', - b'\xde' : 'b', - b'\xdf' : 'B', - b'\xe0' : 'a', - b'\xe1' : 'a', - b'\xe2' : 'a', - b'\xe3' : 'a', - b'\xe4' : 'a', - b'\xe5' : 'a', - b'\xe6' : 'ae', - b'\xe7' : 'c', - b'\xe8' : 'e', - b'\xe9' : 'e', - b'\xea' : 'e', - b'\xeb' : 'e', - b'\xec' : 'i', - b'\xed' : 'i', - b'\xee' : 'i', - b'\xef' : 'i', - b'\xf0' : 'o', - b'\xf1' : 'n', - b'\xf2' : 'o', - b'\xf3' : 'o', - b'\xf4' : 'o', - b'\xf5' : 'o', - b'\xf6' : 'o', - b'\xf7' : '/', - b'\xf8' : 'o', - b'\xf9' : 'u', - b'\xfa' : 'u', - b'\xfb' : 'u', - b'\xfc' : 'u', - b'\xfd' : 'y', - b'\xfe' : 'b', - b'\xff' : 'y', - } - - # A map used when removing rogue Windows-1252/ISO-8859-1 - # characters in otherwise UTF-8 documents. - # - # Note that \x81, \x8d, \x8f, \x90, and \x9d are undefined in - # Windows-1252. 
- WINDOWS_1252_TO_UTF8 = { - 0x80 : b'\xe2\x82\xac', # € - 0x82 : b'\xe2\x80\x9a', # ‚ - 0x83 : b'\xc6\x92', # ƒ - 0x84 : b'\xe2\x80\x9e', # „ - 0x85 : b'\xe2\x80\xa6', # … - 0x86 : b'\xe2\x80\xa0', # † - 0x87 : b'\xe2\x80\xa1', # ‡ - 0x88 : b'\xcb\x86', # ˆ - 0x89 : b'\xe2\x80\xb0', # ‰ - 0x8a : b'\xc5\xa0', # Š - 0x8b : b'\xe2\x80\xb9', # ‹ - 0x8c : b'\xc5\x92', # Œ - 0x8e : b'\xc5\xbd', # Ž - 0x91 : b'\xe2\x80\x98', # ‘ - 0x92 : b'\xe2\x80\x99', # ’ - 0x93 : b'\xe2\x80\x9c', # “ - 0x94 : b'\xe2\x80\x9d', # ” - 0x95 : b'\xe2\x80\xa2', # • - 0x96 : b'\xe2\x80\x93', # – - 0x97 : b'\xe2\x80\x94', # — - 0x98 : b'\xcb\x9c', # ˜ - 0x99 : b'\xe2\x84\xa2', # ™ - 0x9a : b'\xc5\xa1', # š - 0x9b : b'\xe2\x80\xba', # › - 0x9c : b'\xc5\x93', # œ - 0x9e : b'\xc5\xbe', # ž - 0x9f : b'\xc5\xb8', # Ÿ - 0xa0 : b'\xc2\xa0', #   - 0xa1 : b'\xc2\xa1', # ¡ - 0xa2 : b'\xc2\xa2', # ¢ - 0xa3 : b'\xc2\xa3', # £ - 0xa4 : b'\xc2\xa4', # ¤ - 0xa5 : b'\xc2\xa5', # ¥ - 0xa6 : b'\xc2\xa6', # ¦ - 0xa7 : b'\xc2\xa7', # § - 0xa8 : b'\xc2\xa8', # ¨ - 0xa9 : b'\xc2\xa9', # © - 0xaa : b'\xc2\xaa', # ª - 0xab : b'\xc2\xab', # « - 0xac : b'\xc2\xac', # ¬ - 0xad : b'\xc2\xad', # ­ - 0xae : b'\xc2\xae', # ® - 0xaf : b'\xc2\xaf', # ¯ - 0xb0 : b'\xc2\xb0', # ° - 0xb1 : b'\xc2\xb1', # ± - 0xb2 : b'\xc2\xb2', # ² - 0xb3 : b'\xc2\xb3', # ³ - 0xb4 : b'\xc2\xb4', # ´ - 0xb5 : b'\xc2\xb5', # µ - 0xb6 : b'\xc2\xb6', # ¶ - 0xb7 : b'\xc2\xb7', # · - 0xb8 : b'\xc2\xb8', # ¸ - 0xb9 : b'\xc2\xb9', # ¹ - 0xba : b'\xc2\xba', # º - 0xbb : b'\xc2\xbb', # » - 0xbc : b'\xc2\xbc', # ¼ - 0xbd : b'\xc2\xbd', # ½ - 0xbe : b'\xc2\xbe', # ¾ - 0xbf : b'\xc2\xbf', # ¿ - 0xc0 : b'\xc3\x80', # À - 0xc1 : b'\xc3\x81', # Á - 0xc2 : b'\xc3\x82', #  - 0xc3 : b'\xc3\x83', # à - 0xc4 : b'\xc3\x84', # Ä - 0xc5 : b'\xc3\x85', # Å - 0xc6 : b'\xc3\x86', # Æ - 0xc7 : b'\xc3\x87', # Ç - 0xc8 : b'\xc3\x88', # È - 0xc9 : b'\xc3\x89', # É - 0xca : b'\xc3\x8a', # Ê - 0xcb : b'\xc3\x8b', # Ë - 0xcc : b'\xc3\x8c', # Ì - 0xcd : b'\xc3\x8d', # Í - 0xce 
: b'\xc3\x8e', # Î - 0xcf : b'\xc3\x8f', # Ï - 0xd0 : b'\xc3\x90', # Ð - 0xd1 : b'\xc3\x91', # Ñ - 0xd2 : b'\xc3\x92', # Ò - 0xd3 : b'\xc3\x93', # Ó - 0xd4 : b'\xc3\x94', # Ô - 0xd5 : b'\xc3\x95', # Õ - 0xd6 : b'\xc3\x96', # Ö - 0xd7 : b'\xc3\x97', # × - 0xd8 : b'\xc3\x98', # Ø - 0xd9 : b'\xc3\x99', # Ù - 0xda : b'\xc3\x9a', # Ú - 0xdb : b'\xc3\x9b', # Û - 0xdc : b'\xc3\x9c', # Ü - 0xdd : b'\xc3\x9d', # Ý - 0xde : b'\xc3\x9e', # Þ - 0xdf : b'\xc3\x9f', # ß - 0xe0 : b'\xc3\xa0', # à - 0xe1 : b'\xa1', # á - 0xe2 : b'\xc3\xa2', # â - 0xe3 : b'\xc3\xa3', # ã - 0xe4 : b'\xc3\xa4', # ä - 0xe5 : b'\xc3\xa5', # å - 0xe6 : b'\xc3\xa6', # æ - 0xe7 : b'\xc3\xa7', # ç - 0xe8 : b'\xc3\xa8', # è - 0xe9 : b'\xc3\xa9', # é - 0xea : b'\xc3\xaa', # ê - 0xeb : b'\xc3\xab', # ë - 0xec : b'\xc3\xac', # ì - 0xed : b'\xc3\xad', # í - 0xee : b'\xc3\xae', # î - 0xef : b'\xc3\xaf', # ï - 0xf0 : b'\xc3\xb0', # ð - 0xf1 : b'\xc3\xb1', # ñ - 0xf2 : b'\xc3\xb2', # ò - 0xf3 : b'\xc3\xb3', # ó - 0xf4 : b'\xc3\xb4', # ô - 0xf5 : b'\xc3\xb5', # õ - 0xf6 : b'\xc3\xb6', # ö - 0xf7 : b'\xc3\xb7', # ÷ - 0xf8 : b'\xc3\xb8', # ø - 0xf9 : b'\xc3\xb9', # ù - 0xfa : b'\xc3\xba', # ú - 0xfb : b'\xc3\xbb', # û - 0xfc : b'\xc3\xbc', # ü - 0xfd : b'\xc3\xbd', # ý - 0xfe : b'\xc3\xbe', # þ - } - - MULTIBYTE_MARKERS_AND_SIZES = [ - (0xc2, 0xdf, 2), # 2-byte characters start with a byte C2-DF - (0xe0, 0xef, 3), # 3-byte characters start with E0-EF - (0xf0, 0xf4, 4), # 4-byte characters start with F0-F4 - ] - - FIRST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[0][0] - LAST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[-1][1] - - @classmethod - def detwingle(cls, in_bytes, main_encoding="utf8", - embedded_encoding="windows-1252"): - """Fix characters from one encoding embedded in some other encoding. - - Currently the only situation supported is Windows-1252 (or its - subset ISO-8859-1), embedded in UTF-8. - - The input must be a bytestring. If you've already converted - the document to Unicode, you're too late. 
- - The output is a bytestring in which `embedded_encoding` - characters have been converted to their `main_encoding` - equivalents. - """ - if embedded_encoding.replace('_', '-').lower() not in ( - 'windows-1252', 'windows_1252'): - raise NotImplementedError( - "Windows-1252 and ISO-8859-1 are the only currently supported " - "embedded encodings.") - - if main_encoding.lower() not in ('utf8', 'utf-8'): - raise NotImplementedError( - "UTF-8 is the only currently supported main encoding.") - - byte_chunks = [] - - chunk_start = 0 - pos = 0 - while pos < len(in_bytes): - byte = in_bytes[pos] - if not isinstance(byte, int): - # Python 2.x - byte = ord(byte) - if (byte >= cls.FIRST_MULTIBYTE_MARKER - and byte <= cls.LAST_MULTIBYTE_MARKER): - # This is the start of a UTF-8 multibyte character. Skip - # to the end. - for start, end, size in cls.MULTIBYTE_MARKERS_AND_SIZES: - if byte >= start and byte <= end: - pos += size - break - elif byte >= 0x80 and byte in cls.WINDOWS_1252_TO_UTF8: - # We found a Windows-1252 character! - # Save the string up to this point as a chunk. - byte_chunks.append(in_bytes[chunk_start:pos]) - - # Now translate the Windows-1252 character into UTF-8 - # and add it as another, one-byte chunk. - byte_chunks.append(cls.WINDOWS_1252_TO_UTF8[byte]) - pos += 1 - chunk_start = pos - else: - # Go on to the next character. - pos += 1 - if chunk_start == 0: - # The string is unchanged. - return in_bytes - else: - # Store the final chunk. 
- byte_chunks.append(in_bytes[chunk_start:]) - return b''.join(byte_chunks) - diff --git a/lib/bs4/diagnose.py b/lib/bs4/diagnose.py deleted file mode 100644 index 25fda5c..0000000 --- a/lib/bs4/diagnose.py +++ /dev/null @@ -1,178 +0,0 @@ -"""Diagnostic functions, mainly for use when doing tech support.""" -from StringIO import StringIO -from HTMLParser import HTMLParser -from bs4 import BeautifulSoup, __version__ -from bs4.builder import builder_registry -import os -import random -import time -import traceback -import sys -import cProfile - -def diagnose(data): - """Diagnostic suite for isolating common problems.""" - print "Diagnostic running on Beautiful Soup %s" % __version__ - print "Python version %s" % sys.version - - basic_parsers = ["html.parser", "html5lib", "lxml"] - for name in basic_parsers: - for builder in builder_registry.builders: - if name in builder.features: - break - else: - basic_parsers.remove(name) - print ( - "I noticed that %s is not installed. Installing it may help." % - name) - - if 'lxml' in basic_parsers: - basic_parsers.append(["lxml", "xml"]) - from lxml import etree - print "Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION)) - - if 'html5lib' in basic_parsers: - import html5lib - print "Found html5lib version %s" % html5lib.__version__ - - if hasattr(data, 'read'): - data = data.read() - elif os.path.exists(data): - print '"%s" looks like a filename. Reading data from the file.' % data - data = open(data).read() - elif data.startswith("http:") or data.startswith("https:"): - print '"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data - print "You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup." - return - print - - for parser in basic_parsers: - print "Trying to parse your markup with %s" % parser - success = False - try: - soup = BeautifulSoup(data, parser) - success = True - except Exception, e: - print "%s could not parse the markup." 
% parser - traceback.print_exc() - if success: - print "Here's what %s did with the markup:" % parser - print soup.prettify() - - print "-" * 80 - -def lxml_trace(data, html=True): - """Print out the lxml events that occur during parsing. - - This lets you see how lxml parses a document when no Beautiful - Soup code is running. - """ - from lxml import etree - for event, element in etree.iterparse(StringIO(data), html=html): - print("%s, %4s, %s" % (event, element.tag, element.text)) - -class AnnouncingParser(HTMLParser): - """Announces HTMLParser parse events, without doing anything else.""" - - def _p(self, s): - print(s) - - def handle_starttag(self, name, attrs): - self._p("%s START" % name) - - def handle_endtag(self, name): - self._p("%s END" % name) - - def handle_data(self, data): - self._p("%s DATA" % data) - - def handle_charref(self, name): - self._p("%s CHARREF" % name) - - def handle_entityref(self, name): - self._p("%s ENTITYREF" % name) - - def handle_comment(self, data): - self._p("%s COMMENT" % data) - - def handle_decl(self, data): - self._p("%s DECL" % data) - - def unknown_decl(self, data): - self._p("%s UNKNOWN-DECL" % data) - - def handle_pi(self, data): - self._p("%s PI" % data) - -def htmlparser_trace(data): - """Print out the HTMLParser events that occur during parsing. - - This lets you see how HTMLParser parses a document when no - Beautiful Soup code is running. - """ - parser = AnnouncingParser() - parser.feed(data) - -_vowels = "aeiou" -_consonants = "bcdfghjklmnpqrstvwxyz" - -def rword(length=5): - "Generate a random word-like string." - s = '' - for i in range(length): - if i % 2 == 0: - t = _consonants - else: - t = _vowels - s += random.choice(t) - return s - -def rsentence(length=4): - "Generate a random sentence-like string." 
- return " ".join(rword(random.randint(4,9)) for i in range(length)) - -def rdoc(num_elements=1000): - """Randomly generate an invalid HTML document.""" - tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table'] - elements = [] - for i in range(num_elements): - choice = random.randint(0,3) - if choice == 0: - # New tag. - tag_name = random.choice(tag_names) - elements.append("<%s>" % tag_name) - elif choice == 1: - elements.append(rsentence(random.randint(1,4))) - elif choice == 2: - # Close a tag. - tag_name = random.choice(tag_names) - elements.append("" % tag_name) - return "" + "\n".join(elements) + "" - -def benchmark_parsers(num_elements=100000): - """Very basic head-to-head performance benchmark.""" - print "Comparative parser benchmark on Beautiful Soup %s" % __version__ - data = rdoc(num_elements) - print "Generated a large invalid HTML document (%d bytes)." % len(data) - - for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]: - success = False - try: - a = time.time() - soup = BeautifulSoup(data, parser) - b = time.time() - success = True - except Exception, e: - print "%s could not parse the markup." % parser - traceback.print_exc() - if success: - print "BS4+%s parsed the markup in %.2fs." % (parser, b-a) - - from lxml import etree - a = time.time() - etree.HTML(data) - b = time.time() - print "Raw lxml parsed the markup in %.2fs." 
% (b-a) - -if __name__ == '__main__': - diagnose(sys.stdin.read()) diff --git a/lib/bs4/element.py b/lib/bs4/element.py deleted file mode 100644 index f6864f2..0000000 --- a/lib/bs4/element.py +++ /dev/null @@ -1,1598 +0,0 @@ -import collections -import re -import sys -import warnings -from bs4.dammit import EntitySubstitution - -DEFAULT_OUTPUT_ENCODING = "utf-8" -PY3K = (sys.version_info[0] > 2) - -whitespace_re = re.compile("\s+") - -def _alias(attr): - """Alias one attribute name to another for backward compatibility""" - @property - def alias(self): - return getattr(self, attr) - - @alias.setter - def alias(self): - return setattr(self, attr) - return alias - - -class NamespacedAttribute(unicode): - - def __new__(cls, prefix, name, namespace=None): - if name is None: - obj = unicode.__new__(cls, prefix) - elif prefix is None: - # Not really namespaced. - obj = unicode.__new__(cls, name) - else: - obj = unicode.__new__(cls, prefix + ":" + name) - obj.prefix = prefix - obj.name = name - obj.namespace = namespace - return obj - -class AttributeValueWithCharsetSubstitution(unicode): - """A stand-in object for a character encoding specified in HTML.""" - -class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution): - """A generic stand-in for the value of a meta tag's 'charset' attribute. - - When Beautiful Soup parses the markup '', the - value of the 'charset' attribute will be one of these objects. - """ - - def __new__(cls, original_value): - obj = unicode.__new__(cls, original_value) - obj.original_value = original_value - return obj - - def encode(self, encoding): - return encoding - - -class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution): - """A generic stand-in for the value of a meta tag's 'content' attribute. - - When Beautiful Soup parses the markup: - - - The value of the 'content' attribute will be one of these objects. 
- """ - - CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M) - - def __new__(cls, original_value): - match = cls.CHARSET_RE.search(original_value) - if match is None: - # No substitution necessary. - return unicode.__new__(unicode, original_value) - - obj = unicode.__new__(cls, original_value) - obj.original_value = original_value - return obj - - def encode(self, encoding): - def rewrite(match): - return match.group(1) + encoding - return self.CHARSET_RE.sub(rewrite, self.original_value) - -class HTMLAwareEntitySubstitution(EntitySubstitution): - - """Entity substitution rules that are aware of some HTML quirks. - - Specifically, the contents of -""" - soup = BeautifulSoup(doc, "xml") - # lxml would have stripped this while parsing, but we can add - # it later. - soup.script.string = 'console.log("< < hey > > ");' - encoded = soup.encode() - self.assertTrue(b"< < hey > >" in encoded) - - def test_popping_namespaced_tag(self): - markup = 'b2012-07-02T20:33:42Zcd' - soup = self.soup(markup) - self.assertEqual( - unicode(soup.rss), markup) - - def test_docstring_includes_correct_encoding(self): - soup = self.soup("") - self.assertEqual( - soup.encode("latin1"), - b'\n') - - def test_large_xml_document(self): - """A large XML document should come out the same as it went in.""" - markup = (b'\n' - + b'0' * (2**12) - + b'') - soup = self.soup(markup) - self.assertEqual(soup.encode("utf-8"), markup) - - - def test_tags_are_empty_element_if_and_only_if_they_are_empty(self): - self.assertSoupEquals("

", "

") - self.assertSoupEquals("

foo

") - - def test_namespaces_are_preserved(self): - markup = 'This tag is in the a namespaceThis tag is in the b namespace' - soup = self.soup(markup) - root = soup.root - self.assertEqual("http://example.com/", root['xmlns:a']) - self.assertEqual("http://example.net/", root['xmlns:b']) - - def test_closing_namespaced_tag(self): - markup = '

20010504

' - soup = self.soup(markup) - self.assertEqual(unicode(soup.p), markup) - - def test_namespaced_attributes(self): - markup = '' - soup = self.soup(markup) - self.assertEqual(unicode(soup.foo), markup) - - def test_namespaced_attributes_xml_namespace(self): - markup = 'bar' - soup = self.soup(markup) - self.assertEqual(unicode(soup.foo), markup) - -class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest): - """Smoke test for a tree builder that supports HTML5.""" - - def test_real_xhtml_document(self): - # Since XHTML is not HTML5, HTML5 parsers are not tested to handle - # XHTML documents in any particular way. - pass - - def test_html_tags_have_namespace(self): - markup = "" - soup = self.soup(markup) - self.assertEqual("http://www.w3.org/1999/xhtml", soup.a.namespace) - - def test_svg_tags_have_namespace(self): - markup = '' - soup = self.soup(markup) - namespace = "http://www.w3.org/2000/svg" - self.assertEqual(namespace, soup.svg.namespace) - self.assertEqual(namespace, soup.circle.namespace) - - - def test_mathml_tags_have_namespace(self): - markup = '5' - soup = self.soup(markup) - namespace = 'http://www.w3.org/1998/Math/MathML' - self.assertEqual(namespace, soup.math.namespace) - self.assertEqual(namespace, soup.msqrt.namespace) - - def test_xml_declaration_becomes_comment(self): - markup = '' - soup = self.soup(markup) - self.assertTrue(isinstance(soup.contents[0], Comment)) - self.assertEqual(soup.contents[0], '?xml version="1.0" encoding="utf-8"?') - self.assertEqual("html", soup.contents[0].next_element.name) - -def skipIf(condition, reason): - def nothing(test, *args, **kwargs): - return None - - def decorator(test_item): - if condition: - return nothing - else: - return test_item - - return decorator diff --git a/lib/bs4/tests/__init__.py b/lib/bs4/tests/__init__.py deleted file mode 100644 index 142c8cc..0000000 --- a/lib/bs4/tests/__init__.py +++ /dev/null @@ -1 +0,0 @@ -"The beautifulsoup tests." 
diff --git a/lib/bs4/tests/test_builder_registry.py b/lib/bs4/tests/test_builder_registry.py deleted file mode 100644 index 92ad10f..0000000 --- a/lib/bs4/tests/test_builder_registry.py +++ /dev/null @@ -1,141 +0,0 @@ -"""Tests of the builder registry.""" - -import unittest - -from bs4 import BeautifulSoup -from bs4.builder import ( - builder_registry as registry, - HTMLParserTreeBuilder, - TreeBuilderRegistry, -) - -try: - from bs4.builder import HTML5TreeBuilder - HTML5LIB_PRESENT = True -except ImportError: - HTML5LIB_PRESENT = False - -try: - from bs4.builder import ( - LXMLTreeBuilderForXML, - LXMLTreeBuilder, - ) - LXML_PRESENT = True -except ImportError: - LXML_PRESENT = False - - -class BuiltInRegistryTest(unittest.TestCase): - """Test the built-in registry with the default builders registered.""" - - def test_combination(self): - if LXML_PRESENT: - self.assertEqual(registry.lookup('fast', 'html'), - LXMLTreeBuilder) - - if LXML_PRESENT: - self.assertEqual(registry.lookup('permissive', 'xml'), - LXMLTreeBuilderForXML) - self.assertEqual(registry.lookup('strict', 'html'), - HTMLParserTreeBuilder) - if HTML5LIB_PRESENT: - self.assertEqual(registry.lookup('html5lib', 'html'), - HTML5TreeBuilder) - - def test_lookup_by_markup_type(self): - if LXML_PRESENT: - self.assertEqual(registry.lookup('html'), LXMLTreeBuilder) - self.assertEqual(registry.lookup('xml'), LXMLTreeBuilderForXML) - else: - self.assertEqual(registry.lookup('xml'), None) - if HTML5LIB_PRESENT: - self.assertEqual(registry.lookup('html'), HTML5TreeBuilder) - else: - self.assertEqual(registry.lookup('html'), HTMLParserTreeBuilder) - - def test_named_library(self): - if LXML_PRESENT: - self.assertEqual(registry.lookup('lxml', 'xml'), - LXMLTreeBuilderForXML) - self.assertEqual(registry.lookup('lxml', 'html'), - LXMLTreeBuilder) - if HTML5LIB_PRESENT: - self.assertEqual(registry.lookup('html5lib'), - HTML5TreeBuilder) - - self.assertEqual(registry.lookup('html.parser'), - HTMLParserTreeBuilder) - - 
def test_beautifulsoup_constructor_does_lookup(self): - # You can pass in a string. - BeautifulSoup("", features="html") - # Or a list of strings. - BeautifulSoup("", features=["html", "fast"]) - - # You'll get an exception if BS can't find an appropriate - # builder. - self.assertRaises(ValueError, BeautifulSoup, - "", features="no-such-feature") - -class RegistryTest(unittest.TestCase): - """Test the TreeBuilderRegistry class in general.""" - - def setUp(self): - self.registry = TreeBuilderRegistry() - - def builder_for_features(self, *feature_list): - cls = type('Builder_' + '_'.join(feature_list), - (object,), {'features' : feature_list}) - - self.registry.register(cls) - return cls - - def test_register_with_no_features(self): - builder = self.builder_for_features() - - # Since the builder advertises no features, you can't find it - # by looking up features. - self.assertEqual(self.registry.lookup('foo'), None) - - # But you can find it by doing a lookup with no features, if - # this happens to be the only registered builder. 
- self.assertEqual(self.registry.lookup(), builder) - - def test_register_with_features_makes_lookup_succeed(self): - builder = self.builder_for_features('foo', 'bar') - self.assertEqual(self.registry.lookup('foo'), builder) - self.assertEqual(self.registry.lookup('bar'), builder) - - def test_lookup_fails_when_no_builder_implements_feature(self): - builder = self.builder_for_features('foo', 'bar') - self.assertEqual(self.registry.lookup('baz'), None) - - def test_lookup_gets_most_recent_registration_when_no_feature_specified(self): - builder1 = self.builder_for_features('foo') - builder2 = self.builder_for_features('bar') - self.assertEqual(self.registry.lookup(), builder2) - - def test_lookup_fails_when_no_tree_builders_registered(self): - self.assertEqual(self.registry.lookup(), None) - - def test_lookup_gets_most_recent_builder_supporting_all_features(self): - has_one = self.builder_for_features('foo') - has_the_other = self.builder_for_features('bar') - has_both_early = self.builder_for_features('foo', 'bar', 'baz') - has_both_late = self.builder_for_features('foo', 'bar', 'quux') - lacks_one = self.builder_for_features('bar') - has_the_other = self.builder_for_features('foo') - - # There are two builders featuring 'foo' and 'bar', but - # the one that also features 'quux' was registered later. - self.assertEqual(self.registry.lookup('foo', 'bar'), - has_both_late) - - # There is only one builder featuring 'foo', 'bar', and 'baz'. - self.assertEqual(self.registry.lookup('foo', 'bar', 'baz'), - has_both_early) - - def test_lookup_fails_when_cannot_reconcile_requested_features(self): - builder1 = self.builder_for_features('foo', 'bar') - builder2 = self.builder_for_features('foo', 'baz') - self.assertEqual(self.registry.lookup('bar', 'baz'), None) diff --git a/lib/bs4/tests/test_docs.py b/lib/bs4/tests/test_docs.py deleted file mode 100644 index 5b9f677..0000000 --- a/lib/bs4/tests/test_docs.py +++ /dev/null @@ -1,36 +0,0 @@ -"Test harness for doctests." 
- -# pylint: disable-msg=E0611,W0142 - -__metaclass__ = type -__all__ = [ - 'additional_tests', - ] - -import atexit -import doctest -import os -#from pkg_resources import ( -# resource_filename, resource_exists, resource_listdir, cleanup_resources) -import unittest - -DOCTEST_FLAGS = ( - doctest.ELLIPSIS | - doctest.NORMALIZE_WHITESPACE | - doctest.REPORT_NDIFF) - - -# def additional_tests(): -# "Run the doc tests (README.txt and docs/*, if any exist)" -# doctest_files = [ -# os.path.abspath(resource_filename('bs4', 'README.txt'))] -# if resource_exists('bs4', 'docs'): -# for name in resource_listdir('bs4', 'docs'): -# if name.endswith('.txt'): -# doctest_files.append( -# os.path.abspath( -# resource_filename('bs4', 'docs/%s' % name))) -# kwargs = dict(module_relative=False, optionflags=DOCTEST_FLAGS) -# atexit.register(cleanup_resources) -# return unittest.TestSuite(( -# doctest.DocFileSuite(*doctest_files, **kwargs))) diff --git a/lib/bs4/tests/test_html5lib.py b/lib/bs4/tests/test_html5lib.py deleted file mode 100644 index 2a3b41e..0000000 --- a/lib/bs4/tests/test_html5lib.py +++ /dev/null @@ -1,72 +0,0 @@ -"""Tests to ensure that the html5lib tree builder generates good trees.""" - -import warnings - -try: - from bs4.builder import HTML5TreeBuilder - HTML5LIB_PRESENT = True -except ImportError, e: - HTML5LIB_PRESENT = False -from bs4.element import SoupStrainer -from bs4.testing import ( - HTML5TreeBuilderSmokeTest, - SoupTest, - skipIf, -) - -@skipIf( - not HTML5LIB_PRESENT, - "html5lib seems not to be present, not testing its tree builder.") -class HTML5LibBuilderSmokeTest(SoupTest, HTML5TreeBuilderSmokeTest): - """See ``HTML5TreeBuilderSmokeTest``.""" - - @property - def default_builder(self): - return HTML5TreeBuilder() - - def test_soupstrainer(self): - # The html5lib tree builder does not support SoupStrainers. - strainer = SoupStrainer("b") - markup = "

A bold statement.

" - with warnings.catch_warnings(record=True) as w: - soup = self.soup(markup, parse_only=strainer) - self.assertEqual( - soup.decode(), self.document_for(markup)) - - self.assertTrue( - "the html5lib tree builder doesn't support parse_only" in - str(w[0].message)) - - def test_correctly_nested_tables(self): - """html5lib inserts tags where other parsers don't.""" - markup = ('' - '' - "') - - self.assertSoupEquals( - markup, - '
Here's another table:" - '' - '' - '
foo
Here\'s another table:' - '
foo
' - '
') - - self.assertSoupEquals( - "" - "" - "
Foo
Bar
Baz
") - - def test_xml_declaration_followed_by_doctype(self): - markup = ''' - - - - - -

foo

- -''' - soup = self.soup(markup) - # Verify that we can reach the

tag; this means the tree is connected. - self.assertEqual(b"

foo

", soup.p.encode()) diff --git a/lib/bs4/tests/test_htmlparser.py b/lib/bs4/tests/test_htmlparser.py deleted file mode 100644 index bcb5ed2..0000000 --- a/lib/bs4/tests/test_htmlparser.py +++ /dev/null @@ -1,19 +0,0 @@ -"""Tests to ensure that the html.parser tree builder generates good -trees.""" - -from bs4.testing import SoupTest, HTMLTreeBuilderSmokeTest -from bs4.builder import HTMLParserTreeBuilder - -class HTMLParserTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest): - - @property - def default_builder(self): - return HTMLParserTreeBuilder() - - def test_namespaced_system_doctype(self): - # html.parser can't handle namespaced doctypes, so skip this one. - pass - - def test_namespaced_public_doctype(self): - # html.parser can't handle namespaced doctypes, so skip this one. - pass diff --git a/lib/bs4/tests/test_lxml.py b/lib/bs4/tests/test_lxml.py deleted file mode 100644 index 80458de..0000000 --- a/lib/bs4/tests/test_lxml.py +++ /dev/null @@ -1,88 +0,0 @@ -"""Tests to ensure that the lxml tree builder generates good trees.""" - -import re -import warnings - -try: - from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML - LXML_PRESENT = True - import lxml.etree - LXML_VERSION = lxml.etree.LXML_VERSION -except ImportError, e: - LXML_PRESENT = False - LXML_VERSION = (0,) - -from bs4 import ( - BeautifulSoup, - BeautifulStoneSoup, - ) -from bs4.element import Comment, Doctype, SoupStrainer -from bs4.testing import skipIf -from bs4.tests import test_htmlparser -from bs4.testing import ( - HTMLTreeBuilderSmokeTest, - XMLTreeBuilderSmokeTest, - SoupTest, - skipIf, -) - -@skipIf( - not LXML_PRESENT, - "lxml seems not to be present, not testing its tree builder.") -class LXMLTreeBuilderSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest): - """See ``HTMLTreeBuilderSmokeTest``.""" - - @property - def default_builder(self): - return LXMLTreeBuilder() - - def test_out_of_range_entity(self): - self.assertSoupEquals( - "

foo�bar

", "

foobar

") - self.assertSoupEquals( - "

foo�bar

", "

foobar

") - self.assertSoupEquals( - "

foo�bar

", "

foobar

") - - # In lxml < 2.3.5, an empty doctype causes a segfault. Skip this - # test if an old version of lxml is installed. - - @skipIf( - not LXML_PRESENT or LXML_VERSION < (2,3,5,0), - "Skipping doctype test for old version of lxml to avoid segfault.") - def test_empty_doctype(self): - soup = self.soup("") - doctype = soup.contents[0] - self.assertEqual("", doctype.strip()) - - def test_beautifulstonesoup_is_xml_parser(self): - # Make sure that the deprecated BSS class uses an xml builder - # if one is installed. - with warnings.catch_warnings(record=False) as w: - soup = BeautifulStoneSoup("") - self.assertEqual(u"", unicode(soup.b)) - - def test_real_xhtml_document(self): - """lxml strips the XML definition from an XHTML doc, which is fine.""" - markup = b""" - - -Hello. -Goodbye. -""" - soup = self.soup(markup) - self.assertEqual( - soup.encode("utf-8").replace(b"\n", b''), - markup.replace(b'\n', b'').replace( - b'', b'')) - - -@skipIf( - not LXML_PRESENT, - "lxml seems not to be present, not testing its XML tree builder.") -class LXMLXMLTreeBuilderSmokeTest(SoupTest, XMLTreeBuilderSmokeTest): - """See ``HTMLTreeBuilderSmokeTest``.""" - - @property - def default_builder(self): - return LXMLTreeBuilderForXML() diff --git a/lib/bs4/tests/test_soup.py b/lib/bs4/tests/test_soup.py deleted file mode 100644 index b127716..0000000 --- a/lib/bs4/tests/test_soup.py +++ /dev/null @@ -1,383 +0,0 @@ -# -*- coding: utf-8 -*- -"""Tests of Beautiful Soup as a whole.""" - -import logging -import unittest -import sys -from bs4 import ( - BeautifulSoup, - BeautifulStoneSoup, -) -from bs4.element import ( - CharsetMetaAttributeValue, - ContentMetaAttributeValue, - SoupStrainer, - NamespacedAttribute, - ) -import bs4.dammit -from bs4.dammit import EntitySubstitution, UnicodeDammit -from bs4.testing import ( - SoupTest, - skipIf, -) -import warnings - -try: - from bs4.builder import LXMLTreeBuilder, LXMLTreeBuilderForXML - LXML_PRESENT = True -except ImportError, e: - LXML_PRESENT = 
False - -PYTHON_2_PRE_2_7 = (sys.version_info < (2,7)) -PYTHON_3_PRE_3_2 = (sys.version_info[0] == 3 and sys.version_info < (3,2)) - -class TestDeprecatedConstructorArguments(SoupTest): - - def test_parseOnlyThese_renamed_to_parse_only(self): - with warnings.catch_warnings(record=True) as w: - soup = self.soup("
", parseOnlyThese=SoupStrainer("b")) - msg = str(w[0].message) - self.assertTrue("parseOnlyThese" in msg) - self.assertTrue("parse_only" in msg) - self.assertEqual(b"", soup.encode()) - - def test_fromEncoding_renamed_to_from_encoding(self): - with warnings.catch_warnings(record=True) as w: - utf8 = b"\xc3\xa9" - soup = self.soup(utf8, fromEncoding="utf8") - msg = str(w[0].message) - self.assertTrue("fromEncoding" in msg) - self.assertTrue("from_encoding" in msg) - self.assertEqual("utf8", soup.original_encoding) - - def test_unrecognized_keyword_argument(self): - self.assertRaises( - TypeError, self.soup, "", no_such_argument=True) - - @skipIf( - not LXML_PRESENT, - "lxml not present, not testing BeautifulStoneSoup.") - def test_beautifulstonesoup(self): - with warnings.catch_warnings(record=True) as w: - soup = BeautifulStoneSoup("") - self.assertTrue(isinstance(soup, BeautifulSoup)) - self.assertTrue("BeautifulStoneSoup class is deprecated") - -class TestSelectiveParsing(SoupTest): - - def test_parse_with_soupstrainer(self): - markup = "NoYesNoYes Yes" - strainer = SoupStrainer("b") - soup = self.soup(markup, parse_only=strainer) - self.assertEqual(soup.encode(), b"YesYes Yes") - - -class TestEntitySubstitution(unittest.TestCase): - """Standalone tests of the EntitySubstitution class.""" - def setUp(self): - self.sub = EntitySubstitution - - def test_simple_html_substitution(self): - # Unicode characters corresponding to named HTML entites - # are substituted, and no others. - s = u"foo\u2200\N{SNOWMAN}\u00f5bar" - self.assertEqual(self.sub.substitute_html(s), - u"foo∀\N{SNOWMAN}õbar") - - def test_smart_quote_substitution(self): - # MS smart quotes are a common source of frustration, so we - # give them a special test. 
- quotes = b"\x91\x92foo\x93\x94" - dammit = UnicodeDammit(quotes) - self.assertEqual(self.sub.substitute_html(dammit.markup), - "‘’foo“”") - - def test_xml_converstion_includes_no_quotes_if_make_quoted_attribute_is_false(self): - s = 'Welcome to "my bar"' - self.assertEqual(self.sub.substitute_xml(s, False), s) - - def test_xml_attribute_quoting_normally_uses_double_quotes(self): - self.assertEqual(self.sub.substitute_xml("Welcome", True), - '"Welcome"') - self.assertEqual(self.sub.substitute_xml("Bob's Bar", True), - '"Bob\'s Bar"') - - def test_xml_attribute_quoting_uses_single_quotes_when_value_contains_double_quotes(self): - s = 'Welcome to "my bar"' - self.assertEqual(self.sub.substitute_xml(s, True), - "'Welcome to \"my bar\"'") - - def test_xml_attribute_quoting_escapes_single_quotes_when_value_contains_both_single_and_double_quotes(self): - s = 'Welcome to "Bob\'s Bar"' - self.assertEqual( - self.sub.substitute_xml(s, True), - '"Welcome to "Bob\'s Bar""') - - def test_xml_quotes_arent_escaped_when_value_is_not_being_quoted(self): - quoted = 'Welcome to "Bob\'s Bar"' - self.assertEqual(self.sub.substitute_xml(quoted), quoted) - - def test_xml_quoting_handles_angle_brackets(self): - self.assertEqual( - self.sub.substitute_xml("foo"), - "foo<bar>") - - def test_xml_quoting_handles_ampersands(self): - self.assertEqual(self.sub.substitute_xml("AT&T"), "AT&T") - - def test_xml_quoting_including_ampersands_when_they_are_part_of_an_entity(self): - self.assertEqual( - self.sub.substitute_xml("ÁT&T"), - "&Aacute;T&T") - - def test_xml_quoting_ignoring_ampersands_when_they_are_part_of_an_entity(self): - self.assertEqual( - self.sub.substitute_xml_containing_entities("ÁT&T"), - "ÁT&T") - - def test_quotes_not_html_substituted(self): - """There's no need to do this except inside attribute values.""" - text = 'Bob\'s "bar"' - self.assertEqual(self.sub.substitute_html(text), text) - - -class TestEncodingConversion(SoupTest): - # Test Beautiful Soup's ability to decode 
and encode from various - # encodings. - - def setUp(self): - super(TestEncodingConversion, self).setUp() - self.unicode_data = u'Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!' - self.utf8_data = self.unicode_data.encode("utf-8") - # Just so you know what it looks like. - self.assertEqual( - self.utf8_data, - b'Sacr\xc3\xa9 bleu!') - - def test_ascii_in_unicode_out(self): - # ASCII input is converted to Unicode. The original_encoding - # attribute is set. - ascii = b"a" - soup_from_ascii = self.soup(ascii) - unicode_output = soup_from_ascii.decode() - self.assertTrue(isinstance(unicode_output, unicode)) - self.assertEqual(unicode_output, self.document_for(ascii.decode())) - self.assertEqual(soup_from_ascii.original_encoding.lower(), "ascii") - - def test_unicode_in_unicode_out(self): - # Unicode input is left alone. The original_encoding attribute - # is not set. - soup_from_unicode = self.soup(self.unicode_data) - self.assertEqual(soup_from_unicode.decode(), self.unicode_data) - self.assertEqual(soup_from_unicode.foo.string, u'Sacr\xe9 bleu!') - self.assertEqual(soup_from_unicode.original_encoding, None) - - def test_utf8_in_unicode_out(self): - # UTF-8 input is converted to Unicode. The original_encoding - # attribute is set. - soup_from_utf8 = self.soup(self.utf8_data) - self.assertEqual(soup_from_utf8.decode(), self.unicode_data) - self.assertEqual(soup_from_utf8.foo.string, u'Sacr\xe9 bleu!') - - def test_utf8_out(self): - # The internal data structures can be encoded as UTF-8. - soup_from_unicode = self.soup(self.unicode_data) - self.assertEqual(soup_from_unicode.encode('utf-8'), self.utf8_data) - - @skipIf( - PYTHON_2_PRE_2_7 or PYTHON_3_PRE_3_2, - "Bad HTMLParser detected; skipping test of non-ASCII characters in attribute name.") - def test_attribute_name_containing_unicode_characters(self): - markup = u'
' - self.assertEqual(self.soup(markup).div.encode("utf8"), markup.encode("utf8")) - -class TestUnicodeDammit(unittest.TestCase): - """Standalone tests of Unicode, Dammit.""" - - def test_smart_quotes_to_unicode(self): - markup = b"\x91\x92\x93\x94" - dammit = UnicodeDammit(markup) - self.assertEqual( - dammit.unicode_markup, u"\u2018\u2019\u201c\u201d") - - def test_smart_quotes_to_xml_entities(self): - markup = b"\x91\x92\x93\x94" - dammit = UnicodeDammit(markup, smart_quotes_to="xml") - self.assertEqual( - dammit.unicode_markup, "‘’“”") - - def test_smart_quotes_to_html_entities(self): - markup = b"\x91\x92\x93\x94" - dammit = UnicodeDammit(markup, smart_quotes_to="html") - self.assertEqual( - dammit.unicode_markup, "‘’“”") - - def test_smart_quotes_to_ascii(self): - markup = b"\x91\x92\x93\x94" - dammit = UnicodeDammit(markup, smart_quotes_to="ascii") - self.assertEqual( - dammit.unicode_markup, """''""""") - - def test_detect_utf8(self): - utf8 = b"\xc3\xa9" - dammit = UnicodeDammit(utf8) - self.assertEqual(dammit.unicode_markup, u'\xe9') - self.assertEqual(dammit.original_encoding.lower(), 'utf-8') - - def test_convert_hebrew(self): - hebrew = b"\xed\xe5\xec\xf9" - dammit = UnicodeDammit(hebrew, ["iso-8859-8"]) - self.assertEqual(dammit.original_encoding.lower(), 'iso-8859-8') - self.assertEqual(dammit.unicode_markup, u'\u05dd\u05d5\u05dc\u05e9') - - def test_dont_see_smart_quotes_where_there_are_none(self): - utf_8 = b"\343\202\261\343\203\274\343\202\277\343\202\244 Watch" - dammit = UnicodeDammit(utf_8) - self.assertEqual(dammit.original_encoding.lower(), 'utf-8') - self.assertEqual(dammit.unicode_markup.encode("utf-8"), utf_8) - - def test_ignore_inappropriate_codecs(self): - utf8_data = u"Räksmörgås".encode("utf-8") - dammit = UnicodeDammit(utf8_data, ["iso-8859-8"]) - self.assertEqual(dammit.original_encoding.lower(), 'utf-8') - - def test_ignore_invalid_codecs(self): - utf8_data = u"Räksmörgås".encode("utf-8") - for bad_encoding in ['.utf8', '...', 
'utF---16.!']: - dammit = UnicodeDammit(utf8_data, [bad_encoding]) - self.assertEqual(dammit.original_encoding.lower(), 'utf-8') - - def test_detect_html5_style_meta_tag(self): - - for data in ( - b'', - b"", - b"", - b""): - dammit = UnicodeDammit(data, is_html=True) - self.assertEqual( - "euc-jp", dammit.original_encoding) - - def test_last_ditch_entity_replacement(self): - # This is a UTF-8 document that contains bytestrings - # completely incompatible with UTF-8 (ie. encoded with some other - # encoding). - # - # Since there is no consistent encoding for the document, - # Unicode, Dammit will eventually encode the document as UTF-8 - # and encode the incompatible characters as REPLACEMENT - # CHARACTER. - # - # If chardet is installed, it will detect that the document - # can be converted into ISO-8859-1 without errors. This happens - # to be the wrong encoding, but it is a consistent encoding, so the - # code we're testing here won't run. - # - # So we temporarily disable chardet if it's present. - doc = b"""\357\273\277 -\330\250\330\252\330\261 -\310\322\321\220\312\321\355\344""" - chardet = bs4.dammit.chardet_dammit - logging.disable(logging.WARNING) - try: - def noop(str): - return None - bs4.dammit.chardet_dammit = noop - dammit = UnicodeDammit(doc) - self.assertEqual(True, dammit.contains_replacement_characters) - self.assertTrue(u"\ufffd" in dammit.unicode_markup) - - soup = BeautifulSoup(doc, "html.parser") - self.assertTrue(soup.contains_replacement_characters) - finally: - logging.disable(logging.NOTSET) - bs4.dammit.chardet_dammit = chardet - - def test_sniffed_xml_encoding(self): - # A document written in UTF-16LE will be converted by a different - # code path that sniffs the byte order markers. 
- data = b'\xff\xfe<\x00a\x00>\x00\xe1\x00\xe9\x00<\x00/\x00a\x00>\x00' - dammit = UnicodeDammit(data) - self.assertEqual(u"áé", dammit.unicode_markup) - self.assertEqual("utf-16le", dammit.original_encoding) - - def test_detwingle(self): - # Here's a UTF8 document. - utf8 = (u"\N{SNOWMAN}" * 3).encode("utf8") - - # Here's a Windows-1252 document. - windows_1252 = ( - u"\N{LEFT DOUBLE QUOTATION MARK}Hi, I like Windows!" - u"\N{RIGHT DOUBLE QUOTATION MARK}").encode("windows_1252") - - # Through some unholy alchemy, they've been stuck together. - doc = utf8 + windows_1252 + utf8 - - # The document can't be turned into UTF-8: - self.assertRaises(UnicodeDecodeError, doc.decode, "utf8") - - # Unicode, Dammit thinks the whole document is Windows-1252, - # and decodes it into "☃☃☃“Hi, I like Windows!”☃☃☃" - - # But if we run it through fix_embedded_windows_1252, it's fixed: - - fixed = UnicodeDammit.detwingle(doc) - self.assertEqual( - u"☃☃☃“Hi, I like Windows!”☃☃☃", fixed.decode("utf8")) - - def test_detwingle_ignores_multibyte_characters(self): - # Each of these characters has a UTF-8 representation ending - # in \x93. \x93 is a smart quote if interpreted as - # Windows-1252. But our code knows to skip over multibyte - # UTF-8 characters, so they'll survive the process unscathed. - for tricky_unicode_char in ( - u"\N{LATIN SMALL LIGATURE OE}", # 2-byte char '\xc5\x93' - u"\N{LATIN SUBSCRIPT SMALL LETTER X}", # 3-byte char '\xe2\x82\x93' - u"\xf0\x90\x90\x93", # This is a CJK character, not sure which one. 
- ): - input = tricky_unicode_char.encode("utf8") - self.assertTrue(input.endswith(b'\x93')) - output = UnicodeDammit.detwingle(input) - self.assertEqual(output, input) - -class TestNamedspacedAttribute(SoupTest): - - def test_name_may_be_none(self): - a = NamespacedAttribute("xmlns", None) - self.assertEqual(a, "xmlns") - - def test_attribute_is_equivalent_to_colon_separated_string(self): - a = NamespacedAttribute("a", "b") - self.assertEqual("a:b", a) - - def test_attributes_are_equivalent_if_prefix_and_name_identical(self): - a = NamespacedAttribute("a", "b", "c") - b = NamespacedAttribute("a", "b", "c") - self.assertEqual(a, b) - - # The actual namespace is not considered. - c = NamespacedAttribute("a", "b", None) - self.assertEqual(a, c) - - # But name and prefix are important. - d = NamespacedAttribute("a", "z", "c") - self.assertNotEqual(a, d) - - e = NamespacedAttribute("z", "b", "c") - self.assertNotEqual(a, e) - - -class TestAttributeValueWithCharsetSubstitution(unittest.TestCase): - - def test_content_meta_attribute_value(self): - value = CharsetMetaAttributeValue("euc-jp") - self.assertEqual("euc-jp", value) - self.assertEqual("euc-jp", value.original_value) - self.assertEqual("utf8", value.encode("utf8")) - - - def test_content_meta_attribute_value(self): - value = ContentMetaAttributeValue("text/html; charset=euc-jp") - self.assertEqual("text/html; charset=euc-jp", value) - self.assertEqual("text/html; charset=euc-jp", value.original_value) - self.assertEqual("text/html; charset=utf8", value.encode("utf8")) diff --git a/lib/bs4/tests/test_tree.py b/lib/bs4/tests/test_tree.py deleted file mode 100644 index 2d09f96..0000000 --- a/lib/bs4/tests/test_tree.py +++ /dev/null @@ -1,1800 +0,0 @@ -# -*- coding: utf-8 -*- -"""Tests for Beautiful Soup's tree traversal methods. - -The tree traversal methods are the main advantage of using Beautiful -Soup over just using a parser. 
- -Different parsers will build different Beautiful Soup trees given the -same markup, but all Beautiful Soup trees can be traversed with the -methods tested here. -""" - -import copy -import pickle -import re -import warnings -from bs4 import BeautifulSoup -from bs4.builder import ( - builder_registry, - HTMLParserTreeBuilder, -) -from bs4.element import ( - CData, - Comment, - Doctype, - NavigableString, - SoupStrainer, - Tag, -) -from bs4.testing import ( - SoupTest, - skipIf, -) - -XML_BUILDER_PRESENT = (builder_registry.lookup("xml") is not None) -LXML_PRESENT = (builder_registry.lookup("lxml") is not None) - -class TreeTest(SoupTest): - - def assertSelects(self, tags, should_match): - """Make sure that the given tags have the correct text. - - This is used in tests that define a bunch of tags, each - containing a single string, and then select certain strings by - some mechanism. - """ - self.assertEqual([tag.string for tag in tags], should_match) - - def assertSelectsIDs(self, tags, should_match): - """Make sure that the given tags have the correct IDs. - - This is used in tests that define a bunch of tags, each - containing a single string, and then select certain strings by - some mechanism. - """ - self.assertEqual([tag['id'] for tag in tags], should_match) - - -class TestFind(TreeTest): - """Basic tests of the find() method. - - find() just calls find_all() with limit=1, so it's not tested all - that thouroughly here. - """ - - def test_find_tag(self): - soup = self.soup("1234") - self.assertEqual(soup.find("b").string, "2") - - def test_unicode_text_find(self): - soup = self.soup(u'

Räksmörgås

') - self.assertEqual(soup.find(text=u'Räksmörgås'), u'Räksmörgås') - -class TestFindAll(TreeTest): - """Basic tests of the find_all() method.""" - - def test_find_all_text_nodes(self): - """You can search the tree for text nodes.""" - soup = self.soup("Foobar\xbb") - # Exact match. - self.assertEqual(soup.find_all(text="bar"), [u"bar"]) - # Match any of a number of strings. - self.assertEqual( - soup.find_all(text=["Foo", "bar"]), [u"Foo", u"bar"]) - # Match a regular expression. - self.assertEqual(soup.find_all(text=re.compile('.*')), - [u"Foo", u"bar", u'\xbb']) - # Match anything. - self.assertEqual(soup.find_all(text=True), - [u"Foo", u"bar", u'\xbb']) - - def test_find_all_limit(self): - """You can limit the number of items returned by find_all.""" - soup = self.soup("12345") - self.assertSelects(soup.find_all('a', limit=3), ["1", "2", "3"]) - self.assertSelects(soup.find_all('a', limit=1), ["1"]) - self.assertSelects( - soup.find_all('a', limit=10), ["1", "2", "3", "4", "5"]) - - # A limit of 0 means no limit. - self.assertSelects( - soup.find_all('a', limit=0), ["1", "2", "3", "4", "5"]) - - def test_calling_a_tag_is_calling_findall(self): - soup = self.soup("123") - self.assertSelects(soup('a', limit=1), ["1"]) - self.assertSelects(soup.b(id="foo"), ["3"]) - - def test_find_all_with_self_referential_data_structure_does_not_cause_infinite_recursion(self): - soup = self.soup("") - # Create a self-referential list. - l = [] - l.append(l) - - # Without special code in _normalize_search_value, this would cause infinite - # recursion. 
- self.assertEqual([], soup.find_all(l)) - -class TestFindAllBasicNamespaces(TreeTest): - - def test_find_by_namespaced_name(self): - soup = self.soup('4') - self.assertEqual("4", soup.find("mathml:msqrt").string) - self.assertEqual("a", soup.find(attrs= { "svg:fill" : "red" }).name) - - -class TestFindAllByName(TreeTest): - """Test ways of finding tags by tag name.""" - - def setUp(self): - super(TreeTest, self).setUp() - self.tree = self.soup("""First tag. - Second tag. - Third Nested tag. tag.""") - - def test_find_all_by_tag_name(self): - # Find all the tags. - self.assertSelects( - self.tree.find_all('a'), ['First tag.', 'Nested tag.']) - - def test_find_all_by_name_and_text(self): - self.assertSelects( - self.tree.find_all('a', text='First tag.'), ['First tag.']) - - self.assertSelects( - self.tree.find_all('a', text=True), ['First tag.', 'Nested tag.']) - - self.assertSelects( - self.tree.find_all('a', text=re.compile("tag")), - ['First tag.', 'Nested tag.']) - - - def test_find_all_on_non_root_element(self): - # You can call find_all on any node, not just the root. 
- self.assertSelects(self.tree.c.find_all('a'), ['Nested tag.']) - - def test_calling_element_invokes_find_all(self): - self.assertSelects(self.tree('a'), ['First tag.', 'Nested tag.']) - - def test_find_all_by_tag_strainer(self): - self.assertSelects( - self.tree.find_all(SoupStrainer('a')), - ['First tag.', 'Nested tag.']) - - def test_find_all_by_tag_names(self): - self.assertSelects( - self.tree.find_all(['a', 'b']), - ['First tag.', 'Second tag.', 'Nested tag.']) - - def test_find_all_by_tag_dict(self): - self.assertSelects( - self.tree.find_all({'a' : True, 'b' : True}), - ['First tag.', 'Second tag.', 'Nested tag.']) - - def test_find_all_by_tag_re(self): - self.assertSelects( - self.tree.find_all(re.compile('^[ab]$')), - ['First tag.', 'Second tag.', 'Nested tag.']) - - def test_find_all_with_tags_matching_method(self): - # You can define an oracle method that determines whether - # a tag matches the search. - def id_matches_name(tag): - return tag.name == tag.get('id') - - tree = self.soup("""Match 1. - Does not match. - Match 2.""") - - self.assertSelects( - tree.find_all(id_matches_name), ["Match 1.", "Match 2."]) - - -class TestFindAllByAttribute(TreeTest): - - def test_find_all_by_attribute_name(self): - # You can pass in keyword arguments to find_all to search by - # attribute. - tree = self.soup(""" - Matching a. - - Non-matching Matching b.a. - """) - self.assertSelects(tree.find_all(id='first'), - ["Matching a.", "Matching b."]) - - def test_find_all_by_utf8_attribute_value(self): - peace = u"םולש".encode("utf8") - data = u''.encode("utf8") - soup = self.soup(data) - self.assertEqual([soup.a], soup.find_all(title=peace)) - self.assertEqual([soup.a], soup.find_all(title=peace.decode("utf8"))) - self.assertEqual([soup.a], soup.find_all(title=[peace, "something else"])) - - def test_find_all_by_attribute_dict(self): - # You can pass in a dictionary as the argument 'attrs'. 
This - # lets you search for attributes like 'name' (a fixed argument - # to find_all) and 'class' (a reserved word in Python.) - tree = self.soup(""" - Name match. - Class match. - Non-match. - A tag called 'name1'. - """) - - # This doesn't do what you want. - self.assertSelects(tree.find_all(name='name1'), - ["A tag called 'name1'."]) - # This does what you want. - self.assertSelects(tree.find_all(attrs={'name' : 'name1'}), - ["Name match."]) - - self.assertSelects(tree.find_all(attrs={'class' : 'class2'}), - ["Class match."]) - - def test_find_all_by_class(self): - tree = self.soup(""" - Class 1. - Class 2. - Class 1. - Class 3 and 4. - """) - - # Passing in the class_ keyword argument will search against - # the 'class' attribute. - self.assertSelects(tree.find_all('a', class_='1'), ['Class 1.']) - self.assertSelects(tree.find_all('c', class_='3'), ['Class 3 and 4.']) - self.assertSelects(tree.find_all('c', class_='4'), ['Class 3 and 4.']) - - # Passing in a string to 'attrs' will also search the CSS class. - self.assertSelects(tree.find_all('a', '1'), ['Class 1.']) - self.assertSelects(tree.find_all(attrs='1'), ['Class 1.', 'Class 1.']) - self.assertSelects(tree.find_all('c', '3'), ['Class 3 and 4.']) - self.assertSelects(tree.find_all('c', '4'), ['Class 3 and 4.']) - - def test_find_by_class_when_multiple_classes_present(self): - tree = self.soup("Found it") - - f = tree.find_all("gar", class_=re.compile("o")) - self.assertSelects(f, ["Found it"]) - - f = tree.find_all("gar", class_=re.compile("a")) - self.assertSelects(f, ["Found it"]) - - # Since the class is not the string "foo bar", but the two - # strings "foo" and "bar", this will not find anything. 
- f = tree.find_all("gar", class_=re.compile("o b")) - self.assertSelects(f, []) - - def test_find_all_with_non_dictionary_for_attrs_finds_by_class(self): - soup = self.soup("Found it") - - self.assertSelects(soup.find_all("a", re.compile("ba")), ["Found it"]) - - def big_attribute_value(value): - return len(value) > 3 - - self.assertSelects(soup.find_all("a", big_attribute_value), []) - - def small_attribute_value(value): - return len(value) <= 3 - - self.assertSelects( - soup.find_all("a", small_attribute_value), ["Found it"]) - - def test_find_all_with_string_for_attrs_finds_multiple_classes(self): - soup = self.soup('') - a, a2 = soup.find_all("a") - self.assertEqual([a, a2], soup.find_all("a", "foo")) - self.assertEqual([a], soup.find_all("a", "bar")) - - # If you specify the class as a string that contains a - # space, only that specific value will be found. - self.assertEqual([a], soup.find_all("a", class_="foo bar")) - self.assertEqual([a], soup.find_all("a", "foo bar")) - self.assertEqual([], soup.find_all("a", "bar foo")) - - def test_find_all_by_attribute_soupstrainer(self): - tree = self.soup(""" - Match. - Non-match.""") - - strainer = SoupStrainer(attrs={'id' : 'first'}) - self.assertSelects(tree.find_all(strainer), ['Match.']) - - def test_find_all_with_missing_atribute(self): - # You can pass in None as the value of an attribute to find_all. - # This will match tags that do not have that attribute set. - tree = self.soup("""ID present. - No ID present. - ID is empty.""") - self.assertSelects(tree.find_all('a', id=None), ["No ID present."]) - - def test_find_all_with_defined_attribute(self): - # You can pass in None as the value of an attribute to find_all. - # This will match tags that have that attribute set to any value. - tree = self.soup("""ID present. - No ID present. 
- ID is empty.""") - self.assertSelects( - tree.find_all(id=True), ["ID present.", "ID is empty."]) - - def test_find_all_with_numeric_attribute(self): - # If you search for a number, it's treated as a string. - tree = self.soup("""Unquoted attribute. - Quoted attribute.""") - - expected = ["Unquoted attribute.", "Quoted attribute."] - self.assertSelects(tree.find_all(id=1), expected) - self.assertSelects(tree.find_all(id="1"), expected) - - def test_find_all_with_list_attribute_values(self): - # You can pass a list of attribute values instead of just one, - # and you'll get tags that match any of the values. - tree = self.soup("""1 - 2 - 3 - No ID.""") - self.assertSelects(tree.find_all(id=["1", "3", "4"]), - ["1", "3"]) - - def test_find_all_with_regular_expression_attribute_value(self): - # You can pass a regular expression as an attribute value, and - # you'll get tags whose values for that attribute match the - # regular expression. - tree = self.soup("""One a. - Two as. - Mixed as and bs. - One b. - No ID.""") - - self.assertSelects(tree.find_all(id=re.compile("^a+$")), - ["One a.", "Two as."]) - - def test_find_by_name_and_containing_string(self): - soup = self.soup("foobarfoo") - a = soup.a - - self.assertEqual([a], soup.find_all("a", text="foo")) - self.assertEqual([], soup.find_all("a", text="bar")) - self.assertEqual([], soup.find_all("a", text="bar")) - - def test_find_by_name_and_containing_string_when_string_is_buried(self): - soup = self.soup("foofoo") - self.assertEqual(soup.find_all("a"), soup.find_all("a", text="foo")) - - def test_find_by_attribute_and_containing_string(self): - soup = self.soup('foofoo') - a = soup.a - - self.assertEqual([a], soup.find_all(id=2, text="foo")) - self.assertEqual([], soup.find_all(id=1, text="bar")) - - - - -class TestIndex(TreeTest): - """Test Tag.index""" - def test_index(self): - tree = self.soup("""
- Identical - Not identical - Identical - - Identical with child - Also not identical - Identical with child -
""") - div = tree.div - for i, element in enumerate(div.contents): - self.assertEqual(i, div.index(element)) - self.assertRaises(ValueError, tree.index, 1) - - -class TestParentOperations(TreeTest): - """Test navigation and searching through an element's parents.""" - - def setUp(self): - super(TestParentOperations, self).setUp() - self.tree = self.soup('''
    -
      -
        -
          - Start here -
        -
      ''') - self.start = self.tree.b - - - def test_parent(self): - self.assertEqual(self.start.parent['id'], 'bottom') - self.assertEqual(self.start.parent.parent['id'], 'middle') - self.assertEqual(self.start.parent.parent.parent['id'], 'top') - - def test_parent_of_top_tag_is_soup_object(self): - top_tag = self.tree.contents[0] - self.assertEqual(top_tag.parent, self.tree) - - def test_soup_object_has_no_parent(self): - self.assertEqual(None, self.tree.parent) - - def test_find_parents(self): - self.assertSelectsIDs( - self.start.find_parents('ul'), ['bottom', 'middle', 'top']) - self.assertSelectsIDs( - self.start.find_parents('ul', id="middle"), ['middle']) - - def test_find_parent(self): - self.assertEqual(self.start.find_parent('ul')['id'], 'bottom') - self.assertEqual(self.start.find_parent('ul', id='top')['id'], 'top') - - def test_parent_of_text_element(self): - text = self.tree.find(text="Start here") - self.assertEqual(text.parent.name, 'b') - - def test_text_element_find_parent(self): - text = self.tree.find(text="Start here") - self.assertEqual(text.find_parent('ul')['id'], 'bottom') - - def test_parent_generator(self): - parents = [parent['id'] for parent in self.start.parents - if parent is not None and 'id' in parent.attrs] - self.assertEqual(parents, ['bottom', 'middle', 'top']) - - -class ProximityTest(TreeTest): - - def setUp(self): - super(TreeTest, self).setUp() - self.tree = self.soup( - 'OneTwoThree') - - -class TestNextOperations(ProximityTest): - - def setUp(self): - super(TestNextOperations, self).setUp() - self.start = self.tree.b - - def test_next(self): - self.assertEqual(self.start.next_element, "One") - self.assertEqual(self.start.next_element.next_element['id'], "2") - - def test_next_of_last_item_is_none(self): - last = self.tree.find(text="Three") - self.assertEqual(last.next_element, None) - - def test_next_of_root_is_none(self): - # The document root is outside the next/previous chain. 
- self.assertEqual(self.tree.next_element, None) - - def test_find_all_next(self): - self.assertSelects(self.start.find_all_next('b'), ["Two", "Three"]) - self.start.find_all_next(id=3) - self.assertSelects(self.start.find_all_next(id=3), ["Three"]) - - def test_find_next(self): - self.assertEqual(self.start.find_next('b')['id'], '2') - self.assertEqual(self.start.find_next(text="Three"), "Three") - - def test_find_next_for_text_element(self): - text = self.tree.find(text="One") - self.assertEqual(text.find_next("b").string, "Two") - self.assertSelects(text.find_all_next("b"), ["Two", "Three"]) - - def test_next_generator(self): - start = self.tree.find(text="Two") - successors = [node for node in start.next_elements] - # There are two successors: the final tag and its text contents. - tag, contents = successors - self.assertEqual(tag['id'], '3') - self.assertEqual(contents, "Three") - -class TestPreviousOperations(ProximityTest): - - def setUp(self): - super(TestPreviousOperations, self).setUp() - self.end = self.tree.find(text="Three") - - def test_previous(self): - self.assertEqual(self.end.previous_element['id'], "3") - self.assertEqual(self.end.previous_element.previous_element, "Two") - - def test_previous_of_first_item_is_none(self): - first = self.tree.find('html') - self.assertEqual(first.previous_element, None) - - def test_previous_of_root_is_none(self): - # The document root is outside the next/previous chain. - # XXX This is broken! - #self.assertEqual(self.tree.previous_element, None) - pass - - def test_find_all_previous(self): - # The tag containing the "Three" node is the predecessor - # of the "Three" node itself, which is why "Three" shows up - # here. 
- self.assertSelects( - self.end.find_all_previous('b'), ["Three", "Two", "One"]) - self.assertSelects(self.end.find_all_previous(id=1), ["One"]) - - def test_find_previous(self): - self.assertEqual(self.end.find_previous('b')['id'], '3') - self.assertEqual(self.end.find_previous(text="One"), "One") - - def test_find_previous_for_text_element(self): - text = self.tree.find(text="Three") - self.assertEqual(text.find_previous("b").string, "Three") - self.assertSelects( - text.find_all_previous("b"), ["Three", "Two", "One"]) - - def test_previous_generator(self): - start = self.tree.find(text="One") - predecessors = [node for node in start.previous_elements] - - # There are four predecessors: the tag containing "One" - # the tag, the tag, and the tag. - b, body, head, html = predecessors - self.assertEqual(b['id'], '1') - self.assertEqual(body.name, "body") - self.assertEqual(head.name, "head") - self.assertEqual(html.name, "html") - - -class SiblingTest(TreeTest): - - def setUp(self): - super(SiblingTest, self).setUp() - markup = ''' - - - - - - - - - - - ''' - # All that whitespace looks good but makes the tests more - # difficult. Get rid of it. - markup = re.compile("\n\s*").sub("", markup) - self.tree = self.soup(markup) - - -class TestNextSibling(SiblingTest): - - def setUp(self): - super(TestNextSibling, self).setUp() - self.start = self.tree.find(id="1") - - def test_next_sibling_of_root_is_none(self): - self.assertEqual(self.tree.next_sibling, None) - - def test_next_sibling(self): - self.assertEqual(self.start.next_sibling['id'], '2') - self.assertEqual(self.start.next_sibling.next_sibling['id'], '3') - - # Note the difference between next_sibling and next_element. 
- self.assertEqual(self.start.next_element['id'], '1.1') - - def test_next_sibling_may_not_exist(self): - self.assertEqual(self.tree.html.next_sibling, None) - - nested_span = self.tree.find(id="1.1") - self.assertEqual(nested_span.next_sibling, None) - - last_span = self.tree.find(id="4") - self.assertEqual(last_span.next_sibling, None) - - def test_find_next_sibling(self): - self.assertEqual(self.start.find_next_sibling('span')['id'], '2') - - def test_next_siblings(self): - self.assertSelectsIDs(self.start.find_next_siblings("span"), - ['2', '3', '4']) - - self.assertSelectsIDs(self.start.find_next_siblings(id='3'), ['3']) - - def test_next_sibling_for_text_element(self): - soup = self.soup("Foobarbaz") - start = soup.find(text="Foo") - self.assertEqual(start.next_sibling.name, 'b') - self.assertEqual(start.next_sibling.next_sibling, 'baz') - - self.assertSelects(start.find_next_siblings('b'), ['bar']) - self.assertEqual(start.find_next_sibling(text="baz"), "baz") - self.assertEqual(start.find_next_sibling(text="nonesuch"), None) - - -class TestPreviousSibling(SiblingTest): - - def setUp(self): - super(TestPreviousSibling, self).setUp() - self.end = self.tree.find(id="4") - - def test_previous_sibling_of_root_is_none(self): - self.assertEqual(self.tree.previous_sibling, None) - - def test_previous_sibling(self): - self.assertEqual(self.end.previous_sibling['id'], '3') - self.assertEqual(self.end.previous_sibling.previous_sibling['id'], '2') - - # Note the difference between previous_sibling and previous_element. 
- self.assertEqual(self.end.previous_element['id'], '3.1') - - def test_previous_sibling_may_not_exist(self): - self.assertEqual(self.tree.html.previous_sibling, None) - - nested_span = self.tree.find(id="1.1") - self.assertEqual(nested_span.previous_sibling, None) - - first_span = self.tree.find(id="1") - self.assertEqual(first_span.previous_sibling, None) - - def test_find_previous_sibling(self): - self.assertEqual(self.end.find_previous_sibling('span')['id'], '3') - - def test_previous_siblings(self): - self.assertSelectsIDs(self.end.find_previous_siblings("span"), - ['3', '2', '1']) - - self.assertSelectsIDs(self.end.find_previous_siblings(id='1'), ['1']) - - def test_previous_sibling_for_text_element(self): - soup = self.soup("Foobarbaz") - start = soup.find(text="baz") - self.assertEqual(start.previous_sibling.name, 'b') - self.assertEqual(start.previous_sibling.previous_sibling, 'Foo') - - self.assertSelects(start.find_previous_siblings('b'), ['bar']) - self.assertEqual(start.find_previous_sibling(text="Foo"), "Foo") - self.assertEqual(start.find_previous_sibling(text="nonesuch"), None) - - -class TestTagCreation(SoupTest): - """Test the ability to create new tags.""" - def test_new_tag(self): - soup = self.soup("") - new_tag = soup.new_tag("foo", bar="baz") - self.assertTrue(isinstance(new_tag, Tag)) - self.assertEqual("foo", new_tag.name) - self.assertEqual(dict(bar="baz"), new_tag.attrs) - self.assertEqual(None, new_tag.parent) - - def test_tag_inherits_self_closing_rules_from_builder(self): - if XML_BUILDER_PRESENT: - xml_soup = BeautifulSoup("", "xml") - xml_br = xml_soup.new_tag("br") - xml_p = xml_soup.new_tag("p") - - # Both the
      and

      tag are empty-element, just because - # they have no contents. - self.assertEqual(b"
      ", xml_br.encode()) - self.assertEqual(b"

      ", xml_p.encode()) - - html_soup = BeautifulSoup("", "html") - html_br = html_soup.new_tag("br") - html_p = html_soup.new_tag("p") - - # The HTML builder users HTML's rules about which tags are - # empty-element tags, and the new tags reflect these rules. - self.assertEqual(b"
      ", html_br.encode()) - self.assertEqual(b"

      ", html_p.encode()) - - def test_new_string_creates_navigablestring(self): - soup = self.soup("") - s = soup.new_string("foo") - self.assertEqual("foo", s) - self.assertTrue(isinstance(s, NavigableString)) - - def test_new_string_can_create_navigablestring_subclass(self): - soup = self.soup("") - s = soup.new_string("foo", Comment) - self.assertEqual("foo", s) - self.assertTrue(isinstance(s, Comment)) - -class TestTreeModification(SoupTest): - - def test_attribute_modification(self): - soup = self.soup('') - soup.a['id'] = 2 - self.assertEqual(soup.decode(), self.document_for('')) - del(soup.a['id']) - self.assertEqual(soup.decode(), self.document_for('')) - soup.a['id2'] = 'foo' - self.assertEqual(soup.decode(), self.document_for('')) - - def test_new_tag_creation(self): - builder = builder_registry.lookup('html')() - soup = self.soup("", builder=builder) - a = Tag(soup, builder, 'a') - ol = Tag(soup, builder, 'ol') - a['href'] = 'http://foo.com/' - soup.body.insert(0, a) - soup.body.insert(1, ol) - self.assertEqual( - soup.body.encode(), - b'
        ') - - def test_append_to_contents_moves_tag(self): - doc = """

        Don't leave me here.

        -

        Don\'t leave!

        """ - soup = self.soup(doc) - second_para = soup.find(id='2') - bold = soup.b - - # Move the tag to the end of the second paragraph. - soup.find(id='2').append(soup.b) - - # The tag is now a child of the second paragraph. - self.assertEqual(bold.parent, second_para) - - self.assertEqual( - soup.decode(), self.document_for( - '

        Don\'t leave me .

        \n' - '

        Don\'t leave!here

        ')) - - def test_replace_with_returns_thing_that_was_replaced(self): - text = "" - soup = self.soup(text) - a = soup.a - new_a = a.replace_with(soup.c) - self.assertEqual(a, new_a) - - def test_unwrap_returns_thing_that_was_replaced(self): - text = "" - soup = self.soup(text) - a = soup.a - new_a = a.unwrap() - self.assertEqual(a, new_a) - - def test_replace_tag_with_itself(self): - text = "Foo" - soup = self.soup(text) - c = soup.c - soup.c.replace_with(c) - self.assertEqual(soup.decode(), self.document_for(text)) - - def test_replace_tag_with_its_parent_raises_exception(self): - text = "" - soup = self.soup(text) - self.assertRaises(ValueError, soup.b.replace_with, soup.a) - - def test_insert_tag_into_itself_raises_exception(self): - text = "" - soup = self.soup(text) - self.assertRaises(ValueError, soup.a.insert, 0, soup.a) - - def test_replace_with_maintains_next_element_throughout(self): - soup = self.soup('

        onethree

        ') - a = soup.a - b = a.contents[0] - # Make it so the tag has two text children. - a.insert(1, "two") - - # Now replace each one with the empty string. - left, right = a.contents - left.replaceWith('') - right.replaceWith('') - - # The tag is still connected to the tree. - self.assertEqual("three", soup.b.string) - - def test_replace_final_node(self): - soup = self.soup("Argh!") - soup.find(text="Argh!").replace_with("Hooray!") - new_text = soup.find(text="Hooray!") - b = soup.b - self.assertEqual(new_text.previous_element, b) - self.assertEqual(new_text.parent, b) - self.assertEqual(new_text.previous_element.next_element, new_text) - self.assertEqual(new_text.next_element, None) - - def test_consecutive_text_nodes(self): - # A builder should never create two consecutive text nodes, - # but if you insert one next to another, Beautiful Soup will - # handle it correctly. - soup = self.soup("Argh!") - soup.b.insert(1, "Hooray!") - - self.assertEqual( - soup.decode(), self.document_for( - "Argh!Hooray!")) - - new_text = soup.find(text="Hooray!") - self.assertEqual(new_text.previous_element, "Argh!") - self.assertEqual(new_text.previous_element.next_element, new_text) - - self.assertEqual(new_text.previous_sibling, "Argh!") - self.assertEqual(new_text.previous_sibling.next_sibling, new_text) - - self.assertEqual(new_text.next_sibling, None) - self.assertEqual(new_text.next_element, soup.c) - - def test_insert_string(self): - soup = self.soup("") - soup.a.insert(0, "bar") - soup.a.insert(0, "foo") - # The string were added to the tag. - self.assertEqual(["foo", "bar"], soup.a.contents) - # And they were converted to NavigableStrings. 
- self.assertEqual(soup.a.contents[0].next_element, "bar") - - def test_insert_tag(self): - builder = self.default_builder - soup = self.soup( - "Findlady!", builder=builder) - magic_tag = Tag(soup, builder, 'magictag') - magic_tag.insert(0, "the") - soup.a.insert(1, magic_tag) - - self.assertEqual( - soup.decode(), self.document_for( - "Findthelady!")) - - # Make sure all the relationships are hooked up correctly. - b_tag = soup.b - self.assertEqual(b_tag.next_sibling, magic_tag) - self.assertEqual(magic_tag.previous_sibling, b_tag) - - find = b_tag.find(text="Find") - self.assertEqual(find.next_element, magic_tag) - self.assertEqual(magic_tag.previous_element, find) - - c_tag = soup.c - self.assertEqual(magic_tag.next_sibling, c_tag) - self.assertEqual(c_tag.previous_sibling, magic_tag) - - the = magic_tag.find(text="the") - self.assertEqual(the.parent, magic_tag) - self.assertEqual(the.next_element, c_tag) - self.assertEqual(c_tag.previous_element, the) - - def test_append_child_thats_already_at_the_end(self): - data = "" - soup = self.soup(data) - soup.a.append(soup.b) - self.assertEqual(data, soup.decode()) - - def test_move_tag_to_beginning_of_parent(self): - data = "" - soup = self.soup(data) - soup.a.insert(0, soup.d) - self.assertEqual("", soup.decode()) - - def test_insert_works_on_empty_element_tag(self): - # This is a little strange, since most HTML parsers don't allow - # markup like this to come through. But in general, we don't - # know what the parser would or wouldn't have allowed, so - # I'm letting this succeed for now. - soup = self.soup("
        ") - soup.br.insert(1, "Contents") - self.assertEqual(str(soup.br), "
        Contents
        ") - - def test_insert_before(self): - soup = self.soup("foobar") - soup.b.insert_before("BAZ") - soup.a.insert_before("QUUX") - self.assertEqual( - soup.decode(), self.document_for("QUUXfooBAZbar")) - - soup.a.insert_before(soup.b) - self.assertEqual( - soup.decode(), self.document_for("QUUXbarfooBAZ")) - - def test_insert_after(self): - soup = self.soup("foobar") - soup.b.insert_after("BAZ") - soup.a.insert_after("QUUX") - self.assertEqual( - soup.decode(), self.document_for("fooQUUXbarBAZ")) - soup.b.insert_after(soup.a) - self.assertEqual( - soup.decode(), self.document_for("QUUXbarfooBAZ")) - - def test_insert_after_raises_exception_if_after_has_no_meaning(self): - soup = self.soup("") - tag = soup.new_tag("a") - string = soup.new_string("") - self.assertRaises(ValueError, string.insert_after, tag) - self.assertRaises(NotImplementedError, soup.insert_after, tag) - self.assertRaises(ValueError, tag.insert_after, tag) - - def test_insert_before_raises_notimplementederror_if_before_has_no_meaning(self): - soup = self.soup("") - tag = soup.new_tag("a") - string = soup.new_string("") - self.assertRaises(ValueError, string.insert_before, tag) - self.assertRaises(NotImplementedError, soup.insert_before, tag) - self.assertRaises(ValueError, tag.insert_before, tag) - - def test_replace_with(self): - soup = self.soup( - "

        There's no business like show business

        ") - no, show = soup.find_all('b') - show.replace_with(no) - self.assertEqual( - soup.decode(), - self.document_for( - "

        There's business like no business

        ")) - - self.assertEqual(show.parent, None) - self.assertEqual(no.parent, soup.p) - self.assertEqual(no.next_element, "no") - self.assertEqual(no.next_sibling, " business") - - def test_replace_first_child(self): - data = "" - soup = self.soup(data) - soup.b.replace_with(soup.c) - self.assertEqual("", soup.decode()) - - def test_replace_last_child(self): - data = "" - soup = self.soup(data) - soup.c.replace_with(soup.b) - self.assertEqual("", soup.decode()) - - def test_nested_tag_replace_with(self): - soup = self.soup( - """Wereservetherighttorefuseservice""") - - # Replace the entire tag and its contents ("reserve the - # right") with the tag ("refuse"). - remove_tag = soup.b - move_tag = soup.f - remove_tag.replace_with(move_tag) - - self.assertEqual( - soup.decode(), self.document_for( - "Werefusetoservice")) - - # The tag is now an orphan. - self.assertEqual(remove_tag.parent, None) - self.assertEqual(remove_tag.find(text="right").next_element, None) - self.assertEqual(remove_tag.previous_element, None) - self.assertEqual(remove_tag.next_sibling, None) - self.assertEqual(remove_tag.previous_sibling, None) - - # The tag is now connected to the tag. - self.assertEqual(move_tag.parent, soup.a) - self.assertEqual(move_tag.previous_element, "We") - self.assertEqual(move_tag.next_element.next_element, soup.e) - self.assertEqual(move_tag.next_sibling, None) - - # The gap where the tag used to be has been mended, and - # the word "to" is now connected to the tag. - to_text = soup.find(text="to") - g_tag = soup.g - self.assertEqual(to_text.next_element, g_tag) - self.assertEqual(to_text.next_sibling, g_tag) - self.assertEqual(g_tag.previous_element, to_text) - self.assertEqual(g_tag.previous_sibling, to_text) - - def test_unwrap(self): - tree = self.soup(""" -

        Unneeded formatting is unneeded

        - """) - tree.em.unwrap() - self.assertEqual(tree.em, None) - self.assertEqual(tree.p.text, "Unneeded formatting is unneeded") - - def test_wrap(self): - soup = self.soup("I wish I was bold.") - value = soup.string.wrap(soup.new_tag("b")) - self.assertEqual(value.decode(), "I wish I was bold.") - self.assertEqual( - soup.decode(), self.document_for("I wish I was bold.")) - - def test_wrap_extracts_tag_from_elsewhere(self): - soup = self.soup("I wish I was bold.") - soup.b.next_sibling.wrap(soup.b) - self.assertEqual( - soup.decode(), self.document_for("I wish I was bold.")) - - def test_wrap_puts_new_contents_at_the_end(self): - soup = self.soup("I like being bold.I wish I was bold.") - soup.b.next_sibling.wrap(soup.b) - self.assertEqual(2, len(soup.b.contents)) - self.assertEqual( - soup.decode(), self.document_for( - "I like being bold.I wish I was bold.")) - - def test_extract(self): - soup = self.soup( - 'Some content. More content.') - - self.assertEqual(len(soup.body.contents), 3) - extracted = soup.find(id="nav").extract() - - self.assertEqual( - soup.decode(), "Some content. More content.") - self.assertEqual(extracted.decode(), '') - - # The extracted tag is now an orphan. - self.assertEqual(len(soup.body.contents), 2) - self.assertEqual(extracted.parent, None) - self.assertEqual(extracted.previous_element, None) - self.assertEqual(extracted.next_element.next_element, None) - - # The gap where the extracted tag used to be has been mended. - content_1 = soup.find(text="Some content. ") - content_2 = soup.find(text=" More content.") - self.assertEqual(content_1.next_element, content_2) - self.assertEqual(content_1.next_sibling, content_2) - self.assertEqual(content_2.previous_element, content_1) - self.assertEqual(content_2.previous_sibling, content_1) - - def test_extract_distinguishes_between_identical_strings(self): - soup = self.soup("
        foobar") - foo_1 = soup.a.string - bar_1 = soup.b.string - foo_2 = soup.new_string("foo") - bar_2 = soup.new_string("bar") - soup.a.append(foo_2) - soup.b.append(bar_2) - - # Now there are two identical strings in the tag, and two - # in the tag. Let's remove the first "foo" and the second - # "bar". - foo_1.extract() - bar_2.extract() - self.assertEqual(foo_2, soup.a.string) - self.assertEqual(bar_2, soup.b.string) - - def test_clear(self): - """Tag.clear()""" - soup = self.soup("

        String Italicized and another

        ") - # clear using extract() - a = soup.a - soup.p.clear() - self.assertEqual(len(soup.p.contents), 0) - self.assertTrue(hasattr(a, "contents")) - - # clear using decompose() - em = a.em - a.clear(decompose=True) - self.assertEqual(0, len(em.contents)) - - def test_string_set(self): - """Tag.string = 'string'""" - soup = self.soup(" ") - soup.a.string = "foo" - self.assertEqual(soup.a.contents, ["foo"]) - soup.b.string = "bar" - self.assertEqual(soup.b.contents, ["bar"]) - - def test_string_set_does_not_affect_original_string(self): - soup = self.soup("foobar") - soup.b.string = soup.c.string - self.assertEqual(soup.a.encode(), b"barbar") - - def test_set_string_preserves_class_of_string(self): - soup = self.soup("") - cdata = CData("foo") - soup.a.string = cdata - self.assertTrue(isinstance(soup.a.string, CData)) - -class TestElementObjects(SoupTest): - """Test various features of element objects.""" - - def test_len(self): - """The length of an element is its number of children.""" - soup = self.soup("123") - - # The BeautifulSoup object itself contains one element: the - # tag. - self.assertEqual(len(soup.contents), 1) - self.assertEqual(len(soup), 1) - - # The tag contains three elements: the text node "1", the - # tag, and the text node "3". - self.assertEqual(len(soup.top), 3) - self.assertEqual(len(soup.top.contents), 3) - - def test_member_access_invokes_find(self): - """Accessing a Python member .foo invokes find('foo')""" - soup = self.soup('') - self.assertEqual(soup.b, soup.find('b')) - self.assertEqual(soup.b.i, soup.find('b').find('i')) - self.assertEqual(soup.a, None) - - def test_deprecated_member_access(self): - soup = self.soup('') - with warnings.catch_warnings(record=True) as w: - tag = soup.bTag - self.assertEqual(soup.b, tag) - self.assertEqual( - '.bTag is deprecated, use .find("b") instead.', - str(w[0].message)) - - def test_has_attr(self): - """has_attr() checks for the presence of an attribute. 
- - Please note note: has_attr() is different from - __in__. has_attr() checks the tag's attributes and __in__ - checks the tag's chidlren. - """ - soup = self.soup("") - self.assertTrue(soup.foo.has_attr('attr')) - self.assertFalse(soup.foo.has_attr('attr2')) - - - def test_attributes_come_out_in_alphabetical_order(self): - markup = '' - self.assertSoupEquals(markup, '') - - def test_string(self): - # A tag that contains only a text node makes that node - # available as .string. - soup = self.soup("foo") - self.assertEqual(soup.b.string, 'foo') - - def test_empty_tag_has_no_string(self): - # A tag with no children has no .stirng. - soup = self.soup("") - self.assertEqual(soup.b.string, None) - - def test_tag_with_multiple_children_has_no_string(self): - # A tag with no children has no .string. - soup = self.soup("foo") - self.assertEqual(soup.b.string, None) - - soup = self.soup("foobar
        ") - self.assertEqual(soup.b.string, None) - - # Even if all the children are strings, due to trickery, - # it won't work--but this would be a good optimization. - soup = self.soup("foo
        ") - soup.a.insert(1, "bar") - self.assertEqual(soup.a.string, None) - - def test_tag_with_recursive_string_has_string(self): - # A tag with a single child which has a .string inherits that - # .string. - soup = self.soup("foo") - self.assertEqual(soup.a.string, "foo") - self.assertEqual(soup.string, "foo") - - def test_lack_of_string(self): - """Only a tag containing a single text node has a .string.""" - soup = self.soup("feo") - self.assertFalse(soup.b.string) - - soup = self.soup("") - self.assertFalse(soup.b.string) - - def test_all_text(self): - """Tag.text and Tag.get_text(sep=u"") -> all child text, concatenated""" - soup = self.soup("ar t ") - self.assertEqual(soup.a.text, "ar t ") - self.assertEqual(soup.a.get_text(strip=True), "art") - self.assertEqual(soup.a.get_text(","), "a,r, , t ") - self.assertEqual(soup.a.get_text(",", strip=True), "a,r,t") - - def test_get_text_ignores_comments(self): - soup = self.soup("foobar") - self.assertEqual(soup.get_text(), "foobar") - - self.assertEqual( - soup.get_text(types=(NavigableString, Comment)), "fooIGNOREbar") - self.assertEqual( - soup.get_text(types=None), "fooIGNOREbar") - - def test_all_strings_ignores_comments(self): - soup = self.soup("foobar") - self.assertEqual(['foo', 'bar'], list(soup.strings)) - -class TestCDAtaListAttributes(SoupTest): - - """Testing cdata-list attributes like 'class'. - """ - def test_single_value_becomes_list(self): - soup = self.soup("") - self.assertEqual(["foo"],soup.a['class']) - - def test_multiple_values_becomes_list(self): - soup = self.soup("") - self.assertEqual(["foo", "bar"], soup.a['class']) - - def test_multiple_values_separated_by_weird_whitespace(self): - soup = self.soup("") - self.assertEqual(["foo", "bar", "baz"],soup.a['class']) - - def test_attributes_joined_into_string_on_output(self): - soup = self.soup("") - self.assertEqual(b'', soup.a.encode()) - - def test_accept_charset(self): - soup = self.soup('
        ') - self.assertEqual(['ISO-8859-1', 'UTF-8'], soup.form['accept-charset']) - - def test_cdata_attribute_applying_only_to_one_tag(self): - data = '' - soup = self.soup(data) - # We saw in another test that accept-charset is a cdata-list - # attribute for the tag. But it's not a cdata-list - # attribute for any other tag. - self.assertEqual('ISO-8859-1 UTF-8', soup.a['accept-charset']) - - -class TestPersistence(SoupTest): - "Testing features like pickle and deepcopy." - - def setUp(self): - super(TestPersistence, self).setUp() - self.page = """ - - - -Beautiful Soup: We called him Tortoise because he taught us. - - - - - - -foo -bar - -""" - self.tree = self.soup(self.page) - - def test_pickle_and_unpickle_identity(self): - # Pickling a tree, then unpickling it, yields a tree identical - # to the original. - dumped = pickle.dumps(self.tree, 2) - loaded = pickle.loads(dumped) - self.assertEqual(loaded.__class__, BeautifulSoup) - self.assertEqual(loaded.decode(), self.tree.decode()) - - def test_deepcopy_identity(self): - # Making a deepcopy of a tree yields an identical tree. - copied = copy.deepcopy(self.tree) - self.assertEqual(copied.decode(), self.tree.decode()) - - def test_unicode_pickle(self): - # A tree containing Unicode characters can be pickled. - html = u"\N{SNOWMAN}" - soup = self.soup(html) - dumped = pickle.dumps(soup, pickle.HIGHEST_PROTOCOL) - loaded = pickle.loads(dumped) - self.assertEqual(loaded.decode(), soup.decode()) - - -class TestSubstitutions(SoupTest): - - def test_default_formatter_is_minimal(self): - markup = u"<<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>" - soup = self.soup(markup) - decoded = soup.decode(formatter="minimal") - # The < is converted back into < but the e-with-acute is left alone. 
- self.assertEqual( - decoded, - self.document_for( - u"<<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>")) - - def test_formatter_html(self): - markup = u"<<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>" - soup = self.soup(markup) - decoded = soup.decode(formatter="html") - self.assertEqual( - decoded, - self.document_for("<<Sacré bleu!>>")) - - def test_formatter_minimal(self): - markup = u"<<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>" - soup = self.soup(markup) - decoded = soup.decode(formatter="minimal") - # The < is converted back into < but the e-with-acute is left alone. - self.assertEqual( - decoded, - self.document_for( - u"<<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>")) - - def test_formatter_null(self): - markup = u"<<Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>" - soup = self.soup(markup) - decoded = soup.decode(formatter=None) - # Neither the angle brackets nor the e-with-acute are converted. - # This is not valid HTML, but it's what the user wanted. - self.assertEqual(decoded, - self.document_for(u"<>")) - - def test_formatter_custom(self): - markup = u"<foo>bar" - soup = self.soup(markup) - decoded = soup.decode(formatter = lambda x: x.upper()) - # Instead of normal entity conversion code, the custom - # callable is called on every string. 
- self.assertEqual( - decoded, - self.document_for(u"BAR")) - - def test_formatter_is_run_on_attribute_values(self): - markup = u'e' - soup = self.soup(markup) - a = soup.a - - expect_minimal = u'e' - - self.assertEqual(expect_minimal, a.decode()) - self.assertEqual(expect_minimal, a.decode(formatter="minimal")) - - expect_html = u'e' - self.assertEqual(expect_html, a.decode(formatter="html")) - - self.assertEqual(markup, a.decode(formatter=None)) - expect_upper = u'E' - self.assertEqual(expect_upper, a.decode(formatter=lambda x: x.upper())) - - def test_formatter_skips_script_tag_for_html_documents(self): - doc = """ - -""" - encoded = BeautifulSoup(doc).encode() - self.assertTrue(b"< < hey > >" in encoded) - - def test_formatter_skips_style_tag_for_html_documents(self): - doc = """ - -""" - encoded = BeautifulSoup(doc).encode() - self.assertTrue(b"< < hey > >" in encoded) - - def test_prettify_leaves_preformatted_text_alone(self): - soup = self.soup("
        foo
          \tbar\n  \n  
        baz ") - # Everything outside the
         tag is reformatted, but everything
        -        # inside is left alone.
        -        self.assertEqual(
        -            u'
        \n foo\n
          \tbar\n  \n  
        \n baz\n
        ', - soup.div.prettify()) - - def test_prettify_accepts_formatter(self): - soup = BeautifulSoup("foo") - pretty = soup.prettify(formatter = lambda x: x.upper()) - self.assertTrue("FOO" in pretty) - - def test_prettify_outputs_unicode_by_default(self): - soup = self.soup("") - self.assertEqual(unicode, type(soup.prettify())) - - def test_prettify_can_encode_data(self): - soup = self.soup("") - self.assertEqual(bytes, type(soup.prettify("utf-8"))) - - def test_html_entity_substitution_off_by_default(self): - markup = u"Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!" - soup = self.soup(markup) - encoded = soup.b.encode("utf-8") - self.assertEqual(encoded, markup.encode('utf-8')) - - def test_encoding_substitution(self): - # Here's the tag saying that a document is - # encoded in Shift-JIS. - meta_tag = ('') - soup = self.soup(meta_tag) - - # Parse the document, and the charset apprears unchanged. - self.assertEqual(soup.meta['content'], 'text/html; charset=x-sjis') - - # Encode the document into some encoding, and the encoding is - # substituted into the meta tag. - utf_8 = soup.encode("utf-8") - self.assertTrue(b"charset=utf-8" in utf_8) - - euc_jp = soup.encode("euc_jp") - self.assertTrue(b"charset=euc_jp" in euc_jp) - - shift_jis = soup.encode("shift-jis") - self.assertTrue(b"charset=shift-jis" in shift_jis) - - utf_16_u = soup.encode("utf-16").decode("utf-16") - self.assertTrue("charset=utf-16" in utf_16_u) - - def test_encoding_substitution_doesnt_happen_if_tag_is_strained(self): - markup = ('
        foo
        ') - - # Beautiful Soup used to try to rewrite the meta tag even if the - # meta tag got filtered out by the strainer. This test makes - # sure that doesn't happen. - strainer = SoupStrainer('pre') - soup = self.soup(markup, parse_only=strainer) - self.assertEqual(soup.contents[0].name, 'pre') - -class TestEncoding(SoupTest): - """Test the ability to encode objects into strings.""" - - def test_unicode_string_can_be_encoded(self): - html = u"\N{SNOWMAN}" - soup = self.soup(html) - self.assertEqual(soup.b.string.encode("utf-8"), - u"\N{SNOWMAN}".encode("utf-8")) - - def test_tag_containing_unicode_string_can_be_encoded(self): - html = u"\N{SNOWMAN}" - soup = self.soup(html) - self.assertEqual( - soup.b.encode("utf-8"), html.encode("utf-8")) - - def test_encoding_substitutes_unrecognized_characters_by_default(self): - html = u"\N{SNOWMAN}" - soup = self.soup(html) - self.assertEqual(soup.b.encode("ascii"), b"") - - def test_encoding_can_be_made_strict(self): - html = u"\N{SNOWMAN}" - soup = self.soup(html) - self.assertRaises( - UnicodeEncodeError, soup.encode, "ascii", errors="strict") - - def test_decode_contents(self): - html = u"\N{SNOWMAN}" - soup = self.soup(html) - self.assertEqual(u"\N{SNOWMAN}", soup.b.decode_contents()) - - def test_encode_contents(self): - html = u"\N{SNOWMAN}" - soup = self.soup(html) - self.assertEqual( - u"\N{SNOWMAN}".encode("utf8"), soup.b.encode_contents( - encoding="utf8")) - - def test_deprecated_renderContents(self): - html = u"\N{SNOWMAN}" - soup = self.soup(html) - self.assertEqual( - u"\N{SNOWMAN}".encode("utf8"), soup.b.renderContents()) - -class TestNavigableStringSubclasses(SoupTest): - - def test_cdata(self): - # None of the current builders turn CDATA sections into CData - # objects, but you can create them manually. 
- soup = self.soup("") - cdata = CData("foo") - soup.insert(1, cdata) - self.assertEqual(str(soup), "") - self.assertEqual(soup.find(text="foo"), "foo") - self.assertEqual(soup.contents[0], "foo") - - def test_cdata_is_never_formatted(self): - """Text inside a CData object is passed into the formatter. - - But the return value is ignored. - """ - - self.count = 0 - def increment(*args): - self.count += 1 - return "BITTER FAILURE" - - soup = self.soup("") - cdata = CData("<><><>") - soup.insert(1, cdata) - self.assertEqual( - b"<><>]]>", soup.encode(formatter=increment)) - self.assertEqual(1, self.count) - - def test_doctype_ends_in_newline(self): - # Unlike other NavigableString subclasses, a DOCTYPE always ends - # in a newline. - doctype = Doctype("foo") - soup = self.soup("") - soup.insert(1, doctype) - self.assertEqual(soup.encode(), b"\n") - - -class TestSoupSelector(TreeTest): - - HTML = """ - - - -The title - - - - -
        -
        -

        An H1

        -

        Some text

        -

        Some more text

        -

        An H2

        -

        Another

        -Bob -

        Another H2

        -me - -span1a1 -span1a2 test - -span2a1 - - - -
        -

        English

        -

        English UK

        -

        English US

        -

        French

        -
        - - -""" - - def setUp(self): - self.soup = BeautifulSoup(self.HTML) - - def assertSelects(self, selector, expected_ids): - el_ids = [el['id'] for el in self.soup.select(selector)] - el_ids.sort() - expected_ids.sort() - self.assertEqual(expected_ids, el_ids, - "Selector %s, expected [%s], got [%s]" % ( - selector, ', '.join(expected_ids), ', '.join(el_ids) - ) - ) - - assertSelect = assertSelects - - def assertSelectMultiple(self, *tests): - for selector, expected_ids in tests: - self.assertSelect(selector, expected_ids) - - def test_one_tag_one(self): - els = self.soup.select('title') - self.assertEqual(len(els), 1) - self.assertEqual(els[0].name, 'title') - self.assertEqual(els[0].contents, [u'The title']) - - def test_one_tag_many(self): - els = self.soup.select('div') - self.assertEqual(len(els), 3) - for div in els: - self.assertEqual(div.name, 'div') - - def test_tag_in_tag_one(self): - els = self.soup.select('div div') - self.assertSelects('div div', ['inner']) - - def test_tag_in_tag_many(self): - for selector in ('html div', 'html body div', 'body div'): - self.assertSelects(selector, ['main', 'inner', 'footer']) - - def test_tag_no_match(self): - self.assertEqual(len(self.soup.select('del')), 0) - - def test_invalid_tag(self): - self.assertRaises(ValueError, self.soup.select, 'tag%t') - - def test_header_tags(self): - self.assertSelectMultiple( - ('h1', ['header1']), - ('h2', ['header2', 'header3']), - ) - - def test_class_one(self): - for selector in ('.onep', 'p.onep', 'html p.onep'): - els = self.soup.select(selector) - self.assertEqual(len(els), 1) - self.assertEqual(els[0].name, 'p') - self.assertEqual(els[0]['class'], ['onep']) - - def test_class_mismatched_tag(self): - els = self.soup.select('div.onep') - self.assertEqual(len(els), 0) - - def test_one_id(self): - for selector in ('div#inner', '#inner', 'div div#inner'): - self.assertSelects(selector, ['inner']) - - def test_bad_id(self): - els = self.soup.select('#doesnotexist') - 
self.assertEqual(len(els), 0) - - def test_items_in_id(self): - els = self.soup.select('div#inner p') - self.assertEqual(len(els), 3) - for el in els: - self.assertEqual(el.name, 'p') - self.assertEqual(els[1]['class'], ['onep']) - self.assertFalse(els[0].has_attr('class')) - - def test_a_bunch_of_emptys(self): - for selector in ('div#main del', 'div#main div.oops', 'div div#main'): - self.assertEqual(len(self.soup.select(selector)), 0) - - def test_multi_class_support(self): - for selector in ('.class1', 'p.class1', '.class2', 'p.class2', - '.class3', 'p.class3', 'html p.class2', 'div#inner .class2'): - self.assertSelects(selector, ['pmulti']) - - def test_multi_class_selection(self): - for selector in ('.class1.class3', '.class3.class2', - '.class1.class2.class3'): - self.assertSelects(selector, ['pmulti']) - - def test_child_selector(self): - self.assertSelects('.s1 > a', ['s1a1', 's1a2']) - self.assertSelects('.s1 > a span', ['s1a2s1']) - - def test_child_selector_id(self): - self.assertSelects('.s1 > a#s1a2 span', ['s1a2s1']) - - def test_attribute_equals(self): - self.assertSelectMultiple( - ('p[class="onep"]', ['p1']), - ('p[id="p1"]', ['p1']), - ('[class="onep"]', ['p1']), - ('[id="p1"]', ['p1']), - ('link[rel="stylesheet"]', ['l1']), - ('link[type="text/css"]', ['l1']), - ('link[href="blah.css"]', ['l1']), - ('link[href="no-blah.css"]', []), - ('[rel="stylesheet"]', ['l1']), - ('[type="text/css"]', ['l1']), - ('[href="blah.css"]', ['l1']), - ('[href="no-blah.css"]', []), - ('p[href="no-blah.css"]', []), - ('[href="no-blah.css"]', []), - ) - - def test_attribute_tilde(self): - self.assertSelectMultiple( - ('p[class~="class1"]', ['pmulti']), - ('p[class~="class2"]', ['pmulti']), - ('p[class~="class3"]', ['pmulti']), - ('[class~="class1"]', ['pmulti']), - ('[class~="class2"]', ['pmulti']), - ('[class~="class3"]', ['pmulti']), - ('a[rel~="friend"]', ['bob']), - ('a[rel~="met"]', ['bob']), - ('[rel~="friend"]', ['bob']), - ('[rel~="met"]', ['bob']), - ) - - def 
test_attribute_startswith(self): - self.assertSelectMultiple( - ('[rel^="style"]', ['l1']), - ('link[rel^="style"]', ['l1']), - ('notlink[rel^="notstyle"]', []), - ('[rel^="notstyle"]', []), - ('link[rel^="notstyle"]', []), - ('link[href^="bla"]', ['l1']), - ('a[href^="http://"]', ['bob', 'me']), - ('[href^="http://"]', ['bob', 'me']), - ('[id^="p"]', ['pmulti', 'p1']), - ('[id^="m"]', ['me', 'main']), - ('div[id^="m"]', ['main']), - ('a[id^="m"]', ['me']), - ) - - def test_attribute_endswith(self): - self.assertSelectMultiple( - ('[href$=".css"]', ['l1']), - ('link[href$=".css"]', ['l1']), - ('link[id$="1"]', ['l1']), - ('[id$="1"]', ['l1', 'p1', 'header1', 's1a1', 's2a1', 's1a2s1']), - ('div[id$="1"]', []), - ('[id$="noending"]', []), - ) - - def test_attribute_contains(self): - self.assertSelectMultiple( - # From test_attribute_startswith - ('[rel*="style"]', ['l1']), - ('link[rel*="style"]', ['l1']), - ('notlink[rel*="notstyle"]', []), - ('[rel*="notstyle"]', []), - ('link[rel*="notstyle"]', []), - ('link[href*="bla"]', ['l1']), - ('a[href*="http://"]', ['bob', 'me']), - ('[href*="http://"]', ['bob', 'me']), - ('[id*="p"]', ['pmulti', 'p1']), - ('div[id*="m"]', ['main']), - ('a[id*="m"]', ['me']), - # From test_attribute_endswith - ('[href*=".css"]', ['l1']), - ('link[href*=".css"]', ['l1']), - ('link[id*="1"]', ['l1']), - ('[id*="1"]', ['l1', 'p1', 'header1', 's1a1', 's1a2', 's2a1', 's1a2s1']), - ('div[id*="1"]', []), - ('[id*="noending"]', []), - # New for this test - ('[href*="."]', ['bob', 'me', 'l1']), - ('a[href*="."]', ['bob', 'me']), - ('link[href*="."]', ['l1']), - ('div[id*="n"]', ['main', 'inner']), - ('div[id*="nn"]', ['inner']), - ) - - def test_attribute_exact_or_hypen(self): - self.assertSelectMultiple( - ('p[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']), - ('[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']), - ('p[lang|="fr"]', ['lang-fr']), - ('p[lang|="gb"]', []), - ) - - def test_attribute_exists(self): - 
self.assertSelectMultiple( - ('[rel]', ['l1', 'bob', 'me']), - ('link[rel]', ['l1']), - ('a[rel]', ['bob', 'me']), - ('[lang]', ['lang-en', 'lang-en-gb', 'lang-en-us', 'lang-fr']), - ('p[class]', ['p1', 'pmulti']), - ('[blah]', []), - ('p[blah]', []), - ) - - def test_nth_of_type(self): - # Try to select first paragraph - els = self.soup.select('div#inner p:nth-of-type(1)') - self.assertEqual(len(els), 1) - self.assertEqual(els[0].string, u'Some text') - - # Try to select third paragraph - els = self.soup.select('div#inner p:nth-of-type(3)') - self.assertEqual(len(els), 1) - self.assertEqual(els[0].string, u'Another') - - # Try to select (non-existent!) fourth paragraph - els = self.soup.select('div#inner p:nth-of-type(4)') - self.assertEqual(len(els), 0) - - # Pass in an invalid value. - self.assertRaises( - ValueError, self.soup.select, 'div p:nth-of-type(0)') - - def test_nth_of_type_direct_descendant(self): - els = self.soup.select('div#inner > p:nth-of-type(1)') - self.assertEqual(len(els), 1) - self.assertEqual(els[0].string, u'Some text') - - def test_id_child_selector_nth_of_type(self): - self.assertSelects('#inner > p:nth-of-type(2)', ['p1']) - - def test_select_on_element(self): - # Other tests operate on the tree; this operates on an element - # within the tree. - inner = self.soup.find("div", id="main") - selected = inner.select("div") - # The
        tag was selected. The