2016-07-23 23:59:02 +12:00
|
|
|
from discord.ext import commands
|
2017-01-21 12:32:45 +13:00
|
|
|
|
|
|
|
from . import utils
|
|
|
|
|
2016-10-24 15:43:59 +13:00
|
|
|
from bs4 import BeautifulSoup as bs
|
2016-08-18 09:28:45 +12:00
|
|
|
|
|
|
|
import discord
|
2016-07-24 03:33:22 +12:00
|
|
|
import random
|
2016-08-16 08:43:18 +12:00
|
|
|
import re
|
2016-09-29 20:40:29 +13:00
|
|
|
import math
|
2016-11-02 08:59:45 +13:00
|
|
|
|
2016-08-18 09:28:45 +12:00
|
|
|
|
2016-07-23 23:59:02 +12:00
|
|
|
class Links:
    """This class contains all the commands that make HTTP requests

    In other words, all commands here rely on other URL's to complete their requests"""

    def __init__(self, bot):
        # The bot instance this cog is attached to
        self.bot = bot

    @commands.command(aliases=['g'])
    @utils.custom_perms(send_messages=True)
    async def google(self, ctx, *, query: str):
        """Searches google for a provided query

        EXAMPLE: !g Random cat pictures!

        RESULT: Links to sites with random cat pictures!"""
        await ctx.message.channel.trigger_typing()

        url = "https://www.google.com/search"

        # Turn safe filter on or off, based on whether or not this is a nsfw channel
        nsfw = await utils.channel_is_nsfw(ctx.message.channel)
        safe = 'off' if nsfw else 'on'

        params = {'q': query,
                  'safe': safe,
                  'hl': 'en',
                  'cr': 'countryUS'}

        # Our format we'll end up using to send to the channel
        fmt = ""

        # First make the request to google to get the results (as raw HTML text)
        data = await utils.request(url, payload=params, attr='text')

        if data is None:
            await ctx.send("I failed to connect to google! (That can happen??)")
            return

        # Convert to a BeautifulSoup element; results are h3 tags with class 'r'.
        # Take at most the first three of them.
        soup = bs(data, 'html.parser')

        for element in soup.find_all('h3', class_='r')[:3]:
            # Get the link's href tag, which looks like q=[url here]&sa
            # Use a lookahead and lookbehind to find this url exactly
            try:
                result_url = re.search('(?<=q=).*(?=&sa=)', element.find('a').get('href')).group(0)
            except AttributeError:
                # Either there was no anchor, or the href didn't match the expected shape
                await ctx.send("I couldn't find any results for {}!".format(query))
                return

            # Get the next sibling, find the span where the description is, and get the text from this.
            # BUGFIX: this previously used a bare `except:`; narrowed to the exceptions
            # actually raised when the sibling or span is missing.
            try:
                description = element.next_sibling.find('span', class_='st').text
            except (AttributeError, TypeError):
                description = ""

            # Add this to our text we'll use to send
            fmt += '\n\n**URL**: <{}>\n**Description**: {}'.format(result_url, description)

        # ROBUSTNESS: if the page had no parseable results at all, say so instead of
        # sending a header followed by nothing
        if not fmt:
            await ctx.send("I couldn't find any results for {}!".format(query))
            return

        fmt = "**Top 3 results for the query** _{}_:{}".format(query, fmt)
        await ctx.send(fmt)

    @commands.command(aliases=['yt'])
    @utils.custom_perms(send_messages=True)
    async def youtube(self, ctx, *, query: str):
        """Searches youtube for a provided query

        EXAMPLE: !youtube Cat videos!

        RESULT: Cat videos!"""
        await ctx.message.channel.trigger_typing()

        key = utils.youtube_key
        url = "https://www.googleapis.com/youtube/v3/search"
        params = {'key': key,
                  'part': 'snippet, id',
                  'type': 'video',
                  'q': query}

        data = await utils.request(url, payload=params)

        if data is None:
            await ctx.send("Sorry but I failed to connect to youtube!")
            return

        # BUGFIX: an error payload from the API has no 'items' key at all, which
        # previously raised an unhandled KeyError; treat it like "no results"
        try:
            result = data['items'][0]
        except (IndexError, KeyError):
            await ctx.send("I could not find any results with the search term {}".format(query))
            return

        result_url = "https://youtube.com/watch?v={}".format(result['id']['videoId'])
        title = result['snippet']['title']
        description = result['snippet']['description']

        fmt = "**Title:** {}\n\n**Description:** {}\n\n**URL:** <{}>".format(title, description, result_url)
        await ctx.send(fmt)

    @commands.command()
    @utils.custom_perms(send_messages=True)
    async def wiki(self, ctx, *, query: str):
        """Pulls the top match for a specific term from wikipedia, and returns the result

        EXAMPLE: !wiki Test

        RESULT: A link to the wikipedia article for the word test"""
        await ctx.message.channel.trigger_typing()

        # All we need to do is search for the term provided, so the action, list, and format never need to change
        base_url = "https://en.wikipedia.org/w/api.php"
        params = {"action": "query",
                  "list": "search",
                  "format": "json",
                  "srsearch": query}

        data = await utils.request(base_url, payload=params)

        if data is None:
            await ctx.send("Sorry but I failed to connect to Wikipedia!")
            return

        if len(data['query']['search']) == 0:
            await ctx.send("I could not find any results with that term, I tried my best :c")
            return

        # Wiki articles' URLs are in the format https://en.wikipedia.org/wiki/[Titlehere]
        # Replace spaces with %20
        url = "https://en.wikipedia.org/wiki/{}".format(data['query']['search'][0]['title'].replace(' ', '%20'))
        snippet = data['query']['search'][0]['snippet']
        # The next part replaces some of the HTML formatting that's provided
        # These are the only ones I've encountered so far through testing, there may be more though
        snippet = re.sub('<span class=\\"searchmatch\\">', '', snippet)
        snippet = re.sub('</span>', '', snippet)
        # BUGFIX: this substitution was a no-op (replacing '"' with '"'); the API
        # HTML-escapes quotes in snippets, so decode the &quot; entity instead
        snippet = re.sub('&quot;', '"', snippet)

        await ctx.send(
            "Here is the best match I found with the query `{}`:\nURL: <{}>\nSnippet: \n```\n{}```".format(query, url,
                                                                                                           snippet))

    @commands.command()
    @utils.custom_perms(send_messages=True)
    async def urban(self, ctx, *, msg: str):
        """Pulls the top urbandictionary.com definition for a term

        EXAMPLE: !urban a normal phrase

        RESULT: Probably something lewd; this is urban dictionary we're talking about"""
        await ctx.message.channel.trigger_typing()

        url = "http://api.urbandictionary.com/v0/define"
        params = {"term": msg}
        try:
            data = await utils.request(url, payload=params)
            if data is None:
                await ctx.send("Sorry but I failed to connect to urban dictionary!")
                return

            # List is the list of definitions found, if it's empty then nothing was found
            if len(data['list']) == 0:
                await ctx.send("No result with that term!")
            # If the list is not empty, use the first result and print it's defintion
            else:
                await ctx.send(data['list'][0]['definition'])
        # Urban dictionary has some long definitions, some might not be able to be sent
        except discord.HTTPException:
            await ctx.send('```\nError: Definition is too long for me to send```')
        # A KeyError means the response wasn't the expected json shape (connection issue)
        except KeyError:
            await ctx.send("Sorry but I failed to connect to urban dictionary!")

    @commands.command()
    @utils.custom_perms(send_messages=True)
    async def derpi(self, ctx, *search: str):
        """Provides a random image from the first page of derpibooru.org for the following term

        EXAMPLE: !derpi Rainbow Dash

        RESULT: A picture of Rainbow Dash!"""
        await ctx.message.channel.trigger_typing()

        if len(search) > 0:
            url = 'https://derpibooru.org/search.json'

            # Ensure a filter was not provided, as we either want to use our own, or none (for safe pics)
            query = ' '.join(value for value in search if not re.search('&?filter_id=[0-9]+', value))
            params = {'q': query}

            nsfw = await utils.channel_is_nsfw(ctx.message.channel)
            # If this is a nsfw channel, we just need to tack on 'explicit' to the terms
            # Also use the custom filter that I have setup, that blocks some certain tags
            # If the channel is not nsfw, we don't need to do anything, as the default filter blocks explicit
            if nsfw:
                params['q'] += ", (explicit OR suggestive)"
                params['filter_id'] = 95938
            else:
                params['q'] += ", safe"
            # Lets filter out some of the "crap" that's on derpibooru by requiring an image with a score higher than 15
            params['q'] += ', score.gt:15'

            try:
                # Get the response from derpibooru and parse the 'search' result from it
                data = await utils.request(url, payload=params)

                if data is None:
                    await ctx.send("Sorry but I failed to connect to Derpibooru!")
                    return
                results = data['search']
            except KeyError:
                await ctx.send("No results with that search term, {0}!".format(ctx.message.author.mention))
                return

            # The first request we've made ensures there are results
            # Now we can get the total count from that, and make another request based on the number of pages as well
            if len(results) > 0:
                # Get the total number of pages
                pages = math.ceil(data['total'] / len(results))
                # Set a new paramater to set which page to use, randomly based on the number of pages
                params['page'] = random.SystemRandom().randint(1, pages)
                data = await utils.request(url, payload=params)
                if data is None:
                    await ctx.send("Sorry but I failed to connect to Derpibooru!")
                    return
                # Now get the results again
                results = data['search']

                # ROBUSTNESS: the randomly chosen page can come back empty, which would
                # previously crash when picking a random index from an empty list
                if not results:
                    await ctx.send("No results with that search term, {0}!".format(ctx.message.author.mention))
                    return

                # Get the image link from the now random page'd and random result from that page
                image_link = 'https://derpibooru.org/{}'.format(random.SystemRandom().choice(results)['id'])
            else:
                await ctx.send("No results with that search term, {0}!".format(ctx.message.author.mention))
                return
        else:
            # If no search term was provided, search for a random image
            # .url will be the URL we end up at, not the one requested.
            # https://derpibooru.org/images/random redirects to a random image, so this is exactly what we want
            image_link = await utils.request('https://derpibooru.org/images/random', attr='url')
        await ctx.send(image_link)

    @commands.command()
    @utils.custom_perms(send_messages=True)
    async def e621(self, ctx, *, tags: str):
        """Searches for a random image from e621.net

        Format for the search terms need to be 'search term 1, search term 2, etc.'
        If the channel the command is ran in, is registered as a nsfw channel, this image will be explicit

        EXAMPLE: !e621 dragon

        RESULT: A picture of a dragon (hopefully, screw your tagging system e621)"""
        await ctx.message.channel.trigger_typing()

        # This changes the formatting for queries, so we don't
        # Have to use e621's stupid formatting when using the command
        tags = tags.replace(' ', '_')
        tags = tags.replace(',_', ' ')

        url = 'https://e621.net/post/index.json'
        params = {'limit': 320,
                  'tags': tags}

        nsfw = await utils.channel_is_nsfw(ctx.message.channel)

        # e621 by default does not filter explicit content, so tack on
        # safe/explicit based on if this channel is nsfw or not
        params['tags'] += " rating:explicit" if nsfw else " rating:safe"

        data = await utils.request(url, payload=params)

        if data is None:
            await ctx.send("Sorry, I had trouble connecting at the moment; please try again later")
            return

        # Try to find an image from the list. If there were no results, we're going to attempt to find
        # A number between (0,-1) and receive a ValueError.
        # The response should be in a list format, so we'll end up getting a key error if the response was in json
        # i.e. it responded with a 404/504/etc.
        try:
            rand_image = data[random.SystemRandom().randint(0, len(data) - 1)]['file_url']
            await ctx.send(rand_image)
        except (ValueError, KeyError):
            await ctx.send("No results with that tag {}".format(ctx.message.author.mention))
            return
|
2016-07-24 00:01:16 +12:00
|
|
|
|
2016-08-18 09:28:45 +12:00
|
|
|
|
2016-07-24 00:01:16 +12:00
|
|
|
def setup(bot):
    """Extension entry point used by discord.py's loader; registers the Links cog."""
    cog = Links(bot)
    bot.add_cog(cog)
|