2016-08-31 07:28:58 +12:00
|
|
|
import ruamel.yaml as yaml
|
2016-07-09 13:27:19 +12:00
|
|
|
import asyncio
|
2016-08-31 07:21:17 +12:00
|
|
|
import rethinkdb as r
|
2016-09-01 15:24:21 +12:00
|
|
|
import pendulum
|
2016-07-09 13:27:19 +12:00
|
|
|
|
|
|
|
# Event loop used to schedule each Cache object's background refresh task
loop = asyncio.get_event_loop()
|
|
|
|
|
2016-08-19 13:56:41 +12:00
|
|
|
# Ensure that the required config.yml file actually exists
try:
    with open("config.yml", "r") as f:
        # safe_load: never construct arbitrary Python objects from the YAML
        # file, and return plain dicts/lists, which is all the lookups below
        # (global_config[...] / .get(...)) need.
        global_config = yaml.safe_load(f)
except FileNotFoundError:
    print("You have no config file setup! Please use config.yml.sample to setup a valid config file")
    quit()
|
2016-07-11 09:57:52 +12:00
|
|
|
|
2016-08-31 07:51:04 +12:00
|
|
|
# A bot token is mandatory for connecting at all; bail out early with a
# helpful message when the config file does not provide one.
if "bot_token" not in global_config:
    print("You have no bot_token saved, this is a requirement for running a bot.")
    print("Please use config.yml.sample to setup a valid config file")
    quit()
bot_token = global_config["bot_token"]
|
|
|
|
|
|
|
|
# The owner id gates certain owner-only commands; refuse to start without it.
if "owner_id" not in global_config:
    print("You have no owner_id saved! You're not going to be able to run certain commands without this.")
    print("Please use config.yml.sample to setup a valid config file")
    quit()
owner_ids = global_config["owner_id"]
|
|
|
|
|
2016-09-01 07:09:11 +12:00
|
|
|
|
|
|
|
# A single cache entry: it remembers which database table (key) it mirrors,
# the values last pulled for that table, and when that pull happened.
class Cache:
    def __init__(self, key):
        # Table name this entry mirrors
        self.key = key
        # Last-known content; starts empty until the first load finishes
        self.values = {}
        # Timestamp of the most recent refresh (set now so staleness
        # comparisons work even before the first load completes)
        self.refreshed = pendulum.utcnow()
        # Schedule the initial load in the background rather than blocking
        loop.create_task(self.update())

    async def update(self):
        # Pull fresh content for our table, then stamp the refresh time
        fresh = await _get_content(self.key)
        self.values = fresh
        self.refreshed = pendulum.utcnow()
|
2016-08-31 12:44:24 +12:00
|
|
|
|
|
|
|
|
2016-08-19 13:56:41 +12:00
|
|
|
# Default bot's description
bot_description = global_config.get("description")
# Bot's default prefix for commands
default_prefix = global_config.get("command_prefix", "!")

# The keys for bots.discord.pw and carbonitex
discord_bots_key = global_config.get("discord_bots_key", "")
carbon_key = global_config.get("carbon_key", "")

# The invite link for the server made for the bot
dev_server = global_config.get("dev_server", "")

# The variables needed for sharding
shard_count = global_config.get("shard_count", "")
shard_id = global_config.get("shard_id", "")

# A list of all the outputs for the battle command
battle_wins = global_config.get("battleWins", [])

# The default status the bot will use
default_status = global_config.get("default_status", "")

# The steam API key
steam_key = global_config.get("steam_key", "")

# RethinkDB connection settings: hostname, database name, certificate,
# port, and the user/password pair to authenticate with
db_host = global_config.get("db_host", "localhost")
db_name = global_config.get("db_name", "Discord_Bot")
db_cert = global_config.get("db_cert", "")
db_port = global_config.get("db_port", 28015)
db_user = global_config.get("db_user", "admin")
db_pass = global_config.get("db_pass", "")

# Everything needed to connect is now set, so bundle it into a single
# kwargs dict we can unpack into r.connect().
# (Add ssl={'ca_certs': db_cert} here if the server requires TLS.)
db_opts = dict(host=db_host, db=db_name, port=db_port, user=db_user, password=db_pass)
|
2016-09-01 07:09:11 +12:00
|
|
|
|
2016-09-01 07:17:33 +12:00
|
|
|
# Every database table the bot keeps a cached copy of
possible_keys = [
    'prefixes', 'battling', 'battle_records', 'boops', 'server_alerts',
    'user_notifications', 'nsfw_channels', 'custom_permissions', 'rules',
    'overwatch', 'picarto', 'twitch', 'strawpolls', 'tags', 'tictactoe',
    'bot_data',
]

# Map of table name -> Cache object; each Cache schedules its own
# initial load on the event loop when constructed
cache = {key: Cache(key) for key in possible_keys}

sharded_data = {}
|
2016-09-01 09:45:00 +12:00
|
|
|
|
2016-09-01 07:09:11 +12:00
|
|
|
|
|
|
|
def command_prefix(bot, message):
    """Return the command prefix for the server *message* was sent in.

    We do not want to make a query for every message that is sent, so
    assume the prefix is in cache, or it doesn't exist.  If the prefix
    does exist in the database but isn't in our cache, too bad — it is
    not worth a query for every single message the bot detects to fix.
    """
    try:
        prefix = cache['prefixes'].values.get(message.server.id)
        return prefix or default_prefix
    except (KeyError, AttributeError):
        # KeyError: no 'prefixes' entry in the cache dict yet.
        # AttributeError: the message has no server (private message, so
        # message.server is None), or the cache entry was clobbered with a
        # plain dict by another code path — either way, fall back.
        return default_prefix
|
2016-09-01 07:09:11 +12:00
|
|
|
|
2016-07-11 09:57:52 +12:00
|
|
|
|
2016-08-31 10:33:37 +12:00
|
|
|
async def save_content(table: str, content):
    """Overwrite *table* in the database with *content*, then refresh the cache.

    Everything that is rewritten is handled in the calling code itself, so
    the save is a simple delete-then-insert of the whole table.
    """
    # We need to make sure we're using asyncio
    r.set_loop_type("asyncio")
    # Just connect to the database
    conn = await r.connect(**db_opts)
    # try/finally guarantees the connection is closed even when one of the
    # queries below raises (the original code leaked the connection then)
    try:
        # We need to make at least one query to ensure the table exists, so
        # attempt to create it as our first query
        try:
            await r.table_create(table).run(conn)
        except r.ReqlOpFailedError:
            # Table already existed — that's fine
            pass
        # The table exists now, so replace its content wholesale
        await r.table(table).delete().run(conn)
        await r.table(table).insert(content).run(conn)
    finally:
        await conn.close()

    # Now that we've saved the new content, we should update our cache
    cached = cache.get(table)
    # While this should theoretically never happen, we just want to make sure
    # the entry is a usable Cache object before refreshing it in place
    if cached is None or isinstance(cached, dict) or len(cached.values) == 0:
        cache[table] = Cache(table)
    else:
        await cached.update()
|
2016-08-31 07:21:17 +12:00
|
|
|
|
|
|
|
|
2016-09-01 15:24:21 +12:00
|
|
|
async def get_content(key: str):
    """Return the content for *key*, preferring the in-memory cache.

    The cache is used when the entry is a populated Cache object; it is
    refreshed in place when it is more than an hour old.  A missing, empty,
    or corrupted entry (e.g. a plain dict left behind by an older code path)
    falls back to a direct database fetch.
    """
    cached = cache.get(key)
    if isinstance(cached, Cache) and len(cached.values) > 0:
        # Refresh in place when stale.  Crucially this keeps the Cache
        # object (and its refresh timestamp) in the cache dict — the old
        # code replaced it with a bare dict, which destroyed the timestamp
        # and forced a database query on every subsequent call.
        if (pendulum.utcnow() - cached.refreshed).hours >= 1:
            await cached.update()
        return cached.values
    # Entry was missing, empty, or not a Cache object: something went awry,
    # so get the content from the database instead
    value = await _get_content(key)
    cache[key] = value
    return value
|
|
|
|
|
2016-09-01 16:46:22 +12:00
|
|
|
|
2016-09-01 07:09:11 +12:00
|
|
|
# This is our internal method to get content from the database
async def _get_content(key: str):
    """Fetch the single document stored in table *key*, or {} on failure."""
    # We need to make sure we're using asyncio
    r.set_loop_type("asyncio")
    # Just connect to the database
    conn = await r.connect(**db_opts)
    # try/finally closes the connection on every path — the original only
    # closed it for the two expected exceptions, leaking it on any other
    # driver error raised by the query
    try:
        # We should only ever get one result per table, so use it if it
        # exists, otherwise return an empty dict
        cursor = await r.table(key).run(conn)
        items = list(cursor.items)[0]
    except (IndexError, r.ReqlOpFailedError):
        # IndexError: table was empty; ReqlOpFailedError: table missing
        return {}
    finally:
        await conn.close()
    # RethinkDB stores an internal id per document; drop it and return the
    # rest (pop with default so a missing id can't raise)
    items.pop('id', None)
    return items
|