Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Enhancement: Improving memory efficiency by eliminating the vtable in the managed class. #876

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
165 changes: 149 additions & 16 deletions include/dpp/cache.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,20 +21,116 @@
************************************************************************************/

#pragma once
#include <dpp/export.h>
#include <dpp/snowflake.h>
#include <dpp/exception.h>
#include <dpp/channel.h>
#include <dpp/managed.h>
#include <unordered_map>
#include <dpp/export.h>
#include <dpp/emoji.h>
#include <dpp/guild.h>
#include <dpp/user.h>
#include <dpp/role.h>
#include <typeindex>
#include <mutex>
#include <shared_mutex>

namespace dpp {

class cache_registry;

extern DPP_EXPORT std::unordered_map<std::type_index, cache_registry*> regs;
extern DPP_EXPORT std::unordered_map<managed*, time_t> deletion_queue;
extern DPP_EXPORT std::mutex deletion_mutex;

template<typename T> class cache;

/**
 * @brief The `cache_registry` class defines an abstract base class for cache objects.
 *
 * It provides a mechanism for registering, managing, and performing garbage collection
 * on cache objects. Cache objects must inherit from this class and implement the
 * required functions.
 */
class cache_registry {
public:
	/**
	 * @brief Pure virtual function for performing garbage collection.
	 */
	virtual void garbage_collection() = 0;

	/**
	 * @brief Pure virtual function to get the type index for a cache.
	 * @return Type index representing the cache's type.
	 */
	virtual std::type_index get_type_index() const = 0;

	/**
	 * @brief Virtual destructor so a derived cache can be destroyed safely
	 * through a cache_registry pointer.
	 */
	virtual ~cache_registry() = default;

	/**
	 * @brief Static method to register a cache object.
	 * @param registry A pointer to the cache_registry object to register.
	 * @throw cache_exception if a cache of the same type is already registered.
	 */
	static void register_cache(cache_registry* registry) {
		/* Single map operation: emplace fails (inserted == false) when the
		 * type is already present, replacing the previous find-then-insert
		 * pair of lookups. */
		auto [it, inserted] = registries().emplace(registry->get_type_index(), registry);
		if (!inserted) {
			throw cache_exception{ "Sorry, but there was already a cache by that type that has been registered." };
		}
	}

	/**
	 * @brief Static method to call garbage collection for all registered caches.
	 * This method invokes garbage collection on all registered cache objects.
	 */
	static void call_garbage_collection() {
		for (auto& [key, value] : registries()) {
			value->garbage_collection();
		}
	}

	/**
	 * @brief Static method to retrieve a cache object by type, creating and
	 * registering one on first use.
	 *
	 * @tparam T The type of the cache to retrieve.
	 * @return A pointer to the cache object of type T; never nullptr.
	 */
	template <typename T>
	static cache<T>* get_cache() {
		/* The registry map is keyed by std::type_index, so look the type up
		 * directly instead of linearly scanning every entry. */
		const std::type_index type_idx{ typeid(T) };
		auto it = registries().find(type_idx);
		if (it != registries().end()) {
			return static_cast<cache<T>*>(it->second);
		}
		/* Not found: construct a new cache. The cache<T> constructor registers
		 * itself via register_cache(), so calling register_cache() again here
		 * (as the previous code did) would always throw a duplicate-type
		 * cache_exception and make the miss path unusable. */
		return new cache<T>{};
	}

private:
	/**
	 * @brief Accessor for the global registry of cache objects.
	 * This map holds pointers to all registered cache objects, keyed by type.
	 */
	static std::unordered_map<std::type_index, cache_registry*>& registries() {
		return regs;
	}
};

/** forward declaration */
class guild_member;
class channel;
class guild;
class emoji;
class user;
class role;

#define cache_decl(type, setter, getter, counter) /** Find an object in the cache by id. @return type* Pointer to the object or nullptr when it's not found */ DPP_EXPORT class type * setter (snowflake id); DPP_EXPORT cache<class type> * getter (); /** Get the amount of cached type objects. */ DPP_EXPORT uint64_t counter ();

/* Declare major caches */
cache_decl(user, find_user, get_user_cache, get_user_count);
cache_decl(guild, find_guild, get_guild_cache, get_guild_count);
cache_decl(role, find_role, get_role_cache, get_role_count);
cache_decl(channel, find_channel, get_channel_cache, get_channel_count);
cache_decl(emoji, find_emoji, get_emoji_cache, get_emoji_count);

/**
* @brief A cache object maintains a cache of dpp::managed objects.
Expand All @@ -47,7 +143,7 @@ class guild_member;
* designed with thread safety in mind.
* @tparam T class type to store, which should be derived from dpp::managed.
*/
template<class T> class cache {
template<class T> class cache : public cache_registry {
private:
/**
* @brief Mutex to protect the cache
Expand All @@ -62,12 +158,18 @@ template<class T> class cache {
std::unordered_map<snowflake, T*>* cache_map;
public:

/**
 * @brief Report the type index identifying the object type stored in this cache.
 * @return std::type_index describing T.
 */
std::type_index get_type_index() const override {
	/* std::type_index is implicitly constructible from std::type_info. */
	return typeid(T);
}

/**
 * @brief Construct a new cache object.
 *
 * Caches must contain classes derived from dpp::managed.
 */
cache() {
	/* Initialize the map before registering: register_cache() publishes this
	 * object in the global registry, and registering first would expose a cache
	 * whose cache_map pointer is still uninitialized (and, if register_cache()
	 * threw on a duplicate type, leave it dangling). */
	cache_map = new std::unordered_map<snowflake, T*>;
	cache_registry::register_cache(this);
}

Expand Down Expand Up @@ -241,6 +343,50 @@ template<class T> class cache {
cache_map = n;
}

/** Run garbage collection across all caches removing deleted items
* that have been deleted over 60 seconds ago.
*/
/* Because other threads and systems may run for a short while after an event is received, we don't immediately
* delete pointers when objects are replaced. We put them into a queue, and periodically delete pointers in the
* queue. This also rehashes unordered_maps to ensure they free their memory.
*/
inline void garbage_collection() {
time_t now = time(nullptr);
bool repeat = false;
{
std::lock_guard<std::mutex> delete_lock(deletion_mutex);
do {
repeat = false;
for (auto g = deletion_queue.begin(); g != deletion_queue.end(); ++g) {
if (now > g->second + 60) {
delete static_cast<const T*>(g->first);
deletion_queue.erase(g);
repeat = true;
break;
}
}
} while (repeat);
if (deletion_queue.size() == 0) {
deletion_queue = {};
}
}
if constexpr (std::is_same_v<emoji, T>) {
dpp::get_emoji_cache()->rehash();
}
else if constexpr (std::is_same_v<role, T>) {
dpp::get_role_cache()->rehash();
}
else if constexpr (std::is_same_v<channel, T>) {
dpp::get_channel_cache()->rehash();
}
else if constexpr (std::is_same_v<guild, T>) {
dpp::get_guild_cache()->rehash();
}
else if constexpr (std::is_same_v<user, T>) {
dpp::get_user_cache()->rehash();
}
}

/**
* @brief Get "real" size in RAM of the cached objects
*
Expand All @@ -255,19 +401,6 @@ template<class T> class cache {

};

/** Run garbage collection across all caches removing deleted items
* that have been deleted over 60 seconds ago.
*/
void DPP_EXPORT garbage_collection();

#define cache_decl(type, setter, getter, counter) /** Find an object in the cache by id. @return type* Pointer to the object or nullptr when it's not found */ DPP_EXPORT class type * setter (snowflake id); DPP_EXPORT cache<class type> * getter (); /** Get the amount of cached type objects. */ DPP_EXPORT uint64_t counter ();

/* Declare major caches */
cache_decl(user, find_user, get_user_cache, get_user_count);
cache_decl(guild, find_guild, get_guild_cache, get_guild_count);
cache_decl(role, find_role, get_role_cache, get_role_count);
cache_decl(channel, find_channel, get_channel_cache, get_channel_count);
cache_decl(emoji, find_emoji, get_emoji_cache, get_emoji_count);

} // namespace dpp

2 changes: 1 addition & 1 deletion include/dpp/emoji.h
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,7 @@ class DPP_EXPORT emoji : public managed, public json_interface<emoji> {
/**
* @brief Destroy the emoji object
*/
~emoji() override = default;
~emoji() = default;

/**
* @brief Copy assignment operator, copies another emoji's data
Expand Down
10 changes: 6 additions & 4 deletions include/dpp/managed.h
Original file line number Diff line number Diff line change
Expand Up @@ -42,10 +42,6 @@ namespace dpp {
* @param nid ID to set
*/
managed(const snowflake nid = 0);
/**
* @brief Destroy the managed object
*/
virtual ~managed() = default;

/**
* @brief Get the creation time of this object according to Discord.
Expand All @@ -72,6 +68,12 @@ namespace dpp {
* @return false objects are the same id
*/
bool operator!=(const managed& other) const noexcept;

protected:
/**
* @brief Destroy the managed object
*/
~managed() = default;
RealTimeChris marked this conversation as resolved.
Show resolved Hide resolved
};

} // namespace dpp
44 changes: 4 additions & 40 deletions src/dpp/cache.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -28,57 +28,21 @@

namespace dpp {

std::unordered_map<std::type_index, cache_registry*> regs;
std::unordered_map<managed*, time_t> deletion_queue;
std::mutex deletion_mutex;

#define cache_helper(type, cache_name, setter, getter, counter) \
cache<type>* cache_name = nullptr; \
type * setter (snowflake id) { \
return cache_name ? ( type * ) cache_name ->find(id) : nullptr; \
return ( type * ) cache_registry::get_cache<type>()->find(id); \
} \
cache<type>* getter () { \
if (! cache_name ) { \
cache_name = new cache<type>(); \
} \
return cache_name ; \
return cache_registry::get_cache<type>(); \
} \
uint64_t counter () { \
return ( cache_name ? cache_name ->count() : 0 ); \
return cache_registry::get_cache<type>()->count() ; \
}


/* Because other threads and systems may run for a short while after an event is received, we don't immediately
* delete pointers when objects are replaced. We put them into a queue, and periodically delete pointers in the
* queue. This also rehashes unordered_maps to ensure they free their memory.
*/
void garbage_collection() {
time_t now = time(nullptr);
bool repeat = false;
{
std::lock_guard<std::mutex> delete_lock(deletion_mutex);
do {
repeat = false;
for (auto g = deletion_queue.begin(); g != deletion_queue.end(); ++g) {
if (now > g->second + 60) {
delete g->first;
deletion_queue.erase(g);
repeat = true;
break;
}
}
} while (repeat);
if (deletion_queue.size() == 0) {
deletion_queue = {};
}
}
dpp::get_user_cache()->rehash();
dpp::get_channel_cache()->rehash();
dpp::get_guild_cache()->rehash();
dpp::get_role_cache()->rehash();
dpp::get_emoji_cache()->rehash();
}


cache_helper(user, user_cache, find_user, get_user_cache, get_user_count);
cache_helper(channel, channel_cache, find_channel, get_channel_cache, get_channel_count);
cache_helper(role, role_cache, find_role, get_role_cache, get_role_count);
Expand Down
2 changes: 1 addition & 1 deletion src/dpp/discordclient.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -505,7 +505,7 @@ void discord_client::one_second_timer()
creator->tick_timers();

if ((time(nullptr) % 60) == 0) {
dpp::garbage_collection();
cache_registry::call_garbage_collection();
}
}
}
Expand Down
7 changes: 4 additions & 3 deletions src/unittest/test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -628,8 +628,9 @@ Markdown lol \\|\\|spoiler\\|\\| \\~\\~strikethrough\\~\\~ \\`small \\*code\\* b

{
set_test(TS, false);
dpp::managed m(189759562910400512);
set_test(TS, ((uint64_t) m.get_creation_time()) == 1465312605);
dpp::user m{};
m.id = 189759562910400512;
set_test(TS, ((uint64_t)m.get_creation_time()) == 1465312605);
}

{
Expand Down Expand Up @@ -1349,7 +1350,7 @@ Markdown lol \\|\\|spoiler\\|\\| \\~\\~strikethrough\\~\\~ \\`small \\*code\\* b

bool message_edit_tested = false;
bot.on_message_update([&](const dpp::message_update_t &event) {
if (event.msg.author == bot.me.id) {
if (event.msg.author.id == bot.me.id) {
RealTimeChris marked this conversation as resolved.
Show resolved Hide resolved
if (event.msg.content == "test edit" && !message_edit_tested) {
message_edit_tested = true;
set_test(EDITEVENT, true);
Expand Down
Loading