tahoma2d/toonz/sources/common/tfx/tpredictivecachemanager.cpp

221 lines
7.6 KiB
C++
Raw Normal View History

2016-03-19 06:57:51 +13:00
#include "trenderer.h"
#include "trasterfx.h"
#include <algorithm>
#include "tpredictivecachemanager.h"
//************************************************************************************************
// Preliminaries
//************************************************************************************************
class TPredictiveCacheManagerGenerator final
2016-06-15 18:43:10 +12:00
: public TRenderResourceManagerGenerator {
2016-03-19 06:57:51 +13:00
public:
2016-06-15 18:43:10 +12:00
TPredictiveCacheManagerGenerator() : TRenderResourceManagerGenerator(true) {}
2016-03-19 06:57:51 +13:00
2016-06-19 20:06:29 +12:00
TRenderResourceManager *operator()(void) override {
2016-06-15 18:43:10 +12:00
return new TPredictiveCacheManager;
}
2016-03-19 06:57:51 +13:00
};
2016-06-15 18:43:10 +12:00
// File-scope registration: hooks TPredictiveCacheManager (via its generator)
// into the render-resource managers system, declaring that it must be built
// after TFxCacheManager's dependencies.
MANAGER_FILESCOPE_DECLARATION_DEP(TPredictiveCacheManager,
                                  TPredictiveCacheManagerGenerator,
                                  TFxCacheManager::deps())
2016-03-19 06:57:51 +13:00
//-------------------------------------------------------------------------
2016-06-15 18:43:10 +12:00
//! Returns the manager associated with the current render id.
TPredictiveCacheManager *TPredictiveCacheManager::instance() {
  // Managers are looked up per render id rather than per TRenderer instance.
  TRenderResourceManager *manager =
      TPredictiveCacheManager::gen()->getManager(TRenderer::renderId());
  return static_cast<TPredictiveCacheManager *>(manager);
}
//************************************************************************************************
// TPredictiveCacheManager::Imp definition
//************************************************************************************************
//=======================
// PredictionData
//-----------------------
struct PredictionData {
2016-06-15 18:43:10 +12:00
const ResourceDeclaration *m_decl;
int m_usageCount;
2016-03-19 06:57:51 +13:00
2016-06-15 18:43:10 +12:00
PredictionData(const ResourceDeclaration *declaration)
: m_decl(declaration), m_usageCount(1) {}
2016-03-19 06:57:51 +13:00
};
//============================================================================================
//=====================================
// TPredictiveCacheManager::Imp
//-------------------------------------
2016-06-15 18:43:10 +12:00
class TPredictiveCacheManager::Imp {
public:
  int m_renderStatus;  // current TRenderer status (IDLE/COMPUTING/TESTRUN)
  bool m_enabled;      // whether precomputing is active for this renderer

  std::map<TCacheResourceP, PredictionData> m_resources;
  QMutex m_mutex;

public:
  Imp()
      : m_renderStatus(TRenderer::IDLE)
      , m_enabled(TRenderer::instance().isPrecomputingEnabled()) {}

  // Routes a resource request to the handler matching the render phase.
  void run(TCacheResourceP &resource, const std::string &alias, const TFxP &fx,
           double frame, const TRenderSettings &rs,
           ResourceDeclaration *resData) {
    if (m_renderStatus == TRenderer::TESTRUN)
      getResourceTestRun(resource, alias, fx, frame, rs, resData);
    else if (m_renderStatus == TRenderer::IDLE ||
             m_renderStatus == TRenderer::COMPUTING)
      getResourceComputing(resource, alias, fx, frame, rs, resData);
  }

private:
  void getResourceTestRun(TCacheResourceP &resource, const std::string &alias,
                          const TFxP &fx, double frame,
                          const TRenderSettings &rs,
                          ResourceDeclaration *resData);
  void getResourceComputing(TCacheResourceP &resource, const std::string &alias,
                            const TFxP &fx, double frame,
                            const TRenderSettings &rs,
                            ResourceDeclaration *resData);
};
//************************************************************************************************
// TPredictiveCacheManager methods
//************************************************************************************************
// Builds the pimpl; Imp's constructor snapshots the precomputing flag.
TPredictiveCacheManager::TPredictiveCacheManager()
    : m_imp(new Imp) {}
2016-03-19 06:57:51 +13:00
//---------------------------------------------------------------------------
2016-06-15 18:43:10 +12:00
// Out-of-line destructor: defined here so the pimpl's deleter sees the
// complete Imp type. NOTE(review): assumes m_imp is a smart pointer declared
// in the header; if it is a raw pointer this body leaks Imp — confirm.
TPredictiveCacheManager::~TPredictiveCacheManager() {}
2016-03-19 06:57:51 +13:00
//---------------------------------------------------------------------------
2016-06-15 18:43:10 +12:00
// Intentionally a no-op: this manager does not track tile size. The
// parameter name is commented out to silence unused-parameter warnings.
void TPredictiveCacheManager::setMaxTileSize(int /*maxTileSize*/) {}
2016-03-19 06:57:51 +13:00
//---------------------------------------------------------------------------
2016-06-15 18:43:10 +12:00
// Intentionally a no-op: this manager does not track bits-per-pixel. The
// parameter name is commented out to silence unused-parameter warnings.
void TPredictiveCacheManager::setBPP(int /*bpp*/) {}
2016-03-19 06:57:51 +13:00
//---------------------------------------------------------------------------
2016-06-15 18:43:10 +12:00
// Public entry point: forwards the cache-resource request to the pimpl,
// which dispatches on the current render status.
void TPredictiveCacheManager::getResource(TCacheResourceP &resource,
                                          const std::string &alias,
                                          const TFxP &fx, double frame,
                                          const TRenderSettings &rs,
                                          ResourceDeclaration *resData) {
  // When precomputing is disabled for this renderer there is nothing to do.
  if (m_imp->m_enabled) m_imp->run(resource, alias, fx, frame, rs, resData);
}
//************************************************************************************************
// Notification-related functions
//************************************************************************************************
void TPredictiveCacheManager::Imp::getResourceTestRun(
2016-06-15 18:43:10 +12:00
TCacheResourceP &resource, const std::string &alias, const TFxP &fx,
double frame, const TRenderSettings &rs, ResourceDeclaration *resData) {
assert(resData && resData->m_rawData);
if (!(resData && resData->m_rawData))
// This is a very rare case. I've seen it happen once in a 'pathologic' case
// which involved affines truncation while building aliases.
// The rendering system didn't expect the truncated part 'resurface' in a
// downstream fx with a slightly different affine alias.
// TODO: Affines should be coded completely in the aliases... in a compact
// way though.
return;
if (!resource) resource = TCacheResourceP(alias, true);
// Lock against concurrent threads
// QMutexLocker locker(&m_mutex); //preComputing is currently
// single-threaded
std::map<TCacheResourceP, PredictionData>::iterator it =
m_resources.find(resource);
if (it != m_resources.end())
it->second.m_usageCount++;
else {
// Already initializes usageCount at 1
m_resources.insert(std::make_pair(resource, PredictionData(resData))).first;
}
2016-03-19 06:57:51 +13:00
}
//---------------------------------------------------------------------------
void TPredictiveCacheManager::Imp::getResourceComputing(
2016-06-15 18:43:10 +12:00
TCacheResourceP &resource, const std::string &alias, const TFxP &fx,
double frame, const TRenderSettings &rs, ResourceDeclaration *resData) {
// If there is no declaration data, either the request can be resolved in one
// computation code (therefore it is uninteresting for us), or it was never
// declared.
// Anyway, return.
if (!resData) return;
2016-03-19 06:57:51 +13:00
2016-06-15 18:43:10 +12:00
// NO! The refCount is dynamically depleted - could become 0 from n...
// assert(!(resData->m_tiles.size() == 1 && resData->m_tiles[0].m_refCount ==
// 1));
2016-03-19 06:57:51 +13:00
2016-06-15 18:43:10 +12:00
if (!resource) resource = TCacheResourceP(alias);
2016-03-19 06:57:51 +13:00
2016-06-15 18:43:10 +12:00
if (!resource) return;
2016-03-19 06:57:51 +13:00
2016-06-15 18:43:10 +12:00
// Lock against concurrent threads
QMutexLocker locker(&m_mutex);
2016-03-19 06:57:51 +13:00
2016-06-15 18:43:10 +12:00
std::map<TCacheResourceP, PredictionData>::iterator it =
m_resources.find(resource);
2016-03-19 06:57:51 +13:00
2016-06-15 18:43:10 +12:00
if (it == m_resources.end()) return;
2016-03-19 06:57:51 +13:00
2016-06-15 18:43:10 +12:00
if (--it->second.m_usageCount <= 0) m_resources.erase(it);
2016-03-19 06:57:51 +13:00
}
//---------------------------------------------------------------------------
2016-06-15 18:43:10 +12:00
// Records the renderer's new status (TRenderer::IDLE/COMPUTING/TESTRUN);
// Imp::run() dispatches resource requests on this value.
void TPredictiveCacheManager::onRenderStatusStart(int renderStatus) {
  m_imp->m_renderStatus = renderStatus;
2016-03-19 06:57:51 +13:00
}
//---------------------------------------------------------------------------
2016-06-15 18:43:10 +12:00
void TPredictiveCacheManager::onRenderStatusEnd(int renderStatus) {
switch (renderStatus) {
case TRenderer::TESTRUN:
// All resources which have just 1 computation tile, which is also
// referenced
// only once, are released.
std::map<TCacheResourceP, PredictionData>::iterator it;
for (it = m_imp->m_resources.begin(); it != m_imp->m_resources.end();) {
const ResourceDeclaration *decl = it->second.m_decl;
if (decl->m_tiles.size() == 1 && decl->m_tiles[0].m_refCount == 1) {
std::map<TCacheResourceP, PredictionData>::iterator jt = it++;
m_imp->m_resources.erase(jt);
} else
it++;
}
}
2016-03-19 06:57:51 +13:00
}