core: more thread safety

Vaxry 2024-02-20 01:30:47 +00:00
parent cb81a977c4
commit 0bcdca6e7b
3 changed files with 14 additions and 9 deletions

@@ -124,6 +124,10 @@ void CHyprlock::run() {
     std::thread pollThr([this, &pollfds]() {
         while (1) {
             int ret = poll(pollfds, 1, 5000 /* 5 seconds, reasonable. It's because we might need to terminate */);
+
+            if (m_bTerminate)
+                break;
+
             if (ret < 0) {
                 Debug::log(CRIT, "[core] Polling fds failed with {}", errno);
                 m_bTerminate = true;
@@ -138,9 +142,6 @@ void CHyprlock::run() {
                 }
             }
 
-            if (m_bTerminate)
-                break;
-
             if (ret != 0) {
                 Debug::log(TRACE, "[core] got poll event");
                 std::lock_guard<std::mutex> lg2(m_sLoopState.eventLoopMutex);
@@ -236,6 +237,9 @@ void CHyprlock::run() {
     std::lock_guard<std::mutex> lg2(m_sLoopState.timerRequestMutex);
     m_sLoopState.timerCV.notify_all();
 
+    g_pRenderer->asyncResourceGatherer->notify();
+
+    wl_display_disconnect(m_sWaylandState.display);
 
     Debug::log(LOG, "Reached the end, exiting");
 }
@@ -464,10 +468,6 @@ void CHyprlock::unlockSession() {
     Debug::log(LOG, "Unlocked, exiting!");
 
     m_bTerminate = true;
-
-    wl_display_roundtrip(m_sWaylandState.display);
-
-    exit(0);
 }
 
 void CHyprlock::onLockLocked() {
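
With exit(0) gone from unlockSession(), run() itself now has to wake every thread that may still be blocked (the timer CV, the asset gatherer) before it disconnects the display. As a reference for that pattern only, here is a minimal, self-contained sketch of a flag-plus-condition-variable shutdown; every name in it is illustrative, and it is not hyprlock's code.

// Sketch: the flag is flipped under the same mutex the waiter uses, so the
// notification cannot fall between the waiter's predicate check and its wait().
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

std::mutex              cvMutex;
std::condition_variable cv;
bool                    terminateFlag = false; // guarded by cvMutex

void worker() {
    std::unique_lock lk(cvMutex);
    cv.wait(lk, [] { return terminateFlag; }); // predicate re-checked on every wake-up
    std::puts("worker: woken for shutdown");
}

int main() {
    std::thread thr(worker);

    {
        std::lock_guard lg(cvMutex); // 1. set the flag while holding the mutex
        terminateFlag = true;
    }
    cv.notify_all(); // 2. wake anything blocked on the CV
    thr.join();      // 3. join before tearing down shared state,
                     //    e.g. before a wl_display_disconnect()
}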

@@ -231,7 +231,6 @@ void CAsyncResourceGatherer::asyncAssetSpinLock() {
 void CAsyncResourceGatherer::requestAsyncAssetPreload(const SPreloadRequest& request) {
     std::lock_guard<std::mutex> lg(asyncLoopState.requestMutex);
     asyncLoopState.requests.push_back(request);
-    std::unique_lock lk(cvmtx);
     asyncLoopState.pending = true;
     asyncLoopState.loopGuard.notify_all();
 }
@@ -240,4 +239,9 @@ void CAsyncResourceGatherer::unloadAsset(SPreloadedAsset* asset) {
     std::lock_guard<std::mutex> lg(asyncLoopState.assetsMutex);
     std::erase_if(assets, [asset](const auto& a) { return &a.second == asset; });
 }
+
+void CAsyncResourceGatherer::notify() {
+    asyncLoopState.pending = true;
+    asyncLoopState.loopGuard.notify_all();
+}
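
The new notify() only has an effect if the gatherer's loop is sleeping on loopGuard with pending as its wake-up condition; that loop is not part of this commit, so the following is merely an assumed shape of the consuming side (the field names mirror the ones above, the loop body is a guess).

// Assumed consumer of the loopGuard/pending pair (not shown in this commit):
// sleep until work is pending or a notify() arrives, then re-check the exit flag.
#include <atomic>
#include <condition_variable>
#include <mutex>

struct SAsyncLoopState {
    std::mutex              loopMutex;
    std::condition_variable loopGuard;
    bool                    pending = false;
};

void asyncLoop(SAsyncLoopState& state, std::atomic<bool>& exitLoop) {
    while (!exitLoop) {
        std::unique_lock lk(state.loopMutex);
        state.loopGuard.wait(lk, [&] { return state.pending || exitLoop.load(); });
        state.pending = false;
        lk.unlock();

        if (exitLoop)
            break;

        // ... process the queued preload requests here ...
    }
}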

@@ -43,6 +43,7 @@ class CAsyncResourceGatherer {
     void requestAsyncAssetPreload(const SPreloadRequest& request);
     void unloadAsset(SPreloadedAsset* asset);
+    void notify();
 
   private:
     std::thread initThread;