#pragma once
#include "Include.h"
namespace Helper
{
// Returns the current local wall-clock time (microsecond resolution)
// formatted as a human-readable string, e.g. "2024-Jan-01 12:34:56.789012".
inline std::string time_now()
{
    return boost::posix_time::to_simple_string(
        boost::posix_time::microsec_clock::local_time());
}
// Bounded wait-free single-producer/single-consumer ring buffer.
// One slot is always kept empty to distinguish "full" from "empty", so a
// queue built with `size` slots holds at most size - 1 elements.
// Thread-safety: exactly one producer thread may call enqueue() and exactly
// one consumer thread may call dequeue(), concurrently with each other.
template <class T>
class WaitFreeSPSCQueue
{
public:
    // Default: 1000 slots (999 usable elements).
    WaitFreeSPSCQueue() : WaitFreeSPSCQueue(1000) {}

    // size: number of ring slots; clamped to at least 1 so the wrap-around
    // modulo below can never divide by zero.
    // BUG FIX: this overload used to leave mQueue empty (so any enqueue()
    // wrote out of bounds) and left the mHead/mTail atomics uninitialized.
    WaitFreeSPSCQueue(unsigned long size)
        : mSize(size > 0 ? size : 1)
        , mQueue(mSize)
        , mHead(0)
        , mTail(0)
    {}

    // Producer side only. Returns false when the ring is full.
    bool enqueue(T data)
    {
        const auto currentHead = mHead.load(std::memory_order_relaxed);
        const unsigned long newHead = (currentHead + 1) % mSize;
        if(newHead == mTail.load(std::memory_order_acquire)) // Max queue size reached
            return false;
        mQueue[currentHead] = std::move(data);
        // release: publish the written slot to the consumer.
        mHead.store(newHead, std::memory_order_release);
        return true;
    }

    // Consumer side only. Returns false when the ring is empty;
    // otherwise moves the oldest element into `output`.
    bool dequeue(T& output)
    {
        const auto currentTail = mTail.load(std::memory_order_relaxed);
        if(currentTail == mHead.load(std::memory_order_acquire)) // Queue is empty
            return false;
        const unsigned long newTail = (currentTail + 1) % mSize;
        output = std::move(mQueue[currentTail]);
        // release: hand the consumed slot back to the producer.
        mTail.store(newTail, std::memory_order_release);
        return true;
    }

private:
    unsigned long mSize = 0;
    std::vector<T> mQueue;
    // Head and tail live on separate cache lines to avoid false sharing
    // between the producer and consumer threads.
    alignas(CACHE_LINE_SYS_SIZE) std::atomic<unsigned long> mHead{0};
    alignas(CACHE_LINE_SYS_SIZE) std::atomic<unsigned long> mTail{0};
};
//Naive lock free structure (because it does a lot of pointer chasing... very bad cache locality)
//TODO I will probably no longer implement this since it is not needed yet
// NOTE(review): placeholder only — no push/pop implemented yet; the class
// currently has no public interface at all.
class LockFreeStack
{
public:
private:
// Singly-linked node; each element is a separate heap allocation, which is
// the pointer-chasing cost called out in the comment above.
struct Node
{
int value;
Node* next = nullptr;
};
// alignas(64) presumably pads to a cache line to avoid false sharing on the
// head pointer — TODO confirm against CACHE_LINE_SYS_SIZE used elsewhere.
alignas(64) std::atomic<Node* > head = nullptr;
};
//LRU Cache with option to disable duplicates (better search time for the unordered map)
// Front of mCache = most recently used; back = least recently used.
// NOTE(review): when duplicates ARE allowed, mLookup can only track one list
// position per key, so mixed duplicate/get/remove usage is best-effort.
class LRUCache
{
public:
    // allowDups: when false, push() rejects keys that are already cached.
    // capacity:  maximum number of cached entries before the least recently
    //            used one is evicted.
    LRUCache(bool allowDups = true, uint8_t capacity = 5)
        : mCapacity(capacity)
        , mSize(0)
        , mAllowDups(allowDups)
    {
        // Depending on the ABI implementation this may be set exactly to
        // mCapacity or rounded up to the nearest prime number. Either way it
        // is enough buckets for at most mCapacity entries, so collisions are
        // very unlikely.
        mLookup.reserve(mCapacity);
    }

    // Returns the cached value for `key` and marks it as most recently used,
    // or -1 when the key is not cached.
    int get(int key)
    {
        const auto lookupIt = mLookup.find(key);
        if(lookupIt == mLookup.end()) return -1;
        // splice() relinks the node to the front without invalidating the
        // iterator stored in mLookup, so no erase/re-insert dance is needed.
        mCache.splice(mCache.begin(), mCache, lookupIt->second);
        return *lookupIt->second;
    }

    // Inserts `key` as the most recently used entry, evicting the least
    // recently used one when the cache is full. `callback`, when provided,
    // is invoked with the evicted key before it is dropped.
    // Returns false when duplicates are disabled and `key` is already
    // cached, true otherwise.
    // BUG FIX: used to return -1/0 from a bool function, i.e. true on
    // failure and false on success.
    bool push(int key, std::function<void(int)> callback = nullptr)
    {
        if(!mAllowDups && mLookup.find(key) != mLookup.end())
            return false;
        //Removing least accessed item
        if(mSize >= mCapacity && !mCache.empty())
        {
            // BUG FIX: the original dereferenced mCache.end() (UB); the LRU
            // element is the last one, i.e. mCache.back().
            const int victim = mCache.back();
            if(callback) callback(victim);
            mLookup.erase(victim);
            mCache.pop_back();
            // BUG FIX: mSize was never decremented on eviction, so the
            // uint8_t counter grew past mCapacity and eventually wrapped.
            mSize--;
        }
        mCache.push_front(key);
        // operator[] overwrites any stale iterator from an earlier push of
        // the same key (emplace() would silently keep the old one).
        mLookup[key] = mCache.begin();
        mSize++;
        return true;
    }

    // Removes `key` from the cache. Returns true when the key was present
    // and removed, false otherwise.
    // BUG FIX: used to return -1/0 from a bool function (inverted meaning),
    // and never decremented mSize.
    bool remove(int key)
    {
        const auto lookupIt = mLookup.find(key);
        if(lookupIt == mLookup.end()) return false;
        mCache.erase(lookupIt->second);
        mLookup.erase(lookupIt);
        mSize--;
        return true;
    }

private:
    uint8_t mCapacity;
    uint8_t mSize;
    bool mAllowDups;
    std::list<int> mCache; // front = MRU, back = LRU
    std::unordered_map<int, std::list<int>::iterator> mLookup;
};
}