File: clip_cache.h

#ifndef _CLIP_CACHE_H
#define _CLIP_CACHE_H

#include "coordinates.h"
#include "coordinates_geom.h"
#include "geom.h"
#include <mutex>

class TileBbox;

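// Caches the result of clipping an object's geometry to a tile, so that
// tiles at deeper zoom levels can reuse the clip already computed for an
// enclosing tile instead of re-clipping the full geometry. Entries are
// sharded by object ID, with a mutex per shard to limit contention between
// worker threads.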
template <class T>
class ClipCache {
public:
	ClipCache(size_t threadNum, unsigned int baseZoom):
		baseZoom(baseZoom),
		clipCache(threadNum * 16),
		clipCacheMutex(threadNum * 16),
		clipCacheSize(threadNum * 16) {
	}

	const std::shared_ptr<T> get(unsigned int zoom, TileCoordinate x, TileCoordinate y, NodeID objectID) const {
		// Look for a previously clipped version at z-1, z-2, ...
		// Halving x and y at each step gives the coordinates of the
		// enclosing tile at the next zoom out.
		std::lock_guard<std::mutex> lock(clipCacheMutex[objectID % clipCacheMutex.size()]);
		while (zoom > 0) {
			zoom--;
			x /= 2;
			y /= 2;
			const auto& cache = clipCache[objectID % clipCache.size()];
			const auto rv = cache.find(std::make_tuple(zoom, TileCoordinates(x, y), objectID));
			if (rv != cache.end()) {
				return rv->second;
			}
		}

		return nullptr;
	}

	void add(const TileBbox& bbox, const NodeID objectID, const T& output) {
		// The point of caching is to reuse the clip at deeper zooms, so
		// caching at the base zoom is pointless: nothing will ever look
		// it up from there.
		if (bbox.zoom == baseZoom)
			return;

		std::shared_ptr<T> copy = std::make_shared<T>();
		boost::geometry::assign(*copy, output);

		size_t index = objectID % clipCacheMutex.size();
		// Declared before the lock so that, when the shard is flushed, the
		// evicted objects are destroyed only after the lock is released.
		std::vector<std::shared_ptr<T>> objects;
		std::lock_guard<std::mutex> lock(clipCacheMutex[index]);
		auto& cache = clipCache[index];
		// Reset the cache periodically so it doesn't grow without bound.
		//
		// I also tried boost's lru_cache, but it seemed to perform worse,
		// perhaps due to the extra bookkeeping. A bounded map that evicts
		// in FIFO order would need less bookkeeping and might do better.
		clipCacheSize[index]++;
		if (clipCacheSize[index] > 1024) {
			clipCacheSize[index] = 0;
			// Copy the map's contents to a vector so that calling .clear()
			// happens under the lock, while the destructors of all of the
			// evicted objects run later, outside it.
			objects.reserve(1025);
			for (const auto& x : cache)
				objects.push_back(x.second);
			cache.clear();
		}

		cache[std::make_tuple(bbox.zoom, bbox.index, objectID)] = copy;
	}

private:
	unsigned int baseZoom;
	std::vector<std::map<std::tuple<uint16_t, TileCoordinates, NodeID>, std::shared_ptr<T>>> clipCache;
	mutable std::vector<std::mutex> clipCacheMutex;
	std::vector<size_t> clipCacheSize;
};

#endif
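
For context, here is a minimal sketch of how a caller might drive the cache. MultiPolygon and clipToTile() are hypothetical stand-ins for tilemaker's real geometry type and clipping step, and the sketch assumes TileBbox exposes zoom and an index with x/y members (the header itself reads bbox.zoom and bbox.index):

	// Hypothetical caller: reuse an ancestor tile's clip when one is cached,
	// otherwise clip the original geometry, and cache the result either way.
	MultiPolygon clipTileGeometry(
		ClipCache<MultiPolygon>& cache,
		const TileBbox& bbox,            // the tile being produced
		NodeID objectID,
		const MultiPolygon& original) {  // the full, unclipped geometry
		// get() walks upwards from bbox.zoom towards zoom 0 and returns the
		// first clip found for an enclosing tile, or nullptr on a miss.
		std::shared_ptr<MultiPolygon> parent =
			cache.get(bbox.zoom, bbox.index.x, bbox.index.y, objectID);

		// An ancestor's clip is usually far smaller than the original, so
		// clipping it again for this tile is much cheaper. clipToTile() is
		// a hypothetical helper, not part of this header.
		MultiPolygon out = clipToTile(parent ? *parent : original, bbox);

		// Store the result so tiles below this one can reuse it; add() is a
		// no-op at the base zoom, where nothing deeper will ever look it up.
		cache.add(bbox, objectID, out);
		return out;
	}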
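
The eviction comment in add() floats the idea of a bounded map that evicts in FIFO order as a lighter-weight alternative to clearing the whole shard. A minimal sketch of that idea, not part of tilemaker and with illustrative names:

	#include <cstddef>
	#include <deque>
	#include <map>

	// A map capped at maxSize entries. Insertion order is tracked in a
	// deque; once full, the oldest key is evicted. Unlike an LRU cache,
	// lookups do no bookkeeping at all, which is the property the comment
	// above hopes will beat boost's lru_cache.
	template <class K, class V>
	class FifoBoundedMap {
	public:
		explicit FifoBoundedMap(std::size_t maxSize): maxSize(maxSize) {}

		// Returns a pointer to the cached value, or nullptr on a miss.
		const V* find(const K& key) const {
			auto it = map.find(key);
			return it == map.end() ? nullptr : &it->second;
		}

		void add(const K& key, V value) {
			// Keep the first insertion; re-adding an existing key is a
			// no-op, so the deque never needs reordering.
			if (map.find(key) != map.end())
				return;
			if (map.size() >= maxSize) {
				map.erase(order.front());  // evict the oldest entry
				order.pop_front();
			}
			order.push_back(key);
			map.emplace(key, std::move(value));
		}

	private:
		std::size_t maxSize;
		std::map<K, V> map;
		std::deque<K> order;
	};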