File: reduce.py

from test import TestCase, fill_redis_with_vectors, generate_random_vector

class Reduce(TestCase):
    def getname(self):
        return "Dimension Reduction"

    def estimated_runtime(self):
        return 0.2

    def test(self):
        original_dim = 100
        reduced_dim = 80
        count = 1000
        k = 50  # Number of nearest neighbors to check

        # Fill Redis with vectors using REDUCE and get reference data
        data = fill_redis_with_vectors(self.redis, self.test_key, count, original_dim, reduced_dim)
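        # fill_redis_with_vectors is assumed to issue VADD with the REDUCE
        # option, roughly:  VADD <key> REDUCE 80 VALUES 100 <v1> ... <v100> <name>
        # and to return the original 100-dim vectors so the linear scan below
        # has un-projected reference data.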

        # Verify dimension is reduced
        dim = self.redis.execute_command('VDIM', self.test_key)
        assert dim == reduced_dim, f"Expected dimension {reduced_dim}, got {dim}"

        # Generate query vector and get nearest neighbors using Redis
        query_vec = generate_random_vector(original_dim)
        redis_raw = self.redis.execute_command('VSIM', self.test_key, 'VALUES',
                                               original_dim, *[str(x) for x in query_vec],
                                               'COUNT', k, 'WITHSCORES')

        # Convert Redis results to dict
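        # With WITHSCORES the reply alternates member and score, e.g.
        # [b'item:12', b'0.874', b'item:3', b'0.861', ...] (raw bytes: the
        # client is assumed to run without decode_responses, hence the
        # .decode() below; the item names are hypothetical).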
        redis_results = {}
        for i in range(0, len(redis_raw), 2):
            key = redis_raw[i].decode()
            score = float(redis_raw[i+1])
            redis_results[key] = score

        # Get results from linear scan with original vectors
        linear_results = data.find_k_nearest(query_vec, k)
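        # find_k_nearest is assumed to return (name, score) pairs sorted by
        # similarity, using the same scoring as VSIM so the two top-k lists
        # are directly comparable.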
        linear_items = {name: score for name, score in linear_results}

        # Compare overlap between reduced and non-reduced results
        redis_set = set(redis_results.keys())
        linear_set = set(linear_items.keys())
        overlap = len(redis_set & linear_set)
        overlap_ratio = overlap / k
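        # Example: if 12 of Redis's top-50 also appear in the linear top-50,
        # overlap = 12 and overlap_ratio = 12 / 50 = 0.24.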

        # With random projection, we expect some loss of accuracy but should
        # maintain at least some similarity structure.
        # Note that the Gaussian distribution is the worst case for this
        # test, so in real-world practice results will be better.
        min_expected_overlap = 0.1  # At least 10% overlap in top-k
        # If overlap is too low, print a comparison for debugging before failing
        if overlap_ratio < min_expected_overlap:
            print("\nLow overlap in results. Details:")
            print("\nTop results from linear scan (original vectors):")
            for name, score in linear_results:
                print(f"{name}: {score:.3f}")
            print("\nTop results from Redis (reduced vectors):")
            for item, score in sorted(redis_results.items(), key=lambda x: x[1], reverse=True):
                print(f"{item}: {score:.3f}")

        assert overlap_ratio >= min_expected_overlap, \
            f"Dimension reduction lost too much structure. Only {overlap_ratio*100:.1f}% overlap in top {k}"

        # For items that appear in both result sets, scores should be reasonably correlated
        common_items = redis_set & linear_set
        for item in common_items:
            redis_score = redis_results[item]
            linear_score = linear_items[item]
            # Allow for some deviation due to the dimensionality reduction
            assert abs(redis_score - linear_score) < 0.2, \
                f"Score mismatch too high for {item}: Redis={redis_score:.3f} Linear={linear_score:.3f}"