File: t3_inode_cache.py

Package: s3ql 2.21+dfsg-3
#!/usr/bin/env python3
'''
t3_inode_cache.py - this file is part of S3QL.

Copyright © 2008 Nikolaus Rath <Nikolaus@rath.org>

This work can be distributed under the terms of the GNU GPLv3.
'''

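# When executed directly, run the tests in this file under pytest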
if __name__ == '__main__':
    import pytest
    import sys
    sys.exit(pytest.main([__file__] + sys.argv[1:]))

from s3ql import inode_cache
from s3ql.mkfs import init_tables
from s3ql.common import time_ns
from s3ql.metadata import create_tables
from s3ql.database import Connection
import unittest
import tempfile
import os

class cache_tests(unittest.TestCase):

    def setUp(self):
        # Destructors are not guaranteed to run, and we can't unlink
        # the file immediately because apsw refers to it by name.
        # Therefore, we unlink the file manually in tearDown()
        self.dbfile = tempfile.NamedTemporaryFile(delete=False)

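        # Create a fresh S3QL metadata schema in the temporary database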
        self.db = Connection(self.dbfile.name)
        create_tables(self.db)
        init_tables(self.db)
        self.cache = inode_cache.InodeCache(self.db, 0)

    def tearDown(self):
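        # Shut down the cache before removing the database file it still uses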
        self.cache.destroy()
        os.unlink(self.dbfile.name)

    def test_create(self):
        attrs = {'mode': 784,
                 'refcount': 3,
                 'uid': 7,
                 'gid': 2,
                 'size': 34674,
                 'rdev': 11,
                 'atime_ns': time_ns(),
                 'ctime_ns': time_ns(),
                 'mtime_ns': time_ns() }

        inode = self.cache.create_inode(**attrs)

        # Every attribute passed to create_inode() must be reflected
        # on the returned inode object
        for (key, val) in attrs.items():
            self.assertEqual(val, getattr(inode, key))

        self.assertTrue(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))


    def test_del(self):
        attrs = {'mode': 784,
                'refcount': 3,
                'uid': 7,
                'gid': 2,
                'size': 34674,
                'rdev': 11,
                'atime_ns': time_ns(),
                'ctime_ns': time_ns(),
                'mtime_ns': time_ns() }
        inode = self.cache.create_inode(**attrs)
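        # Deleting through the cache must also remove the inode's database row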
        del self.cache[inode.id]
        self.assertFalse(self.db.has_val('SELECT 1 FROM inodes WHERE id=?', (inode.id,)))
        self.assertRaises(KeyError, self.cache.__delitem__, inode.id)

    def test_get(self):
        attrs = {'mode': 784,
                'refcount': 3,
                'uid': 7,
                'gid': 2,
                'size': 34674,
                'rdev': 11,
                'atime_ns': time_ns(),
                'ctime_ns': time_ns(),
                'mtime_ns': time_ns() }

        inode = self.cache.create_inode(**attrs)
        for (key, val) in attrs.items():
            self.assertEqual(getattr(inode, key), val)

        # Create another inode
        self.cache.create_inode(**attrs)

        # Delete the row directly in the database, bypassing the cache
        self.db.execute('DELETE FROM inodes WHERE id=?', (inode.id,))
        # The stale entry must still be served from the cache
        self.assertEqual(inode, self.cache[inode.id])

        # Creating more inodes than the cache holds evicts the stale entry,
        # so the lookup must now fail
        for _ in range(inode_cache.CACHE_SIZE + 1):
            self.cache.create_inode(**attrs)

        self.assertRaises(KeyError, self.cache.__getitem__, inode.id)