# frozen_string_literal: true
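
# Shared examples for models whose size is rolled up into ProjectStatistics.
# The `with_counter_attribute` flag selects the buffered (Redis-backed)
# counter code path exercised below. The including spec must define `subject`
# as an unsaved record whose class responds to `.project_statistics_name` and
# `.statistic_attribute`. A rough usage sketch (the factory name here is
# illustrative only, not taken from a specific caller):
#
#   it_behaves_like 'UpdateProjectStatistics', true do
#     subject { build(:ci_job_artifact, :archive) }
#   end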
RSpec.shared_examples 'UpdateProjectStatistics' do |with_counter_attribute|
  let(:project) { subject.project }
  let(:project_statistics_name) { described_class.project_statistics_name }
  let(:statistic_attribute) { described_class.statistic_attribute }

  def reload_stat
    project.statistics.reload.send(project_statistics_name).to_i
  end

  def read_attribute
    subject.read_attribute(statistic_attribute).to_i
  end

  def read_pending_increment
    Gitlab::Redis::SharedState.with do |redis|
      key = project.statistics.counter(project_statistics_name).key

      redis.get(key).to_i
    end
  end

  it { is_expected.to be_new_record }

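  # Runs the given block, asserts that it schedules a delayed
  # FlushCounterIncrementsWorker for the statistic, and then simulates that
  # worker running immediately so the buffered increments are applied.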
  def expect_flush_counter_increments_worker_performed
    expect(FlushCounterIncrementsWorker)
      .to receive(:perform_in)
      .with(Gitlab::Counters::BufferedCounter::WORKER_DELAY, project.statistics.class.name, project.statistics.id, project_statistics_name.to_s)

    yield

    # simulate worker running now
    expect(Namespaces::ScheduleAggregationWorker).to receive(:perform_async)

    FlushCounterIncrementsWorker.new.perform(project.statistics.class.name, project.statistics.id, project_statistics_name)
  end

  if with_counter_attribute
    context 'when statistic is a counter attribute', :clean_gitlab_redis_shared_state do
      context 'when creating' do
        it 'stores pending increments for async update' do
          initial_stat = reload_stat
          expected_increment = read_attribute

          expect_flush_counter_increments_worker_performed do
            subject.save!

            expect(read_pending_increment).to eq(expected_increment)
            expect(expected_increment).to be > initial_stat
            expect(expected_increment).to be_positive
          end
        end
      end

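      # Updating an already-persisted record should buffer only the delta
      # between the old and new attribute values, not the full value.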
      context 'when updating' do
        let(:delta) { 42 }

        before do
          subject.save!
          redis_shared_state_cleanup!
        end

        it 'stores pending increments for async update' do
          expected_increment = have_attributes(amount: delta, ref: subject.id)

          expect(ProjectStatistics)
            .to receive(:increment_statistic)
            .with(project, project_statistics_name, expected_increment)
            .and_call_original

          subject.write_attribute(statistic_attribute, read_attribute + delta)

          expect_flush_counter_increments_worker_performed do
            subject.save!

            expect(read_pending_increment).to eq(delta)
          end
        end

        it 'avoids N + 1 queries' do
          subject.write_attribute(statistic_attribute, read_attribute + delta)

          control_count = ActiveRecord::QueryRecorder.new do
            subject.save!
          end

          subject.write_attribute(statistic_attribute, read_attribute + delta)

          expect do
            subject.save!
          end.not_to exceed_query_limit(control_count)
        end
      end

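      # Destroying a record should buffer a negative increment that cancels
      # out the value previously counted for it, unless the whole project is
      # being destroyed.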
      context 'when destroying' do
        before do
          subject.save!
          redis_shared_state_cleanup!
        end

        it 'stores pending increment for async update' do
          initial_stat = reload_stat
          expected_increment = -read_attribute

          expect_flush_counter_increments_worker_performed do
            subject.destroy!

            expect(read_pending_increment).to eq(expected_increment)
            expect(expected_increment).to be < initial_stat
            expect(expected_increment).to be_negative
          end
        end

        context 'when it is destroyed from the project level' do
          it 'does not store pending increments for async update' do
            expect { Projects::DestroyService.new(project, project.first_owner).execute }
              .not_to change { read_pending_increment }
          end

          it 'does not schedule a namespace statistics worker' do
            expect(Namespaces::ScheduleAggregationWorker)
              .not_to receive(:perform_async)

            expect(Projects::DestroyService.new(project, project.first_owner).execute).to eq(true)
          end
        end
      end
    end
  end
end