/*
* Copyright (c) 2008, 2009, 2010, 2011, 2012, 2013, 2015 Nicira, Inc.
* Copyright (c) 2019, 2020, 2021 Intel Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include <config.h>

#include "dpif-netdev-private-dfc.h"
static void
emc_clear_entry(struct emc_entry *ce)
{
    if (ce->flow) {
        dp_netdev_flow_unref(ce->flow);
        ce->flow = NULL;
    }
}
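
/* Marks one slot of an SMC bucket as empty; UINT16_MAX is the sentinel
 * for an unused flow index. */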
static void
smc_clear_entry(struct smc_bucket *b, int idx)
{
    b->flow_idx[idx] = UINT16_MAX;
}
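
/* Resets the EMC sweep position and initializes every entry to the empty
 * state, ready for use by emc_cache_slow_sweep(). */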
static void
emc_cache_init(struct emc_cache *flow_cache)
{
    int i;

    flow_cache->sweep_idx = 0;
    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        flow_cache->entries[i].flow = NULL;
        flow_cache->entries[i].key.hash = 0;
        flow_cache->entries[i].key.len = sizeof(struct miniflow);
        flowmap_init(&flow_cache->entries[i].key.mf.map);
    }
}
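
/* Marks every slot in every SMC bucket as empty. */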
static void
smc_cache_init(struct smc_cache *smc_cache)
{
    int i, j;

    for (i = 0; i < SMC_BUCKET_CNT; i++) {
        for (j = 0; j < SMC_ENTRY_PER_BUCKET; j++) {
            smc_cache->buckets[i].flow_idx[j] = UINT16_MAX;
        }
    }
}
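
/* Initializes both tiers of the datapath flow cache (DFC): the exact match
 * cache (EMC) and the signature match cache (SMC). */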
void
dfc_cache_init(struct dfc_cache *flow_cache)
{
    emc_cache_init(&flow_cache->emc_cache);
    smc_cache_init(&flow_cache->smc_cache);
}
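
/* Drops any flow references still held by EMC entries. */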
static void
emc_cache_uninit(struct emc_cache *flow_cache)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(flow_cache->entries); i++) {
        emc_clear_entry(&flow_cache->entries[i]);
    }
}
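
/* Empties every SMC bucket.  Unlike the EMC, the SMC stores flow indexes
 * rather than flow references, so there is nothing to unref. */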
static void
smc_cache_uninit(struct smc_cache *smc)
{
    int i, j;

    for (i = 0; i < SMC_BUCKET_CNT; i++) {
        for (j = 0; j < SMC_ENTRY_PER_BUCKET; j++) {
            smc_clear_entry(&(smc->buckets[i]), j);
        }
    }
}
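
/* Uninitializes both tiers of the datapath flow cache. */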
void
dfc_cache_uninit(struct dfc_cache *flow_cache)
{
    smc_cache_uninit(&flow_cache->smc_cache);
    emc_cache_uninit(&flow_cache->emc_cache);
}

/* Checks and clears dead flow references slowly: one EMC entry is examined
 * per invocation, so a full sweep of the cache completes over many calls. */
void
emc_cache_slow_sweep(struct emc_cache *flow_cache)
{
    struct emc_entry *entry = &flow_cache->entries[flow_cache->sweep_idx];

    if (!emc_entry_alive(entry)) {
        emc_clear_entry(entry);
    }
    flow_cache->sweep_idx = (flow_cache->sweep_idx + 1) & EM_FLOW_HASH_MASK;
}
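
/* Illustrative sketch, not part of this file: the assumed caller is a
 * PMD-style polling loop that invokes the sweep once per iteration, so the
 * entire EMC (EM_FLOW_HASH_MASK + 1 entries) is revisited incrementally
 * without ever stalling the datapath for a full scan:
 *
 *     for (;;) {
 *         process_rx_batch(...);                    <-- hypothetical helper
 *         emc_cache_slow_sweep(&cache->emc_cache);
 *     }
 */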