net: openvswitch: improve the coding style
Does not change the logic; it just improves the coding style.

Cc: Pravin B Shelar <pshelar@ovn.org>
Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
Acked-by: Pravin B Shelar <pshelar@ovn.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 34e1ec319e
commit cf3266ad48

4 changed files with 55 additions and 41 deletions
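The hunks shown below are pure style cleanups: spaces on both sides of binary operators, braces around loop bodies that span several lines, and long lines wrapped with the continuation aligned under the opening parenthesis. A minimal, hypothetical sketch in plain C of those same rules (example code only, not taken from the patched files):

#include <stdio.h>

/* Hypothetical example of the style rules applied by this patch:
 * a space on each side of a binary operator, braces when a loop
 * body spans multiple lines, and wrapped arguments aligned under
 * the opening parenthesis.
 */
static void print_nonzero(const int *vals, int n)
{
        int i;

        for (i = 0; i < n; i = i + 1) {
                if (vals[i])
                        printf("index %d holds %d\n",
                               i, vals[i]);
        }
}

int main(void)
{
        int vals[] = { 0, 3, 0, 7 };

        print_nonzero(vals, 4);
        return 0;
}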
net/openvswitch/flow_table.c

@@ -111,12 +111,16 @@ static void flow_free(struct sw_flow *flow)
         if (ovs_identifier_is_key(&flow->id))
                 kfree(flow->id.unmasked_key);
         if (flow->sf_acts)
-                ovs_nla_free_flow_actions((struct sw_flow_actions __force *)flow->sf_acts);
+                ovs_nla_free_flow_actions((struct sw_flow_actions __force *)
+                                          flow->sf_acts);
         /* We open code this to make sure cpu 0 is always considered */
-        for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask))
+        for (cpu = 0; cpu < nr_cpu_ids;
+             cpu = cpumask_next(cpu, &flow->cpu_used_mask)) {
                 if (flow->stats[cpu])
                         kmem_cache_free(flow_stats_cache,
                                         (struct sw_flow_stats __force *)flow->stats[cpu]);
+        }
+
         kmem_cache_free(flow_cache, flow);
 }
@@ -192,7 +196,7 @@ static void tbl_mask_array_reset_counters(struct mask_array *ma)
          * zero based counter we store the value at reset, and subtract it
          * later when processing.
          */
-        for (i = 0; i < ma->max; i++)  {
+        for (i = 0; i < ma->max; i++) {
                 ma->masks_usage_zero_cntr[i] = 0;
 
                 for_each_possible_cpu(cpu) {
@@ -273,7 +277,7 @@ static int tbl_mask_array_add_mask(struct flow_table *tbl,
 
         if (ma_count >= ma->max) {
                 err = tbl_mask_array_realloc(tbl, ma->max +
-                                                  MASK_ARRAY_SIZE_MIN);
+                                             MASK_ARRAY_SIZE_MIN);
                 if (err)
                         return err;
 
@@ -288,7 +292,7 @@ static int tbl_mask_array_add_mask(struct flow_table *tbl,
         BUG_ON(ovsl_dereference(ma->masks[ma_count]));
 
         rcu_assign_pointer(ma->masks[ma_count], new);
-        WRITE_ONCE(ma->count, ma_count +1);
+        WRITE_ONCE(ma->count, ma_count + 1);
 
         return 0;
 }
@@ -309,10 +313,10 @@ static void tbl_mask_array_del_mask(struct flow_table *tbl,
         return;
 
 found:
-        WRITE_ONCE(ma->count, ma_count -1);
+        WRITE_ONCE(ma->count, ma_count - 1);
 
-        rcu_assign_pointer(ma->masks[i], ma->masks[ma_count -1]);
-        RCU_INIT_POINTER(ma->masks[ma_count -1], NULL);
+        rcu_assign_pointer(ma->masks[i], ma->masks[ma_count - 1]);
+        RCU_INIT_POINTER(ma->masks[ma_count - 1], NULL);
 
         kfree_rcu(mask, rcu);
 
@@ -448,16 +452,17 @@ free_mask_cache:
 
 static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
 {
-        struct table_instance *ti = container_of(rcu, struct table_instance, rcu);
+        struct table_instance *ti;
 
+        ti = container_of(rcu, struct table_instance, rcu);
         __table_instance_destroy(ti);
 }
 
 static void table_instance_flow_free(struct flow_table *table,
-                                  struct table_instance *ti,
-                                  struct table_instance *ufid_ti,
-                                  struct sw_flow *flow,
-                                  bool count)
+                                     struct table_instance *ti,
+                                     struct table_instance *ufid_ti,
+                                     struct sw_flow *flow,
+                                     bool count)
 {
         hlist_del_rcu(&flow->flow_table.node[ti->node_ver]);
         if (count)
@@ -484,9 +489,9 @@ void table_instance_flow_flush(struct flow_table *table,
                 return;
 
         for (i = 0; i < ti->n_buckets; i++) {
-                struct sw_flow *flow;
                 struct hlist_head *head = &ti->buckets[i];
                 struct hlist_node *n;
+                struct sw_flow *flow;
 
                 hlist_for_each_entry_safe(flow, n, head,
                                           flow_table.node[ti->node_ver]) {
@@ -661,7 +666,7 @@ static int flow_key_start(const struct sw_flow_key *key)
                 return 0;
         else
                 return rounddown(offsetof(struct sw_flow_key, phy),
-                                   sizeof(long));
+                                 sizeof(long));
 }
 
 static bool cmp_key(const struct sw_flow_key *key1,
@@ -673,7 +678,7 @@ static bool cmp_key(const struct sw_flow_key *key1,
         long diffs = 0;
         int i;
 
-        for (i = key_start; i < key_end;  i += sizeof(long))
+        for (i = key_start; i < key_end; i += sizeof(long))
                 diffs |= *cp1++ ^ *cp2++;
 
         return diffs == 0;
@@ -713,7 +718,7 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
         (*n_mask_hit)++;
 
         hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
-                                lockdep_ovsl_is_held()) {
+                                 lockdep_ovsl_is_held()) {
                 if (flow->mask == mask && flow->flow_table.hash == hash &&
                     flow_cmp_masked_key(flow, &masked_key, &mask->range))
                         return flow;
@@ -897,7 +902,8 @@ static bool ovs_flow_cmp_ufid(const struct sw_flow *flow,
         return !memcmp(flow->id.ufid, sfid->ufid, sfid->ufid_len);
 }
 
-bool ovs_flow_cmp(const struct sw_flow *flow, const struct sw_flow_match *match)
+bool ovs_flow_cmp(const struct sw_flow *flow,
+                  const struct sw_flow_match *match)
 {
         if (ovs_identifier_is_ufid(&flow->id))
                 return flow_cmp_masked_key(flow, match->key, &match->range);
@@ -916,7 +922,7 @@ struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
         hash = ufid_hash(ufid);
         head = find_bucket(ti, hash);
         hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
-                                lockdep_ovsl_is_held()) {
+                                 lockdep_ovsl_is_held()) {
                 if (flow->ufid_table.hash == hash &&
                     ovs_flow_cmp_ufid(flow, ufid))
                         return flow;
@@ -1107,7 +1113,7 @@ void ovs_flow_masks_rebalance(struct flow_table *table)
         if (!masks_and_count)
                 return;
 
-        for (i = 0; i < ma->max; i++)  {
+        for (i = 0; i < ma->max; i++) {
                 struct sw_flow_mask *mask;
                 unsigned int start;
                 int cpu;
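Cleanups of this kind are typically reported by the kernel's own style checker; as one example invocation, scripts/checkpatch.pl --strict -f net/openvswitch/flow_table.c flags missing spaces around binary operators and continuation lines that do not line up with the opening parenthesis.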