
Commit 4aa6cda

rarindam authored and wenlingz committed
HV: Fix missing brackets for MISRA C Violations
Patch 5 of 7.
Added changes to make sure MISRA C violations are fixed
for rules 11S and 12S.

Signed-off-by: Arindam Roy <arindam.roy@intel.com>
1 parent d16d9e5 commit 4aa6cda
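
The change throughout this patch is purely syntactic: MISRA C requires the body of every if/else to be a compound statement even when it holds a single statement, so the patch adds the missing braces without altering control flow. A minimal, self-contained sketch of the pattern; the clamp_positive() helper below is hypothetical, used only for illustration, and is not part of the ACRN sources:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper, used only to illustrate the bracing rule. */
static int32_t clamp_positive(int32_t value)
{
        int32_t result;

        /* Non-compliant form the patch removes:
         *
         *     if (value < 0)
         *             result = 0;
         *     else
         *             result = value;
         *
         * Compliant form: every branch body is braced, even single statements.
         */
        if (value < 0) {
                result = 0;
        }
        else {
                result = value;
        }

        return result;
}

int main(void)
{
        printf("%d %d\n", clamp_positive(-5), clamp_positive(7));
        return 0;
}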

File tree

8 files changed: +249 -130 lines changed


hypervisor/arch/x86/softirq.c

Lines changed: 8 additions & 4 deletions
@@ -47,22 +47,25 @@ void exec_softirq(void)
         if (cpu_id >= phys_cpu_num)
                 return;
 
-        if (((*bitmap) & SOFTIRQ_MASK) == 0UL)
+        if (((*bitmap) & SOFTIRQ_MASK) == 0UL) {
                 return;
+        }
 
         /* Disable softirq
          * SOFTIRQ_ATOMIC bit = 0 means softirq already in execution
          */
-        if (!bitmap_test_and_clear(SOFTIRQ_ATOMIC, bitmap))
+        if (!bitmap_test_and_clear(SOFTIRQ_ATOMIC, bitmap)) {
                 return;
+        }
 
 again:
         CPU_IRQ_ENABLE();
 
         while (1) {
                 softirq_id = ffs64(*bitmap);
-                if ((softirq_id == INVALID_BIT_INDEX) || (softirq_id >= SOFTIRQ_MAX))
+                if ((softirq_id == INVALID_BIT_INDEX) || (softirq_id >= SOFTIRQ_MAX)) {
                         break;
+                }
 
                 bitmap_clear(softirq_id, bitmap);
 
@@ -81,8 +84,9 @@ void exec_softirq(void)
 
         CPU_IRQ_DISABLE();
 
-        if (((*bitmap) & SOFTIRQ_MASK) != 0U)
+        if (((*bitmap) & SOFTIRQ_MASK) != 0U) {
                 goto again;
+        }
 
         enable_softirq(cpu_id);
 }
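
The exec_softirq() hunks above all sit in the same dispatch loop: find the lowest set bit in the per-CPU softirq bitmap, clear it, and run the matching handler until no valid bit remains. A self-contained sketch of that bit-scan loop, using plain standard C in place of the hypervisor's ffs64()/bitmap_clear() helpers; the handler, constants, and bitmap value below are illustrative assumptions, not ACRN code:

#include <stdint.h>
#include <stdio.h>

#define SOFTIRQ_MAX             2U
#define INVALID_BIT_INDEX       0xFFFFU

/* Minimal stand-in for the hypervisor's ffs64(): lowest set bit index. */
static uint16_t find_first_set(uint64_t bitmap)
{
        uint16_t i;

        for (i = 0U; i < 64U; i++) {
                if ((bitmap & ((uint64_t)1U << i)) != 0UL) {
                        return i;
                }
        }
        return INVALID_BIT_INDEX;
}

/* Illustrative handler in place of the per-softirq dispatch. */
static void handle_softirq(uint16_t id)
{
        printf("dispatch softirq %u\n", id);
}

int main(void)
{
        uint64_t bitmap = 0x3UL;        /* two pending softirqs: bits 0 and 1 */
        uint16_t softirq_id;

        while (1) {
                softirq_id = find_first_set(bitmap);
                if ((softirq_id == INVALID_BIT_INDEX) || (softirq_id >= SOFTIRQ_MAX)) {
                        break;
                }

                bitmap &= ~((uint64_t)1U << softirq_id); /* bitmap_clear() equivalent */
                handle_softirq(softirq_id);
        }

        return 0;
}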

hypervisor/arch/x86/timer.c

Lines changed: 26 additions & 13 deletions
@@ -16,8 +16,9 @@ uint32_t tsc_khz = 0U;
 static void run_timer(struct timer *timer)
 {
         /* deadline = 0 means stop timer, we should skip */
-        if ((timer->func != NULL) && timer->fire_tsc != 0UL)
+        if ((timer->func != NULL) && timer->fire_tsc != 0UL) {
                 timer->func(timer->priv_data);
+        }
 
         TRACE_2L(TRACE_TIMER_ACTION_PCKUP, timer->fire_tsc, 0);
 }
@@ -54,17 +55,20 @@ static void __add_timer(struct per_cpu_timers *cpu_timer,
         prev = &cpu_timer->timer_list;
         list_for_each(pos, &cpu_timer->timer_list) {
                 tmp = list_entry(pos, struct timer, node);
-                if (tmp->fire_tsc < tsc)
+                if (tmp->fire_tsc < tsc) {
                         prev = &tmp->node;
-                else
+                }
+                else {
                         break;
+                }
         }
 
         list_add(&timer->node, prev);
 
-        if (need_update != NULL)
+        if (need_update != NULL) {
                 /* update the physical timer if we're on the timer_list head */
                 *need_update = (prev == &cpu_timer->timer_list);
+        }
 }
 
 int add_timer(struct timer *timer)
@@ -73,20 +77,23 @@ int add_timer(struct timer *timer)
         uint16_t pcpu_id;
         bool need_update;
 
-        if (timer == NULL || timer->func == NULL || timer->fire_tsc == 0UL)
+        if (timer == NULL || timer->func == NULL || timer->fire_tsc == 0UL) {
                 return -EINVAL;
+        }
 
         /* limit minimal periodic timer cycle period */
-        if (timer->mode == TICK_MODE_PERIODIC)
+        if (timer->mode == TICK_MODE_PERIODIC) {
                 timer->period_in_cycle = max(timer->period_in_cycle,
                                 us_to_ticks(MIN_TIMER_PERIOD_US));
+        }
 
         pcpu_id = get_cpu_id();
         cpu_timer = &per_cpu(cpu_timers, pcpu_id);
         __add_timer(cpu_timer, timer, &need_update);
 
-        if (need_update)
+        if (need_update) {
                 update_physical_timer(cpu_timer);
+        }
 
         TRACE_2L(TRACE_TIMER_ACTION_ADDED, timer->fire_tsc, 0);
         return 0;
@@ -95,8 +102,9 @@ int add_timer(struct timer *timer)
 
 void del_timer(struct timer *timer)
 {
-        if ((timer != NULL) && !list_empty(&timer->node))
+        if ((timer != NULL) && !list_empty(&timer->node)) {
                 list_del_init(&timer->node);
+        }
 }
 
 static int request_timer_irq(uint16_t pcpu_id,
@@ -105,8 +113,9 @@ static int request_timer_irq(uint16_t pcpu_id,
 {
         struct dev_handler_node *node = NULL;
 
-        if (pcpu_id >= phys_cpu_num)
+        if (pcpu_id >= phys_cpu_num) {
                 return -EINVAL;
+        }
 
         if (per_cpu(timer_node, pcpu_id) != NULL) {
                 pr_err("CPU%d timer isr already added", pcpu_id);
@@ -165,8 +174,9 @@ void timer_cleanup(void)
 {
         uint16_t pcpu_id = get_cpu_id();
 
-        if (per_cpu(timer_node, pcpu_id) != NULL)
+        if (per_cpu(timer_node, pcpu_id) != NULL) {
                 unregister_handler_common(per_cpu(timer_node, pcpu_id));
+        }
 
         per_cpu(timer_node, pcpu_id) = NULL;
 }
@@ -202,8 +212,9 @@ void timer_softirq(uint16_t pcpu_id)
                         timer->fire_tsc += timer->period_in_cycle;
                         __add_timer(cpu_timer, timer, NULL);
                 }
-        } else
+        } else {
                 break;
+        }
         }
 
         /* update nearest timer */
@@ -276,9 +287,10 @@ static uint64_t native_calibrate_tsc(void)
                 cpuid(0x15, &eax_denominator, &ebx_numerator,
                         &ecx_hz, &reserved);
 
-                if (eax_denominator != 0U && ebx_numerator != 0U)
+                if (eax_denominator != 0U && ebx_numerator != 0U) {
                         return (uint64_t) ecx_hz *
                                 ebx_numerator / eax_denominator;
+                }
         }
 
         return 0;
@@ -288,8 +300,9 @@ void calibrate_tsc(void)
 {
         uint64_t tsc_hz;
         tsc_hz = native_calibrate_tsc();
-        if (tsc_hz == 0U)
+        if (tsc_hz == 0U) {
                 tsc_hz = pit_calibrate_tsc(CAL_MS);
+        }
         tsc_khz = (uint32_t)(tsc_hz / 1000UL);
         printf("%s, tsc_khz=%lu\n", __func__, tsc_khz);
 }
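
The native_calibrate_tsc() hunk above wraps the CPUID leaf 0x15 path, where the TSC frequency is computed as the core crystal clock (ECX, in Hz) scaled by the TSC/crystal ratio (EBX/EAX). A standalone sketch of that arithmetic; the register values in main() are made-up sample numbers, not real CPUID output:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the CPUID 0x15 branch in native_calibrate_tsc():
 * TSC Hz = crystal clock Hz (ECX) * ratio numerator (EBX) / denominator (EAX).
 */
static uint64_t tsc_hz_from_cpuid_15(uint32_t eax_denominator,
                uint32_t ebx_numerator, uint32_t ecx_hz)
{
        uint64_t tsc_hz = 0UL;

        if ((eax_denominator != 0U) && (ebx_numerator != 0U)) {
                tsc_hz = ((uint64_t)ecx_hz * ebx_numerator) / eax_denominator;
        }

        return tsc_hz;
}

int main(void)
{
        /* e.g. 24 MHz crystal with an 88/2 ratio -> 1056 MHz TSC */
        uint64_t tsc_hz = tsc_hz_from_cpuid_15(2U, 88U, 24000000U);

        printf("tsc_khz=%lu\n", (unsigned long)(tsc_hz / 1000UL));
        return 0;
}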
