@@ -70,11 +70,9 @@ static void soft_timer_start(struct hrtimer *hrt, u64 ns)
 		      HRTIMER_MODE_ABS);
 }
 
-static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work)
+static void soft_timer_cancel(struct hrtimer *hrt)
 {
 	hrtimer_cancel(hrt);
-	if (work)
-		cancel_work_sync(work);
 }
 
 static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
@@ -102,23 +100,6 @@ static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-/*
- * Work function for handling the backup timer that we schedule when a vcpu is
- * no longer running, but had a timer programmed to fire in the future.
- */
-static void kvm_timer_inject_irq_work(struct work_struct *work)
-{
-	struct kvm_vcpu *vcpu;
-
-	vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
-
-	/*
-	 * If the vcpu is blocked we want to wake it up so that it will see
-	 * the timer has expired when entering the guest.
-	 */
-	kvm_vcpu_wake_up(vcpu);
-}
-
 static u64 kvm_timer_compute_delta(struct arch_timer_context *timer_ctx)
 {
 	u64 cval, now;
@@ -188,7 +169,7 @@ static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt)
 		return HRTIMER_RESTART;
 	}
 
-	schedule_work(&timer->expired);
+	kvm_vcpu_wake_up(vcpu);
 	return HRTIMER_NORESTART;
 }
 
@@ -300,7 +281,7 @@ static void phys_timer_emulate(struct kvm_vcpu *vcpu)
 	 * then we also don't need a soft timer.
 	 */
 	if (kvm_timer_should_fire(ptimer) || !kvm_timer_irq_can_fire(ptimer)) {
-		soft_timer_cancel(&timer->phys_timer, NULL);
+		soft_timer_cancel(&timer->phys_timer);
 		return;
 	}
 
@@ -426,7 +407,7 @@ void kvm_timer_unschedule(struct kvm_vcpu *vcpu)
 
 	vtimer_restore_state(vcpu);
 
-	soft_timer_cancel(&timer->bg_timer, &timer->expired);
+	soft_timer_cancel(&timer->bg_timer);
 }
 
 static void set_cntvoff(u64 cntvoff)
@@ -544,7 +525,7 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
 	 * In any case, we re-schedule the hrtimer for the physical timer when
 	 * coming back to the VCPU thread in kvm_timer_vcpu_load().
 	 */
-	soft_timer_cancel(&timer->phys_timer, NULL);
+	soft_timer_cancel(&timer->phys_timer);
 
 	/*
 	 * The kernel may decide to run userspace after calling vcpu_put, so
@@ -637,7 +618,6 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
 	update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
 	vcpu_ptimer(vcpu)->cntvoff = 0;
 
-	INIT_WORK(&timer->expired, kvm_timer_inject_irq_work);
 	hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
 	timer->bg_timer.function = kvm_bg_timer_expire;
 
@@ -794,8 +774,8 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
 	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
 	struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
 
-	soft_timer_cancel(&timer->bg_timer, &timer->expired);
-	soft_timer_cancel(&timer->phys_timer, NULL);
+	soft_timer_cancel(&timer->bg_timer);
+	soft_timer_cancel(&timer->phys_timer);
 	kvm_vgic_unmap_phys_irq(vcpu, vtimer->irq.irq);
 }
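
The pattern behind every hunk is the same: kvm_vcpu_wake_up() is safe to call from hard-IRQ context, so the bg_timer hrtimer callback can wake a blocked vcpu directly instead of deferring the wake-up to a workqueue item, and soft_timer_cancel() no longer needs a matching cancel_work_sync(). A minimal before/after sketch of the expiry path, assuming the kernel's hrtimer and KVM host APIs; the _old/_new function names are illustrative, not part of the patch:

#include <linux/hrtimer.h>
#include <linux/kvm_host.h>
#include <linux/workqueue.h>

/* Before: defer the wake-up to process context through a work item. */
static enum hrtimer_restart bg_timer_expire_old(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;

	timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
	schedule_work(&timer->expired);	/* work fn calls kvm_vcpu_wake_up() */
	return HRTIMER_NORESTART;
}

/*
 * After: kvm_vcpu_wake_up() only signals the vcpu's wait queue, which is
 * safe from interrupt context, so the hrtimer callback can do the
 * wake-up itself and the intermediate work item disappears.
 */
static enum hrtimer_restart bg_timer_expire_new(struct hrtimer *hrt)
{
	struct arch_timer_cpu *timer;
	struct kvm_vcpu *vcpu;

	timer = container_of(hrt, struct arch_timer_cpu, bg_timer);
	vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu);
	kvm_vcpu_wake_up(vcpu);
	return HRTIMER_NORESTART;
}

The payoff is on the cancel path: with no work item that might still be queued, soft_timer_cancel() reduces to hrtimer_cancel(), and callers such as kvm_timer_unschedule() and kvm_timer_vcpu_put() drop the potentially sleeping cancel_work_sync() call.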