@@ -79,84 +79,96 @@ void vmbus_setevent(struct vmbus_channel *channel)
 }
 EXPORT_SYMBOL_GPL(vmbus_setevent);
 
-/*
- * vmbus_open - Open the specified channel.
- */
-int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
-	       u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
-	       void (*onchannelcallback)(void *context), void *context)
+/* vmbus_free_ring - drop mapping of ring buffer */
+void vmbus_free_ring(struct vmbus_channel *channel)
 {
-	struct vmbus_channel_open_channel *open_msg;
-	struct vmbus_channel_msginfo *open_info = NULL;
-	unsigned long flags;
-	int ret, err = 0;
-	struct page *page;
-	unsigned int order;
+	hv_ringbuffer_cleanup(&channel->outbound);
+	hv_ringbuffer_cleanup(&channel->inbound);
 
-	if (send_ringbuffer_size % PAGE_SIZE ||
-	    recv_ringbuffer_size % PAGE_SIZE)
-		return -EINVAL;
+	if (channel->ringbuffer_page) {
+		__free_pages(channel->ringbuffer_page,
+			     get_order(channel->ringbuffer_pagecount
+				       << PAGE_SHIFT));
+		channel->ringbuffer_page = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(vmbus_free_ring);
 
-	order = get_order(send_ringbuffer_size + recv_ringbuffer_size);
+/* vmbus_alloc_ring - allocate and map pages for ring buffer */
+int vmbus_alloc_ring(struct vmbus_channel *newchannel,
+		     u32 send_size, u32 recv_size)
+{
+	struct page *page;
+	int order;
 
-	spin_lock_irqsave(&newchannel->lock, flags);
-	if (newchannel->state == CHANNEL_OPEN_STATE) {
-		newchannel->state = CHANNEL_OPENING_STATE;
-	} else {
-		spin_unlock_irqrestore(&newchannel->lock, flags);
+	if (send_size % PAGE_SIZE || recv_size % PAGE_SIZE)
 		return -EINVAL;
-	}
-	spin_unlock_irqrestore(&newchannel->lock, flags);
-
-	newchannel->onchannel_callback = onchannelcallback;
-	newchannel->channel_callback_context = context;
 
 	/* Allocate the ring buffer */
+	order = get_order(send_size + recv_size);
 	page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
				GFP_KERNEL|__GFP_ZERO, order);
 
 	if (!page)
 		page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);
 
-	if (!page) {
-		err = -ENOMEM;
-		goto error_set_chnstate;
-	}
+	if (!page)
+		return -ENOMEM;
 
 	newchannel->ringbuffer_page = page;
-	newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
-					   recv_ringbuffer_size) >> PAGE_SHIFT;
+	newchannel->ringbuffer_pagecount = (send_size + recv_size) >> PAGE_SHIFT;
+	newchannel->ringbuffer_send_offset = send_size >> PAGE_SHIFT;
 
-	ret = hv_ringbuffer_init(&newchannel->outbound, page,
-				 send_ringbuffer_size >> PAGE_SHIFT);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(vmbus_alloc_ring);
 
-	if (ret != 0) {
-		err = ret;
-		goto error_free_pages;
-	}
+static int __vmbus_open(struct vmbus_channel *newchannel,
+			void *userdata, u32 userdatalen,
+			void (*onchannelcallback)(void *context), void *context)
+{
+	struct vmbus_channel_open_channel *open_msg;
+	struct vmbus_channel_msginfo *open_info = NULL;
+	struct page *page = newchannel->ringbuffer_page;
+	u32 send_pages, recv_pages;
+	unsigned long flags;
+	int err;
 
-	ret = hv_ringbuffer_init(&newchannel->inbound,
-				 &page[send_ringbuffer_size >> PAGE_SHIFT],
-				 recv_ringbuffer_size >> PAGE_SHIFT);
-	if (ret != 0) {
-		err = ret;
-		goto error_free_pages;
+	if (userdatalen > MAX_USER_DEFINED_BYTES)
+		return -EINVAL;
+
+	send_pages = newchannel->ringbuffer_send_offset;
+	recv_pages = newchannel->ringbuffer_pagecount - send_pages;
+
+	spin_lock_irqsave(&newchannel->lock, flags);
+	if (newchannel->state != CHANNEL_OPEN_STATE) {
+		spin_unlock_irqrestore(&newchannel->lock, flags);
+		return -EINVAL;
 	}
+	spin_unlock_irqrestore(&newchannel->lock, flags);
 
+	newchannel->state = CHANNEL_OPENING_STATE;
+	newchannel->onchannel_callback = onchannelcallback;
+	newchannel->channel_callback_context = context;
+
+	err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
+	if (err)
+		goto error_clean_ring;
+
+	err = hv_ringbuffer_init(&newchannel->inbound,
+				 &page[send_pages], recv_pages);
+	if (err)
+		goto error_clean_ring;
 
 	/* Establish the gpadl for the ring buffer */
 	newchannel->ringbuffer_gpadlhandle = 0;
 
-	ret = vmbus_establish_gpadl(newchannel,
-				    page_address(page),
-				    send_ringbuffer_size +
-				    recv_ringbuffer_size,
+	err = vmbus_establish_gpadl(newchannel,
+				    page_address(newchannel->ringbuffer_page),
+				    (send_pages + recv_pages) << PAGE_SHIFT,
 				    &newchannel->ringbuffer_gpadlhandle);
-
-	if (ret != 0) {
-		err = ret;
-		goto error_free_pages;
-	}
+	if (err)
+		goto error_clean_ring;
 
 	/* Create and init the channel open message */
 	open_info = kmalloc(sizeof(*open_info) +
@@ -175,15 +187,9 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 	open_msg->openid = newchannel->offermsg.child_relid;
 	open_msg->child_relid = newchannel->offermsg.child_relid;
 	open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
-	open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
-						     PAGE_SHIFT;
+	open_msg->downstream_ringbuffer_pageoffset = newchannel->ringbuffer_send_offset;
 	open_msg->target_vp = newchannel->target_vp;
 
-	if (userdatalen > MAX_USER_DEFINED_BYTES) {
-		err = -EINVAL;
-		goto error_free_gpadl;
-	}
-
 	if (userdatalen)
 		memcpy(open_msg->userdata, userdata, userdatalen);
 
@@ -194,18 +200,16 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 
 	if (newchannel->rescind) {
 		err = -ENODEV;
-		goto error_free_gpadl;
+		goto error_free_info;
 	}
 
-	ret = vmbus_post_msg(open_msg,
+	err = vmbus_post_msg(open_msg,
			     sizeof(struct vmbus_channel_open_channel), true);
 
-	trace_vmbus_open(open_msg, ret);
+	trace_vmbus_open(open_msg, err);
 
-	if (ret != 0) {
-		err = ret;
+	if (err != 0)
 		goto error_clean_msglist;
-	}
 
 	wait_for_completion(&open_info->waitevent);
@@ -215,12 +219,12 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 
 	if (newchannel->rescind) {
 		err = -ENODEV;
-		goto error_free_gpadl;
+		goto error_free_info;
 	}
 
 	if (open_info->response.open_result.status) {
 		err = -EAGAIN;
-		goto error_free_gpadl;
+		goto error_free_info;
 	}
 
 	newchannel->state = CHANNEL_OPENED_STATE;
@@ -231,18 +235,50 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
 	list_del(&open_info->msglistentry);
 	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
-
+error_free_info:
+	kfree(open_info);
 error_free_gpadl:
 	vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
-	kfree(open_info);
-error_free_pages:
+	newchannel->ringbuffer_gpadlhandle = 0;
+error_clean_ring:
 	hv_ringbuffer_cleanup(&newchannel->outbound);
 	hv_ringbuffer_cleanup(&newchannel->inbound);
-	__free_pages(page, order);
-error_set_chnstate:
 	newchannel->state = CHANNEL_OPEN_STATE;
 	return err;
 }
+
+/*
+ * vmbus_connect_ring - Open the channel but reuse ring buffer
+ */
+int vmbus_connect_ring(struct vmbus_channel *newchannel,
+		       void (*onchannelcallback)(void *context), void *context)
+{
+	return __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
+}
+EXPORT_SYMBOL_GPL(vmbus_connect_ring);
+
+/*
+ * vmbus_open - Open the specified channel.
+ */
+int vmbus_open(struct vmbus_channel *newchannel,
+	       u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
+	       void *userdata, u32 userdatalen,
+	       void (*onchannelcallback)(void *context), void *context)
+{
+	int err;
+
+	err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
+			       recv_ringbuffer_size);
+	if (err)
+		return err;
+
+	err = __vmbus_open(newchannel, userdata, userdatalen,
+			   onchannelcallback, context);
+	if (err)
+		vmbus_free_ring(newchannel);
+
+	return err;
+}
 EXPORT_SYMBOL_GPL(vmbus_open);
 
 /* Used for Hyper-V Socket: a guest client's connect() to the host */
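How a driver would consume the split API, as opposed to the one-shot vmbus_open() above: a minimal sketch, assuming a hypothetical my_dev_attach() helper, my_onchannel_cb callback, and 16-page ring sizes (none of these appear in the patch).

#include <linux/hyperv.h>

/* Hypothetical callback: drains the inbound ring for the device in 'context'. */
static void my_onchannel_cb(void *context)
{
}

static int my_dev_attach(struct vmbus_channel *chan, void *dev)
{
	int err;

	/* Both sizes must be multiples of PAGE_SIZE, or
	 * vmbus_alloc_ring() returns -EINVAL. */
	err = vmbus_alloc_ring(chan, 16 * PAGE_SIZE, 16 * PAGE_SIZE);
	if (err)
		return err;

	/* Open the channel on the pre-allocated rings (no user data). */
	err = vmbus_connect_ring(chan, my_onchannel_cb, dev);
	if (err)
		vmbus_free_ring(chan);	/* mirror what vmbus_open() does on failure */

	return err;
}

The point of the split is that the allocation half can outlive a single open/close cycle; vmbus_open() itself is reduced to a wrapper over the two halves.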
@@ -610,10 +646,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
	 * in Hyper-V Manager), the driver's remove() invokes vmbus_close():
	 * here we should skip most of the below cleanup work.
	 */
-	if (channel->state != CHANNEL_OPENED_STATE) {
-		ret = -EINVAL;
-		goto out;
-	}
+	if (channel->state != CHANNEL_OPENED_STATE)
+		return -EINVAL;
 
 	channel->state = CHANNEL_OPEN_STATE;
@@ -635,11 +669,10 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
		 * If we failed to post the close msg,
		 * it is perhaps better to leak memory.
		 */
-		goto out;
 	}
 
 	/* Tear down the gpadl for the channel's ring buffer */
-	if (channel->ringbuffer_gpadlhandle) {
+	else if (channel->ringbuffer_gpadlhandle) {
 		ret = vmbus_teardown_gpadl(channel,
					   channel->ringbuffer_gpadlhandle);
 		if (ret) {
@@ -648,59 +681,63 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
			 * If we failed to teardown gpadl,
			 * it is perhaps better to leak memory.
			 */
-			goto out;
 		}
-	}
-
-	/* Cleanup the ring buffers for this channel */
-	hv_ringbuffer_cleanup(&channel->outbound);
-	hv_ringbuffer_cleanup(&channel->inbound);
 
-	__free_pages(channel->ringbuffer_page,
-		     get_order(channel->ringbuffer_pagecount << PAGE_SHIFT));
+		channel->ringbuffer_gpadlhandle = 0;
+	}
 
-out:
 	return ret;
 }
 
-/*
- * vmbus_close - Close the specified channel
- */
-void vmbus_close(struct vmbus_channel *channel)
+/* disconnect ring - close all channels */
+int vmbus_disconnect_ring(struct vmbus_channel *channel)
 {
-	struct list_head *cur, *tmp;
-	struct vmbus_channel *cur_channel;
+	struct vmbus_channel *cur_channel, *tmp;
+	unsigned long flags;
+	LIST_HEAD(list);
+	int ret;
 
-	if (channel->primary_channel != NULL) {
-		/*
-		 * We will only close sub-channels when
-		 * the primary is closed.
-		 */
-		return;
-	}
-	/*
-	 * Close all the sub-channels first and then close the
-	 * primary channel.
-	 */
-	list_for_each_safe(cur, tmp, &channel->sc_list) {
-		cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
-		if (cur_channel->rescind) {
+	if (channel->primary_channel != NULL)
+		return -EINVAL;
+
+	/* Snapshot the list of subchannels */
+	spin_lock_irqsave(&channel->lock, flags);
+	list_splice_init(&channel->sc_list, &list);
+	channel->num_sc = 0;
+	spin_unlock_irqrestore(&channel->lock, flags);
+
+	list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) {
+		if (cur_channel->rescind)
 			wait_for_completion(&cur_channel->rescind_event);
-			mutex_lock(&vmbus_connection.channel_mutex);
-			vmbus_close_internal(cur_channel);
-			hv_process_channel_removal(cur_channel);
-		} else {
-			mutex_lock(&vmbus_connection.channel_mutex);
-			vmbus_close_internal(cur_channel);
+
+		mutex_lock(&vmbus_connection.channel_mutex);
+		if (vmbus_close_internal(cur_channel) == 0) {
+			vmbus_free_ring(cur_channel);
+
+			if (cur_channel->rescind)
+				hv_process_channel_removal(cur_channel);
 		}
 		mutex_unlock(&vmbus_connection.channel_mutex);
 	}
+
	/*
	 * Now close the primary.
	 */
 	mutex_lock(&vmbus_connection.channel_mutex);
-	vmbus_close_internal(channel);
+	ret = vmbus_close_internal(channel);
 	mutex_unlock(&vmbus_connection.channel_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(vmbus_disconnect_ring);
+
+/*
+ * vmbus_close - Close the specified channel
+ */
+void vmbus_close(struct vmbus_channel *channel)
+{
+	if (vmbus_disconnect_ring(channel) == 0)
+		vmbus_free_ring(channel);
 }
 EXPORT_SYMBOL_GPL(vmbus_close);
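Teardown mirrors the open path: vmbus_close() is now vmbus_disconnect_ring() plus vmbus_free_ring(), so a driver can close the channel while keeping the primary ring buffer pages mapped for a later vmbus_connect_ring(). A sketch under the same hypothetical names as above (my_dev_detach() is not part of this patch):

static int my_dev_detach(struct vmbus_channel *chan, bool final)
{
	int err;

	/* Closes the subchannels (freeing their rings) and the primary,
	 * but leaves the primary channel's ring mapping intact. */
	err = vmbus_disconnect_ring(chan);
	if (err)
		return err;

	/* Drop the pages only on final teardown; otherwise they can be
	 * reused by a subsequent vmbus_connect_ring(). */
	if (final)
		vmbus_free_ring(chan);

	return 0;
}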