Skip to content

Commit ae6935e

Browse files
shemminger authored and gregkh committed
vmbus: split ring buffer allocation from open
The UIO driver needs the ring buffer to be persistent(reused) across open/close. Split the allocation and setup of ring buffer out of vmbus_open. For normal usage vmbus_open/vmbus_close there are no changes; only impacts uio_hv_generic which needs to keep ring buffer memory and reuse when application restarts. Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 52a42c2 commit ae6935e

File tree

3 files changed

+162
-115
lines changed

3 files changed

+162
-115
lines changed

drivers/hv/channel.c

Lines changed: 152 additions & 115 deletions
Original file line numberDiff line numberDiff line change
@@ -79,84 +79,96 @@ void vmbus_setevent(struct vmbus_channel *channel)
7979
}
8080
EXPORT_SYMBOL_GPL(vmbus_setevent);
8181

82-
/*
83-
* vmbus_open - Open the specified channel.
84-
*/
85-
int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
86-
u32 recv_ringbuffer_size, void *userdata, u32 userdatalen,
87-
void (*onchannelcallback)(void *context), void *context)
82+
/* vmbus_free_ring - drop mapping of ring buffer */
83+
void vmbus_free_ring(struct vmbus_channel *channel)
8884
{
89-
struct vmbus_channel_open_channel *open_msg;
90-
struct vmbus_channel_msginfo *open_info = NULL;
91-
unsigned long flags;
92-
int ret, err = 0;
93-
struct page *page;
94-
unsigned int order;
85+
hv_ringbuffer_cleanup(&channel->outbound);
86+
hv_ringbuffer_cleanup(&channel->inbound);
9587

96-
if (send_ringbuffer_size % PAGE_SIZE ||
97-
recv_ringbuffer_size % PAGE_SIZE)
98-
return -EINVAL;
88+
if (channel->ringbuffer_page) {
89+
__free_pages(channel->ringbuffer_page,
90+
get_order(channel->ringbuffer_pagecount
91+
<< PAGE_SHIFT));
92+
channel->ringbuffer_page = NULL;
93+
}
94+
}
95+
EXPORT_SYMBOL_GPL(vmbus_free_ring);
9996

100-
order = get_order(send_ringbuffer_size + recv_ringbuffer_size);
97+
/* vmbus_alloc_ring - allocate and map pages for ring buffer */
98+
int vmbus_alloc_ring(struct vmbus_channel *newchannel,
99+
u32 send_size, u32 recv_size)
100+
{
101+
struct page *page;
102+
int order;
101103

102-
spin_lock_irqsave(&newchannel->lock, flags);
103-
if (newchannel->state == CHANNEL_OPEN_STATE) {
104-
newchannel->state = CHANNEL_OPENING_STATE;
105-
} else {
106-
spin_unlock_irqrestore(&newchannel->lock, flags);
104+
if (send_size % PAGE_SIZE || recv_size % PAGE_SIZE)
107105
return -EINVAL;
108-
}
109-
spin_unlock_irqrestore(&newchannel->lock, flags);
110-
111-
newchannel->onchannel_callback = onchannelcallback;
112-
newchannel->channel_callback_context = context;
113106

114107
/* Allocate the ring buffer */
108+
order = get_order(send_size + recv_size);
115109
page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
116110
GFP_KERNEL|__GFP_ZERO, order);
117111

118112
if (!page)
119113
page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);
120114

121-
if (!page) {
122-
err = -ENOMEM;
123-
goto error_set_chnstate;
124-
}
115+
if (!page)
116+
return -ENOMEM;
125117

126118
newchannel->ringbuffer_page = page;
127-
newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
128-
recv_ringbuffer_size) >> PAGE_SHIFT;
119+
newchannel->ringbuffer_pagecount = (send_size + recv_size) >> PAGE_SHIFT;
120+
newchannel->ringbuffer_send_offset = send_size >> PAGE_SHIFT;
129121

130-
ret = hv_ringbuffer_init(&newchannel->outbound, page,
131-
send_ringbuffer_size >> PAGE_SHIFT);
122+
return 0;
123+
}
124+
EXPORT_SYMBOL_GPL(vmbus_alloc_ring);
132125

133-
if (ret != 0) {
134-
err = ret;
135-
goto error_free_pages;
136-
}
126+
static int __vmbus_open(struct vmbus_channel *newchannel,
127+
void *userdata, u32 userdatalen,
128+
void (*onchannelcallback)(void *context), void *context)
129+
{
130+
struct vmbus_channel_open_channel *open_msg;
131+
struct vmbus_channel_msginfo *open_info = NULL;
132+
struct page *page = newchannel->ringbuffer_page;
133+
u32 send_pages, recv_pages;
134+
unsigned long flags;
135+
int err;
137136

138-
ret = hv_ringbuffer_init(&newchannel->inbound,
139-
&page[send_ringbuffer_size >> PAGE_SHIFT],
140-
recv_ringbuffer_size >> PAGE_SHIFT);
141-
if (ret != 0) {
142-
err = ret;
143-
goto error_free_pages;
137+
if (userdatalen > MAX_USER_DEFINED_BYTES)
138+
return -EINVAL;
139+
140+
send_pages = newchannel->ringbuffer_send_offset;
141+
recv_pages = newchannel->ringbuffer_pagecount - send_pages;
142+
143+
spin_lock_irqsave(&newchannel->lock, flags);
144+
if (newchannel->state != CHANNEL_OPEN_STATE) {
145+
spin_unlock_irqrestore(&newchannel->lock, flags);
146+
return -EINVAL;
144147
}
148+
spin_unlock_irqrestore(&newchannel->lock, flags);
145149

150+
newchannel->state = CHANNEL_OPENING_STATE;
151+
newchannel->onchannel_callback = onchannelcallback;
152+
newchannel->channel_callback_context = context;
153+
154+
err = hv_ringbuffer_init(&newchannel->outbound, page, send_pages);
155+
if (err)
156+
goto error_clean_ring;
157+
158+
err = hv_ringbuffer_init(&newchannel->inbound,
159+
&page[send_pages], recv_pages);
160+
if (err)
161+
goto error_clean_ring;
146162

147163
/* Establish the gpadl for the ring buffer */
148164
newchannel->ringbuffer_gpadlhandle = 0;
149165

150-
ret = vmbus_establish_gpadl(newchannel,
151-
page_address(page),
152-
send_ringbuffer_size +
153-
recv_ringbuffer_size,
166+
err = vmbus_establish_gpadl(newchannel,
167+
page_address(newchannel->ringbuffer_page),
168+
(send_pages + recv_pages) << PAGE_SHIFT,
154169
&newchannel->ringbuffer_gpadlhandle);
155-
156-
if (ret != 0) {
157-
err = ret;
158-
goto error_free_pages;
159-
}
170+
if (err)
171+
goto error_clean_ring;
160172

161173
/* Create and init the channel open message */
162174
open_info = kmalloc(sizeof(*open_info) +
@@ -175,15 +187,9 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
175187
open_msg->openid = newchannel->offermsg.child_relid;
176188
open_msg->child_relid = newchannel->offermsg.child_relid;
177189
open_msg->ringbuffer_gpadlhandle = newchannel->ringbuffer_gpadlhandle;
178-
open_msg->downstream_ringbuffer_pageoffset = send_ringbuffer_size >>
179-
PAGE_SHIFT;
190+
open_msg->downstream_ringbuffer_pageoffset = newchannel->ringbuffer_send_offset;
180191
open_msg->target_vp = newchannel->target_vp;
181192

182-
if (userdatalen > MAX_USER_DEFINED_BYTES) {
183-
err = -EINVAL;
184-
goto error_free_gpadl;
185-
}
186-
187193
if (userdatalen)
188194
memcpy(open_msg->userdata, userdata, userdatalen);
189195

@@ -194,18 +200,16 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
194200

195201
if (newchannel->rescind) {
196202
err = -ENODEV;
197-
goto error_free_gpadl;
203+
goto error_free_info;
198204
}
199205

200-
ret = vmbus_post_msg(open_msg,
206+
err = vmbus_post_msg(open_msg,
201207
sizeof(struct vmbus_channel_open_channel), true);
202208

203-
trace_vmbus_open(open_msg, ret);
209+
trace_vmbus_open(open_msg, err);
204210

205-
if (ret != 0) {
206-
err = ret;
211+
if (err != 0)
207212
goto error_clean_msglist;
208-
}
209213

210214
wait_for_completion(&open_info->waitevent);
211215

@@ -215,12 +219,12 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
215219

216220
if (newchannel->rescind) {
217221
err = -ENODEV;
218-
goto error_free_gpadl;
222+
goto error_free_info;
219223
}
220224

221225
if (open_info->response.open_result.status) {
222226
err = -EAGAIN;
223-
goto error_free_gpadl;
227+
goto error_free_info;
224228
}
225229

226230
newchannel->state = CHANNEL_OPENED_STATE;
@@ -231,18 +235,50 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
231235
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
232236
list_del(&open_info->msglistentry);
233237
spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
234-
238+
error_free_info:
239+
kfree(open_info);
235240
error_free_gpadl:
236241
vmbus_teardown_gpadl(newchannel, newchannel->ringbuffer_gpadlhandle);
237-
kfree(open_info);
238-
error_free_pages:
242+
newchannel->ringbuffer_gpadlhandle = 0;
243+
error_clean_ring:
239244
hv_ringbuffer_cleanup(&newchannel->outbound);
240245
hv_ringbuffer_cleanup(&newchannel->inbound);
241-
__free_pages(page, order);
242-
error_set_chnstate:
243246
newchannel->state = CHANNEL_OPEN_STATE;
244247
return err;
245248
}
249+
250+
/*
251+
* vmbus_connect_ring - Open the channel but reuse ring buffer
252+
*/
253+
int vmbus_connect_ring(struct vmbus_channel *newchannel,
254+
void (*onchannelcallback)(void *context), void *context)
255+
{
256+
return __vmbus_open(newchannel, NULL, 0, onchannelcallback, context);
257+
}
258+
EXPORT_SYMBOL_GPL(vmbus_connect_ring);
259+
260+
/*
261+
* vmbus_open - Open the specified channel.
262+
*/
263+
int vmbus_open(struct vmbus_channel *newchannel,
264+
u32 send_ringbuffer_size, u32 recv_ringbuffer_size,
265+
void *userdata, u32 userdatalen,
266+
void (*onchannelcallback)(void *context), void *context)
267+
{
268+
int err;
269+
270+
err = vmbus_alloc_ring(newchannel, send_ringbuffer_size,
271+
recv_ringbuffer_size);
272+
if (err)
273+
return err;
274+
275+
err = __vmbus_open(newchannel, userdata, userdatalen,
276+
onchannelcallback, context);
277+
if (err)
278+
vmbus_free_ring(newchannel);
279+
280+
return err;
281+
}
246282
EXPORT_SYMBOL_GPL(vmbus_open);
247283

248284
/* Used for Hyper-V Socket: a guest client's connect() to the host */
@@ -610,10 +646,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
610646
* in Hyper-V Manager), the driver's remove() invokes vmbus_close():
611647
* here we should skip most of the below cleanup work.
612648
*/
613-
if (channel->state != CHANNEL_OPENED_STATE) {
614-
ret = -EINVAL;
615-
goto out;
616-
}
649+
if (channel->state != CHANNEL_OPENED_STATE)
650+
return -EINVAL;
617651

618652
channel->state = CHANNEL_OPEN_STATE;
619653

@@ -635,11 +669,10 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
635669
* If we failed to post the close msg,
636670
* it is perhaps better to leak memory.
637671
*/
638-
goto out;
639672
}
640673

641674
/* Tear down the gpadl for the channel's ring buffer */
642-
if (channel->ringbuffer_gpadlhandle) {
675+
else if (channel->ringbuffer_gpadlhandle) {
643676
ret = vmbus_teardown_gpadl(channel,
644677
channel->ringbuffer_gpadlhandle);
645678
if (ret) {
@@ -648,59 +681,63 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
648681
* If we failed to teardown gpadl,
649682
* it is perhaps better to leak memory.
650683
*/
651-
goto out;
652684
}
653-
}
654-
655-
/* Cleanup the ring buffers for this channel */
656-
hv_ringbuffer_cleanup(&channel->outbound);
657-
hv_ringbuffer_cleanup(&channel->inbound);
658685

659-
__free_pages(channel->ringbuffer_page,
660-
get_order(channel->ringbuffer_pagecount << PAGE_SHIFT));
686+
channel->ringbuffer_gpadlhandle = 0;
687+
}
661688

662-
out:
663689
return ret;
664690
}
665691

666-
/*
667-
* vmbus_close - Close the specified channel
668-
*/
669-
void vmbus_close(struct vmbus_channel *channel)
692+
/* disconnect ring - close all channels */
693+
int vmbus_disconnect_ring(struct vmbus_channel *channel)
670694
{
671-
struct list_head *cur, *tmp;
672-
struct vmbus_channel *cur_channel;
695+
struct vmbus_channel *cur_channel, *tmp;
696+
unsigned long flags;
697+
LIST_HEAD(list);
698+
int ret;
673699

674-
if (channel->primary_channel != NULL) {
675-
/*
676-
* We will only close sub-channels when
677-
* the primary is closed.
678-
*/
679-
return;
680-
}
681-
/*
682-
* Close all the sub-channels first and then close the
683-
* primary channel.
684-
*/
685-
list_for_each_safe(cur, tmp, &channel->sc_list) {
686-
cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
687-
if (cur_channel->rescind) {
700+
if (channel->primary_channel != NULL)
701+
return -EINVAL;
702+
703+
/* Snapshot the list of subchannels */
704+
spin_lock_irqsave(&channel->lock, flags);
705+
list_splice_init(&channel->sc_list, &list);
706+
channel->num_sc = 0;
707+
spin_unlock_irqrestore(&channel->lock, flags);
708+
709+
list_for_each_entry_safe(cur_channel, tmp, &list, sc_list) {
710+
if (cur_channel->rescind)
688711
wait_for_completion(&cur_channel->rescind_event);
689-
mutex_lock(&vmbus_connection.channel_mutex);
690-
vmbus_close_internal(cur_channel);
691-
hv_process_channel_removal(cur_channel);
692-
} else {
693-
mutex_lock(&vmbus_connection.channel_mutex);
694-
vmbus_close_internal(cur_channel);
712+
713+
mutex_lock(&vmbus_connection.channel_mutex);
714+
if (vmbus_close_internal(cur_channel) == 0) {
715+
vmbus_free_ring(cur_channel);
716+
717+
if (cur_channel->rescind)
718+
hv_process_channel_removal(cur_channel);
695719
}
696720
mutex_unlock(&vmbus_connection.channel_mutex);
697721
}
722+
698723
/*
699724
* Now close the primary.
700725
*/
701726
mutex_lock(&vmbus_connection.channel_mutex);
702-
vmbus_close_internal(channel);
727+
ret = vmbus_close_internal(channel);
703728
mutex_unlock(&vmbus_connection.channel_mutex);
729+
730+
return ret;
731+
}
732+
EXPORT_SYMBOL_GPL(vmbus_disconnect_ring);
733+
734+
/*
735+
* vmbus_close - Close the specified channel
736+
*/
737+
void vmbus_close(struct vmbus_channel *channel)
738+
{
739+
if (vmbus_disconnect_ring(channel) == 0)
740+
vmbus_free_ring(channel);
704741
}
705742
EXPORT_SYMBOL_GPL(vmbus_close);
706743

drivers/hv/ring_buffer.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -241,6 +241,7 @@ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
241241
void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info)
242242
{
243243
vunmap(ring_info->ring_buffer);
244+
ring_info->ring_buffer = NULL;
244245
}
245246

246247
/* Write to the ring buffer. */

0 commit comments

Comments (0)