@@ -91,11 +91,14 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 	unsigned long flags;
 	int ret, err = 0;
 	struct page *page;
+	unsigned int order;
 
 	if (send_ringbuffer_size % PAGE_SIZE ||
 	    recv_ringbuffer_size % PAGE_SIZE)
 		return -EINVAL;
 
+	order = get_order(send_ringbuffer_size + recv_ringbuffer_size);
+
 	spin_lock_irqsave(&newchannel->lock, flags);
 	if (newchannel->state == CHANNEL_OPEN_STATE) {
 		newchannel->state = CHANNEL_OPENING_STATE;
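Note: the new `order` variable caches the result of `get_order()`, which returns the log2 of the smallest power-of-two page count that covers a byte size. A minimal userspace illustration of that semantics (a hypothetical `get_order_demo`, not the kernel's implementation, assuming 4 KiB pages and size > 0):

/* Userspace sketch of get_order() semantics; hypothetical stand-in. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static unsigned int get_order_demo(unsigned long size)
{
	unsigned int order = 0;

	size = (size - 1) >> PAGE_SHIFT;	/* round up to whole pages */
	while (size) {				/* log2 of the page count */
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	/* Two 16 KiB ring buffers: 32 KiB total -> 8 pages -> order 3. */
	printf("%u\n", get_order_demo(16384 + 16384));
	return 0;
}

Computing this once up front lets every later allocation and free site reuse the same value.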
@@ -110,21 +113,17 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 
 	/* Allocate the ring buffer */
 	page = alloc_pages_node(cpu_to_node(newchannel->target_cpu),
-				GFP_KERNEL|__GFP_ZERO,
-				get_order(send_ringbuffer_size +
-					  recv_ringbuffer_size));
+				GFP_KERNEL|__GFP_ZERO, order);
 
 	if (!page)
-		page = alloc_pages(GFP_KERNEL|__GFP_ZERO,
-				   get_order(send_ringbuffer_size +
-					     recv_ringbuffer_size));
+		page = alloc_pages(GFP_KERNEL|__GFP_ZERO, order);
 
 	if (!page) {
 		err = -ENOMEM;
 		goto error_set_chnstate;
 	}
 
-	newchannel->ringbuffer_pages = page_address(page);
+	newchannel->ringbuffer_page = page;
 	newchannel->ringbuffer_pagecount = (send_ringbuffer_size +
 					   recv_ringbuffer_size) >> PAGE_SHIFT;
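Note: the allocation strategy is unchanged here, only shortened: prefer the NUMA node of the channel's target CPU, then fall back to any node. A minimal kernel-style sketch of that pattern, with a hypothetical `alloc_ring_pages()` helper standing in for the open-coded sequence:

#include <linux/gfp.h>
#include <linux/topology.h>

/*
 * Sketch of the NUMA-preferred allocation with global fallback,
 * assuming `order` was computed once via get_order(). Hypothetical
 * helper, not part of the patch.
 */
static struct page *alloc_ring_pages(int target_cpu, unsigned int order)
{
	struct page *page;

	page = alloc_pages_node(cpu_to_node(target_cpu),
				GFP_KERNEL | __GFP_ZERO, order);
	if (!page)
		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);

	return page;	/* NULL on failure; caller maps via page_address() */
}

The other substantive change in this hunk is storing the `struct page *` itself (`ringbuffer_page`) rather than its kernel virtual address, which the close path below takes advantage of.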
@@ -239,8 +238,7 @@ int vmbus_open(struct vmbus_channel *newchannel, u32 send_ringbuffer_size,
 error_free_pages:
 	hv_ringbuffer_cleanup(&newchannel->outbound);
 	hv_ringbuffer_cleanup(&newchannel->inbound);
-	__free_pages(page,
-		     get_order(send_ringbuffer_size + recv_ringbuffer_size));
+	__free_pages(page, order);
 error_set_chnstate:
 	newchannel->state = CHANNEL_OPEN_STATE;
 	return err;
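Note: the error path now reuses the cached `order`, which is the invariant that matters here: `__free_pages()` must be passed exactly the order the pages were allocated with. A hedged sketch of that pairing, using a hypothetical `ring_alloc_demo()` helper:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Sketch of the alloc/free order pairing; hypothetical helper, not
 * part of the patch. Caching `order` in one local keeps the success
 * and error paths trivially consistent.
 */
static int ring_alloc_demo(unsigned long send_size, unsigned long recv_size)
{
	unsigned int order = get_order(send_size + recv_size);
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);

	if (!page)
		return -ENOMEM;

	/* ... setup that can fail would go here ... */

	__free_pages(page, order);	/* same order as the allocation */
	return 0;
}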
@@ -658,8 +656,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel)
 	hv_ringbuffer_cleanup(&channel->outbound);
 	hv_ringbuffer_cleanup(&channel->inbound);
 
-	free_pages((unsigned long)channel->ringbuffer_pages,
-		   get_order(channel->ringbuffer_pagecount * PAGE_SIZE));
+	__free_pages(channel->ringbuffer_page,
+		     get_order(channel->ringbuffer_pagecount << PAGE_SHIFT));
 
 out:
 	return ret;
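Note: in the close path the order is reconstructed from the stored page count; `pagecount << PAGE_SHIFT` is the same byte size as `pagecount * PAGE_SIZE`. Because the channel now retains the `struct page *`, the free can use `__free_pages()` directly instead of casting a virtual address for `free_pages()`. A sketch of the two equivalent idioms (hypothetical `free_ring_demo()`, assuming the pages came from `alloc_pages()`):

#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Sketch of the free idioms touched by this hunk; hypothetical helper,
 * not part of the patch. Keeping the struct page * (as the patch does)
 * avoids the cast through page_address() entirely.
 */
static void free_ring_demo(struct page *page, unsigned int pagecount)
{
	unsigned int order = get_order(pagecount << PAGE_SHIFT);

	/* old style: free by kernel virtual address */
	/* free_pages((unsigned long)page_address(page), order); */

	/* new style: free by struct page pointer */
	__free_pages(page, order);
}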