Skip to content

Commit d4ed4a5

Browse files
Authored by Douglas Fuller; committed by Ilya Dryomov (idryomov)
libceph: support for lock.lock_info
Add an interface for the Ceph OSD lock.lock_info method and associated
data structures. Based heavily on code by Mike Christie <michaelc@cs.wisc.edu>.

Signed-off-by: Douglas Fuller <dfuller@redhat.com>
[idryomov@gmail.com: refactor, misc fixes throughout]
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Reviewed-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Alex Elder <elder@linaro.org>
1 parent f66241c commit d4ed4a5

File tree

2 files changed

+167
-0
lines changed

2 files changed

+167
-0
lines changed

include/linux/ceph/cls_lock_client.h

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,20 @@ enum ceph_cls_lock_type {
99
CEPH_CLS_LOCK_SHARED = 2,
1010
};
1111

12+
struct ceph_locker_id {
13+
struct ceph_entity_name name; /* locker's client name */
14+
char *cookie; /* locker's cookie */
15+
};
16+
17+
struct ceph_locker_info {
18+
struct ceph_entity_addr addr; /* locker's address */
19+
};
20+
21+
struct ceph_locker {
22+
struct ceph_locker_id id;
23+
struct ceph_locker_info info;
24+
};
25+
1226
int ceph_cls_lock(struct ceph_osd_client *osdc,
1327
struct ceph_object_id *oid,
1428
struct ceph_object_locator *oloc,
@@ -24,4 +38,12 @@ int ceph_cls_break_lock(struct ceph_osd_client *osdc,
2438
char *lock_name, char *cookie,
2539
struct ceph_entity_name *locker);
2640

/* free a locker array (and each locker's cookie) from ceph_cls_lock_info() */
void ceph_free_lockers(struct ceph_locker *lockers, u32 num_lockers);

/*
 * Query lock state for @lock_name on @oid.  On success the caller owns
 * *@tag (kfree()) and *@lockers (ceph_free_lockers()).
 */
int ceph_cls_lock_info(struct ceph_osd_client *osdc,
		       struct ceph_object_id *oid,
		       struct ceph_object_locator *oloc,
		       char *lock_name, u8 *type, char **tag,
		       struct ceph_locker **lockers, u32 *num_lockers);
2749
#endif

net/ceph/cls_lock_client.c

Lines changed: 145 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -178,3 +178,148 @@ int ceph_cls_break_lock(struct ceph_osd_client *osdc,
178178
return ret;
179179
}
180180
EXPORT_SYMBOL(ceph_cls_break_lock);
181+
182+
/**
 * ceph_free_lockers - free a locker array returned by ceph_cls_lock_info()
 * @lockers: locker array allocated by decode_lockers()
 * @num_lockers: number of entries in @lockers
 *
 * Frees each locker's kmalloc'ed cookie string and then the array itself.
 */
void ceph_free_lockers(struct ceph_locker *lockers, u32 num_lockers)
{
	u32 i;	/* u32, not int: must match num_lockers to avoid
		 * signed/unsigned comparison for large counts */

	for (i = 0; i < num_lockers; i++)
		kfree(lockers[i].id.cookie);
	kfree(lockers);
}
EXPORT_SYMBOL(ceph_free_lockers);
191+
192+
static int decode_locker(void **p, void *end, struct ceph_locker *locker)
193+
{
194+
u8 struct_v;
195+
u32 len;
196+
char *s;
197+
int ret;
198+
199+
ret = ceph_start_decoding(p, end, 1, "locker_id_t", &struct_v, &len);
200+
if (ret)
201+
return ret;
202+
203+
ceph_decode_copy(p, &locker->id.name, sizeof(locker->id.name));
204+
s = ceph_extract_encoded_string(p, end, NULL, GFP_NOIO);
205+
if (IS_ERR(s))
206+
return PTR_ERR(s);
207+
208+
locker->id.cookie = s;
209+
210+
ret = ceph_start_decoding(p, end, 1, "locker_info_t", &struct_v, &len);
211+
if (ret)
212+
return ret;
213+
214+
*p += sizeof(struct ceph_timespec); /* skip expiration */
215+
ceph_decode_copy(p, &locker->info.addr, sizeof(locker->info.addr));
216+
ceph_decode_addr(&locker->info.addr);
217+
len = ceph_decode_32(p);
218+
*p += len; /* skip description */
219+
220+
dout("%s %s%llu cookie %s addr %s\n", __func__,
221+
ENTITY_NAME(locker->id.name), locker->id.cookie,
222+
ceph_pr_addr(&locker->info.addr.in_addr));
223+
return 0;
224+
}
225+
226+
/*
 * Decode a cls_lock_get_info_reply: locker array, lock type and tag.
 *
 * All fixed-size reads are bounds-checked against @end -- the reply is
 * untrusted network data.  On success the caller owns *@tag (kfree())
 * and *@lockers (ceph_free_lockers()); on error everything allocated
 * here is freed.
 */
static int decode_lockers(void **p, void *end, u8 *type, char **tag,
			  struct ceph_locker **lockers, u32 *num_lockers)
{
	u8 struct_v;
	u32 struct_len;
	char *s;
	u32 i;	/* u32 to match *num_lockers (avoids sign mismatch) */
	int ret;

	ret = ceph_start_decoding(p, end, 1, "cls_lock_get_info_reply",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	/* kcalloc() checks n * size for overflow, so a huge bogus count
	 * just fails with -ENOMEM */
	ceph_decode_32_safe(p, end, *num_lockers, e_inval);
	*lockers = kcalloc(*num_lockers, sizeof(**lockers), GFP_NOIO);
	if (!*lockers)
		return -ENOMEM;

	for (i = 0; i < *num_lockers; i++) {
		ret = decode_locker(p, end, *lockers + i);
		if (ret)
			goto err_free_lockers;
	}

	ceph_decode_8_safe(p, end, *type, e_inval_free);
	s = ceph_extract_encoded_string(p, end, NULL, GFP_NOIO);
	if (IS_ERR(s)) {
		ret = PTR_ERR(s);
		goto err_free_lockers;
	}

	*tag = s;
	return 0;

e_inval:
	/* nothing allocated yet */
	return -EINVAL;

e_inval_free:
	ret = -EINVAL;
err_free_lockers:
	ceph_free_lockers(*lockers, *num_lockers);
	return ret;
}
265+
266+
/*
267+
* On success, the caller is responsible for:
268+
*
269+
* kfree(tag);
270+
* ceph_free_lockers(lockers, num_lockers);
271+
*/
272+
int ceph_cls_lock_info(struct ceph_osd_client *osdc,
273+
struct ceph_object_id *oid,
274+
struct ceph_object_locator *oloc,
275+
char *lock_name, u8 *type, char **tag,
276+
struct ceph_locker **lockers, u32 *num_lockers)
277+
{
278+
int get_info_op_buf_size;
279+
int name_len = strlen(lock_name);
280+
struct page *get_info_op_page, *reply_page;
281+
size_t reply_len;
282+
void *p, *end;
283+
int ret;
284+
285+
get_info_op_buf_size = name_len + sizeof(__le32) +
286+
CEPH_ENCODING_START_BLK_LEN;
287+
if (get_info_op_buf_size > PAGE_SIZE)
288+
return -E2BIG;
289+
290+
get_info_op_page = alloc_page(GFP_NOIO);
291+
if (!get_info_op_page)
292+
return -ENOMEM;
293+
294+
reply_page = alloc_page(GFP_NOIO);
295+
if (!reply_page) {
296+
__free_page(get_info_op_page);
297+
return -ENOMEM;
298+
}
299+
300+
p = page_address(get_info_op_page);
301+
end = p + get_info_op_buf_size;
302+
303+
/* encode cls_lock_get_info_op struct */
304+
ceph_start_encoding(&p, 1, 1,
305+
get_info_op_buf_size - CEPH_ENCODING_START_BLK_LEN);
306+
ceph_encode_string(&p, end, lock_name, name_len);
307+
308+
dout("%s lock_name %s\n", __func__, lock_name);
309+
ret = ceph_osdc_call(osdc, oid, oloc, "lock", "get_info",
310+
CEPH_OSD_FLAG_READ, get_info_op_page,
311+
get_info_op_buf_size, reply_page, &reply_len);
312+
313+
dout("%s: status %d\n", __func__, ret);
314+
if (ret >= 0) {
315+
p = page_address(reply_page);
316+
end = p + reply_len;
317+
318+
ret = decode_lockers(&p, end, type, tag, lockers, num_lockers);
319+
}
320+
321+
__free_page(get_info_op_page);
322+
__free_page(reply_page);
323+
return ret;
324+
}
325+
EXPORT_SYMBOL(ceph_cls_lock_info);

0 commit comments

Comments
 (0)