  *
  * This file contains AppArmor security identifier (secid) manipulation fns
  *
- * Copyright 2009-2010 Canonical Ltd.
+ * Copyright 2009-2017 Canonical Ltd.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
  * published by the Free Software Foundation, version 2 of the
  * License.
  *
  *
- * AppArmor allocates a unique secid for every profile loaded. If a profile
- * is replaced it receives the secid of the profile it is replacing.
- *
- * The secid value of 0 is invalid.
+ * AppArmor allocates a unique secid for every label used. If a label
+ * is replaced it receives the secid of the label it is replacing.
  */
 
-#include <linux/spinlock.h>
 #include <linux/errno.h>
 #include <linux/err.h>
+#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
 
+#include "include/cred.h"
+#include "include/lib.h"
 #include "include/secid.h"
+#include "include/label.h"
+#include "include/policy_ns.h"
 
-/* global counter from which secids are allocated */
-static u32 global_secid;
+/*
+ * secids - do not pin labels with a refcount. They rely on the label
+ * properly updating/freeing them
+ *
+ * A singly linked free list is used to track secids that have been
+ * freed and reuse them before allocating new ones
+ */
+
+#define FREE_LIST_HEAD 1
+
+static RADIX_TREE(aa_secids_map, GFP_ATOMIC);
 static DEFINE_SPINLOCK(secid_lock);
+static u32 alloced_secid = FREE_LIST_HEAD;
+static u32 free_list = FREE_LIST_HEAD;
+static unsigned long free_count;
+
+/*
+ * TODO: allow policy to reserve a secid range?
+ * TODO: add secid pinning
+ * TODO: use secid_update in label replace
+ */
+
+#define SECID_MAX U32_MAX
+
+/* TODO: mark free list as exceptional */
+static void *to_ptr(u32 secid)
+{
+        return (void *)
+                ((((unsigned long) secid) << RADIX_TREE_EXCEPTIONAL_SHIFT));
+}
+
+static u32 to_secid(void *ptr)
+{
+        return (u32) (((unsigned long) ptr) >> RADIX_TREE_EXCEPTIONAL_SHIFT);
+}
+
+
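The free list is threaded through the map slots themselves: a freed slot stores the next free secid, shifted left as in to_ptr()/to_secid() above, with FREE_LIST_HEAD (1) terminating the list (the TODOs note these entries should eventually carry the radix tree's exceptional-entry tag so they can be told apart from label pointers). A minimal userspace model of the recycling scheme may help; map[], alloc_id(), and free_id() are illustrative names, not AppArmor APIs:

    #include <stdint.h>
    #include <stdio.h>

    #define FREE_LIST_HEAD 1        /* list terminator, as in the patch */
    #define SHIFT 2                 /* stands in for RADIX_TREE_EXCEPTIONAL_SHIFT */

    static uintptr_t map[64];       /* stands in for the radix tree */
    static uint32_t alloced = FREE_LIST_HEAD;   /* highest id handed out */
    static uint32_t free_list = FREE_LIST_HEAD;

    static uint32_t alloc_id(void *label)
    {
            uint32_t id;

            if (free_list != FREE_LIST_HEAD) {      /* reuse path, like __pop() */
                    id = free_list;
                    free_list = (uint32_t)(map[id] >> SHIFT);
            } else {                                /* growth path */
                    id = ++alloced;
            }
            map[id] = (uintptr_t)label;
            return id;
    }

    static void free_id(uint32_t id)                /* like __push() */
    {
            map[id] = (uintptr_t)free_list << SHIFT;
            free_list = id;
    }

    int main(void)
    {
            uint32_t a = alloc_id("A");
            uint32_t b = alloc_id("B");

            free_id(a);
            /* freed ids are reused before new ones are minted */
            printf("a=%u b=%u next=%u\n", a, b, alloc_id("C")); /* a=2 b=3 next=2 */
            return 0;
    }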
+/* TODO: tag free_list entries to mark them as different */
+static u32 __pop(struct aa_label *label)
+{
+        u32 secid = free_list;
+        void __rcu **slot;
+        void *entry;
+
+        if (free_list == FREE_LIST_HEAD)
+                return AA_SECID_INVALID;
+
+        slot = radix_tree_lookup_slot(&aa_secids_map, secid);
+        AA_BUG(!slot);
+        entry = radix_tree_deref_slot_protected(slot, &secid_lock);
+        free_list = to_secid(entry);
+        radix_tree_replace_slot(&aa_secids_map, slot, label);
+        free_count--;
+
+        return secid;
+}
+
+static void __push(u32 secid)
+{
+        void __rcu **slot;
+
+        slot = radix_tree_lookup_slot(&aa_secids_map, secid);
+        AA_BUG(!slot);
+        radix_tree_replace_slot(&aa_secids_map, slot, to_ptr(free_list));
+        free_list = secid;
+        free_count++;
+}
+
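Tracing __push() and __pop() over a short sequence makes the slot encoding concrete (secid 2 is the first id ever handed out, since alloced_secid starts at FREE_LIST_HEAD == 1):

    /*
     * alloc, alloc, alloc:  ids 2, 3, 4 in use; free_list = 1 (empty list)
     * __push(3):            slot[3] = to_ptr(1), free_list = 3, free_count = 1
     * __push(2):            slot[2] = to_ptr(3), free_list = 2, free_count = 2
     * next alloc -> __pop:  returns 2; free_list = to_secid(slot[2]) = 3,
     *                       slot[2] = label, free_count = 1
     */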
+static struct aa_label *__secid_update(u32 secid, struct aa_label *label)
+{
+        struct aa_label *old;
+        void __rcu **slot;
+
+        slot = radix_tree_lookup_slot(&aa_secids_map, secid);
+        AA_BUG(!slot);
+        old = radix_tree_deref_slot_protected(slot, &secid_lock);
+        radix_tree_replace_slot(&aa_secids_map, slot, label);
+
+        return old;
+}
+
+/**
+ * aa_secid_update - update a secid mapping to a new label
+ * @secid: secid to update
+ * @label: label the secid will now map to
+ */
+void aa_secid_update(u32 secid, struct aa_label *label)
+{
+        struct aa_label *old;
+        unsigned long flags;
+
+        spin_lock_irqsave(&secid_lock, flags);
+        old = __secid_update(secid, label);
+        spin_unlock_irqrestore(&secid_lock, flags);
+}
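The file header says a replacement label receives the secid of the label it replaces, and the TODO block above earmarks aa_secid_update() for that path. A hypothetical call site, with illustrative names; the actual replacement code is not part of this patch:

    /* hypothetical: @new replaces @orig and inherits its secid */
    new->secid = orig->secid;
    aa_secid_update(new->secid, new);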
+
+/**
+ * aa_secid_to_label - look up the label currently mapped to @secid
+ * @secid: secid of the label to look up
+ *
+ * See label for the inverse, aa_label_to_secid()
+ */
+struct aa_label *aa_secid_to_label(u32 secid)
+{
+        struct aa_label *label;
+
+        rcu_read_lock();
+        label = radix_tree_lookup(&aa_secids_map, secid);
+        rcu_read_unlock();
+
+        return label;
+}
+
+int apparmor_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
+{
+        /* TODO: cache secctx and ref count so we don't have to recreate */
+        struct aa_label *label = aa_secid_to_label(secid);
+        int len;
+
+        AA_BUG(!secdata);
+        AA_BUG(!seclen);
+
+        if (!label)
+                return -EINVAL;
+
+        if (secdata)
+                len = aa_label_asxprint(secdata, root_ns, label,
+                                        FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
+                                        FLAG_HIDDEN_UNCONFINED |
+                                        FLAG_ABS_ROOT, GFP_ATOMIC);
+        else
+                len = aa_label_snxprint(NULL, 0, root_ns, label,
+                                        FLAG_SHOW_MODE | FLAG_VIEW_SUBNS |
+                                        FLAG_HIDDEN_UNCONFINED |
+                                        FLAG_ABS_ROOT);
+        /* *seclen is unsigned; check the signed length before storing */
+        if (len < 0)
+                return -ENOMEM;
+
+        *seclen = len;
+
+        return 0;
+}
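The else branch above sizes the secctx by printing into a NULL buffer, the convention snprintf() uses; the call assumes aa_label_snxprint() follows the same semantics. The idiom in plain libc terms, with illustrative names:

    #include <stdio.h>
    #include <stdlib.h>

    /* illustrative only: size with a NULL buffer, then allocate and print */
    static char *fmt_alloc(const char *name, unsigned int mode)
    {
            int len = snprintf(NULL, 0, "%s (%u)", name, mode);
            char *buf;

            if (len < 0)
                    return NULL;
            buf = malloc(len + 1);
            if (buf)
                    snprintf(buf, len + 1, "%s (%u)", name, mode);
            return buf;
    }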
+
+
+int apparmor_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
+{
+        struct aa_label *label;
+
+        label = aa_label_strn_parse(&root_ns->unconfined->label, secdata,
+                                    seclen, GFP_KERNEL, false, false);
+        if (IS_ERR(label))
+                return PTR_ERR(label);
+        *secid = label->secid;
+
+        return 0;
+}
+
+void apparmor_release_secctx(char *secdata, u32 seclen)
+{
+        kfree(secdata);
+}
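For context, these three functions are shaped to back the secid_to_secctx, secctx_to_secid, and release_secctx LSM hooks; registering them would follow the usual LSM_HOOK_INIT pattern in AppArmor's lsm.c (a sketch, not part of this hunk):

    static struct security_hook_list apparmor_hooks[] __lsm_ro_after_init = {
            /* ... existing AppArmor hooks ... */
            LSM_HOOK_INIT(secid_to_secctx, apparmor_secid_to_secctx),
            LSM_HOOK_INIT(secctx_to_secid, apparmor_secctx_to_secid),
            LSM_HOOK_INIT(release_secctx, apparmor_release_secctx),
    };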
 
-/* TODO FIXME: add secid to profile mapping, and secid recycling */
 
 /**
- * aa_alloc_secid - allocate a new secid for a profile
+ * aa_alloc_secid - allocate a new secid for a label
+ * @label: label to allocate a secid for
+ * @gfp: memory allocation flags
  */
-u32 aa_alloc_secid(void)
+u32 aa_alloc_secid(struct aa_label *label, gfp_t gfp)
 {
+        unsigned long flags;
+        /* at function scope so the retry goto cannot skip initialization */
+        bool preload = false;
+        int res;
         u32 secid;
 
-        /*
-         * TODO FIXME: secid recycling - part of profile mapping table
-         */
-        spin_lock(&secid_lock);
-        secid = (++global_secid);
-        spin_unlock(&secid_lock);
+        /* racey, but at worst causes new allocation instead of reuse */
+        if (free_list == FREE_LIST_HEAD) {
+retry:
+                if (gfpflags_allow_blocking(gfp) && !radix_tree_preload(gfp))
+                        preload = true;
+                spin_lock_irqsave(&secid_lock, flags);
+                if (alloced_secid != SECID_MAX) {
+                        secid = ++alloced_secid;
+                        res = radix_tree_insert(&aa_secids_map, secid, label);
+                        AA_BUG(res == -EEXIST);
+                } else {
+                        secid = AA_SECID_INVALID;
+                }
+                spin_unlock_irqrestore(&secid_lock, flags);
+                if (preload)
+                        radix_tree_preload_end();
+        } else {
+                spin_lock_irqsave(&secid_lock, flags);
+                /* remove entry from free list */
+                secid = __pop(label);
+                if (secid == AA_SECID_INVALID) {
+                        spin_unlock_irqrestore(&secid_lock, flags);
+                        goto retry;
+                }
+                spin_unlock_irqrestore(&secid_lock, flags);
+        }
+
         return secid;
 }
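radix_tree_preload() preallocates per-CPU tree nodes while sleeping is still allowed and, on success, returns with preemption disabled until radix_tree_preload_end(). Stripped of the secid bookkeeping, the insertion path above follows this standard pattern (a sketch with placeholder names):

    #include <linux/gfp.h>
    #include <linux/radix-tree.h>
    #include <linux/spinlock.h>

    /* sketch: insert @item at @index while holding a spinlock */
    static int insert_under_lock(struct radix_tree_root *tree, spinlock_t *lock,
                                 unsigned long index, void *item, gfp_t gfp)
    {
            unsigned long flags;
            bool preloaded = false;
            int err;

            /* preallocate tree nodes while sleeping is still allowed */
            if (gfpflags_allow_blocking(gfp) && !radix_tree_preload(gfp))
                    preloaded = true;       /* success: preemption now disabled */

            spin_lock_irqsave(lock, flags);
            err = radix_tree_insert(tree, index, item); /* draws on the preload */
            spin_unlock_irqrestore(lock, flags);

            if (preloaded)
                    radix_tree_preload_end();       /* re-enables preemption */
            return err;
    }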
 
@@ -51,5 +232,9 @@ u32 aa_alloc_secid(void)
  */
 void aa_free_secid(u32 secid)
 {
-        ; /* NOP ATM */
+        unsigned long flags;
+
+        spin_lock_irqsave(&secid_lock, flags);
+        __push(secid);
+        spin_unlock_irqrestore(&secid_lock, flags);
 }