diff options
author | 2025-03-08 22:04:20 +0800 | |
---|---|---|
committer | 2025-03-08 22:04:20 +0800 | |
commit | a07bb8fd1299070229f0e8f3dcb57ffd5ef9870a (patch) | |
tree | 84f21bd0bf7071bc5fc7dd989e77d7ceb5476682 /fs/kernfs | |
download | ohosKernel-a07bb8fd1299070229f0e8f3dcb57ffd5ef9870a.tar.gz ohosKernel-a07bb8fd1299070229f0e8f3dcb57ffd5ef9870a.zip |
Initial commit: OpenHarmony-v4.0-ReleaseOpenHarmony-v4.0-Release
Diffstat (limited to 'fs/kernfs')
-rw-r--r-- | fs/kernfs/Kconfig | 8 | ||||
-rw-r--r-- | fs/kernfs/Makefile | 6 | ||||
-rw-r--r-- | fs/kernfs/dir.c | 1716 | ||||
-rw-r--r-- | fs/kernfs/file.c | 1020 | ||||
-rw-r--r-- | fs/kernfs/inode.c | 435 | ||||
-rw-r--r-- | fs/kernfs/kernfs-internal.h | 127 | ||||
-rw-r--r-- | fs/kernfs/mount.c | 397 | ||||
-rw-r--r-- | fs/kernfs/symlink.c | 153 |
8 files changed, 3862 insertions, 0 deletions
diff --git a/fs/kernfs/Kconfig b/fs/kernfs/Kconfig new file mode 100644 index 000000000..e7f09105f --- /dev/null +++ b/fs/kernfs/Kconfig | |||
@@ -0,0 +1,8 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0-only | ||
2 | # | ||
3 | # KERNFS should be selected by its users | ||
4 | # | ||
5 | |||
6 | config KERNFS | ||
7 | bool | ||
8 | default n | ||
diff --git a/fs/kernfs/Makefile b/fs/kernfs/Makefile new file mode 100644 index 000000000..4ca54ff54 --- /dev/null +++ b/fs/kernfs/Makefile | |||
@@ -0,0 +1,6 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0-only | ||
2 | # | ||
3 | # Makefile for the kernfs pseudo filesystem | ||
4 | # | ||
5 | |||
6 | obj-y := mount.o inode.o dir.o file.o symlink.o | ||
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c new file mode 100644 index 000000000..8b3c86a50 --- /dev/null +++ b/fs/kernfs/dir.c | |||
@@ -0,0 +1,1716 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-only | ||
2 | /* | ||
3 | * fs/kernfs/dir.c - kernfs directory implementation | ||
4 | * | ||
5 | * Copyright (c) 2001-3 Patrick Mochel | ||
6 | * Copyright (c) 2007 SUSE Linux Products GmbH | ||
7 | * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org> | ||
8 | */ | ||
9 | |||
10 | #include <linux/sched.h> | ||
11 | #include <linux/fs.h> | ||
12 | #include <linux/namei.h> | ||
13 | #include <linux/idr.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/security.h> | ||
16 | #include <linux/hash.h> | ||
17 | |||
18 | #include "kernfs-internal.h" | ||
19 | |||
/* Serializes all hierarchy modifications and most lookups in this file. */
DEFINE_MUTEX(kernfs_mutex);
static DEFINE_SPINLOCK(kernfs_rename_lock);	/* kn->parent and ->name */
/*
 * Don't use rename_lock to piggy back on pr_cont_buf. We don't want to
 * call pr_cont() while holding rename_lock. Because sometimes pr_cont()
 * will perform wakeups when releasing console_sem. Holding rename_lock
 * will introduce deadlock if the scheduler reads the kernfs_name in the
 * wakeup path.
 */
static DEFINE_SPINLOCK(kernfs_pr_cont_lock);
static char kernfs_pr_cont_buf[PATH_MAX];	/* protected by pr_cont_lock */
static DEFINE_SPINLOCK(kernfs_idr_lock);	/* root->ino_idr */

/* map an rb_node embedded in a kernfs_node back to its container */
#define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
34 | |||
35 | static bool kernfs_active(struct kernfs_node *kn) | ||
36 | { | ||
37 | lockdep_assert_held(&kernfs_mutex); | ||
38 | return atomic_read(&kn->active) >= 0; | ||
39 | } | ||
40 | |||
41 | static bool kernfs_lockdep(struct kernfs_node *kn) | ||
42 | { | ||
43 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
44 | return kn->flags & KERNFS_LOCKDEP; | ||
45 | #else | ||
46 | return false; | ||
47 | #endif | ||
48 | } | ||
49 | |||
50 | static int kernfs_name_locked(struct kernfs_node *kn, char *buf, size_t buflen) | ||
51 | { | ||
52 | if (!kn) | ||
53 | return strlcpy(buf, "(null)", buflen); | ||
54 | |||
55 | return strlcpy(buf, kn->parent ? kn->name : "/", buflen); | ||
56 | } | ||
57 | |||
58 | /* kernfs_node_depth - compute depth from @from to @to */ | ||
59 | static size_t kernfs_depth(struct kernfs_node *from, struct kernfs_node *to) | ||
60 | { | ||
61 | size_t depth = 0; | ||
62 | |||
63 | while (to->parent && to != from) { | ||
64 | depth++; | ||
65 | to = to->parent; | ||
66 | } | ||
67 | return depth; | ||
68 | } | ||
69 | |||
70 | static struct kernfs_node *kernfs_common_ancestor(struct kernfs_node *a, | ||
71 | struct kernfs_node *b) | ||
72 | { | ||
73 | size_t da, db; | ||
74 | struct kernfs_root *ra = kernfs_root(a), *rb = kernfs_root(b); | ||
75 | |||
76 | if (ra != rb) | ||
77 | return NULL; | ||
78 | |||
79 | da = kernfs_depth(ra->kn, a); | ||
80 | db = kernfs_depth(rb->kn, b); | ||
81 | |||
82 | while (da > db) { | ||
83 | a = a->parent; | ||
84 | da--; | ||
85 | } | ||
86 | while (db > da) { | ||
87 | b = b->parent; | ||
88 | db--; | ||
89 | } | ||
90 | |||
91 | /* worst case b and a will be the same at root */ | ||
92 | while (b != a) { | ||
93 | b = b->parent; | ||
94 | a = a->parent; | ||
95 | } | ||
96 | |||
97 | return a; | ||
98 | } | ||
99 | |||
/**
 * kernfs_path_from_node_locked - find a pseudo-absolute path to @kn_to,
 * where kn_from is treated as root of the path.
 * @kn_from: kernfs node which should be treated as root for the path
 * @kn_to: kernfs node to which path is needed
 * @buf: buffer to copy the path into
 * @buflen: size of @buf
 *
 * We need to handle couple of scenarios here:
 * [1] when @kn_from is an ancestor of @kn_to at some level
 * kn_from: /n1/n2/n3
 * kn_to: /n1/n2/n3/n4/n5
 * result: /n4/n5
 *
 * [2] when @kn_from is on a different hierarchy and we need to find common
 * ancestor between @kn_from and @kn_to.
 * kn_from: /n1/n2/n3/n4
 * kn_to: /n1/n2/n5
 * result: /../../n5
 * OR
 * kn_from: /n1/n2/n3/n4/n5 [depth=5]
 * kn_to: /n1/n2/n3 [depth=3]
 * result: /../..
 *
 * [3] when @kn_to is NULL result will be "(null)"
 *
 * Returns the length of the full path.  If the full length is equal to or
 * greater than @buflen, @buf contains the truncated path with the trailing
 * '\0'.  On error, -errno is returned.
 *
 * Caller holds kernfs_rename_lock, which keeps ->parent and ->name stable
 * while the hierarchy is walked.
 */
static int kernfs_path_from_node_locked(struct kernfs_node *kn_to,
					struct kernfs_node *kn_from,
					char *buf, size_t buflen)
{
	struct kernfs_node *kn, *common;
	const char parent_str[] = "/..";
	size_t depth_from, depth_to, len = 0;
	int i, j;

	if (!kn_to)
		return strlcpy(buf, "(null)", buflen);

	if (!kn_from)
		kn_from = kernfs_root(kn_to)->kn;

	if (kn_from == kn_to)
		return strlcpy(buf, "/", buflen);

	/*
	 * NOTE(review): the strlcpy calls above already dereference @buf;
	 * callers appear to always pass a buffer — confirm.
	 */
	if (!buf)
		return -EINVAL;

	common = kernfs_common_ancestor(kn_from, kn_to);
	if (WARN_ON(!common))
		return -EINVAL;

	depth_to = kernfs_depth(common, kn_to);
	depth_from = kernfs_depth(common, kn_from);

	buf[0] = '\0';

	/* one "/.." per level between @kn_from and the common ancestor */
	for (i = 0; i < depth_from; i++)
		len += strlcpy(buf + len, parent_str,
			       len < buflen ? buflen - len : 0);

	/*
	 * Append components from the common ancestor down to @kn_to.  The
	 * i-th outer iteration re-walks from @kn_to up i levels, emitting
	 * components outermost-first.  Once @buf is full, a size of 0 is
	 * passed so strlcpy only keeps accumulating the would-be length.
	 */
	for (i = depth_to - 1; i >= 0; i--) {
		for (kn = kn_to, j = 0; j < i; j++)
			kn = kn->parent;
		len += strlcpy(buf + len, "/",
			       len < buflen ? buflen - len : 0);
		len += strlcpy(buf + len, kn->name,
			       len < buflen ? buflen - len : 0);
	}

	return len;
}
176 | |||
177 | /** | ||
178 | * kernfs_name - obtain the name of a given node | ||
179 | * @kn: kernfs_node of interest | ||
180 | * @buf: buffer to copy @kn's name into | ||
181 | * @buflen: size of @buf | ||
182 | * | ||
183 | * Copies the name of @kn into @buf of @buflen bytes. The behavior is | ||
184 | * similar to strlcpy(). It returns the length of @kn's name and if @buf | ||
185 | * isn't long enough, it's filled upto @buflen-1 and nul terminated. | ||
186 | * | ||
187 | * Fills buffer with "(null)" if @kn is NULL. | ||
188 | * | ||
189 | * This function can be called from any context. | ||
190 | */ | ||
191 | int kernfs_name(struct kernfs_node *kn, char *buf, size_t buflen) | ||
192 | { | ||
193 | unsigned long flags; | ||
194 | int ret; | ||
195 | |||
196 | spin_lock_irqsave(&kernfs_rename_lock, flags); | ||
197 | ret = kernfs_name_locked(kn, buf, buflen); | ||
198 | spin_unlock_irqrestore(&kernfs_rename_lock, flags); | ||
199 | return ret; | ||
200 | } | ||
201 | |||
202 | /** | ||
203 | * kernfs_path_from_node - build path of node @to relative to @from. | ||
204 | * @from: parent kernfs_node relative to which we need to build the path | ||
205 | * @to: kernfs_node of interest | ||
206 | * @buf: buffer to copy @to's path into | ||
207 | * @buflen: size of @buf | ||
208 | * | ||
209 | * Builds @to's path relative to @from in @buf. @from and @to must | ||
210 | * be on the same kernfs-root. If @from is not parent of @to, then a relative | ||
211 | * path (which includes '..'s) as needed to reach from @from to @to is | ||
212 | * returned. | ||
213 | * | ||
214 | * Returns the length of the full path. If the full length is equal to or | ||
215 | * greater than @buflen, @buf contains the truncated path with the trailing | ||
216 | * '\0'. On error, -errno is returned. | ||
217 | */ | ||
218 | int kernfs_path_from_node(struct kernfs_node *to, struct kernfs_node *from, | ||
219 | char *buf, size_t buflen) | ||
220 | { | ||
221 | unsigned long flags; | ||
222 | int ret; | ||
223 | |||
224 | spin_lock_irqsave(&kernfs_rename_lock, flags); | ||
225 | ret = kernfs_path_from_node_locked(to, from, buf, buflen); | ||
226 | spin_unlock_irqrestore(&kernfs_rename_lock, flags); | ||
227 | return ret; | ||
228 | } | ||
229 | EXPORT_SYMBOL_GPL(kernfs_path_from_node); | ||
230 | |||
231 | /** | ||
232 | * pr_cont_kernfs_name - pr_cont name of a kernfs_node | ||
233 | * @kn: kernfs_node of interest | ||
234 | * | ||
235 | * This function can be called from any context. | ||
236 | */ | ||
237 | void pr_cont_kernfs_name(struct kernfs_node *kn) | ||
238 | { | ||
239 | unsigned long flags; | ||
240 | |||
241 | spin_lock_irqsave(&kernfs_pr_cont_lock, flags); | ||
242 | |||
243 | kernfs_name(kn, kernfs_pr_cont_buf, sizeof(kernfs_pr_cont_buf)); | ||
244 | pr_cont("%s", kernfs_pr_cont_buf); | ||
245 | |||
246 | spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags); | ||
247 | } | ||
248 | |||
249 | /** | ||
250 | * pr_cont_kernfs_path - pr_cont path of a kernfs_node | ||
251 | * @kn: kernfs_node of interest | ||
252 | * | ||
253 | * This function can be called from any context. | ||
254 | */ | ||
255 | void pr_cont_kernfs_path(struct kernfs_node *kn) | ||
256 | { | ||
257 | unsigned long flags; | ||
258 | int sz; | ||
259 | |||
260 | spin_lock_irqsave(&kernfs_pr_cont_lock, flags); | ||
261 | |||
262 | sz = kernfs_path_from_node(kn, NULL, kernfs_pr_cont_buf, | ||
263 | sizeof(kernfs_pr_cont_buf)); | ||
264 | if (sz < 0) { | ||
265 | pr_cont("(error)"); | ||
266 | goto out; | ||
267 | } | ||
268 | |||
269 | if (sz >= sizeof(kernfs_pr_cont_buf)) { | ||
270 | pr_cont("(name too long)"); | ||
271 | goto out; | ||
272 | } | ||
273 | |||
274 | pr_cont("%s", kernfs_pr_cont_buf); | ||
275 | |||
276 | out: | ||
277 | spin_unlock_irqrestore(&kernfs_pr_cont_lock, flags); | ||
278 | } | ||
279 | |||
280 | /** | ||
281 | * kernfs_get_parent - determine the parent node and pin it | ||
282 | * @kn: kernfs_node of interest | ||
283 | * | ||
284 | * Determines @kn's parent, pins and returns it. This function can be | ||
285 | * called from any context. | ||
286 | */ | ||
287 | struct kernfs_node *kernfs_get_parent(struct kernfs_node *kn) | ||
288 | { | ||
289 | struct kernfs_node *parent; | ||
290 | unsigned long flags; | ||
291 | |||
292 | spin_lock_irqsave(&kernfs_rename_lock, flags); | ||
293 | parent = kn->parent; | ||
294 | kernfs_get(parent); | ||
295 | spin_unlock_irqrestore(&kernfs_rename_lock, flags); | ||
296 | |||
297 | return parent; | ||
298 | } | ||
299 | |||
300 | /** | ||
301 | * kernfs_name_hash | ||
302 | * @name: Null terminated string to hash | ||
303 | * @ns: Namespace tag to hash | ||
304 | * | ||
305 | * Returns 31 bit hash of ns + name (so it fits in an off_t ) | ||
306 | */ | ||
307 | static unsigned int kernfs_name_hash(const char *name, const void *ns) | ||
308 | { | ||
309 | unsigned long hash = init_name_hash(ns); | ||
310 | unsigned int len = strlen(name); | ||
311 | while (len--) | ||
312 | hash = partial_name_hash(*name++, hash); | ||
313 | hash = end_name_hash(hash); | ||
314 | hash &= 0x7fffffffU; | ||
315 | /* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */ | ||
316 | if (hash < 2) | ||
317 | hash += 2; | ||
318 | if (hash >= INT_MAX) | ||
319 | hash = INT_MAX - 1; | ||
320 | return hash; | ||
321 | } | ||
322 | |||
323 | static int kernfs_name_compare(unsigned int hash, const char *name, | ||
324 | const void *ns, const struct kernfs_node *kn) | ||
325 | { | ||
326 | if (hash < kn->hash) | ||
327 | return -1; | ||
328 | if (hash > kn->hash) | ||
329 | return 1; | ||
330 | if (ns < kn->ns) | ||
331 | return -1; | ||
332 | if (ns > kn->ns) | ||
333 | return 1; | ||
334 | return strcmp(name, kn->name); | ||
335 | } | ||
336 | |||
/* three-way compare of two sibling nodes using @left's cached hash */
static int kernfs_sd_compare(const struct kernfs_node *left,
			     const struct kernfs_node *right)
{
	return kernfs_name_compare(left->hash, left->name, left->ns, right);
}
342 | |||
343 | /** | ||
344 | * kernfs_link_sibling - link kernfs_node into sibling rbtree | ||
345 | * @kn: kernfs_node of interest | ||
346 | * | ||
347 | * Link @kn into its sibling rbtree which starts from | ||
348 | * @kn->parent->dir.children. | ||
349 | * | ||
350 | * Locking: | ||
351 | * mutex_lock(kernfs_mutex) | ||
352 | * | ||
353 | * RETURNS: | ||
354 | * 0 on susccess -EEXIST on failure. | ||
355 | */ | ||
356 | static int kernfs_link_sibling(struct kernfs_node *kn) | ||
357 | { | ||
358 | struct rb_node **node = &kn->parent->dir.children.rb_node; | ||
359 | struct rb_node *parent = NULL; | ||
360 | |||
361 | while (*node) { | ||
362 | struct kernfs_node *pos; | ||
363 | int result; | ||
364 | |||
365 | pos = rb_to_kn(*node); | ||
366 | parent = *node; | ||
367 | result = kernfs_sd_compare(kn, pos); | ||
368 | if (result < 0) | ||
369 | node = &pos->rb.rb_left; | ||
370 | else if (result > 0) | ||
371 | node = &pos->rb.rb_right; | ||
372 | else | ||
373 | return -EEXIST; | ||
374 | } | ||
375 | |||
376 | /* add new node and rebalance the tree */ | ||
377 | rb_link_node(&kn->rb, parent, node); | ||
378 | rb_insert_color(&kn->rb, &kn->parent->dir.children); | ||
379 | |||
380 | /* successfully added, account subdir number */ | ||
381 | if (kernfs_type(kn) == KERNFS_DIR) | ||
382 | kn->parent->dir.subdirs++; | ||
383 | |||
384 | return 0; | ||
385 | } | ||
386 | |||
387 | /** | ||
388 | * kernfs_unlink_sibling - unlink kernfs_node from sibling rbtree | ||
389 | * @kn: kernfs_node of interest | ||
390 | * | ||
391 | * Try to unlink @kn from its sibling rbtree which starts from | ||
392 | * kn->parent->dir.children. Returns %true if @kn was actually | ||
393 | * removed, %false if @kn wasn't on the rbtree. | ||
394 | * | ||
395 | * Locking: | ||
396 | * mutex_lock(kernfs_mutex) | ||
397 | */ | ||
398 | static bool kernfs_unlink_sibling(struct kernfs_node *kn) | ||
399 | { | ||
400 | if (RB_EMPTY_NODE(&kn->rb)) | ||
401 | return false; | ||
402 | |||
403 | if (kernfs_type(kn) == KERNFS_DIR) | ||
404 | kn->parent->dir.subdirs--; | ||
405 | |||
406 | rb_erase(&kn->rb, &kn->parent->dir.children); | ||
407 | RB_CLEAR_NODE(&kn->rb); | ||
408 | return true; | ||
409 | } | ||
410 | |||
411 | /** | ||
412 | * kernfs_get_active - get an active reference to kernfs_node | ||
413 | * @kn: kernfs_node to get an active reference to | ||
414 | * | ||
415 | * Get an active reference of @kn. This function is noop if @kn | ||
416 | * is NULL. | ||
417 | * | ||
418 | * RETURNS: | ||
419 | * Pointer to @kn on success, NULL on failure. | ||
420 | */ | ||
421 | struct kernfs_node *kernfs_get_active(struct kernfs_node *kn) | ||
422 | { | ||
423 | if (unlikely(!kn)) | ||
424 | return NULL; | ||
425 | |||
426 | if (!atomic_inc_unless_negative(&kn->active)) | ||
427 | return NULL; | ||
428 | |||
429 | if (kernfs_lockdep(kn)) | ||
430 | rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_); | ||
431 | return kn; | ||
432 | } | ||
433 | |||
/**
 * kernfs_put_active - put an active reference to kernfs_node
 * @kn: kernfs_node to put an active reference to
 *
 * Put an active reference to @kn.  This function is noop if @kn
 * is NULL.
 */
void kernfs_put_active(struct kernfs_node *kn)
{
	int v;

	if (unlikely(!kn))
		return;

	/* release the lockdep read acquisition taken in kernfs_get_active() */
	if (kernfs_lockdep(kn))
		rwsem_release(&kn->dep_map, _RET_IP_);
	v = atomic_dec_return(&kn->active);
	if (likely(v != KN_DEACTIVATED_BIAS))
		return;

	/* last active ref on a deactivated node is gone; wake drainers */
	wake_up_all(&kernfs_root(kn)->deactivate_waitq);
}
456 | |||
/**
 * kernfs_drain - drain kernfs_node
 * @kn: kernfs_node to drain
 *
 * Drain existing usages and nuke all existing mmaps of @kn.  Multiple
 * removers may invoke this function concurrently on @kn and all will
 * return after draining is complete.
 *
 * Called with kernfs_mutex held; the mutex is dropped and reacquired
 * around the sleeping wait so active-ref holders can make progress.
 */
static void kernfs_drain(struct kernfs_node *kn)
	__releases(&kernfs_mutex) __acquires(&kernfs_mutex)
{
	struct kernfs_root *root = kernfs_root(kn);

	lockdep_assert_held(&kernfs_mutex);
	/* the caller must have already deactivated @kn */
	WARN_ON_ONCE(kernfs_active(kn));

	mutex_unlock(&kernfs_mutex);

	/* model draining as a write acquisition of @kn's dep_map for lockdep */
	if (kernfs_lockdep(kn)) {
		rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
		if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
			lock_contended(&kn->dep_map, _RET_IP_);
	}

	/* but everyone should wait for draining */
	wait_event(root->deactivate_waitq,
		   atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);

	if (kernfs_lockdep(kn)) {
		lock_acquired(&kn->dep_map, _RET_IP_);
		rwsem_release(&kn->dep_map, _RET_IP_);
	}

	kernfs_drain_open_files(kn);

	mutex_lock(&kernfs_mutex);
}
494 | |||
495 | /** | ||
496 | * kernfs_get - get a reference count on a kernfs_node | ||
497 | * @kn: the target kernfs_node | ||
498 | */ | ||
499 | void kernfs_get(struct kernfs_node *kn) | ||
500 | { | ||
501 | if (kn) { | ||
502 | WARN_ON(!atomic_read(&kn->count)); | ||
503 | atomic_inc(&kn->count); | ||
504 | } | ||
505 | } | ||
506 | EXPORT_SYMBOL_GPL(kernfs_get); | ||
507 | |||
/**
 * kernfs_put - put a reference count on a kernfs_node
 * @kn: the target kernfs_node
 *
 * Put a reference count of @kn and destroy it if it reached zero.  When
 * a node is freed, its parent's ref is dropped too; the teardown walks
 * up the tree iteratively (via the repeat label) rather than recursing.
 */
void kernfs_put(struct kernfs_node *kn)
{
	struct kernfs_node *parent;
	struct kernfs_root *root;

	if (!kn || !atomic_dec_and_test(&kn->count))
		return;
	root = kernfs_root(kn);
 repeat:
	/*
	 * Moving/renaming is always done while holding reference.
	 * kn->parent won't change beneath us.
	 */
	parent = kn->parent;

	WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS,
		  "kernfs_put: %s/%s: released with incorrect active_ref %d\n",
		  parent ? parent->name : "", kn->name, atomic_read(&kn->active));

	/* a symlink pins its target; drop that pin now */
	if (kernfs_type(kn) == KERNFS_LINK)
		kernfs_put(kn->symlink.target_kn);

	kfree_const(kn->name);

	if (kn->iattr) {
		simple_xattrs_free(&kn->iattr->xattrs);
		kmem_cache_free(kernfs_iattrs_cache, kn->iattr);
	}
	spin_lock(&kernfs_idr_lock);
	idr_remove(&root->ino_idr, (u32)kernfs_ino(kn));
	spin_unlock(&kernfs_idr_lock);
	kmem_cache_free(kernfs_node_cache, kn);

	/* drop the ref this node held on its parent; free it too if last */
	kn = parent;
	if (kn) {
		if (atomic_dec_and_test(&kn->count))
			goto repeat;
	} else {
		/* just released the root kn, free @root too */
		idr_destroy(&root->ino_idr);
		kfree(root);
	}
}
EXPORT_SYMBOL_GPL(kernfs_put);
558 | |||
/*
 * ->d_revalidate for kernfs dentries.  Returns 1 when the dentry still
 * matches its kernfs_node, 0 to force a fresh lookup.  RCU-walk is not
 * supported because kernfs_mutex must be taken.
 */
static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
{
	struct kernfs_node *kn;

	/* can't take kernfs_mutex under RCU-walk; ask for ref-walk */
	if (flags & LOOKUP_RCU)
		return -ECHILD;

	/* Always perform fresh lookup for negatives */
	if (d_really_is_negative(dentry))
		goto out_bad_unlocked;

	kn = kernfs_dentry_node(dentry);
	mutex_lock(&kernfs_mutex);

	/* The kernfs node has been deactivated */
	if (!kernfs_active(kn))
		goto out_bad;

	/* The kernfs node has been moved? */
	if (kernfs_dentry_node(dentry->d_parent) != kn->parent)
		goto out_bad;

	/* The kernfs node has been renamed */
	if (strcmp(dentry->d_name.name, kn->name) != 0)
		goto out_bad;

	/* The kernfs node has been moved to a different namespace */
	if (kn->parent && kernfs_ns_enabled(kn->parent) &&
	    kernfs_info(dentry->d_sb)->ns != kn->ns)
		goto out_bad;

	mutex_unlock(&kernfs_mutex);
	return 1;
out_bad:
	mutex_unlock(&kernfs_mutex);
out_bad_unlocked:
	return 0;
}
597 | |||
/* dentry operations installed on every kernfs-backed superblock */
const struct dentry_operations kernfs_dops = {
	.d_revalidate = kernfs_dop_revalidate,
};
601 | |||
602 | /** | ||
603 | * kernfs_node_from_dentry - determine kernfs_node associated with a dentry | ||
604 | * @dentry: the dentry in question | ||
605 | * | ||
606 | * Return the kernfs_node associated with @dentry. If @dentry is not a | ||
607 | * kernfs one, %NULL is returned. | ||
608 | * | ||
609 | * While the returned kernfs_node will stay accessible as long as @dentry | ||
610 | * is accessible, the returned node can be in any state and the caller is | ||
611 | * fully responsible for determining what's accessible. | ||
612 | */ | ||
613 | struct kernfs_node *kernfs_node_from_dentry(struct dentry *dentry) | ||
614 | { | ||
615 | if (dentry->d_sb->s_op == &kernfs_sops && | ||
616 | !d_really_is_negative(dentry)) | ||
617 | return kernfs_dentry_node(dentry); | ||
618 | return NULL; | ||
619 | } | ||
620 | |||
621 | static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root, | ||
622 | struct kernfs_node *parent, | ||
623 | const char *name, umode_t mode, | ||
624 | kuid_t uid, kgid_t gid, | ||
625 | unsigned flags) | ||
626 | { | ||
627 | struct kernfs_node *kn; | ||
628 | u32 id_highbits; | ||
629 | int ret; | ||
630 | |||
631 | name = kstrdup_const(name, GFP_KERNEL); | ||
632 | if (!name) | ||
633 | return NULL; | ||
634 | |||
635 | kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL); | ||
636 | if (!kn) | ||
637 | goto err_out1; | ||
638 | |||
639 | idr_preload(GFP_KERNEL); | ||
640 | spin_lock(&kernfs_idr_lock); | ||
641 | ret = idr_alloc_cyclic(&root->ino_idr, kn, 1, 0, GFP_ATOMIC); | ||
642 | if (ret >= 0 && ret < root->last_id_lowbits) | ||
643 | root->id_highbits++; | ||
644 | id_highbits = root->id_highbits; | ||
645 | root->last_id_lowbits = ret; | ||
646 | spin_unlock(&kernfs_idr_lock); | ||
647 | idr_preload_end(); | ||
648 | if (ret < 0) | ||
649 | goto err_out2; | ||
650 | |||
651 | kn->id = (u64)id_highbits << 32 | ret; | ||
652 | |||
653 | atomic_set(&kn->count, 1); | ||
654 | atomic_set(&kn->active, KN_DEACTIVATED_BIAS); | ||
655 | RB_CLEAR_NODE(&kn->rb); | ||
656 | |||
657 | kn->name = name; | ||
658 | kn->mode = mode; | ||
659 | kn->flags = flags; | ||
660 | |||
661 | if (!uid_eq(uid, GLOBAL_ROOT_UID) || !gid_eq(gid, GLOBAL_ROOT_GID)) { | ||
662 | struct iattr iattr = { | ||
663 | .ia_valid = ATTR_UID | ATTR_GID, | ||
664 | .ia_uid = uid, | ||
665 | .ia_gid = gid, | ||
666 | }; | ||
667 | |||
668 | ret = __kernfs_setattr(kn, &iattr); | ||
669 | if (ret < 0) | ||
670 | goto err_out3; | ||
671 | } | ||
672 | |||
673 | if (parent) { | ||
674 | ret = security_kernfs_init_security(parent, kn); | ||
675 | if (ret) | ||
676 | goto err_out3; | ||
677 | } | ||
678 | |||
679 | return kn; | ||
680 | |||
681 | err_out3: | ||
682 | idr_remove(&root->ino_idr, (u32)kernfs_ino(kn)); | ||
683 | err_out2: | ||
684 | kmem_cache_free(kernfs_node_cache, kn); | ||
685 | err_out1: | ||
686 | kfree_const(name); | ||
687 | return NULL; | ||
688 | } | ||
689 | |||
690 | struct kernfs_node *kernfs_new_node(struct kernfs_node *parent, | ||
691 | const char *name, umode_t mode, | ||
692 | kuid_t uid, kgid_t gid, | ||
693 | unsigned flags) | ||
694 | { | ||
695 | struct kernfs_node *kn; | ||
696 | |||
697 | kn = __kernfs_new_node(kernfs_root(parent), parent, | ||
698 | name, mode, uid, gid, flags); | ||
699 | if (kn) { | ||
700 | kernfs_get(parent); | ||
701 | kn->parent = parent; | ||
702 | } | ||
703 | return kn; | ||
704 | } | ||
705 | |||
/*
 * kernfs_find_and_get_node_by_id - get kernfs_node from node id
 * @root: the kernfs root
 * @id: the target node id
 *
 * @id's lower 32bits encode ino and upper gen.  If the gen portion is
 * zero, all generations are matched.
 *
 * RETURNS:
 * NULL on failure.  Return a kernfs node with reference counter incremented
 */
struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
						   u64 id)
{
	struct kernfs_node *kn;
	ino_t ino = kernfs_id_ino(id);
	u32 gen = kernfs_id_gen(id);

	spin_lock(&kernfs_idr_lock);

	kn = idr_find(&root->ino_idr, (u32)ino);
	if (!kn)
		goto err_unlock;

	if (sizeof(ino_t) >= sizeof(u64)) {
		/* we looked up with the low 32bits, compare the whole */
		if (kernfs_ino(kn) != ino)
			goto err_unlock;
	} else {
		/* 0 matches all generations */
		if (unlikely(gen && kernfs_gen(kn) != gen))
			goto err_unlock;
	}

	/*
	 * ACTIVATED is protected with kernfs_mutex but it was clear when
	 * @kn was added to idr and we just wanna see it set.  No need to
	 * grab kernfs_mutex.
	 */
	if (unlikely(!(kn->flags & KERNFS_ACTIVATED) ||
		     !atomic_inc_not_zero(&kn->count)))
		goto err_unlock;

	spin_unlock(&kernfs_idr_lock);
	return kn;
err_unlock:
	spin_unlock(&kernfs_idr_lock);
	return NULL;
}
755 | |||
/**
 * kernfs_add_one - add kernfs_node to parent without warning
 * @kn: kernfs_node to be added
 *
 * The caller must already have initialized @kn->parent.  This
 * function increments nlink of the parent's inode if @kn is a
 * directory and link into the children list of the parent.
 *
 * RETURNS:
 * 0 on success, -EEXIST if entry with the given name already
 * exists, -EINVAL on namespace/type mismatch, -ENOENT if the parent
 * cannot accept children.
 */
int kernfs_add_one(struct kernfs_node *kn)
{
	struct kernfs_node *parent = kn->parent;
	struct kernfs_iattrs *ps_iattr;
	bool has_ns;
	int ret;

	mutex_lock(&kernfs_mutex);

	ret = -EINVAL;
	/* @kn->ns must be set iff the parent has namespaces enabled */
	has_ns = kernfs_ns_enabled(parent);
	if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
		 has_ns ? "required" : "invalid", parent->name, kn->name))
		goto out_unlock;

	if (kernfs_type(parent) != KERNFS_DIR)
		goto out_unlock;

	ret = -ENOENT;
	if (parent->flags & KERNFS_EMPTY_DIR)
		goto out_unlock;

	/* refuse to add under a parent that is already being removed */
	if ((parent->flags & KERNFS_ACTIVATED) && !kernfs_active(parent))
		goto out_unlock;

	kn->hash = kernfs_name_hash(kn->name, kn->ns);

	ret = kernfs_link_sibling(kn);
	if (ret)
		goto out_unlock;

	/* Update timestamps on the parent */
	ps_iattr = parent->iattr;
	if (ps_iattr) {
		ktime_get_real_ts64(&ps_iattr->ia_ctime);
		ps_iattr->ia_mtime = ps_iattr->ia_ctime;
	}

	mutex_unlock(&kernfs_mutex);

	/*
	 * Activate the new node unless CREATE_DEACTIVATED is requested.
	 * If not activated here, the kernfs user is responsible for
	 * activating the node with kernfs_activate().  A node which hasn't
	 * been activated is not visible to userland and its removal won't
	 * trigger deactivation.
	 */
	if (!(kernfs_root(kn)->flags & KERNFS_ROOT_CREATE_DEACTIVATED))
		kernfs_activate(kn);
	return 0;

out_unlock:
	mutex_unlock(&kernfs_mutex);
	return ret;
}
823 | |||
824 | /** | ||
825 | * kernfs_find_ns - find kernfs_node with the given name | ||
826 | * @parent: kernfs_node to search under | ||
827 | * @name: name to look for | ||
828 | * @ns: the namespace tag to use | ||
829 | * | ||
830 | * Look for kernfs_node with name @name under @parent. Returns pointer to | ||
831 | * the found kernfs_node on success, %NULL on failure. | ||
832 | */ | ||
833 | static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent, | ||
834 | const unsigned char *name, | ||
835 | const void *ns) | ||
836 | { | ||
837 | struct rb_node *node = parent->dir.children.rb_node; | ||
838 | bool has_ns = kernfs_ns_enabled(parent); | ||
839 | unsigned int hash; | ||
840 | |||
841 | lockdep_assert_held(&kernfs_mutex); | ||
842 | |||
843 | if (has_ns != (bool)ns) { | ||
844 | WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n", | ||
845 | has_ns ? "required" : "invalid", parent->name, name); | ||
846 | return NULL; | ||
847 | } | ||
848 | |||
849 | hash = kernfs_name_hash(name, ns); | ||
850 | while (node) { | ||
851 | struct kernfs_node *kn; | ||
852 | int result; | ||
853 | |||
854 | kn = rb_to_kn(node); | ||
855 | result = kernfs_name_compare(hash, name, ns, kn); | ||
856 | if (result < 0) | ||
857 | node = node->rb_left; | ||
858 | else if (result > 0) | ||
859 | node = node->rb_right; | ||
860 | else | ||
861 | return kn; | ||
862 | } | ||
863 | return NULL; | ||
864 | } | ||
865 | |||
/*
 * Walk @path component by component starting at @parent, resolving each
 * component with kernfs_find_ns().  Returns the final node or NULL if any
 * component is missing or @path doesn't fit the scratch buffer.
 */
static struct kernfs_node *kernfs_walk_ns(struct kernfs_node *parent,
					  const unsigned char *path,
					  const void *ns)
{
	size_t len;
	char *p, *name;

	lockdep_assert_held(&kernfs_mutex);

	/* borrow kernfs_pr_cont_buf as scratch space; strsep() mutates it */
	spin_lock_irq(&kernfs_pr_cont_lock);

	len = strlcpy(kernfs_pr_cont_buf, path, sizeof(kernfs_pr_cont_buf));

	if (len >= sizeof(kernfs_pr_cont_buf)) {
		/* path was truncated; fail rather than walk a partial path */
		spin_unlock_irq(&kernfs_pr_cont_lock);
		return NULL;
	}

	p = kernfs_pr_cont_buf;

	/* empty components (leading or doubled '/') are skipped */
	while ((name = strsep(&p, "/")) && parent) {
		if (*name == '\0')
			continue;
		parent = kernfs_find_ns(parent, name, ns);
	}

	spin_unlock_irq(&kernfs_pr_cont_lock);

	return parent;
}
896 | |||
897 | /** | ||
898 | * kernfs_find_and_get_ns - find and get kernfs_node with the given name | ||
899 | * @parent: kernfs_node to search under | ||
900 | * @name: name to look for | ||
901 | * @ns: the namespace tag to use | ||
902 | * | ||
903 | * Look for kernfs_node with name @name under @parent and get a reference | ||
904 | * if found. This function may sleep and returns pointer to the found | ||
905 | * kernfs_node on success, %NULL on failure. | ||
906 | */ | ||
907 | struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent, | ||
908 | const char *name, const void *ns) | ||
909 | { | ||
910 | struct kernfs_node *kn; | ||
911 | |||
912 | mutex_lock(&kernfs_mutex); | ||
913 | kn = kernfs_find_ns(parent, name, ns); | ||
914 | kernfs_get(kn); | ||
915 | mutex_unlock(&kernfs_mutex); | ||
916 | |||
917 | return kn; | ||
918 | } | ||
919 | EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns); | ||
920 | |||
921 | /** | ||
922 | * kernfs_walk_and_get_ns - find and get kernfs_node with the given path | ||
923 | * @parent: kernfs_node to search under | ||
924 | * @path: path to look for | ||
925 | * @ns: the namespace tag to use | ||
926 | * | ||
927 | * Look for kernfs_node with path @path under @parent and get a reference | ||
928 | * if found. This function may sleep and returns pointer to the found | ||
929 | * kernfs_node on success, %NULL on failure. | ||
930 | */ | ||
931 | struct kernfs_node *kernfs_walk_and_get_ns(struct kernfs_node *parent, | ||
932 | const char *path, const void *ns) | ||
933 | { | ||
934 | struct kernfs_node *kn; | ||
935 | |||
936 | mutex_lock(&kernfs_mutex); | ||
937 | kn = kernfs_walk_ns(parent, path, ns); | ||
938 | kernfs_get(kn); | ||
939 | mutex_unlock(&kernfs_mutex); | ||
940 | |||
941 | return kn; | ||
942 | } | ||
943 | |||
944 | /** | ||
945 | * kernfs_create_root - create a new kernfs hierarchy | ||
946 | * @scops: optional syscall operations for the hierarchy | ||
947 | * @flags: KERNFS_ROOT_* flags | ||
948 | * @priv: opaque data associated with the new directory | ||
949 | * | ||
950 | * Returns the root of the new hierarchy on success, ERR_PTR() value on | ||
951 | * failure. | ||
952 | */ | ||
953 | struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops, | ||
954 | unsigned int flags, void *priv) | ||
955 | { | ||
956 | struct kernfs_root *root; | ||
957 | struct kernfs_node *kn; | ||
958 | |||
959 | root = kzalloc(sizeof(*root), GFP_KERNEL); | ||
960 | if (!root) | ||
961 | return ERR_PTR(-ENOMEM); | ||
962 | |||
963 | idr_init(&root->ino_idr); | ||
964 | INIT_LIST_HEAD(&root->supers); | ||
965 | |||
966 | /* | ||
967 | * On 64bit ino setups, id is ino. On 32bit, low 32bits are ino. | ||
968 | * High bits generation. The starting value for both ino and | ||
969 | * genenration is 1. Initialize upper 32bit allocation | ||
970 | * accordingly. | ||
971 | */ | ||
972 | if (sizeof(ino_t) >= sizeof(u64)) | ||
973 | root->id_highbits = 0; | ||
974 | else | ||
975 | root->id_highbits = 1; | ||
976 | |||
977 | kn = __kernfs_new_node(root, NULL, "", S_IFDIR | S_IRUGO | S_IXUGO, | ||
978 | GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, | ||
979 | KERNFS_DIR); | ||
980 | if (!kn) { | ||
981 | idr_destroy(&root->ino_idr); | ||
982 | kfree(root); | ||
983 | return ERR_PTR(-ENOMEM); | ||
984 | } | ||
985 | |||
986 | kn->priv = priv; | ||
987 | kn->dir.root = root; | ||
988 | |||
989 | root->syscall_ops = scops; | ||
990 | root->flags = flags; | ||
991 | root->kn = kn; | ||
992 | init_waitqueue_head(&root->deactivate_waitq); | ||
993 | |||
994 | if (!(root->flags & KERNFS_ROOT_CREATE_DEACTIVATED)) | ||
995 | kernfs_activate(kn); | ||
996 | |||
997 | return root; | ||
998 | } | ||
999 | |||
1000 | /** | ||
1001 | * kernfs_destroy_root - destroy a kernfs hierarchy | ||
1002 | * @root: root of the hierarchy to destroy | ||
1003 | * | ||
1004 | * Destroy the hierarchy anchored at @root by removing all existing | ||
1005 | * directories and destroying @root. | ||
1006 | */ | ||
1007 | void kernfs_destroy_root(struct kernfs_root *root) | ||
1008 | { | ||
1009 | kernfs_remove(root->kn); /* will also free @root */ | ||
1010 | } | ||
1011 | |||
1012 | /** | ||
1013 | * kernfs_create_dir_ns - create a directory | ||
1014 | * @parent: parent in which to create a new directory | ||
1015 | * @name: name of the new directory | ||
1016 | * @mode: mode of the new directory | ||
1017 | * @uid: uid of the new directory | ||
1018 | * @gid: gid of the new directory | ||
1019 | * @priv: opaque data associated with the new directory | ||
1020 | * @ns: optional namespace tag of the directory | ||
1021 | * | ||
1022 | * Returns the created node on success, ERR_PTR() value on failure. | ||
1023 | */ | ||
1024 | struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent, | ||
1025 | const char *name, umode_t mode, | ||
1026 | kuid_t uid, kgid_t gid, | ||
1027 | void *priv, const void *ns) | ||
1028 | { | ||
1029 | struct kernfs_node *kn; | ||
1030 | int rc; | ||
1031 | |||
1032 | /* allocate */ | ||
1033 | kn = kernfs_new_node(parent, name, mode | S_IFDIR, | ||
1034 | uid, gid, KERNFS_DIR); | ||
1035 | if (!kn) | ||
1036 | return ERR_PTR(-ENOMEM); | ||
1037 | |||
1038 | kn->dir.root = parent->dir.root; | ||
1039 | kn->ns = ns; | ||
1040 | kn->priv = priv; | ||
1041 | |||
1042 | /* link in */ | ||
1043 | rc = kernfs_add_one(kn); | ||
1044 | if (!rc) | ||
1045 | return kn; | ||
1046 | |||
1047 | kernfs_put(kn); | ||
1048 | return ERR_PTR(rc); | ||
1049 | } | ||
1050 | |||
1051 | /** | ||
1052 | * kernfs_create_empty_dir - create an always empty directory | ||
1053 | * @parent: parent in which to create a new directory | ||
1054 | * @name: name of the new directory | ||
1055 | * | ||
1056 | * Returns the created node on success, ERR_PTR() value on failure. | ||
1057 | */ | ||
1058 | struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent, | ||
1059 | const char *name) | ||
1060 | { | ||
1061 | struct kernfs_node *kn; | ||
1062 | int rc; | ||
1063 | |||
1064 | /* allocate */ | ||
1065 | kn = kernfs_new_node(parent, name, S_IRUGO|S_IXUGO|S_IFDIR, | ||
1066 | GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, KERNFS_DIR); | ||
1067 | if (!kn) | ||
1068 | return ERR_PTR(-ENOMEM); | ||
1069 | |||
1070 | kn->flags |= KERNFS_EMPTY_DIR; | ||
1071 | kn->dir.root = parent->dir.root; | ||
1072 | kn->ns = NULL; | ||
1073 | kn->priv = NULL; | ||
1074 | |||
1075 | /* link in */ | ||
1076 | rc = kernfs_add_one(kn); | ||
1077 | if (!rc) | ||
1078 | return kn; | ||
1079 | |||
1080 | kernfs_put(kn); | ||
1081 | return ERR_PTR(rc); | ||
1082 | } | ||
1083 | |||
1084 | static struct dentry *kernfs_iop_lookup(struct inode *dir, | ||
1085 | struct dentry *dentry, | ||
1086 | unsigned int flags) | ||
1087 | { | ||
1088 | struct dentry *ret; | ||
1089 | struct kernfs_node *parent = dir->i_private; | ||
1090 | struct kernfs_node *kn; | ||
1091 | struct inode *inode; | ||
1092 | const void *ns = NULL; | ||
1093 | |||
1094 | mutex_lock(&kernfs_mutex); | ||
1095 | |||
1096 | if (kernfs_ns_enabled(parent)) | ||
1097 | ns = kernfs_info(dir->i_sb)->ns; | ||
1098 | |||
1099 | kn = kernfs_find_ns(parent, dentry->d_name.name, ns); | ||
1100 | |||
1101 | /* no such entry */ | ||
1102 | if (!kn || !kernfs_active(kn)) { | ||
1103 | ret = NULL; | ||
1104 | goto out_unlock; | ||
1105 | } | ||
1106 | |||
1107 | /* attach dentry and inode */ | ||
1108 | inode = kernfs_get_inode(dir->i_sb, kn); | ||
1109 | if (!inode) { | ||
1110 | ret = ERR_PTR(-ENOMEM); | ||
1111 | goto out_unlock; | ||
1112 | } | ||
1113 | |||
1114 | /* instantiate and hash dentry */ | ||
1115 | ret = d_splice_alias(inode, dentry); | ||
1116 | out_unlock: | ||
1117 | mutex_unlock(&kernfs_mutex); | ||
1118 | return ret; | ||
1119 | } | ||
1120 | |||
1121 | static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry, | ||
1122 | umode_t mode) | ||
1123 | { | ||
1124 | struct kernfs_node *parent = dir->i_private; | ||
1125 | struct kernfs_syscall_ops *scops = kernfs_root(parent)->syscall_ops; | ||
1126 | int ret; | ||
1127 | |||
1128 | if (!scops || !scops->mkdir) | ||
1129 | return -EPERM; | ||
1130 | |||
1131 | if (!kernfs_get_active(parent)) | ||
1132 | return -ENODEV; | ||
1133 | |||
1134 | ret = scops->mkdir(parent, dentry->d_name.name, mode); | ||
1135 | |||
1136 | kernfs_put_active(parent); | ||
1137 | return ret; | ||
1138 | } | ||
1139 | |||
1140 | static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry) | ||
1141 | { | ||
1142 | struct kernfs_node *kn = kernfs_dentry_node(dentry); | ||
1143 | struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops; | ||
1144 | int ret; | ||
1145 | |||
1146 | if (!scops || !scops->rmdir) | ||
1147 | return -EPERM; | ||
1148 | |||
1149 | if (!kernfs_get_active(kn)) | ||
1150 | return -ENODEV; | ||
1151 | |||
1152 | ret = scops->rmdir(kn); | ||
1153 | |||
1154 | kernfs_put_active(kn); | ||
1155 | return ret; | ||
1156 | } | ||
1157 | |||
1158 | static int kernfs_iop_rename(struct inode *old_dir, struct dentry *old_dentry, | ||
1159 | struct inode *new_dir, struct dentry *new_dentry, | ||
1160 | unsigned int flags) | ||
1161 | { | ||
1162 | struct kernfs_node *kn = kernfs_dentry_node(old_dentry); | ||
1163 | struct kernfs_node *new_parent = new_dir->i_private; | ||
1164 | struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops; | ||
1165 | int ret; | ||
1166 | |||
1167 | if (flags) | ||
1168 | return -EINVAL; | ||
1169 | |||
1170 | if (!scops || !scops->rename) | ||
1171 | return -EPERM; | ||
1172 | |||
1173 | if (!kernfs_get_active(kn)) | ||
1174 | return -ENODEV; | ||
1175 | |||
1176 | if (!kernfs_get_active(new_parent)) { | ||
1177 | kernfs_put_active(kn); | ||
1178 | return -ENODEV; | ||
1179 | } | ||
1180 | |||
1181 | ret = scops->rename(kn, new_parent, new_dentry->d_name.name); | ||
1182 | |||
1183 | kernfs_put_active(new_parent); | ||
1184 | kernfs_put_active(kn); | ||
1185 | return ret; | ||
1186 | } | ||
1187 | |||
/* inode operations for kernfs directories */
const struct inode_operations kernfs_dir_iops = {
	.lookup		= kernfs_iop_lookup,
	.permission	= kernfs_iop_permission,
	.setattr	= kernfs_iop_setattr,
	.getattr	= kernfs_iop_getattr,
	.listxattr	= kernfs_iop_listxattr,

	/* mkdir/rmdir/rename only work if the hierarchy's syscall_ops
	 * implement them; otherwise the handlers return -EPERM */
	.mkdir		= kernfs_iop_mkdir,
	.rmdir		= kernfs_iop_rmdir,
	.rename		= kernfs_iop_rename,
};
1199 | |||
1200 | static struct kernfs_node *kernfs_leftmost_descendant(struct kernfs_node *pos) | ||
1201 | { | ||
1202 | struct kernfs_node *last; | ||
1203 | |||
1204 | while (true) { | ||
1205 | struct rb_node *rbn; | ||
1206 | |||
1207 | last = pos; | ||
1208 | |||
1209 | if (kernfs_type(pos) != KERNFS_DIR) | ||
1210 | break; | ||
1211 | |||
1212 | rbn = rb_first(&pos->dir.children); | ||
1213 | if (!rbn) | ||
1214 | break; | ||
1215 | |||
1216 | pos = rb_to_kn(rbn); | ||
1217 | } | ||
1218 | |||
1219 | return last; | ||
1220 | } | ||
1221 | |||
1222 | /** | ||
1223 | * kernfs_next_descendant_post - find the next descendant for post-order walk | ||
1224 | * @pos: the current position (%NULL to initiate traversal) | ||
1225 | * @root: kernfs_node whose descendants to walk | ||
1226 | * | ||
1227 | * Find the next descendant to visit for post-order traversal of @root's | ||
1228 | * descendants. @root is included in the iteration and the last node to be | ||
1229 | * visited. | ||
1230 | */ | ||
1231 | static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos, | ||
1232 | struct kernfs_node *root) | ||
1233 | { | ||
1234 | struct rb_node *rbn; | ||
1235 | |||
1236 | lockdep_assert_held(&kernfs_mutex); | ||
1237 | |||
1238 | /* if first iteration, visit leftmost descendant which may be root */ | ||
1239 | if (!pos) | ||
1240 | return kernfs_leftmost_descendant(root); | ||
1241 | |||
1242 | /* if we visited @root, we're done */ | ||
1243 | if (pos == root) | ||
1244 | return NULL; | ||
1245 | |||
1246 | /* if there's an unvisited sibling, visit its leftmost descendant */ | ||
1247 | rbn = rb_next(&pos->rb); | ||
1248 | if (rbn) | ||
1249 | return kernfs_leftmost_descendant(rb_to_kn(rbn)); | ||
1250 | |||
1251 | /* no sibling left, visit parent */ | ||
1252 | return pos->parent; | ||
1253 | } | ||
1254 | |||
1255 | /** | ||
1256 | * kernfs_activate - activate a node which started deactivated | ||
1257 | * @kn: kernfs_node whose subtree is to be activated | ||
1258 | * | ||
1259 | * If the root has KERNFS_ROOT_CREATE_DEACTIVATED set, a newly created node | ||
1260 | * needs to be explicitly activated. A node which hasn't been activated | ||
1261 | * isn't visible to userland and deactivation is skipped during its | ||
1262 | * removal. This is useful to construct atomic init sequences where | ||
1263 | * creation of multiple nodes should either succeed or fail atomically. | ||
1264 | * | ||
1265 | * The caller is responsible for ensuring that this function is not called | ||
1266 | * after kernfs_remove*() is invoked on @kn. | ||
1267 | */ | ||
1268 | void kernfs_activate(struct kernfs_node *kn) | ||
1269 | { | ||
1270 | struct kernfs_node *pos; | ||
1271 | |||
1272 | mutex_lock(&kernfs_mutex); | ||
1273 | |||
1274 | pos = NULL; | ||
1275 | while ((pos = kernfs_next_descendant_post(pos, kn))) { | ||
1276 | if (pos->flags & KERNFS_ACTIVATED) | ||
1277 | continue; | ||
1278 | |||
1279 | WARN_ON_ONCE(pos->parent && RB_EMPTY_NODE(&pos->rb)); | ||
1280 | WARN_ON_ONCE(atomic_read(&pos->active) != KN_DEACTIVATED_BIAS); | ||
1281 | |||
1282 | atomic_sub(KN_DEACTIVATED_BIAS, &pos->active); | ||
1283 | pos->flags |= KERNFS_ACTIVATED; | ||
1284 | } | ||
1285 | |||
1286 | mutex_unlock(&kernfs_mutex); | ||
1287 | } | ||
1288 | |||
/*
 * __kernfs_remove - remove @kn and its entire subtree
 *
 * Deactivates every node under (and including) @kn, then drains and
 * unlinks them bottom-up in post-order.  Caller must hold kernfs_mutex;
 * note that kernfs_drain() can drop it temporarily.
 */
static void __kernfs_remove(struct kernfs_node *kn)
{
	struct kernfs_node *pos;

	lockdep_assert_held(&kernfs_mutex);

	/*
	 * Short-circuit if non-root @kn has already finished removal.
	 * This is for kernfs_remove_self() which plays with active ref
	 * after removal.
	 */
	if (!kn || (kn->parent && RB_EMPTY_NODE(&kn->rb)))
		return;

	pr_debug("kernfs %s: removing\n", kn->name);

	/* prevent any new usage under @kn by deactivating all nodes */
	pos = NULL;
	while ((pos = kernfs_next_descendant_post(pos, kn)))
		if (kernfs_active(pos))
			atomic_add(KN_DEACTIVATED_BIAS, &pos->active);

	/* deactivate and unlink the subtree node-by-node */
	do {
		/* leaves first: each pass removes the deepest remaining node */
		pos = kernfs_leftmost_descendant(kn);

		/*
		 * kernfs_drain() drops kernfs_mutex temporarily and @pos's
		 * base ref could have been put by someone else by the time
		 * the function returns.  Make sure it doesn't go away
		 * underneath us.
		 */
		kernfs_get(pos);

		/*
		 * Drain iff @kn was activated.  This avoids draining and
		 * its lockdep annotations for nodes which have never been
		 * activated and allows embedding kernfs_remove() in create
		 * error paths without worrying about draining.
		 */
		if (kn->flags & KERNFS_ACTIVATED)
			kernfs_drain(pos);
		else
			WARN_ON_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS);

		/*
		 * kernfs_unlink_sibling() succeeds once per node.  Use it
		 * to decide who's responsible for cleanups.
		 */
		if (!pos->parent || kernfs_unlink_sibling(pos)) {
			struct kernfs_iattrs *ps_iattr =
				pos->parent ? pos->parent->iattr : NULL;

			/* update timestamps on the parent */
			if (ps_iattr) {
				ktime_get_real_ts64(&ps_iattr->ia_ctime);
				ps_iattr->ia_mtime = ps_iattr->ia_ctime;
			}

			/* drop the base (creation) reference */
			kernfs_put(pos);
		}

		/* drop the temporary reference taken above */
		kernfs_put(pos);
	} while (pos != kn);
}
1354 | |||
/**
 * kernfs_remove - remove a kernfs_node recursively
 * @kn: the kernfs_node to remove
 *
 * Remove @kn along with all its subdirectories and files.  Takes
 * kernfs_mutex and delegates to __kernfs_remove(); may sleep.
 */
void kernfs_remove(struct kernfs_node *kn)
{
	mutex_lock(&kernfs_mutex);
	__kernfs_remove(kn);
	mutex_unlock(&kernfs_mutex);
}
1367 | |||
/**
 * kernfs_break_active_protection - break out of active protection
 * @kn: the self kernfs_node
 *
 * The caller must be running off of a kernfs operation which is invoked
 * with an active reference - e.g. one of kernfs_ops.  Each invocation of
 * this function must also be matched with an invocation of
 * kernfs_unbreak_active_protection().
 *
 * This function releases the active reference of @kn the caller is
 * holding.  Once this function is called, @kn may be removed at any point
 * and the caller is solely responsible for ensuring that the objects it
 * dereferences are accessible.
 */
void kernfs_break_active_protection(struct kernfs_node *kn)
{
	/*
	 * Take ourself out of the active ref dependency chain.  If
	 * we're called without an active ref, lockdep will complain.
	 */
	kernfs_put_active(kn);
}
1390 | |||
/**
 * kernfs_unbreak_active_protection - undo kernfs_break_active_protection()
 * @kn: the self kernfs_node
 *
 * If kernfs_break_active_protection() was called, this function must be
 * invoked before finishing the kernfs operation.  Note that while this
 * function restores the active reference, it doesn't and can't actually
 * restore the active protection - @kn may already be removed or in the
 * process of being removed.  Once kernfs_break_active_protection() is
 * invoked, that protection is irreversibly gone for the kernfs operation
 * instance.
 *
 * While this function may be called at any point after
 * kernfs_break_active_protection() is invoked, its most useful location
 * would be right before the enclosing kernfs operation returns.
 */
void kernfs_unbreak_active_protection(struct kernfs_node *kn)
{
	/*
	 * @kn->active could be in any state; however, the increment we do
	 * here will be undone as soon as the enclosing kernfs operation
	 * finishes and this temporary bump can't break anything.  If @kn
	 * is alive, nothing changes.  If @kn is being deactivated, the
	 * soon-to-follow put will either finish deactivation or restore
	 * deactivated state.  If @kn is already removed, the temporary
	 * bump is guaranteed to be gone before @kn is released.
	 */
	atomic_inc(&kn->active);
	if (kernfs_lockdep(kn))
		rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_);
}
1421 | |||
/**
 * kernfs_remove_self - remove a kernfs_node from its own method
 * @kn: the self kernfs_node to remove
 *
 * The caller must be running off of a kernfs operation which is invoked
 * with an active reference - e.g. one of kernfs_ops.  This can be used to
 * implement a file operation which deletes itself.
 *
 * For example, the "delete" file for a sysfs device directory can be
 * implemented by invoking kernfs_remove_self() on the "delete" file
 * itself.  This function breaks the circular dependency of trying to
 * deactivate self while holding an active ref itself.  It isn't necessary
 * to modify the usual removal path to use kernfs_remove_self().  The
 * "delete" implementation can simply invoke kernfs_remove_self() on self
 * before proceeding with the usual removal path.  kernfs will ignore later
 * kernfs_remove() on self.
 *
 * kernfs_remove_self() can be called multiple times concurrently on the
 * same kernfs_node.  Only the first one actually performs removal and
 * returns %true.  All others will wait until the kernfs operation which
 * won self-removal finishes and return %false.  Note that the losers wait
 * for the completion of not only the winning kernfs_remove_self() but also
 * the whole kernfs_ops which won the arbitration.  This can be used to
 * guarantee, for example, all concurrent writes to a "delete" file to
 * finish only after the whole operation is complete.
 */
bool kernfs_remove_self(struct kernfs_node *kn)
{
	bool ret;

	mutex_lock(&kernfs_mutex);
	/* drop our own active ref so removal can't deadlock on us */
	kernfs_break_active_protection(kn);

	/*
	 * SUICIDAL is used to arbitrate among competing invocations.  Only
	 * the first one will actually perform removal.  When the removal
	 * is complete, SUICIDED is set and the active ref is restored
	 * while holding kernfs_mutex.  The ones which lost arbitration
	 * waits for SUICIDED && drained which can happen only after the
	 * enclosing kernfs operation which executed the winning instance
	 * of kernfs_remove_self() finished.
	 */
	if (!(kn->flags & KERNFS_SUICIDAL)) {
		/* we won the arbitration - do the removal ourselves */
		kn->flags |= KERNFS_SUICIDAL;
		__kernfs_remove(kn);
		kn->flags |= KERNFS_SUICIDED;
		ret = true;
	} else {
		/* lost: sleep until the winner's whole operation finishes */
		wait_queue_head_t *waitq = &kernfs_root(kn)->deactivate_waitq;
		DEFINE_WAIT(wait);

		while (true) {
			prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);

			/* done once removal completed and active refs drained */
			if ((kn->flags & KERNFS_SUICIDED) &&
			    atomic_read(&kn->active) == KN_DEACTIVATED_BIAS)
				break;

			/* must not sleep holding kernfs_mutex */
			mutex_unlock(&kernfs_mutex);
			schedule();
			mutex_lock(&kernfs_mutex);
		}
		finish_wait(waitq, &wait);
		WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb));
		ret = false;
	}

	/*
	 * This must be done while holding kernfs_mutex; otherwise, waiting
	 * for SUICIDED && deactivated could finish prematurely.
	 */
	kernfs_unbreak_active_protection(kn);

	mutex_unlock(&kernfs_mutex);
	return ret;
}
1498 | |||
1499 | /** | ||
1500 | * kernfs_remove_by_name_ns - find a kernfs_node by name and remove it | ||
1501 | * @parent: parent of the target | ||
1502 | * @name: name of the kernfs_node to remove | ||
1503 | * @ns: namespace tag of the kernfs_node to remove | ||
1504 | * | ||
1505 | * Look for the kernfs_node with @name and @ns under @parent and remove it. | ||
1506 | * Returns 0 on success, -ENOENT if such entry doesn't exist. | ||
1507 | */ | ||
1508 | int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name, | ||
1509 | const void *ns) | ||
1510 | { | ||
1511 | struct kernfs_node *kn; | ||
1512 | |||
1513 | if (!parent) { | ||
1514 | WARN(1, KERN_WARNING "kernfs: can not remove '%s', no directory\n", | ||
1515 | name); | ||
1516 | return -ENOENT; | ||
1517 | } | ||
1518 | |||
1519 | mutex_lock(&kernfs_mutex); | ||
1520 | |||
1521 | kn = kernfs_find_ns(parent, name, ns); | ||
1522 | if (kn) { | ||
1523 | kernfs_get(kn); | ||
1524 | __kernfs_remove(kn); | ||
1525 | kernfs_put(kn); | ||
1526 | } | ||
1527 | |||
1528 | mutex_unlock(&kernfs_mutex); | ||
1529 | |||
1530 | if (kn) | ||
1531 | return 0; | ||
1532 | else | ||
1533 | return -ENOENT; | ||
1534 | } | ||
1535 | |||
/**
 * kernfs_rename_ns - move and rename a kernfs_node
 * @kn: target node
 * @new_parent: new parent to put @kn under
 * @new_name: new name
 * @new_ns: new namespace tag
 *
 * Returns 0 on success, -errno on failure (-EINVAL for the root node,
 * -ENOENT if either end is inactive or @new_parent is an empty dir,
 * -EEXIST if the destination name is taken, -ENOMEM on allocation failure).
 */
int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
		     const char *new_name, const void *new_ns)
{
	struct kernfs_node *old_parent;
	const char *old_name = NULL;
	int error;

	/* can't move or rename root */
	if (!kn->parent)
		return -EINVAL;

	mutex_lock(&kernfs_mutex);

	error = -ENOENT;
	if (!kernfs_active(kn) || !kernfs_active(new_parent) ||
	    (new_parent->flags & KERNFS_EMPTY_DIR))
		goto out;

	error = 0;
	if ((kn->parent == new_parent) && (kn->ns == new_ns) &&
	    (strcmp(kn->name, new_name) == 0))
		goto out;	/* nothing to rename */

	error = -EEXIST;
	if (kernfs_find_ns(new_parent, new_name, new_ns))
		goto out;

	/* rename kernfs_node; dup the new name only if it actually changes */
	if (strcmp(kn->name, new_name) != 0) {
		error = -ENOMEM;
		new_name = kstrdup_const(new_name, GFP_KERNEL);
		if (!new_name)
			goto out;
	} else {
		new_name = NULL;
	}

	/*
	 * Move to the appropriate place in the appropriate directories rbtree.
	 */
	kernfs_unlink_sibling(kn);
	kernfs_get(new_parent);

	/* rename_lock protects ->parent and ->name accessors */
	spin_lock_irq(&kernfs_rename_lock);

	old_parent = kn->parent;
	kn->parent = new_parent;

	kn->ns = new_ns;
	if (new_name) {
		old_name = kn->name;
		kn->name = new_name;
	}

	spin_unlock_irq(&kernfs_rename_lock);

	/* rehash under the new (name, ns) and relink under the new parent */
	kn->hash = kernfs_name_hash(kn->name, kn->ns);
	kernfs_link_sibling(kn);

	/* drop the old parent's ref and free the old name, if replaced */
	kernfs_put(old_parent);
	kfree_const(old_name);

	error = 0;
out:
	mutex_unlock(&kernfs_mutex);
	return error;
}
1611 | |||
1612 | /* Relationship between s_mode and the DT_xxx types */ | ||
1613 | static inline unsigned char dt_type(struct kernfs_node *kn) | ||
1614 | { | ||
1615 | return (kn->mode >> 12) & 15; | ||
1616 | } | ||
1617 | |||
/* drop the readdir position cached in ->private_data (see kernfs_fop_readdir) */
static int kernfs_dir_fop_release(struct inode *inode, struct file *filp)
{
	kernfs_put(filp->private_data);
	return 0;
}
1623 | |||
/*
 * Find the node to resume readdir iteration from.  @pos is the cached
 * node from the previous call (or NULL) and @hash is the f_pos cookie
 * (a node's name hash).  Returns the first live node in @ns at or near
 * @hash, or NULL when iteration is done.
 */
static struct kernfs_node *kernfs_dir_pos(const void *ns,
	struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos)
{
	if (pos) {
		/* reuse the cached node only if it's still live, still under
		 * @parent and still matches the f_pos cookie */
		int valid = kernfs_active(pos) &&
			pos->parent == parent && hash == pos->hash;
		kernfs_put(pos);
		if (!valid)
			pos = NULL;
	}
	if (!pos && (hash > 1) && (hash < INT_MAX)) {
		/* cache miss: search the children rbtree for @hash.  0/1 are
		 * used for the dot entries and INT_MAX marks end of dir, so
		 * only real hashes are looked up.  If the exact hash is gone,
		 * the search ends on a nearby node, which is good enough to
		 * approximately resume iteration. */
		struct rb_node *node = parent->dir.children.rb_node;
		while (node) {
			pos = rb_to_kn(node);

			if (hash < pos->hash)
				node = node->rb_left;
			else if (hash > pos->hash)
				node = node->rb_right;
			else
				break;
		}
	}
	/* Skip over entries which are dying/dead or in the wrong namespace */
	while (pos && (!kernfs_active(pos) || pos->ns != ns)) {
		struct rb_node *node = rb_next(&pos->rb);
		if (!node)
			pos = NULL;
		else
			pos = rb_to_kn(node);
	}
	return pos;
}
1657 | |||
1658 | static struct kernfs_node *kernfs_dir_next_pos(const void *ns, | ||
1659 | struct kernfs_node *parent, ino_t ino, struct kernfs_node *pos) | ||
1660 | { | ||
1661 | pos = kernfs_dir_pos(ns, parent, ino, pos); | ||
1662 | if (pos) { | ||
1663 | do { | ||
1664 | struct rb_node *node = rb_next(&pos->rb); | ||
1665 | if (!node) | ||
1666 | pos = NULL; | ||
1667 | else | ||
1668 | pos = rb_to_kn(node); | ||
1669 | } while (pos && (!kernfs_active(pos) || pos->ns != ns)); | ||
1670 | } | ||
1671 | return pos; | ||
1672 | } | ||
1673 | |||
/*
 * ->iterate_shared for kernfs directories.  Iterates the children rbtree,
 * using each node's name hash as the f_pos cookie and caching the current
 * node in ->private_data so iteration can resume across calls.
 */
static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
{
	struct dentry *dentry = file->f_path.dentry;
	struct kernfs_node *parent = kernfs_dentry_node(dentry);
	struct kernfs_node *pos = file->private_data;	/* cached resume point */
	const void *ns = NULL;

	if (!dir_emit_dots(file, ctx))
		return 0;
	mutex_lock(&kernfs_mutex);

	if (kernfs_ns_enabled(parent))
		ns = kernfs_info(dentry->d_sb)->ns;

	for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos);
	     pos;
	     pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) {
		const char *name = pos->name;
		unsigned int type = dt_type(pos);
		int len = strlen(name);
		ino_t ino = kernfs_ino(pos);

		/* the name hash doubles as the f_pos cookie */
		ctx->pos = pos->hash;
		file->private_data = pos;
		kernfs_get(pos);

		/* drop kernfs_mutex across dir_emit(); the reference taken
		 * above keeps @pos alive meanwhile.  On emit failure return
		 * with the ref still cached - it's dropped on release or
		 * revalidated on the next call. */
		mutex_unlock(&kernfs_mutex);
		if (!dir_emit(ctx, name, len, ino, type))
			return 0;
		mutex_lock(&kernfs_mutex);
	}
	mutex_unlock(&kernfs_mutex);
	file->private_data = NULL;
	ctx->pos = INT_MAX;	/* terminal position: directory exhausted */
	return 0;
}
1710 | |||
/* file operations for kernfs directories */
const struct file_operations kernfs_dir_fops = {
	.read		= generic_read_dir,
	.iterate_shared	= kernfs_fop_readdir,
	.release	= kernfs_dir_fop_release,
	.llseek		= generic_file_llseek,
};
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c new file mode 100644 index 000000000..c75719312 --- /dev/null +++ b/fs/kernfs/file.c | |||
@@ -0,0 +1,1020 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-only | ||
2 | /* | ||
3 | * fs/kernfs/file.c - kernfs file implementation | ||
4 | * | ||
5 | * Copyright (c) 2001-3 Patrick Mochel | ||
6 | * Copyright (c) 2007 SUSE Linux Products GmbH | ||
7 | * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org> | ||
8 | */ | ||
9 | |||
10 | #include <linux/fs.h> | ||
11 | #include <linux/seq_file.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/poll.h> | ||
14 | #include <linux/pagemap.h> | ||
15 | #include <linux/sched/mm.h> | ||
16 | #include <linux/fsnotify.h> | ||
17 | #include <linux/uio.h> | ||
18 | |||
19 | #include "kernfs-internal.h" | ||
20 | |||
21 | /* | ||
22 | * There's one kernfs_open_file for each open file and one kernfs_open_node | ||
23 | * for each kernfs_node with one or more open files. | ||
24 | * | ||
25 | * kernfs_node->attr.open points to kernfs_open_node. attr.open is | ||
26 | * protected by kernfs_open_node_lock. | ||
27 | * | ||
28 | * filp->private_data points to seq_file whose ->private points to | ||
29 | * kernfs_open_file. kernfs_open_files are chained at | ||
30 | * kernfs_open_node->files, which is protected by kernfs_open_file_mutex. | ||
31 | */ | ||
32 | static DEFINE_SPINLOCK(kernfs_open_node_lock); | ||
33 | static DEFINE_MUTEX(kernfs_open_file_mutex); | ||
34 | |||
struct kernfs_open_node {
	atomic_t		refcnt;	/* opens plus temporary drain refs */
	atomic_t		event;	/* bumped by kernfs_notify() for poll */
	wait_queue_head_t	poll;	/* waiters from poll(2)/select(2) */
	struct list_head	files; /* goes through kernfs_open_file.list */
};
41 | |||
42 | /* | ||
43 | * kernfs_notify() may be called from any context and bounces notifications | ||
44 | * through a work item. To minimize space overhead in kernfs_node, the | ||
45 | * pending queue is implemented as a singly linked list of kernfs_nodes. | ||
46 | * The list is terminated with the self pointer so that whether a | ||
47 | * kernfs_node is on the list or not can be determined by testing the next | ||
48 | * pointer for NULL. | ||
49 | */ | ||
50 | #define KERNFS_NOTIFY_EOL ((void *)&kernfs_notify_list) | ||
51 | |||
52 | static DEFINE_SPINLOCK(kernfs_notify_lock); | ||
53 | static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL; | ||
54 | |||
55 | static struct kernfs_open_file *kernfs_of(struct file *file) | ||
56 | { | ||
57 | return ((struct seq_file *)file->private_data)->private; | ||
58 | } | ||
59 | |||
/*
 * Determine the kernfs_ops for the given kernfs_node.  This function must
 * be called while holding an active reference; when KERNFS_LOCKDEP is set,
 * lockdep verifies that the caller really holds one.
 */
static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn)
{
	if (kn->flags & KERNFS_LOCKDEP)
		lockdep_assert_held(kn);
	return kn->attr.ops;
}
70 | |||
71 | /* | ||
72 | * As kernfs_seq_stop() is also called after kernfs_seq_start() or | ||
73 | * kernfs_seq_next() failure, it needs to distinguish whether it's stopping | ||
74 | * a seq_file iteration which is fully initialized with an active reference | ||
75 | * or an aborted kernfs_seq_start() due to get_active failure. The | ||
76 | * position pointer is the only context for each seq_file iteration and | ||
77 | * thus the stop condition should be encoded in it. As the return value is | ||
78 | * directly visible to userland, ERR_PTR(-ENODEV) is the only acceptable | ||
79 | * choice to indicate get_active failure. | ||
80 | * | ||
81 | * Unfortunately, this is complicated due to the optional custom seq_file | ||
82 | * operations which may return ERR_PTR(-ENODEV) too. kernfs_seq_stop() | ||
83 | * can't distinguish whether ERR_PTR(-ENODEV) is from get_active failure or | ||
84 | * custom seq_file operations and thus can't decide whether put_active | ||
85 | * should be performed or not only on ERR_PTR(-ENODEV). | ||
86 | * | ||
87 | * This is worked around by factoring out the custom seq_stop() and | ||
88 | * put_active part into kernfs_seq_stop_active(), skipping it from | ||
89 | * kernfs_seq_stop() if ERR_PTR(-ENODEV) while invoking it directly after | ||
90 | * custom seq_file operations fail with ERR_PTR(-ENODEV) - this ensures | ||
91 | * that kernfs_seq_stop_active() is skipped only after get_active failure. | ||
92 | */ | ||
static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	/* let the custom seq_stop clean up first, then drop the active ref */
	if (ops->seq_stop)
		ops->seq_stop(sf, v);
	kernfs_put_active(of->kn);
}
102 | |||
static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops;

	/*
	 * @of->mutex nests outside active ref and is primarily to ensure that
	 * the ops aren't called concurrently for the same open file.  It is
	 * held until kernfs_seq_stop(); seq_file guarantees ->stop() is
	 * called even when ->start() fails.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn))
		return ERR_PTR(-ENODEV);

	ops = kernfs_ops(of->kn);
	if (ops->seq_start) {
		void *next = ops->seq_start(sf, ppos);
		/* see the comment above kernfs_seq_stop_active() */
		if (next == ERR_PTR(-ENODEV))
			kernfs_seq_stop_active(sf, next);
		return next;
	} else {
		/*
		 * The same behavior and code as single_open().  Returns
		 * !NULL if pos is at the beginning; otherwise, NULL.
		 */
		return NULL + !*ppos;
	}
}
131 | |||
/* seq_file ->next: delegate to the custom seq_next or emulate single_open() */
static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	if (ops->seq_next) {
		void *next = ops->seq_next(sf, v, ppos);
		/* see the comment above kernfs_seq_stop_active() */
		if (next == ERR_PTR(-ENODEV))
			kernfs_seq_stop_active(sf, next);
		return next;
	} else {
		/*
		 * The same behavior and code as single_open(), always
		 * terminate after the initial read.
		 */
		++*ppos;
		return NULL;
	}
}
152 | |||
static void kernfs_seq_stop(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;

	/*
	 * ERR_PTR(-ENODEV) means kernfs_get_active() failed in ->start(),
	 * so there is no active ref to drop; see kernfs_seq_stop_active().
	 */
	if (v != ERR_PTR(-ENODEV))
		kernfs_seq_stop_active(sf, v);
	mutex_unlock(&of->mutex);	/* taken in kernfs_seq_start() */
}
161 | |||
162 | static int kernfs_seq_show(struct seq_file *sf, void *v) | ||
163 | { | ||
164 | struct kernfs_open_file *of = sf->private; | ||
165 | |||
166 | of->event = atomic_read(&of->kn->attr.open->event); | ||
167 | |||
168 | return of->kn->attr.ops->seq_show(sf, v); | ||
169 | } | ||
170 | |||
/* seq_file operations used for files implementing ->seq_show */
static const struct seq_operations kernfs_seq_ops = {
	.start = kernfs_seq_start,
	.next = kernfs_seq_next,
	.stop = kernfs_seq_stop,
	.show = kernfs_seq_show,
};
177 | |||
178 | /* | ||
179 | * As reading a bin file can have side-effects, the exact offset and bytes | ||
180 | * specified in read(2) call should be passed to the read callback making | ||
181 | * it difficult to use seq_file. Implement simplistic custom buffering for | ||
182 | * bin files. | ||
183 | */ | ||
static ssize_t kernfs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
	/* at most one page per call */
	ssize_t len = min_t(size_t, iov_iter_count(iter), PAGE_SIZE);
	const struct kernfs_ops *ops;
	char *buf;

	/* use the preallocated buffer if there is one, else a transient one */
	buf = of->prealloc_buf;
	if (buf)
		mutex_lock(&of->prealloc_mutex);
	else
		buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * @of->mutex nests outside active ref and is used to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		len = -ENODEV;
		mutex_unlock(&of->mutex);
		goto out_free;
	}

	/* snapshot the event counter so poll() can detect later changes */
	of->event = atomic_read(&of->kn->attr.open->event);
	ops = kernfs_ops(of->kn);
	if (ops->read)
		len = ops->read(of, buf, len, iocb->ki_pos);
	else
		len = -EINVAL;

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);

	if (len < 0)
		goto out_free;

	/* copy to userland outside of locks and the active reference */
	if (copy_to_iter(buf, len, iter) != len) {
		len = -EFAULT;
		goto out_free;
	}

	iocb->ki_pos += len;

out_free:
	if (buf == of->prealloc_buf)
		mutex_unlock(&of->prealloc_mutex);
	else
		kfree(buf);
	return len;
}
237 | |||
238 | static ssize_t kernfs_fop_read_iter(struct kiocb *iocb, struct iov_iter *iter) | ||
239 | { | ||
240 | if (kernfs_of(iocb->ki_filp)->kn->flags & KERNFS_HAS_SEQ_SHOW) | ||
241 | return seq_read_iter(iocb, iter); | ||
242 | return kernfs_file_read_iter(iocb, iter); | ||
243 | } | ||
244 | |||
245 | /* | ||
246 | * Copy data in from userland and pass it to the matching kernfs write | ||
247 | * operation. | ||
248 | * | ||
249 | * There is no easy way for us to know if userspace is only doing a partial | ||
250 | * write, so we don't support them. We expect the entire buffer to come on | ||
251 | * the first write. Hint: if you're writing a value, first read the file, | ||
 * modify only the value you're changing, then write entire buffer
253 | * back. | ||
254 | */ | ||
static ssize_t kernfs_fop_write_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct kernfs_open_file *of = kernfs_of(iocb->ki_filp);
	ssize_t len = iov_iter_count(iter);
	const struct kernfs_ops *ops;
	char *buf;

	/*
	 * atomic_write_len, when set, rejects writes larger than the limit;
	 * otherwise the write is silently clamped to one page.
	 */
	if (of->atomic_write_len) {
		if (len > of->atomic_write_len)
			return -E2BIG;
	} else {
		len = min_t(size_t, len, PAGE_SIZE);
	}

	/* use the preallocated buffer if there is one, else a transient one */
	buf = of->prealloc_buf;
	if (buf)
		mutex_lock(&of->prealloc_mutex);
	else
		buf = kmalloc(len + 1, GFP_KERNEL);	/* +1 for the NUL */
	if (!buf)
		return -ENOMEM;

	if (copy_from_iter(buf, len, iter) != len) {
		len = -EFAULT;
		goto out_free;
	}
	buf[len] = '\0';	/* guarantee string termination */

	/*
	 * @of->mutex nests outside active ref and is used to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		mutex_unlock(&of->mutex);
		len = -ENODEV;
		goto out_free;
	}

	ops = kernfs_ops(of->kn);
	if (ops->write)
		len = ops->write(of, buf, len, iocb->ki_pos);
	else
		len = -EINVAL;

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);

	if (len > 0)
		iocb->ki_pos += len;

out_free:
	if (buf == of->prealloc_buf)
		mutex_unlock(&of->prealloc_mutex);
	else
		kfree(buf);
	return len;
}
313 | |||
314 | static void kernfs_vma_open(struct vm_area_struct *vma) | ||
315 | { | ||
316 | struct file *file = vma->vm_file; | ||
317 | struct kernfs_open_file *of = kernfs_of(file); | ||
318 | |||
319 | if (!of->vm_ops) | ||
320 | return; | ||
321 | |||
322 | if (!kernfs_get_active(of->kn)) | ||
323 | return; | ||
324 | |||
325 | if (of->vm_ops->open) | ||
326 | of->vm_ops->open(vma); | ||
327 | |||
328 | kernfs_put_active(of->kn); | ||
329 | } | ||
330 | |||
331 | static vm_fault_t kernfs_vma_fault(struct vm_fault *vmf) | ||
332 | { | ||
333 | struct file *file = vmf->vma->vm_file; | ||
334 | struct kernfs_open_file *of = kernfs_of(file); | ||
335 | vm_fault_t ret; | ||
336 | |||
337 | if (!of->vm_ops) | ||
338 | return VM_FAULT_SIGBUS; | ||
339 | |||
340 | if (!kernfs_get_active(of->kn)) | ||
341 | return VM_FAULT_SIGBUS; | ||
342 | |||
343 | ret = VM_FAULT_SIGBUS; | ||
344 | if (of->vm_ops->fault) | ||
345 | ret = of->vm_ops->fault(vmf); | ||
346 | |||
347 | kernfs_put_active(of->kn); | ||
348 | return ret; | ||
349 | } | ||
350 | |||
351 | static vm_fault_t kernfs_vma_page_mkwrite(struct vm_fault *vmf) | ||
352 | { | ||
353 | struct file *file = vmf->vma->vm_file; | ||
354 | struct kernfs_open_file *of = kernfs_of(file); | ||
355 | vm_fault_t ret; | ||
356 | |||
357 | if (!of->vm_ops) | ||
358 | return VM_FAULT_SIGBUS; | ||
359 | |||
360 | if (!kernfs_get_active(of->kn)) | ||
361 | return VM_FAULT_SIGBUS; | ||
362 | |||
363 | ret = 0; | ||
364 | if (of->vm_ops->page_mkwrite) | ||
365 | ret = of->vm_ops->page_mkwrite(vmf); | ||
366 | else | ||
367 | file_update_time(file); | ||
368 | |||
369 | kernfs_put_active(of->kn); | ||
370 | return ret; | ||
371 | } | ||
372 | |||
373 | static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr, | ||
374 | void *buf, int len, int write) | ||
375 | { | ||
376 | struct file *file = vma->vm_file; | ||
377 | struct kernfs_open_file *of = kernfs_of(file); | ||
378 | int ret; | ||
379 | |||
380 | if (!of->vm_ops) | ||
381 | return -EINVAL; | ||
382 | |||
383 | if (!kernfs_get_active(of->kn)) | ||
384 | return -EINVAL; | ||
385 | |||
386 | ret = -EINVAL; | ||
387 | if (of->vm_ops->access) | ||
388 | ret = of->vm_ops->access(vma, addr, buf, len, write); | ||
389 | |||
390 | kernfs_put_active(of->kn); | ||
391 | return ret; | ||
392 | } | ||
393 | |||
394 | #ifdef CONFIG_NUMA | ||
395 | static int kernfs_vma_set_policy(struct vm_area_struct *vma, | ||
396 | struct mempolicy *new) | ||
397 | { | ||
398 | struct file *file = vma->vm_file; | ||
399 | struct kernfs_open_file *of = kernfs_of(file); | ||
400 | int ret; | ||
401 | |||
402 | if (!of->vm_ops) | ||
403 | return 0; | ||
404 | |||
405 | if (!kernfs_get_active(of->kn)) | ||
406 | return -EINVAL; | ||
407 | |||
408 | ret = 0; | ||
409 | if (of->vm_ops->set_policy) | ||
410 | ret = of->vm_ops->set_policy(vma, new); | ||
411 | |||
412 | kernfs_put_active(of->kn); | ||
413 | return ret; | ||
414 | } | ||
415 | |||
416 | static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma, | ||
417 | unsigned long addr) | ||
418 | { | ||
419 | struct file *file = vma->vm_file; | ||
420 | struct kernfs_open_file *of = kernfs_of(file); | ||
421 | struct mempolicy *pol; | ||
422 | |||
423 | if (!of->vm_ops) | ||
424 | return vma->vm_policy; | ||
425 | |||
426 | if (!kernfs_get_active(of->kn)) | ||
427 | return vma->vm_policy; | ||
428 | |||
429 | pol = vma->vm_policy; | ||
430 | if (of->vm_ops->get_policy) | ||
431 | pol = of->vm_ops->get_policy(vma, addr); | ||
432 | |||
433 | kernfs_put_active(of->kn); | ||
434 | return pol; | ||
435 | } | ||
436 | |||
437 | #endif | ||
438 | |||
/*
 * vm_operations installed on every kernfs mmap; each op re-validates the
 * node with kernfs_get_active() before forwarding to the wrapped vm_ops.
 */
static const struct vm_operations_struct kernfs_vm_ops = {
	.open = kernfs_vma_open,
	.fault = kernfs_vma_fault,
	.page_mkwrite = kernfs_vma_page_mkwrite,
	.access = kernfs_vma_access,
#ifdef CONFIG_NUMA
	.set_policy = kernfs_vma_set_policy,
	.get_policy = kernfs_vma_get_policy,
#endif
};
449 | |||
/*
 * mmap a kernfs file.  ops->mmap() installs its own vm_ops, which are then
 * wrapped by kernfs_vm_ops so that every vm operation runs under an active
 * reference to the node.
 */
static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct kernfs_open_file *of = kernfs_of(file);
	const struct kernfs_ops *ops;
	int rc;

	/*
	 * mmap path and of->mutex are prone to triggering spurious lockdep
	 * warnings and we don't want to add spurious locking dependency
	 * between the two.  Check whether mmap is actually implemented
	 * without grabbing @of->mutex by testing HAS_MMAP flag.  See the
	 * comment in kernfs_file_open() for more details.
	 */
	if (!(of->kn->flags & KERNFS_HAS_MMAP))
		return -ENODEV;

	mutex_lock(&of->mutex);

	rc = -ENODEV;
	if (!kernfs_get_active(of->kn))
		goto out_unlock;

	ops = kernfs_ops(of->kn);
	rc = ops->mmap(of, vma);
	if (rc)
		goto out_put;

	/*
	 * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
	 * to satisfy versions of X which crash if the mmap fails: that
	 * substitutes a new vm_file, and we don't then want bin_vm_ops.
	 */
	if (vma->vm_file != file)
		goto out_put;

	/* a second mmap must install the same vm_ops as the first */
	rc = -EINVAL;
	if (of->mmapped && of->vm_ops != vma->vm_ops)
		goto out_put;

	/*
	 * It is not possible to successfully wrap close.
	 * So error if someone is trying to use close.
	 */
	rc = -EINVAL;
	if (vma->vm_ops && vma->vm_ops->close)
		goto out_put;

	rc = 0;
	of->mmapped = true;
	of->vm_ops = vma->vm_ops;	/* stash the real vm_ops ... */
	vma->vm_ops = &kernfs_vm_ops;	/* ... and interpose the wrappers */
out_put:
	kernfs_put_active(of->kn);
out_unlock:
	mutex_unlock(&of->mutex);

	return rc;
}
508 | |||
509 | /** | ||
510 | * kernfs_get_open_node - get or create kernfs_open_node | ||
511 | * @kn: target kernfs_node | ||
512 | * @of: kernfs_open_file for this instance of open | ||
513 | * | ||
514 | * If @kn->attr.open exists, increment its reference count; otherwise, | ||
515 | * create one. @of is chained to the files list. | ||
516 | * | ||
517 | * LOCKING: | ||
518 | * Kernel thread context (may sleep). | ||
519 | * | ||
520 | * RETURNS: | ||
521 | * 0 on success, -errno on failure. | ||
522 | */ | ||
static int kernfs_get_open_node(struct kernfs_node *kn,
				struct kernfs_open_file *of)
{
	struct kernfs_open_node *on, *new_on = NULL;

	/*
	 * Classic alloc-outside-lock pattern: the first pass usually finds
	 * kn->attr.open missing, allocates @new_on without any lock held
	 * and retries; the second pass installs it (or discovers someone
	 * else beat us to it and frees ours).
	 */
 retry:
	mutex_lock(&kernfs_open_file_mutex);
	spin_lock_irq(&kernfs_open_node_lock);

	if (!kn->attr.open && new_on) {
		kn->attr.open = new_on;
		new_on = NULL;
	}

	on = kn->attr.open;
	if (on) {
		atomic_inc(&on->refcnt);
		list_add_tail(&of->list, &on->files);
	}

	spin_unlock_irq(&kernfs_open_node_lock);
	mutex_unlock(&kernfs_open_file_mutex);

	if (on) {
		kfree(new_on);	/* NULL or the losing allocation */
		return 0;
	}

	/* not there, initialize a new one and retry */
	new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
	if (!new_on)
		return -ENOMEM;

	/* starts at 0; the install pass above takes the first reference */
	atomic_set(&new_on->refcnt, 0);
	atomic_set(&new_on->event, 1);
	init_waitqueue_head(&new_on->poll);
	INIT_LIST_HEAD(&new_on->files);
	goto retry;
}
562 | |||
563 | /** | ||
564 | * kernfs_put_open_node - put kernfs_open_node | ||
 * @kn: target kernfs_node
566 | * @of: associated kernfs_open_file | ||
567 | * | ||
568 | * Put @kn->attr.open and unlink @of from the files list. If | ||
569 | * reference count reaches zero, disassociate and free it. | ||
570 | * | ||
571 | * LOCKING: | ||
572 | * None. | ||
573 | */ | ||
static void kernfs_put_open_node(struct kernfs_node *kn,
				 struct kernfs_open_file *of)
{
	struct kernfs_open_node *on = kn->attr.open;
	unsigned long flags;

	mutex_lock(&kernfs_open_file_mutex);
	spin_lock_irqsave(&kernfs_open_node_lock, flags);

	/* @of may be NULL when dropping a temporary drain reference */
	if (of)
		list_del(&of->list);

	if (atomic_dec_and_test(&on->refcnt))
		kn->attr.open = NULL;	/* last ref: detach, free below */
	else
		on = NULL;		/* still in use: don't free */

	spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
	mutex_unlock(&kernfs_open_file_mutex);

	kfree(on);
}
596 | |||
/*
 * ->open() for kernfs files.  Performs the optional extra permission
 * checks, allocates the kernfs_open_file, sets up the seq_file and the
 * shared open node, and finally invokes the optional ops->open().
 */
static int kernfs_fop_open(struct inode *inode, struct file *file)
{
	struct kernfs_node *kn = inode->i_private;
	struct kernfs_root *root = kernfs_root(kn);
	const struct kernfs_ops *ops;
	struct kernfs_open_file *of;
	bool has_read, has_write, has_mmap;
	int error = -EACCES;

	if (!kernfs_get_active(kn))
		return -ENODEV;

	ops = kernfs_ops(kn);

	has_read = ops->seq_show || ops->read || ops->mmap;
	has_write = ops->write || ops->mmap;
	has_mmap = ops->mmap;

	/* see the flag definition for details */
	if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) {
		if ((file->f_mode & FMODE_WRITE) &&
		    (!(inode->i_mode & S_IWUGO) || !has_write))
			goto err_out;

		if ((file->f_mode & FMODE_READ) &&
		    (!(inode->i_mode & S_IRUGO) || !has_read))
			goto err_out;
	}

	/* allocate a kernfs_open_file for the file */
	error = -ENOMEM;
	of = kzalloc(sizeof(struct kernfs_open_file), GFP_KERNEL);
	if (!of)
		goto err_out;

	/*
	 * The following is done to give a different lockdep key to
	 * @of->mutex for files which implement mmap.  This is a rather
	 * crude way to avoid false positive lockdep warning around
	 * mm->mmap_lock - mmap nests @of->mutex under mm->mmap_lock and
	 * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
	 * which mm->mmap_lock nests, while holding @of->mutex.  As each
	 * open file has a separate mutex, it's okay as long as those don't
	 * happen on the same file.  At this point, we can't easily give
	 * each file a separate locking class.  Let's differentiate on
	 * whether the file has mmap or not for now.
	 *
	 * Both paths of the branch look the same.  They're supposed to
	 * look that way and give @of->mutex different static lockdep keys.
	 */
	if (has_mmap)
		mutex_init(&of->mutex);
	else
		mutex_init(&of->mutex);

	of->kn = kn;
	of->file = file;

	/*
	 * Write path needs atomic_write_len outside the active reference.
	 * Cache it in open_file.  See kernfs_fop_write_iter() for details.
	 */
	of->atomic_write_len = ops->atomic_write_len;

	error = -EINVAL;
	/*
	 * ->seq_show is incompatible with ->prealloc,
	 * as seq_read does its own allocation.
	 * ->read must be used instead.
	 */
	if (ops->prealloc && ops->seq_show)
		goto err_free;
	if (ops->prealloc) {
		int len = of->atomic_write_len ?: PAGE_SIZE;
		of->prealloc_buf = kmalloc(len + 1, GFP_KERNEL);
		error = -ENOMEM;
		if (!of->prealloc_buf)
			goto err_free;
		mutex_init(&of->prealloc_mutex);
	}

	/*
	 * Always instantiate seq_file even if read access doesn't use
	 * seq_file or is not requested.  This unifies private data access
	 * and readable regular files are the vast majority anyway.
	 */
	if (ops->seq_show)
		error = seq_open(file, &kernfs_seq_ops);
	else
		error = seq_open(file, NULL);
	if (error)
		goto err_free;

	of->seq_file = file->private_data;
	of->seq_file->private = of;

	/* seq_file clears PWRITE unconditionally, restore it if WRITE */
	if (file->f_mode & FMODE_WRITE)
		file->f_mode |= FMODE_PWRITE;

	/* make sure we have open node struct */
	error = kernfs_get_open_node(kn, of);
	if (error)
		goto err_seq_release;

	if (ops->open) {
		/* nobody has access to @of yet, skip @of->mutex */
		error = ops->open(of);
		if (error)
			goto err_put_node;
	}

	/* open succeeded, put active references */
	kernfs_put_active(kn);
	return 0;

err_put_node:
	kernfs_put_open_node(kn, of);
err_seq_release:
	seq_release(inode, file);
err_free:
	kfree(of->prealloc_buf);
	kfree(of);
err_out:
	kernfs_put_active(kn);
	return error;
}
724 | |||
725 | /* used from release/drain to ensure that ->release() is called exactly once */ | ||
/* used from release/drain to ensure that ->release() is called exactly once */
static void kernfs_release_file(struct kernfs_node *kn,
				struct kernfs_open_file *of)
{
	/*
	 * @of is guaranteed to have no other file operations in flight and
	 * we just want to synchronize release and drain paths.
	 * @kernfs_open_file_mutex is enough.  @of->mutex can't be used
	 * here because drain path may be called from places which can
	 * cause circular dependency.
	 */
	lockdep_assert_held(&kernfs_open_file_mutex);

	/* of->released makes this idempotent across release and drain */
	if (!of->released) {
		/*
		 * A file is never detached without being released and we
		 * need to be able to release files which are deactivated
		 * and being drained.  Don't use kernfs_ops().
		 */
		kn->attr.ops->release(of);
		of->released = true;
	}
}
748 | |||
/* ->release() for kernfs files: run ->release once, drop refs, free @of */
static int kernfs_fop_release(struct inode *inode, struct file *filp)
{
	struct kernfs_node *kn = inode->i_private;
	struct kernfs_open_file *of = kernfs_of(filp);

	if (kn->flags & KERNFS_HAS_RELEASE) {
		/* serialized against kernfs_drain_open_files() */
		mutex_lock(&kernfs_open_file_mutex);
		kernfs_release_file(kn, of);
		mutex_unlock(&kernfs_open_file_mutex);
	}

	kernfs_put_open_node(kn, of);
	seq_release(inode, filp);
	kfree(of->prealloc_buf);
	kfree(of);

	return 0;
}
767 | |||
/*
 * Called while removing @kn: unmap all mappings and invoke ->release()
 * for every open file so the node can be drained safely.  No-op for
 * nodes without mmap or release callbacks.
 */
void kernfs_drain_open_files(struct kernfs_node *kn)
{
	struct kernfs_open_node *on;
	struct kernfs_open_file *of;

	if (!(kn->flags & (KERNFS_HAS_MMAP | KERNFS_HAS_RELEASE)))
		return;

	/* pin the open node so it can't go away while we walk its files */
	spin_lock_irq(&kernfs_open_node_lock);
	on = kn->attr.open;
	if (on)
		atomic_inc(&on->refcnt);
	spin_unlock_irq(&kernfs_open_node_lock);
	if (!on)
		return;

	mutex_lock(&kernfs_open_file_mutex);

	list_for_each_entry(of, &on->files, list) {
		struct inode *inode = file_inode(of->file);

		if (kn->flags & KERNFS_HAS_MMAP)
			unmap_mapping_range(inode->i_mapping, 0, 0, 1);

		if (kn->flags & KERNFS_HAS_RELEASE)
			kernfs_release_file(kn, of);
	}

	mutex_unlock(&kernfs_open_file_mutex);

	/* drop the temporary reference taken above */
	kernfs_put_open_node(kn, NULL);
}
800 | |||
801 | /* | ||
802 | * Kernfs attribute files are pollable. The idea is that you read | ||
803 | * the content and then you use 'poll' or 'select' to wait for | ||
804 | * the content to change. When the content changes (assuming the | ||
805 | * manager for the kobject supports notification), poll will | ||
806 | * return EPOLLERR|EPOLLPRI, and select will return the fd whether | ||
807 | * it is waiting for read, write, or exceptions. | ||
808 | * Once poll/select indicates that the value has changed, you | ||
809 | * need to close and re-open the file, or seek to 0 and read again. | ||
810 | * Reminder: this only works for attributes which actively support | ||
811 | * it, and it is not possible to test an attribute from userspace | ||
812 | * to see if it supports poll (Neither 'poll' nor 'select' return | ||
813 | * an appropriate error code). When in doubt, set a suitable timeout value. | ||
814 | */ | ||
/*
 * Default poll implementation: report EPOLLERR|EPOLLPRI once the open
 * node's event counter has advanced past the value cached at last read.
 */
__poll_t kernfs_generic_poll(struct kernfs_open_file *of, poll_table *wait)
{
	struct kernfs_node *kn = kernfs_dentry_node(of->file->f_path.dentry);
	struct kernfs_open_node *on = kn->attr.open;

	poll_wait(of->file, &on->poll, wait);

	if (of->event != atomic_read(&on->event))
		return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI;

	return DEFAULT_POLLMASK;
}
827 | |||
828 | static __poll_t kernfs_fop_poll(struct file *filp, poll_table *wait) | ||
829 | { | ||
830 | struct kernfs_open_file *of = kernfs_of(filp); | ||
831 | struct kernfs_node *kn = kernfs_dentry_node(filp->f_path.dentry); | ||
832 | __poll_t ret; | ||
833 | |||
834 | if (!kernfs_get_active(kn)) | ||
835 | return DEFAULT_POLLMASK|EPOLLERR|EPOLLPRI; | ||
836 | |||
837 | if (kn->attr.ops->poll) | ||
838 | ret = kn->attr.ops->poll(of, wait); | ||
839 | else | ||
840 | ret = kernfs_generic_poll(of, wait); | ||
841 | |||
842 | kernfs_put_active(kn); | ||
843 | return ret; | ||
844 | } | ||
845 | |||
/*
 * Work item that drains the kernfs_notify() queue and generates the
 * corresponding fsnotify events for every super block of the node's root.
 */
static void kernfs_notify_workfn(struct work_struct *work)
{
	struct kernfs_node *kn;
	struct kernfs_super_info *info;
repeat:
	/* pop one off the notify_list */
	spin_lock_irq(&kernfs_notify_lock);
	kn = kernfs_notify_list;
	if (kn == KERNFS_NOTIFY_EOL) {
		spin_unlock_irq(&kernfs_notify_lock);
		return;
	}
	kernfs_notify_list = kn->attr.notify_next;
	kn->attr.notify_next = NULL;	/* mark @kn off-list again */
	spin_unlock_irq(&kernfs_notify_lock);

	/* kick fsnotify */
	mutex_lock(&kernfs_mutex);

	list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
		struct kernfs_node *parent;
		struct inode *p_inode = NULL;
		struct inode *inode;
		struct qstr name;

		/*
		 * We want fsnotify_modify() on @kn but as the
		 * modifications aren't originating from userland don't
		 * have the matching @file available.  Look up the inodes
		 * and generate the events manually.
		 */
		inode = ilookup(info->sb, kernfs_ino(kn));
		if (!inode)
			continue;	/* never instantiated in this sb */

		name = (struct qstr)QSTR_INIT(kn->name, strlen(kn->name));
		parent = kernfs_get_parent(kn);
		if (parent) {
			p_inode = ilookup(info->sb, kernfs_ino(parent));
			if (p_inode) {
				fsnotify(FS_MODIFY | FS_EVENT_ON_CHILD,
					 inode, FSNOTIFY_EVENT_INODE,
					 p_inode, &name, inode, 0);
				iput(p_inode);
			}

			kernfs_put(parent);
		}

		/* no parent inode: fall back to a plain inode event */
		if (!p_inode)
			fsnotify_inode(inode, FS_MODIFY);

		iput(inode);
	}

	mutex_unlock(&kernfs_mutex);
	kernfs_put(kn);	/* reference taken by kernfs_notify() */
	goto repeat;
}
905 | |||
906 | /** | ||
907 | * kernfs_notify - notify a kernfs file | ||
908 | * @kn: file to notify | ||
909 | * | ||
 * Notify @kn such that poll(2) on @kn wakes up.  May be called from any
911 | * context. | ||
912 | */ | ||
913 | void kernfs_notify(struct kernfs_node *kn) | ||
914 | { | ||
915 | static DECLARE_WORK(kernfs_notify_work, kernfs_notify_workfn); | ||
916 | unsigned long flags; | ||
917 | struct kernfs_open_node *on; | ||
918 | |||
919 | if (WARN_ON(kernfs_type(kn) != KERNFS_FILE)) | ||
920 | return; | ||
921 | |||
922 | /* kick poll immediately */ | ||
923 | spin_lock_irqsave(&kernfs_open_node_lock, flags); | ||
924 | on = kn->attr.open; | ||
925 | if (on) { | ||
926 | atomic_inc(&on->event); | ||
927 | wake_up_interruptible(&on->poll); | ||
928 | } | ||
929 | spin_unlock_irqrestore(&kernfs_open_node_lock, flags); | ||
930 | |||
931 | /* schedule work to kick fsnotify */ | ||
932 | spin_lock_irqsave(&kernfs_notify_lock, flags); | ||
933 | if (!kn->attr.notify_next) { | ||
934 | kernfs_get(kn); | ||
935 | kn->attr.notify_next = kernfs_notify_list; | ||
936 | kernfs_notify_list = kn; | ||
937 | schedule_work(&kernfs_notify_work); | ||
938 | } | ||
939 | spin_unlock_irqrestore(&kernfs_notify_lock, flags); | ||
940 | } | ||
941 | EXPORT_SYMBOL_GPL(kernfs_notify); | ||
942 | |||
/*
 * File operations installed on every kernfs regular-file inode
 * (see kernfs_init_inode() in inode.c).
 */
const struct file_operations kernfs_file_fops = {
	.read_iter	= kernfs_fop_read_iter,
	.write_iter	= kernfs_fop_write_iter,
	.llseek		= generic_file_llseek,
	.mmap		= kernfs_fop_mmap,
	.open		= kernfs_fop_open,
	.release	= kernfs_fop_release,
	.poll		= kernfs_fop_poll,
	.fsync		= noop_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
};
955 | |||
956 | /** | ||
957 | * __kernfs_create_file - kernfs internal function to create a file | ||
958 | * @parent: directory to create the file in | ||
959 | * @name: name of the file | ||
960 | * @mode: mode of the file | ||
961 | * @uid: uid of the file | ||
962 | * @gid: gid of the file | ||
963 | * @size: size of the file | ||
964 | * @ops: kernfs operations for the file | ||
965 | * @priv: private data for the file | ||
966 | * @ns: optional namespace tag of the file | ||
967 | * @key: lockdep key for the file's active_ref, %NULL to disable lockdep | ||
968 | * | ||
969 | * Returns the created node on success, ERR_PTR() value on error. | ||
970 | */ | ||
971 | struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent, | ||
972 | const char *name, | ||
973 | umode_t mode, kuid_t uid, kgid_t gid, | ||
974 | loff_t size, | ||
975 | const struct kernfs_ops *ops, | ||
976 | void *priv, const void *ns, | ||
977 | struct lock_class_key *key) | ||
978 | { | ||
979 | struct kernfs_node *kn; | ||
980 | unsigned flags; | ||
981 | int rc; | ||
982 | |||
983 | flags = KERNFS_FILE; | ||
984 | |||
985 | kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, | ||
986 | uid, gid, flags); | ||
987 | if (!kn) | ||
988 | return ERR_PTR(-ENOMEM); | ||
989 | |||
990 | kn->attr.ops = ops; | ||
991 | kn->attr.size = size; | ||
992 | kn->ns = ns; | ||
993 | kn->priv = priv; | ||
994 | |||
995 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
996 | if (key) { | ||
997 | lockdep_init_map(&kn->dep_map, "kn->active", key, 0); | ||
998 | kn->flags |= KERNFS_LOCKDEP; | ||
999 | } | ||
1000 | #endif | ||
1001 | |||
1002 | /* | ||
1003 | * kn->attr.ops is accesible only while holding active ref. We | ||
1004 | * need to know whether some ops are implemented outside active | ||
1005 | * ref. Cache their existence in flags. | ||
1006 | */ | ||
1007 | if (ops->seq_show) | ||
1008 | kn->flags |= KERNFS_HAS_SEQ_SHOW; | ||
1009 | if (ops->mmap) | ||
1010 | kn->flags |= KERNFS_HAS_MMAP; | ||
1011 | if (ops->release) | ||
1012 | kn->flags |= KERNFS_HAS_RELEASE; | ||
1013 | |||
1014 | rc = kernfs_add_one(kn); | ||
1015 | if (rc) { | ||
1016 | kernfs_put(kn); | ||
1017 | return ERR_PTR(rc); | ||
1018 | } | ||
1019 | return kn; | ||
1020 | } | ||
diff --git a/fs/kernfs/inode.c b/fs/kernfs/inode.c new file mode 100644 index 000000000..fc2469a20 --- /dev/null +++ b/fs/kernfs/inode.c | |||
@@ -0,0 +1,435 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-only | ||
2 | /* | ||
3 | * fs/kernfs/inode.c - kernfs inode implementation | ||
4 | * | ||
5 | * Copyright (c) 2001-3 Patrick Mochel | ||
6 | * Copyright (c) 2007 SUSE Linux Products GmbH | ||
7 | * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org> | ||
8 | */ | ||
9 | |||
10 | #include <linux/pagemap.h> | ||
11 | #include <linux/backing-dev.h> | ||
12 | #include <linux/capability.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/xattr.h> | ||
16 | #include <linux/security.h> | ||
17 | |||
18 | #include "kernfs-internal.h" | ||
19 | |||
/*
 * Address-space operations: kernfs file pages live purely in the page
 * cache and are served by the generic "simple" helpers; there is no
 * backing store.
 */
static const struct address_space_operations kernfs_aops = {
	.readpage = simple_readpage,
	.write_begin = simple_write_begin,
	.write_end = simple_write_end,
};
25 | |||
/* default inode operations; directories and symlinks override i_op later */
static const struct inode_operations kernfs_iops = {
	.permission	= kernfs_iop_permission,
	.setattr	= kernfs_iop_setattr,
	.getattr	= kernfs_iop_getattr,
	.listxattr	= kernfs_iop_listxattr,
};
32 | |||
33 | static struct kernfs_iattrs *__kernfs_iattrs(struct kernfs_node *kn, int alloc) | ||
34 | { | ||
35 | static DEFINE_MUTEX(iattr_mutex); | ||
36 | struct kernfs_iattrs *ret; | ||
37 | |||
38 | mutex_lock(&iattr_mutex); | ||
39 | |||
40 | if (kn->iattr || !alloc) | ||
41 | goto out_unlock; | ||
42 | |||
43 | kn->iattr = kmem_cache_zalloc(kernfs_iattrs_cache, GFP_KERNEL); | ||
44 | if (!kn->iattr) | ||
45 | goto out_unlock; | ||
46 | |||
47 | /* assign default attributes */ | ||
48 | kn->iattr->ia_uid = GLOBAL_ROOT_UID; | ||
49 | kn->iattr->ia_gid = GLOBAL_ROOT_GID; | ||
50 | |||
51 | ktime_get_real_ts64(&kn->iattr->ia_atime); | ||
52 | kn->iattr->ia_mtime = kn->iattr->ia_atime; | ||
53 | kn->iattr->ia_ctime = kn->iattr->ia_atime; | ||
54 | |||
55 | simple_xattrs_init(&kn->iattr->xattrs); | ||
56 | atomic_set(&kn->iattr->nr_user_xattrs, 0); | ||
57 | atomic_set(&kn->iattr->user_xattr_size, 0); | ||
58 | out_unlock: | ||
59 | ret = kn->iattr; | ||
60 | mutex_unlock(&iattr_mutex); | ||
61 | return ret; | ||
62 | } | ||
63 | |||
/* get @kn's iattrs, allocating and initializing them if necessary */
static struct kernfs_iattrs *kernfs_iattrs(struct kernfs_node *kn)
{
	return __kernfs_iattrs(kn, 1);
}

/* get @kn's iattrs only if they already exist; never allocates */
static struct kernfs_iattrs *kernfs_iattrs_noalloc(struct kernfs_node *kn)
{
	return __kernfs_iattrs(kn, 0);
}
73 | |||
74 | int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr) | ||
75 | { | ||
76 | struct kernfs_iattrs *attrs; | ||
77 | unsigned int ia_valid = iattr->ia_valid; | ||
78 | |||
79 | attrs = kernfs_iattrs(kn); | ||
80 | if (!attrs) | ||
81 | return -ENOMEM; | ||
82 | |||
83 | if (ia_valid & ATTR_UID) | ||
84 | attrs->ia_uid = iattr->ia_uid; | ||
85 | if (ia_valid & ATTR_GID) | ||
86 | attrs->ia_gid = iattr->ia_gid; | ||
87 | if (ia_valid & ATTR_ATIME) | ||
88 | attrs->ia_atime = iattr->ia_atime; | ||
89 | if (ia_valid & ATTR_MTIME) | ||
90 | attrs->ia_mtime = iattr->ia_mtime; | ||
91 | if (ia_valid & ATTR_CTIME) | ||
92 | attrs->ia_ctime = iattr->ia_ctime; | ||
93 | if (ia_valid & ATTR_MODE) | ||
94 | kn->mode = iattr->ia_mode; | ||
95 | return 0; | ||
96 | } | ||
97 | |||
/**
 * kernfs_setattr - set iattr on a node
 * @kn: target node
 * @iattr: iattr to set
 *
 * Locked wrapper around __kernfs_setattr().  May sleep.
 *
 * Returns 0 on success, -errno on failure.
 */
int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr)
{
	int ret;

	mutex_lock(&kernfs_mutex);
	ret = __kernfs_setattr(kn, iattr);
	mutex_unlock(&kernfs_mutex);
	return ret;
}
114 | |||
115 | int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr) | ||
116 | { | ||
117 | struct inode *inode = d_inode(dentry); | ||
118 | struct kernfs_node *kn = inode->i_private; | ||
119 | int error; | ||
120 | |||
121 | if (!kn) | ||
122 | return -EINVAL; | ||
123 | |||
124 | mutex_lock(&kernfs_mutex); | ||
125 | error = setattr_prepare(dentry, iattr); | ||
126 | if (error) | ||
127 | goto out; | ||
128 | |||
129 | error = __kernfs_setattr(kn, iattr); | ||
130 | if (error) | ||
131 | goto out; | ||
132 | |||
133 | /* this ignores size changes */ | ||
134 | setattr_copy(inode, iattr); | ||
135 | |||
136 | out: | ||
137 | mutex_unlock(&kernfs_mutex); | ||
138 | return error; | ||
139 | } | ||
140 | |||
/*
 * ->listxattr: enumerate xattr names via the simple_xattr list.  Note
 * this allocates the iattrs if they don't exist yet (kernfs_iattrs()),
 * hence the -ENOMEM failure mode.
 */
ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size)
{
	struct kernfs_node *kn = kernfs_dentry_node(dentry);
	struct kernfs_iattrs *attrs;

	attrs = kernfs_iattrs(kn);
	if (!attrs)
		return -ENOMEM;

	return simple_xattr_list(d_inode(dentry), &attrs->xattrs, buf, size);
}
152 | |||
153 | static inline void set_default_inode_attr(struct inode *inode, umode_t mode) | ||
154 | { | ||
155 | inode->i_mode = mode; | ||
156 | inode->i_atime = inode->i_mtime = | ||
157 | inode->i_ctime = current_time(inode); | ||
158 | } | ||
159 | |||
/* copy the persistent kernfs attributes into the VFS inode */
static inline void set_inode_attr(struct inode *inode,
				  struct kernfs_iattrs *attrs)
{
	inode->i_uid = attrs->ia_uid;
	inode->i_gid = attrs->ia_gid;
	inode->i_atime = attrs->ia_atime;
	inode->i_mtime = attrs->ia_mtime;
	inode->i_ctime = attrs->ia_ctime;
}
169 | |||
170 | static void kernfs_refresh_inode(struct kernfs_node *kn, struct inode *inode) | ||
171 | { | ||
172 | struct kernfs_iattrs *attrs = kn->iattr; | ||
173 | |||
174 | inode->i_mode = kn->mode; | ||
175 | if (attrs) | ||
176 | /* | ||
177 | * kernfs_node has non-default attributes get them from | ||
178 | * persistent copy in kernfs_node. | ||
179 | */ | ||
180 | set_inode_attr(inode, attrs); | ||
181 | |||
182 | if (kernfs_type(kn) == KERNFS_DIR) | ||
183 | set_nlink(inode, kn->dir.subdirs + 2); | ||
184 | } | ||
185 | |||
/*
 * ->getattr: refresh the inode from the authoritative kernfs copy, then
 * let generic_fillattr() populate @stat from it.
 */
int kernfs_iop_getattr(const struct path *path, struct kstat *stat,
		       u32 request_mask, unsigned int query_flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct kernfs_node *kn = inode->i_private;

	mutex_lock(&kernfs_mutex);
	kernfs_refresh_inode(kn, inode);
	mutex_unlock(&kernfs_mutex);

	generic_fillattr(inode, stat);
	return 0;
}
199 | |||
/*
 * Set up a freshly allocated (I_NEW) inode from @kn and unlock it.
 * Takes a reference on @kn which is dropped in kernfs_evict_inode().
 */
static void kernfs_init_inode(struct kernfs_node *kn, struct inode *inode)
{
	kernfs_get(kn);
	inode->i_private = kn;
	inode->i_mapping->a_ops = &kernfs_aops;
	inode->i_op = &kernfs_iops;
	inode->i_generation = kernfs_gen(kn);

	set_default_inode_attr(inode, kn->mode);
	kernfs_refresh_inode(kn, inode);

	/* initialize inode according to type */
	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		inode->i_op = &kernfs_dir_iops;
		inode->i_fop = &kernfs_dir_fops;
		if (kn->flags & KERNFS_EMPTY_DIR)
			make_empty_dir_inode(inode);
		break;
	case KERNFS_FILE:
		inode->i_size = kn->attr.size;
		inode->i_fop = &kernfs_file_fops;
		break;
	case KERNFS_LINK:
		inode->i_op = &kernfs_symlink_iops;
		break;
	default:
		BUG();	/* unexpected node type */
	}

	unlock_new_inode(inode);
}
232 | |||
233 | /** | ||
234 | * kernfs_get_inode - get inode for kernfs_node | ||
235 | * @sb: super block | ||
236 | * @kn: kernfs_node to allocate inode for | ||
237 | * | ||
238 | * Get inode for @kn. If such inode doesn't exist, a new inode is | ||
239 | * allocated and basics are initialized. New inode is returned | ||
240 | * locked. | ||
241 | * | ||
242 | * LOCKING: | ||
243 | * Kernel thread context (may sleep). | ||
244 | * | ||
245 | * RETURNS: | ||
246 | * Pointer to allocated inode on success, NULL on failure. | ||
247 | */ | ||
248 | struct inode *kernfs_get_inode(struct super_block *sb, struct kernfs_node *kn) | ||
249 | { | ||
250 | struct inode *inode; | ||
251 | |||
252 | inode = iget_locked(sb, kernfs_ino(kn)); | ||
253 | if (inode && (inode->i_state & I_NEW)) | ||
254 | kernfs_init_inode(kn, inode); | ||
255 | |||
256 | return inode; | ||
257 | } | ||
258 | |||
/*
 * The kernfs_node serves as both an inode and a directory entry for
 * kernfs.  To prevent the kernfs inode numbers from being freed
 * prematurely we take a reference to kernfs_node from the kernfs inode.  A
 * super_operations.evict_inode() implementation is needed to drop that
 * reference upon inode destruction.
 */
void kernfs_evict_inode(struct inode *inode)
{
	struct kernfs_node *kn = inode->i_private;

	truncate_inode_pages_final(&inode->i_data);
	clear_inode(inode);
	kernfs_put(kn);		/* drop the ref taken in kernfs_init_inode() */
}
274 | |||
275 | int kernfs_iop_permission(struct inode *inode, int mask) | ||
276 | { | ||
277 | struct kernfs_node *kn; | ||
278 | |||
279 | if (mask & MAY_NOT_BLOCK) | ||
280 | return -ECHILD; | ||
281 | |||
282 | kn = inode->i_private; | ||
283 | |||
284 | mutex_lock(&kernfs_mutex); | ||
285 | kernfs_refresh_inode(kn, inode); | ||
286 | mutex_unlock(&kernfs_mutex); | ||
287 | |||
288 | return generic_permission(inode, mask); | ||
289 | } | ||
290 | |||
291 | int kernfs_xattr_get(struct kernfs_node *kn, const char *name, | ||
292 | void *value, size_t size) | ||
293 | { | ||
294 | struct kernfs_iattrs *attrs = kernfs_iattrs_noalloc(kn); | ||
295 | if (!attrs) | ||
296 | return -ENODATA; | ||
297 | |||
298 | return simple_xattr_get(&attrs->xattrs, name, value, size); | ||
299 | } | ||
300 | |||
301 | int kernfs_xattr_set(struct kernfs_node *kn, const char *name, | ||
302 | const void *value, size_t size, int flags) | ||
303 | { | ||
304 | struct kernfs_iattrs *attrs = kernfs_iattrs(kn); | ||
305 | if (!attrs) | ||
306 | return -ENOMEM; | ||
307 | |||
308 | return simple_xattr_set(&attrs->xattrs, name, value, size, flags, NULL); | ||
309 | } | ||
310 | |||
/* xattr_handler ->get: rebuild the full "prefix.suffix" name and delegate */
static int kernfs_vfs_xattr_get(const struct xattr_handler *handler,
				struct dentry *unused, struct inode *inode,
				const char *suffix, void *value, size_t size)
{
	const char *name = xattr_full_name(handler, suffix);
	struct kernfs_node *kn = inode->i_private;

	return kernfs_xattr_get(kn, name, value, size);
}
320 | |||
/* xattr_handler ->set: rebuild the full "prefix.suffix" name and delegate */
static int kernfs_vfs_xattr_set(const struct xattr_handler *handler,
				struct dentry *unused, struct inode *inode,
				const char *suffix, const void *value,
				size_t size, int flags)
{
	const char *name = xattr_full_name(handler, suffix);
	struct kernfs_node *kn = inode->i_private;

	return kernfs_xattr_set(kn, name, value, size, flags);
}
331 | |||
/*
 * Add or replace a user.* xattr while enforcing per-node limits on both
 * the entry count (KERNFS_MAX_USER_XATTRS) and the cumulative value size
 * (KERNFS_USER_XATTR_SIZE_LIMIT).  The new entry is charged up front and
 * the charge is backed out on any failure.
 */
static int kernfs_vfs_user_xattr_add(struct kernfs_node *kn,
				     const char *full_name,
				     struct simple_xattrs *xattrs,
				     const void *value, size_t size, int flags)
{
	atomic_t *sz = &kn->iattr->user_xattr_size;
	atomic_t *nr = &kn->iattr->nr_user_xattrs;
	ssize_t removed_size;
	int ret;

	/* optimistically charge the count; roll back if we went over */
	if (atomic_inc_return(nr) > KERNFS_MAX_USER_XATTRS) {
		ret = -ENOSPC;
		goto dec_count_out;
	}

	/* same for the cumulative size */
	if (atomic_add_return(size, sz) > KERNFS_USER_XATTR_SIZE_LIMIT) {
		ret = -ENOSPC;
		goto dec_size_out;
	}

	ret = simple_xattr_set(xattrs, full_name, value, size, flags,
			       &removed_size);

	/*
	 * On success with removed_size >= 0 an existing entry was replaced:
	 * fall through to uncharge the old entry's size and count, leaving
	 * the new entry's charge in place.  A brand-new entry keeps the
	 * full charge and returns here.
	 */
	if (!ret && removed_size >= 0)
		size = removed_size;
	else if (!ret)
		return 0;
dec_size_out:
	atomic_sub(size, sz);
dec_count_out:
	atomic_dec(nr);
	return ret;
}
365 | |||
/*
 * Remove a user.* xattr and uncharge its size and count from the
 * per-node limits maintained by kernfs_vfs_user_xattr_add().
 */
static int kernfs_vfs_user_xattr_rm(struct kernfs_node *kn,
				    const char *full_name,
				    struct simple_xattrs *xattrs,
				    const void *value, size_t size, int flags)
{
	atomic_t *sz = &kn->iattr->user_xattr_size;
	atomic_t *nr = &kn->iattr->nr_user_xattrs;
	ssize_t removed_size;
	int ret;

	ret = simple_xattr_set(xattrs, full_name, value, size, flags,
			       &removed_size);

	/* removed_size >= 0 indicates an entry was actually removed */
	if (removed_size >= 0) {
		atomic_sub(removed_size, sz);
		atomic_dec(nr);
	}

	return ret;
}
386 | |||
387 | static int kernfs_vfs_user_xattr_set(const struct xattr_handler *handler, | ||
388 | struct dentry *unused, struct inode *inode, | ||
389 | const char *suffix, const void *value, | ||
390 | size_t size, int flags) | ||
391 | { | ||
392 | const char *full_name = xattr_full_name(handler, suffix); | ||
393 | struct kernfs_node *kn = inode->i_private; | ||
394 | struct kernfs_iattrs *attrs; | ||
395 | |||
396 | if (!(kernfs_root(kn)->flags & KERNFS_ROOT_SUPPORT_USER_XATTR)) | ||
397 | return -EOPNOTSUPP; | ||
398 | |||
399 | attrs = kernfs_iattrs(kn); | ||
400 | if (!attrs) | ||
401 | return -ENOMEM; | ||
402 | |||
403 | if (value) | ||
404 | return kernfs_vfs_user_xattr_add(kn, full_name, &attrs->xattrs, | ||
405 | value, size, flags); | ||
406 | else | ||
407 | return kernfs_vfs_user_xattr_rm(kn, full_name, &attrs->xattrs, | ||
408 | value, size, flags); | ||
409 | |||
410 | } | ||
411 | |||
/* trusted.* namespace */
static const struct xattr_handler kernfs_trusted_xattr_handler = {
	.prefix = XATTR_TRUSTED_PREFIX,
	.get = kernfs_vfs_xattr_get,
	.set = kernfs_vfs_xattr_set,
};

/* security.* namespace */
static const struct xattr_handler kernfs_security_xattr_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.get = kernfs_vfs_xattr_get,
	.set = kernfs_vfs_xattr_set,
};

/* user.* namespace; writes go through the quota-enforcing setter */
static const struct xattr_handler kernfs_user_xattr_handler = {
	.prefix = XATTR_USER_PREFIX,
	.get = kernfs_vfs_xattr_get,
	.set = kernfs_vfs_user_xattr_set,
};

/* installed on every kernfs super_block via sb->s_xattr */
const struct xattr_handler *kernfs_xattr_handlers[] = {
	&kernfs_trusted_xattr_handler,
	&kernfs_security_xattr_handler,
	&kernfs_user_xattr_handler,
	NULL
};
diff --git a/fs/kernfs/kernfs-internal.h b/fs/kernfs/kernfs-internal.h new file mode 100644 index 000000000..7ee97ef59 --- /dev/null +++ b/fs/kernfs/kernfs-internal.h | |||
@@ -0,0 +1,127 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0-only */ | ||
2 | /* | ||
3 | * fs/kernfs/kernfs-internal.h - kernfs internal header file | ||
4 | * | ||
5 | * Copyright (c) 2001-3 Patrick Mochel | ||
6 | * Copyright (c) 2007 SUSE Linux Products GmbH | ||
7 | * Copyright (c) 2007, 2013 Tejun Heo <teheo@suse.de> | ||
8 | */ | ||
9 | |||
10 | #ifndef __KERNFS_INTERNAL_H | ||
11 | #define __KERNFS_INTERNAL_H | ||
12 | |||
13 | #include <linux/lockdep.h> | ||
14 | #include <linux/fs.h> | ||
15 | #include <linux/mutex.h> | ||
16 | #include <linux/xattr.h> | ||
17 | |||
18 | #include <linux/kernfs.h> | ||
19 | #include <linux/fs_context.h> | ||
20 | |||
/*
 * Persistent inode attributes for a kernfs_node.  Allocated on demand by
 * __kernfs_iattrs() the first time a node needs non-default attributes
 * or any xattr.
 */
struct kernfs_iattrs {
	kuid_t ia_uid;
	kgid_t ia_gid;
	struct timespec64 ia_atime;
	struct timespec64 ia_mtime;
	struct timespec64 ia_ctime;

	struct simple_xattrs xattrs;
	atomic_t nr_user_xattrs;	/* count of user.* xattrs */
	atomic_t user_xattr_size;	/* cumulative size of user.* values */
};
32 | |||
33 | /* +1 to avoid triggering overflow warning when negating it */ | ||
34 | #define KN_DEACTIVATED_BIAS (INT_MIN + 1) | ||
35 | |||
36 | /* KERNFS_TYPE_MASK and types are defined in include/linux/kernfs.h */ | ||
37 | |||
/**
 * kernfs_root - find out the kernfs_root a kernfs_node belongs to
 * @kn: kernfs_node of interest
 *
 * Return the kernfs_root @kn belongs to.
 */
static inline struct kernfs_root *kernfs_root(struct kernfs_node *kn)
{
	/* if parent exists, it's always a dir; otherwise, @kn is a dir */
	if (kn->parent)
		kn = kn->parent;
	return kn->dir.root;
}
51 | |||
52 | /* | ||
53 | * mount.c | ||
54 | */ | ||
/* per-super_block private data, stored in sb->s_fs_info */
struct kernfs_super_info {
	struct super_block *sb;

	/*
	 * The root associated with this super_block.  Each super_block is
	 * identified by the root and ns it's associated with.
	 */
	struct kernfs_root *root;

	/*
	 * Each sb is associated with one namespace tag, currently the
	 * network namespace of the task which mounted this kernfs
	 * instance.  If multiple tags become necessary, make the following
	 * an array and compare kernfs_node tag against every entry.
	 */
	const void *ns;

	/* anchored at kernfs_root->supers, protected by kernfs_mutex */
	struct list_head node;
};
#define kernfs_info(SB) ((struct kernfs_super_info *)(SB->s_fs_info))
76 | |||
77 | static inline struct kernfs_node *kernfs_dentry_node(struct dentry *dentry) | ||
78 | { | ||
79 | if (d_really_is_negative(dentry)) | ||
80 | return NULL; | ||
81 | return d_inode(dentry)->i_private; | ||
82 | } | ||
83 | |||
84 | extern const struct super_operations kernfs_sops; | ||
85 | extern struct kmem_cache *kernfs_node_cache, *kernfs_iattrs_cache; | ||
86 | |||
87 | /* | ||
88 | * inode.c | ||
89 | */ | ||
90 | extern const struct xattr_handler *kernfs_xattr_handlers[]; | ||
91 | void kernfs_evict_inode(struct inode *inode); | ||
92 | int kernfs_iop_permission(struct inode *inode, int mask); | ||
93 | int kernfs_iop_setattr(struct dentry *dentry, struct iattr *iattr); | ||
94 | int kernfs_iop_getattr(const struct path *path, struct kstat *stat, | ||
95 | u32 request_mask, unsigned int query_flags); | ||
96 | ssize_t kernfs_iop_listxattr(struct dentry *dentry, char *buf, size_t size); | ||
97 | int __kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr); | ||
98 | |||
99 | /* | ||
100 | * dir.c | ||
101 | */ | ||
102 | extern struct mutex kernfs_mutex; | ||
103 | extern const struct dentry_operations kernfs_dops; | ||
104 | extern const struct file_operations kernfs_dir_fops; | ||
105 | extern const struct inode_operations kernfs_dir_iops; | ||
106 | |||
107 | struct kernfs_node *kernfs_get_active(struct kernfs_node *kn); | ||
108 | void kernfs_put_active(struct kernfs_node *kn); | ||
109 | int kernfs_add_one(struct kernfs_node *kn); | ||
110 | struct kernfs_node *kernfs_new_node(struct kernfs_node *parent, | ||
111 | const char *name, umode_t mode, | ||
112 | kuid_t uid, kgid_t gid, | ||
113 | unsigned flags); | ||
114 | |||
115 | /* | ||
116 | * file.c | ||
117 | */ | ||
118 | extern const struct file_operations kernfs_file_fops; | ||
119 | |||
120 | void kernfs_drain_open_files(struct kernfs_node *kn); | ||
121 | |||
122 | /* | ||
123 | * symlink.c | ||
124 | */ | ||
125 | extern const struct inode_operations kernfs_symlink_iops; | ||
126 | |||
127 | #endif /* __KERNFS_INTERNAL_H */ | ||
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c new file mode 100644 index 000000000..9dc7e7a64 --- /dev/null +++ b/fs/kernfs/mount.c | |||
@@ -0,0 +1,397 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-only | ||
2 | /* | ||
3 | * fs/kernfs/mount.c - kernfs mount implementation | ||
4 | * | ||
5 | * Copyright (c) 2001-3 Patrick Mochel | ||
6 | * Copyright (c) 2007 SUSE Linux Products GmbH | ||
7 | * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org> | ||
8 | */ | ||
9 | |||
10 | #include <linux/fs.h> | ||
11 | #include <linux/mount.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/magic.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/pagemap.h> | ||
16 | #include <linux/namei.h> | ||
17 | #include <linux/seq_file.h> | ||
18 | #include <linux/exportfs.h> | ||
19 | |||
20 | #include "kernfs-internal.h" | ||
21 | |||
22 | struct kmem_cache *kernfs_node_cache, *kernfs_iattrs_cache; | ||
23 | |||
24 | static int kernfs_sop_show_options(struct seq_file *sf, struct dentry *dentry) | ||
25 | { | ||
26 | struct kernfs_root *root = kernfs_root(kernfs_dentry_node(dentry)); | ||
27 | struct kernfs_syscall_ops *scops = root->syscall_ops; | ||
28 | |||
29 | if (scops && scops->show_options) | ||
30 | return scops->show_options(sf, root); | ||
31 | return 0; | ||
32 | } | ||
33 | |||
/* ->show_path: let the kernfs user render the path, else print the dentry */
static int kernfs_sop_show_path(struct seq_file *sf, struct dentry *dentry)
{
	struct kernfs_node *node = kernfs_dentry_node(dentry);
	struct kernfs_root *root = kernfs_root(node);
	struct kernfs_syscall_ops *scops = root->syscall_ops;

	if (scops && scops->show_path)
		return scops->show_path(sf, node, root);

	/* default: emit the dentry, escaping whitespace and backslash */
	seq_dentry(sf, dentry, " \t\n\\");
	return 0;
}
46 | |||
/*
 * Super operations shared by all kernfs mounts.  generic_delete_inode as
 * ->drop_inode means inodes are never cached past their last reference;
 * kernfs_evict_inode then drops the kernfs_node reference.
 */
const struct super_operations kernfs_sops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,
	.evict_inode	= kernfs_evict_inode,

	.show_options	= kernfs_sop_show_options,
	.show_path	= kernfs_sop_show_path,
};
55 | |||
/*
 * ->encode_fh: store the 64-bit kernfs node id in two 32-bit words.
 * *max_len is counted in 32-bit words; report FILEID_INVALID (with the
 * required length) when the caller's buffer is too small.
 */
static int kernfs_encode_fh(struct inode *inode, __u32 *fh, int *max_len,
			    struct inode *parent)
{
	struct kernfs_node *kn = inode->i_private;

	if (*max_len < 2) {
		*max_len = 2;
		return FILEID_INVALID;
	}

	*max_len = 2;
	*(u64 *)fh = kn->id;
	return FILEID_KERNFS;
}
70 | |||
/*
 * Common helper for ->fh_to_dentry and ->fh_to_parent: decode a kernfs
 * node id from @fid, look up the node (or, with @get_parent, its parent)
 * and return a dentry for it.  Returns NULL for unusable handles and
 * ERR_PTR(-ESTALE) when the node no longer exists.
 */
static struct dentry *__kernfs_fh_to_dentry(struct super_block *sb,
					    struct fid *fid, int fh_len,
					    int fh_type, bool get_parent)
{
	struct kernfs_super_info *info = kernfs_info(sb);
	struct kernfs_node *kn;
	struct inode *inode;
	u64 id;

	if (fh_len < 2)
		return NULL;

	switch (fh_type) {
	case FILEID_KERNFS:
		/* native handle: the raw 64-bit id (see kernfs_encode_fh) */
		id = *(u64 *)fid;
		break;
	case FILEID_INO32_GEN:
	case FILEID_INO32_GEN_PARENT:
		/*
		 * blk_log_action() exposes "LOW32,HIGH32" pair without
		 * type and userland can call us with generic fid
		 * constructed from them.  Combine it back to ID.  See
		 * blk_log_action().
		 */
		id = ((u64)fid->i32.gen << 32) | fid->i32.ino;
		break;
	default:
		return NULL;
	}

	kn = kernfs_find_and_get_node_by_id(info->root, id);
	if (!kn)
		return ERR_PTR(-ESTALE);

	if (get_parent) {
		struct kernfs_node *parent;

		/* swap our node ref for a ref on the parent */
		parent = kernfs_get_parent(kn);
		kernfs_put(kn);
		kn = parent;
		if (!kn)
			return ERR_PTR(-ESTALE);
	}

	inode = kernfs_get_inode(sb, kn);
	kernfs_put(kn);
	if (!inode)
		return ERR_PTR(-ESTALE);

	return d_obtain_alias(inode);
}
122 | |||
/* ->fh_to_dentry: resolve a handle to the node itself */
static struct dentry *kernfs_fh_to_dentry(struct super_block *sb,
					  struct fid *fid, int fh_len,
					  int fh_type)
{
	return __kernfs_fh_to_dentry(sb, fid, fh_len, fh_type, false);
}

/* ->fh_to_parent: resolve a handle to the node's parent */
static struct dentry *kernfs_fh_to_parent(struct super_block *sb,
					  struct fid *fid, int fh_len,
					  int fh_type)
{
	return __kernfs_fh_to_dentry(sb, fid, fh_len, fh_type, true);
}
136 | |||
/* ->get_parent: obtain a dentry for @child's parent node */
static struct dentry *kernfs_get_parent_dentry(struct dentry *child)
{
	struct kernfs_node *kn = kernfs_dentry_node(child);

	return d_obtain_alias(kernfs_get_inode(child->d_sb, kn->parent));
}

/* installed when the root sets KERNFS_ROOT_SUPPORT_EXPORTOP */
static const struct export_operations kernfs_export_ops = {
	.encode_fh	= kernfs_encode_fh,
	.fh_to_dentry	= kernfs_fh_to_dentry,
	.fh_to_parent	= kernfs_fh_to_parent,
	.get_parent	= kernfs_get_parent_dentry,
};
150 | |||
151 | /** | ||
152 | * kernfs_root_from_sb - determine kernfs_root associated with a super_block | ||
153 | * @sb: the super_block in question | ||
154 | * | ||
155 | * Return the kernfs_root associated with @sb. If @sb is not a kernfs one, | ||
156 | * %NULL is returned. | ||
157 | */ | ||
158 | struct kernfs_root *kernfs_root_from_sb(struct super_block *sb) | ||
159 | { | ||
160 | if (sb->s_op == &kernfs_sops) | ||
161 | return kernfs_info(sb)->root; | ||
162 | return NULL; | ||
163 | } | ||
164 | |||
165 | /* | ||
166 | * find the next ancestor in the path down to @child, where @parent was the | ||
167 | * ancestor whose descendant we want to find. | ||
168 | * | ||
169 | * Say the path is /a/b/c/d. @child is d, @parent is NULL. We return the root | ||
170 | * node. If @parent is b, then we return the node for c. | ||
171 | * Passing in d as @parent is not ok. | ||
172 | */ | ||
173 | static struct kernfs_node *find_next_ancestor(struct kernfs_node *child, | ||
174 | struct kernfs_node *parent) | ||
175 | { | ||
176 | if (child == parent) { | ||
177 | pr_crit_once("BUG in find_next_ancestor: called with parent == child"); | ||
178 | return NULL; | ||
179 | } | ||
180 | |||
181 | while (child->parent != parent) { | ||
182 | if (!child->parent) | ||
183 | return NULL; | ||
184 | child = child->parent; | ||
185 | } | ||
186 | |||
187 | return child; | ||
188 | } | ||
189 | |||
/**
 * kernfs_node_dentry - get a dentry for the given kernfs_node
 * @kn: kernfs_node for which a dentry is needed
 * @sb: the kernfs super_block
 *
 * Walks the path from the root down to @kn, looking up one component at a
 * time.  Returns a referenced dentry on success or an ERR_PTR().
 */
struct dentry *kernfs_node_dentry(struct kernfs_node *kn,
				  struct super_block *sb)
{
	struct dentry *dentry;
	struct kernfs_node *knparent = NULL;

	BUG_ON(sb->s_op != &kernfs_sops);

	dentry = dget(sb->s_root);

	/* Check if this is the root kernfs_node */
	if (!kn->parent)
		return dentry;

	knparent = find_next_ancestor(kn, NULL);
	if (WARN_ON(!knparent)) {
		dput(dentry);
		return ERR_PTR(-EINVAL);
	}

	do {
		struct dentry *dtmp;
		struct kernfs_node *kntmp;

		if (kn == knparent)
			return dentry;
		/* descend one level: next ancestor of @kn below @knparent */
		kntmp = find_next_ancestor(kn, knparent);
		if (WARN_ON(!kntmp)) {
			dput(dentry);
			return ERR_PTR(-EINVAL);
		}
		dtmp = lookup_positive_unlocked(kntmp->name, dentry,
						strlen(kntmp->name));
		/* drop the previous level's dentry ref in every case */
		dput(dentry);
		if (IS_ERR(dtmp))
			return dtmp;
		knparent = kntmp;
		dentry = dtmp;
	} while (true);
}
235 | |||
/*
 * Fill in a new kernfs super_block: basic parameters, export ops when the
 * root supports them, and the root inode/dentry.  Returns 0 or -errno.
 */
static int kernfs_fill_super(struct super_block *sb, struct kernfs_fs_context *kfc)
{
	struct kernfs_super_info *info = kernfs_info(sb);
	struct inode *inode;
	struct dentry *root;

	info->sb = sb;
	/* Userspace would break if executables or devices appear on sysfs */
	sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = kfc->magic;
	sb->s_op = &kernfs_sops;
	sb->s_xattr = kernfs_xattr_handlers;
	if (info->root->flags & KERNFS_ROOT_SUPPORT_EXPORTOP)
		sb->s_export_op = &kernfs_export_ops;
	sb->s_time_gran = 1;

	/* sysfs dentries and inodes don't require IO to create */
	sb->s_shrink.seeks = 0;

	/* get root inode, initialize and unlock it */
	mutex_lock(&kernfs_mutex);
	inode = kernfs_get_inode(sb, info->root->kn);
	mutex_unlock(&kernfs_mutex);
	if (!inode) {
		pr_debug("kernfs: could not get root inode\n");
		return -ENOMEM;
	}

	/* instantiate and link root dentry */
	root = d_make_root(inode);
	if (!root) {
		pr_debug("%s: could not get root dentry!\n", __func__);
		return -ENOMEM;
	}
	sb->s_root = root;
	sb->s_d_op = &kernfs_dops;
	return 0;
}
276 | |||
277 | static int kernfs_test_super(struct super_block *sb, struct fs_context *fc) | ||
278 | { | ||
279 | struct kernfs_super_info *sb_info = kernfs_info(sb); | ||
280 | struct kernfs_super_info *info = fc->s_fs_info; | ||
281 | |||
282 | return sb_info->root == info->root && sb_info->ns == info->ns; | ||
283 | } | ||
284 | |||
285 | static int kernfs_set_super(struct super_block *sb, struct fs_context *fc) | ||
286 | { | ||
287 | struct kernfs_fs_context *kfc = fc->fs_private; | ||
288 | |||
289 | kfc->ns_tag = NULL; | ||
290 | return set_anon_super_fc(sb, fc); | ||
291 | } | ||
292 | |||
293 | /** | ||
294 | * kernfs_super_ns - determine the namespace tag of a kernfs super_block | ||
295 | * @sb: super_block of interest | ||
296 | * | ||
297 | * Return the namespace tag associated with kernfs super_block @sb. | ||
298 | */ | ||
299 | const void *kernfs_super_ns(struct super_block *sb) | ||
300 | { | ||
301 | struct kernfs_super_info *info = kernfs_info(sb); | ||
302 | |||
303 | return info->ns; | ||
304 | } | ||
305 | |||
/**
 * kernfs_get_tree - kernfs filesystem access/retrieval helper
 * @fc: The filesystem context.
 *
 * This is to be called from each kernfs user's fs_context->ops->get_tree()
 * implementation, which should set the specified ->@fs_type and ->@flags, and
 * specify the hierarchy and namespace tag to mount via ->@root and ->@ns,
 * respectively.
 *
 * Returns 0 with fc->root holding a reference on the superblock's root
 * dentry, or a negative errno.
 */
int kernfs_get_tree(struct fs_context *fc)
{
	struct kernfs_fs_context *kfc = fc->fs_private;
	struct super_block *sb;
	struct kernfs_super_info *info;
	int error;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->root = kfc->root;
	info->ns = kfc->ns_tag;
	INIT_LIST_HEAD(&info->node);

	/*
	 * Ownership of @info passes to fc->s_fs_info; if no superblock ends
	 * up owning it, it is released via kernfs_free_fs_context().
	 */
	fc->s_fs_info = info;
	sb = sget_fc(fc, kernfs_test_super, kernfs_set_super);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	/* no root dentry yet means sget_fc() created a fresh superblock */
	if (!sb->s_root) {
		/* shadows the outer @info: fetch the one attached to @sb */
		struct kernfs_super_info *info = kernfs_info(sb);

		/* let the caller know extra per-new-sb setup may be needed */
		kfc->new_sb_created = true;

		error = kernfs_fill_super(sb, kfc);
		if (error) {
			deactivate_locked_super(sb);
			return error;
		}
		sb->s_flags |= SB_ACTIVE;

		/* publish the new sb on its kernfs_root's ->supers list */
		mutex_lock(&kernfs_mutex);
		list_add(&info->node, &info->root->supers);
		mutex_unlock(&kernfs_mutex);
	}

	fc->root = dget(sb->s_root);
	return 0;
}
355 | |||
356 | void kernfs_free_fs_context(struct fs_context *fc) | ||
357 | { | ||
358 | /* Note that we don't deal with kfc->ns_tag here. */ | ||
359 | kfree(fc->s_fs_info); | ||
360 | fc->s_fs_info = NULL; | ||
361 | } | ||
362 | |||
/**
 * kernfs_kill_sb - kill_sb for kernfs
 * @sb: super_block being killed
 *
 * This can be used directly for file_system_type->kill_sb().  If a kernfs
 * user needs extra cleanup, it can implement its own kill_sb() and call
 * this function at the end.
 */
void kernfs_kill_sb(struct super_block *sb)
{
	struct kernfs_super_info *info = kernfs_info(sb);

	/* unhook from the kernfs_root's ->supers list first, under the
	 * same mutex kernfs_get_tree() used to add it */
	mutex_lock(&kernfs_mutex);
	list_del(&info->node);
	mutex_unlock(&kernfs_mutex);

	/*
	 * Remove the superblock from fs_supers/s_instances
	 * so we can't find it, before freeing kernfs_super_info.
	 */
	kill_anon_super(sb);
	kfree(info);
}
386 | |||
387 | void __init kernfs_init(void) | ||
388 | { | ||
389 | kernfs_node_cache = kmem_cache_create("kernfs_node_cache", | ||
390 | sizeof(struct kernfs_node), | ||
391 | 0, SLAB_PANIC, NULL); | ||
392 | |||
393 | /* Creates slab cache for kernfs inode attributes */ | ||
394 | kernfs_iattrs_cache = kmem_cache_create("kernfs_iattrs_cache", | ||
395 | sizeof(struct kernfs_iattrs), | ||
396 | 0, SLAB_PANIC, NULL); | ||
397 | } | ||
diff --git a/fs/kernfs/symlink.c b/fs/kernfs/symlink.c new file mode 100644 index 000000000..5432883d8 --- /dev/null +++ b/fs/kernfs/symlink.c | |||
@@ -0,0 +1,153 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-only | ||
2 | /* | ||
3 | * fs/kernfs/symlink.c - kernfs symlink implementation | ||
4 | * | ||
5 | * Copyright (c) 2001-3 Patrick Mochel | ||
6 | * Copyright (c) 2007 SUSE Linux Products GmbH | ||
7 | * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org> | ||
8 | */ | ||
9 | |||
10 | #include <linux/fs.h> | ||
11 | #include <linux/gfp.h> | ||
12 | #include <linux/namei.h> | ||
13 | |||
14 | #include "kernfs-internal.h" | ||
15 | |||
16 | /** | ||
17 | * kernfs_create_link - create a symlink | ||
18 | * @parent: directory to create the symlink in | ||
19 | * @name: name of the symlink | ||
20 | * @target: target node for the symlink to point to | ||
21 | * | ||
22 | * Returns the created node on success, ERR_PTR() value on error. | ||
23 | * Ownership of the link matches ownership of the target. | ||
24 | */ | ||
25 | struct kernfs_node *kernfs_create_link(struct kernfs_node *parent, | ||
26 | const char *name, | ||
27 | struct kernfs_node *target) | ||
28 | { | ||
29 | struct kernfs_node *kn; | ||
30 | int error; | ||
31 | kuid_t uid = GLOBAL_ROOT_UID; | ||
32 | kgid_t gid = GLOBAL_ROOT_GID; | ||
33 | |||
34 | if (target->iattr) { | ||
35 | uid = target->iattr->ia_uid; | ||
36 | gid = target->iattr->ia_gid; | ||
37 | } | ||
38 | |||
39 | kn = kernfs_new_node(parent, name, S_IFLNK|S_IRWXUGO, uid, gid, | ||
40 | KERNFS_LINK); | ||
41 | if (!kn) | ||
42 | return ERR_PTR(-ENOMEM); | ||
43 | |||
44 | if (kernfs_ns_enabled(parent)) | ||
45 | kn->ns = target->ns; | ||
46 | kn->symlink.target_kn = target; | ||
47 | kernfs_get(target); /* ref owned by symlink */ | ||
48 | |||
49 | error = kernfs_add_one(kn); | ||
50 | if (!error) | ||
51 | return kn; | ||
52 | |||
53 | kernfs_put(kn); | ||
54 | return ERR_PTR(error); | ||
55 | } | ||
56 | |||
/*
 * kernfs_get_target_path - build the relative-path body of a symlink
 * @parent: directory node containing the symlink
 * @target: node the symlink points at
 * @path: output buffer, PATH_MAX bytes
 *
 * Emits "../" once per level from @parent up to the common ancestor of
 * @parent and @target, then the component names down to @target.  The
 * terminating NUL comes from the caller's zeroed buffer (see the
 * kzalloc() in kernfs_iop_get_link()).
 *
 * Returns 0 on success, -EINVAL when there is no downward component to
 * emit, or -ENAMETOOLONG if the result would not fit in PATH_MAX.
 */
static int kernfs_get_target_path(struct kernfs_node *parent,
				  struct kernfs_node *target, char *path)
{
	struct kernfs_node *base, *kn;
	char *s = path;
	int len = 0;

	/* go up to the root, stop at the base */
	base = parent;
	while (base->parent) {
		/* is @base on @target's ancestor chain? */
		kn = target->parent;
		while (kn->parent && base != kn)
			kn = kn->parent;

		if (base == kn)
			break;

		/* room for "../"; '>=' also keeps one byte for the NUL */
		if ((s - path) + 3 >= PATH_MAX)
			return -ENAMETOOLONG;

		strcpy(s, "../");
		s += 3;
		base = base->parent;
	}

	/* determine end of target string for reverse fillup */
	kn = target;
	while (kn->parent && kn != base) {
		len += strlen(kn->name) + 1;	/* name plus one '/' separator */
		kn = kn->parent;
	}

	/* check limits */
	if (len < 2)
		return -EINVAL;	/* target == base (or degenerate name): nothing to emit */
	len--;			/* no separator before the first component */
	if ((s - path) + len >= PATH_MAX)
		return -ENAMETOOLONG;

	/* reverse fillup of target string from target to base */
	kn = target;
	while (kn->parent && kn != base) {
		int slen = strlen(kn->name);

		len -= slen;
		memcpy(s + len, kn->name, slen);
		if (len)
			s[--len] = '/';	/* separator between components */

		kn = kn->parent;
	}

	return 0;
}
111 | |||
112 | static int kernfs_getlink(struct inode *inode, char *path) | ||
113 | { | ||
114 | struct kernfs_node *kn = inode->i_private; | ||
115 | struct kernfs_node *parent = kn->parent; | ||
116 | struct kernfs_node *target = kn->symlink.target_kn; | ||
117 | int error; | ||
118 | |||
119 | mutex_lock(&kernfs_mutex); | ||
120 | error = kernfs_get_target_path(parent, target, path); | ||
121 | mutex_unlock(&kernfs_mutex); | ||
122 | |||
123 | return error; | ||
124 | } | ||
125 | |||
126 | static const char *kernfs_iop_get_link(struct dentry *dentry, | ||
127 | struct inode *inode, | ||
128 | struct delayed_call *done) | ||
129 | { | ||
130 | char *body; | ||
131 | int error; | ||
132 | |||
133 | if (!dentry) | ||
134 | return ERR_PTR(-ECHILD); | ||
135 | body = kzalloc(PAGE_SIZE, GFP_KERNEL); | ||
136 | if (!body) | ||
137 | return ERR_PTR(-ENOMEM); | ||
138 | error = kernfs_getlink(inode, body); | ||
139 | if (unlikely(error < 0)) { | ||
140 | kfree(body); | ||
141 | return ERR_PTR(error); | ||
142 | } | ||
143 | set_delayed_call(done, kfree_link, body); | ||
144 | return body; | ||
145 | } | ||
146 | |||
147 | const struct inode_operations kernfs_symlink_iops = { | ||
148 | .listxattr = kernfs_iop_listxattr, | ||
149 | .get_link = kernfs_iop_get_link, | ||
150 | .setattr = kernfs_iop_setattr, | ||
151 | .getattr = kernfs_iop_getattr, | ||
152 | .permission = kernfs_iop_permission, | ||
153 | }; | ||