source: scripts/untested/blfs-patches/inotify-0.17-rml-2.6.10-2.patch @ b67b7ca

Branches: clfs-1.2, clfs-2.1, clfs-3.0.0-systemd, clfs-3.0.0-sysvinit, systemd, sysvinit
Last change on this file since b67b7ca was 617118d, checked in by Jim Gifford <clfs@…>, 19 years ago

r561@server (orig r559): root | 2005-06-05 02:38:49 -0700
Fixed Directory Structure

  • Property mode set to 100644
File size: 45.2 KB
inotify.

Signed-off-by: Robert Love <rml@novell.com>

 drivers/char/Kconfig       |   13
 drivers/char/Makefile      |    2
 drivers/char/inotify.c     | 1024 ++++++++++++++++++++++++++++++++++++++++++++
 drivers/char/misc.c        |   14
 fs/attr.c                  |   73 ++-
 fs/file_table.c            |    7
 fs/inode.c                 |    3
 fs/namei.c                 |   36 +
 fs/open.c                  |    5
 fs/read_write.c            |   33 +
 fs/super.c                 |    2
 include/linux/fs.h         |    7
 include/linux/inotify.h    |  155 ++++++
 include/linux/miscdevice.h |    5
 include/linux/sched.h      |    2
 kernel/user.c              |    2
 16 files changed, 1344 insertions(+), 39 deletions(-)

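For reference, here is a minimal user-space sketch of driving the interface this patch adds. The structures, ioctl numbers and event masks come from the include/linux/inotify.h hunk below, and the /dev/inotify node name follows the Kconfig help text; the trivial error handling and the assumption that the header is installed as <linux/inotify.h> are mine, not part of the patch.

/* sketch: watch one directory through the /dev/inotify character device */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/inotify.h>

int main(int argc, char *argv[])
{
	struct inotify_watch_request req;
	struct inotify_event ev;
	int fd, wd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <directory>\n", argv[0]);
		return 1;
	}

	fd = open("/dev/inotify", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/inotify");
		return 1;
	}

	/* dirname is resolved in the kernel by find_inode(); mask selects events */
	req.dirname = argv[1];
	req.mask = IN_MODIFY | IN_CREATE_FILE | IN_DELETE_FILE;

	wd = ioctl(fd, INOTIFY_WATCH, &req);	/* returns the watch descriptor */
	if (wd < 0) {
		perror("INOTIFY_WATCH");
		return 1;
	}

	/* reads must be a multiple of sizeof(struct inotify_event) */
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("wd=%d mask=0x%08x cookie=%u name=%s\n",
		       ev.wd, ev.mask, ev.cookie, ev.filename);

	ioctl(fd, INOTIFY_IGNORE, &wd);		/* drop the watch again */
	close(fd);
	return 0;
}
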
23diff -urN linux-2.6.10/drivers/char/inotify.c linux/drivers/char/inotify.c
24--- linux-2.6.10/drivers/char/inotify.c 1969-12-31 19:00:00.000000000 -0500
25+++ linux/drivers/char/inotify.c 2005-01-06 15:04:03.739780352 -0500
26@@ -0,0 +1,1024 @@
27+/*
28+ * Inode based directory notifications for Linux.
29+ *
30+ * Copyright (C) 2004 John McCutchan
31+ *
32+ * This program is free software; you can redistribute it and/or modify it
33+ * under the terms of the GNU General Public License as published by the
34+ * Free Software Foundation; either version 2, or (at your option) any
35+ * later version.
36+ *
37+ * This program is distributed in the hope that it will be useful, but
38+ * WITHOUT ANY WARRANTY; without even the implied warranty of
39+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
40+ * General Public License for more details.
41+ */
42+
43+#include <linux/module.h>
44+#include <linux/kernel.h>
45+#include <linux/sched.h>
46+#include <linux/spinlock.h>
47+#include <linux/idr.h>
48+#include <linux/slab.h>
49+#include <linux/fs.h>
50+#include <linux/namei.h>
51+#include <linux/poll.h>
52+#include <linux/device.h>
53+#include <linux/miscdevice.h>
54+#include <linux/init.h>
55+#include <linux/list.h>
56+#include <linux/writeback.h>
57+#include <linux/inotify.h>
58+
59+#include <asm/ioctls.h>
60+
61+static atomic_t inotify_cookie;
62+static kmem_cache_t *watch_cachep;
63+static kmem_cache_t *event_cachep;
64+static kmem_cache_t *inode_data_cachep;
65+
66+static int sysfs_attrib_max_user_devices;
67+static int sysfs_attrib_max_user_watches;
68+static unsigned int sysfs_attrib_max_queued_events;
69+
70+/*
71+ * struct inotify_device - represents an open instance of an inotify device
72+ *
73+ * For each inotify device, we need to keep track of the events queued on it,
74+ * a list of the inodes that we are watching, and so on.
75+ *
76+ * This structure is protected by 'lock'. Lock ordering:
77+ *
78+ * dev->lock (protects dev)
79+ * inode_lock (used to safely walk inode_in_use list)
80+ * inode->i_lock (only needed for getting ref on inode_data)
81+ */
82+struct inotify_device {
83+ wait_queue_head_t wait;
84+ struct idr idr;
85+ struct list_head events;
86+ struct list_head watches;
87+ spinlock_t lock;
88+ unsigned int event_count;
89+ unsigned int max_events;
90+ struct user_struct *user;
91+};
92+
93+struct inotify_watch {
94+ s32 wd; /* watch descriptor */
95+ u32 mask;
96+ struct inode *inode;
97+ struct inotify_device *dev;
98+ struct list_head d_list; /* device list */
99+ struct list_head i_list; /* inode list */
100+};
101+
102+static ssize_t show_max_queued_events(struct class_device *class, char *buf)
103+{
104+ return sprintf(buf, "%d\n", sysfs_attrib_max_queued_events);
105+}
106+
107+static ssize_t store_max_queued_events(struct class_device *class,
108+ const char *buf, size_t count)
109+{
110+ unsigned int max;
111+
112+ if (sscanf(buf, "%u", &max) > 0 && max > 0) {
113+ sysfs_attrib_max_queued_events = max;
114+ return strlen(buf);
115+ }
116+ return -EINVAL;
117+}
118+
119+static ssize_t show_max_user_devices(struct class_device *class, char *buf)
120+{
121+ return sprintf(buf, "%d\n", sysfs_attrib_max_user_devices);
122+}
123+
124+static ssize_t store_max_user_devices(struct class_device *class,
125+ const char *buf, size_t count)
126+{
127+ int max;
128+
129+ if (sscanf(buf, "%d", &max) > 0 && max > 0) {
130+ sysfs_attrib_max_user_devices = max;
131+ return strlen(buf);
132+ }
133+ return -EINVAL;
134+}
135+
136+static ssize_t show_max_user_watches(struct class_device *class, char *buf)
137+{
138+ return sprintf(buf, "%d\n", sysfs_attrib_max_user_watches);
139+}
140+
141+static ssize_t store_max_user_watches(struct class_device *class,
142+ const char *buf, size_t count)
143+{
144+ int max;
145+
146+ if (sscanf(buf, "%d", &max) > 0 && max > 0) {
147+ sysfs_attrib_max_user_watches = max;
148+ return strlen(buf);
149+ }
150+ return -EINVAL;
151+}
152+
153+static CLASS_DEVICE_ATTR(max_queued_events, S_IRUGO | S_IWUSR,
154+ show_max_queued_events, store_max_queued_events);
155+static CLASS_DEVICE_ATTR(max_user_devices, S_IRUGO | S_IWUSR,
156+ show_max_user_devices, store_max_user_devices);
157+static CLASS_DEVICE_ATTR(max_user_watches, S_IRUGO | S_IWUSR,
158+ show_max_user_watches, store_max_user_watches);
159+
160+/*
161+ * A list of these is attached to each instance of the driver
162+ * when the drivers read() gets called, this list is walked and
163+ * all events that can fit in the buffer get delivered
164+ */
165+struct inotify_kernel_event {
166+ struct list_head list;
167+ struct inotify_event event;
168+};
169+
170+static inline void __get_inode_data(struct inotify_inode_data *data)
171+{
172+ atomic_inc(&data->count);
173+}
174+
175+/*
176+ * get_inode_data - pin an inotify_inode_data structure. Returns the structure
177+ * if successful and NULL on failure, which can only occur if inotify_data is
178+ * not yet allocated. The inode must be pinned prior to invocation.
179+ */
180+static inline struct inotify_inode_data * get_inode_data(struct inode *inode)
181+{
182+ struct inotify_inode_data *data;
183+
184+ spin_lock(&inode->i_lock);
185+ data = inode->inotify_data;
186+ if (data)
187+ __get_inode_data(data);
188+ spin_unlock(&inode->i_lock);
189+
190+ return data;
191+}
192+
193+/*
194+ * put_inode_data - drop our reference on an inotify_inode_data and the
195+ * inode structure in which it lives. If the reference count on inotify_data
196+ * reaches zero, free it.
197+ */
198+static inline void put_inode_data(struct inode *inode)
199+{
200+ //spin_lock(&inode->i_lock);
201+ if (atomic_dec_and_test(&inode->inotify_data->count)) {
202+ kmem_cache_free(inode_data_cachep, inode->inotify_data);
203+ inode->inotify_data = NULL;
204+ }
205+ //spin_unlock(&inode->i_lock);
206+}
207+
208+/*
209+ * find_inode - resolve a user-given path to a specific inode and iget() it
210+ */
211+static struct inode * find_inode(const char __user *dirname)
212+{
213+ struct inode *inode;
214+ struct nameidata nd;
215+ int error;
216+
217+ error = __user_walk(dirname, LOOKUP_FOLLOW, &nd);
218+ if (error)
219+ return ERR_PTR(error);
220+
221+ inode = nd.dentry->d_inode;
222+
223+ /* you can only watch an inode if you have read permissions on it */
224+ error = permission(inode, MAY_READ, NULL);
225+ if (error) {
226+ inode = ERR_PTR(error);
227+ goto release_and_out;
228+ }
229+
230+ spin_lock(&inode_lock);
231+ __iget(inode);
232+ spin_unlock(&inode_lock);
233+release_and_out:
234+ path_release(&nd);
235+ return inode;
236+}
237+
238+struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
239+ const char *filename)
240+{
241+ struct inotify_kernel_event *kevent;
242+
243+ kevent = kmem_cache_alloc(event_cachep, GFP_ATOMIC);
244+ if (!kevent)
245+ return NULL;
246+
247+ /* we hand this out to user-space, so zero it out just in case */
248+ memset(kevent, 0, sizeof(struct inotify_kernel_event));
249+
250+ kevent->event.wd = wd;
251+ kevent->event.mask = mask;
252+ kevent->event.cookie = cookie;
253+ INIT_LIST_HEAD(&kevent->list);
254+
255+ if (filename) {
256+ strncpy(kevent->event.filename, filename, INOTIFY_FILENAME_MAX);
257+ kevent->event.filename[INOTIFY_FILENAME_MAX-1] = '\0';
258+ } else
259+ kevent->event.filename[0] = '\0';
260+
261+ return kevent;
262+}
263+
264+void delete_kernel_event(struct inotify_kernel_event *kevent)
265+{
266+ if (!kevent)
267+ return;
268+ kmem_cache_free(event_cachep, kevent);
269+}
270+
271+#define list_to_inotify_kernel_event(pos) \
272+ list_entry((pos), struct inotify_kernel_event, list)
273+
274+#define inotify_dev_get_event(dev) \
275+ (list_to_inotify_kernel_event(dev->events.next))
276+
277+/* Does this events mask get sent to the watch ? */
278+#define event_and(event_mask,watches_mask) ((event_mask == IN_UNMOUNT) || \
279+ (event_mask == IN_IGNORED) || \
280+ (event_mask & watches_mask))
281+
282+/*
283+ * inotify_dev_queue_event - add a new event to the given device
284+ *
285+ * Caller must hold dev->lock.
286+ */
287+static void inotify_dev_queue_event(struct inotify_device *dev,
288+ struct inotify_watch *watch, u32 mask,
289+ u32 cookie, const char *filename)
290+{
291+ struct inotify_kernel_event *kevent, *last;
292+
293+ /* Check if the new event is a duplicate of the last event queued. */
294+ last = inotify_dev_get_event(dev);
295+ if (dev->event_count && last->event.mask == mask &&
296+ last->event.wd == watch->wd) {
297+ /* Check if the filenames match */
298+ if (!filename && last->event.filename[0] == '\0')
299+ return;
300+ if (filename && !strcmp(last->event.filename, filename))
301+ return;
302+ }
303+
304+ /*
305+ * the queue has already overflowed and we have already sent the
306+ * Q_OVERFLOW event
307+ */
308+ if (dev->event_count > dev->max_events)
309+ return;
310+
311+ /* the queue has just overflowed and we need to notify user space */
312+ if (dev->event_count == dev->max_events) {
313+ kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL);
314+ goto add_event_to_queue;
315+ }
316+
317+ if (!event_and(mask, watch->inode->inotify_data->watch_mask) ||
318+ !event_and(mask, watch->mask))
319+ return;
320+
321+ kevent = kernel_event(watch->wd, mask, cookie, filename);
322+
323+add_event_to_queue:
324+ if (!kevent)
325+ return;
326+
327+ /* queue the event and wake up anyone waiting */
328+ dev->event_count++;
329+ list_add_tail(&kevent->list, &dev->events);
330+ wake_up_interruptible(&dev->wait);
331+}
332+
333+static inline int inotify_dev_has_events(struct inotify_device *dev)
334+{
335+ return !list_empty(&dev->events);
336+}
337+
338+/*
339+ * inotify_dev_event_dequeue - destroy an event on the given device
340+ *
341+ * Caller must hold dev->lock.
342+ */
343+static void inotify_dev_event_dequeue(struct inotify_device *dev)
344+{
345+ struct inotify_kernel_event *kevent;
346+
347+ if (!inotify_dev_has_events(dev))
348+ return;
349+
350+ kevent = inotify_dev_get_event(dev);
351+ list_del_init(&kevent->list);
352+ dev->event_count--;
353+ delete_kernel_event(kevent);
354+}
355+
356+/*
357+ * inotify_dev_get_wd - returns the next WD for use by the given dev
358+ *
359+ * This function can sleep.
360+ */
361+static int inotify_dev_get_wd(struct inotify_device *dev,
362+ struct inotify_watch *watch)
363+{
364+ int ret;
365+
366+ if (atomic_read(&dev->user->inotify_watches) >=
367+ sysfs_attrib_max_user_watches)
368+ return -ENOSPC;
369+
370+repeat:
371+ if (!idr_pre_get(&dev->idr, GFP_KERNEL))
372+ return -ENOSPC;
373+ spin_lock(&dev->lock);
374+ ret = idr_get_new(&dev->idr, watch, &watch->wd);
375+ spin_unlock(&dev->lock);
376+ if (ret == -EAGAIN) /* more memory is required, try again */
377+ goto repeat;
378+ else if (ret) /* the idr is full! */
379+ return -ENOSPC;
380+
381+ atomic_inc(&dev->user->inotify_watches);
382+
383+ return 0;
384+}
385+
386+/*
387+ * inotify_dev_put_wd - release the given WD on the given device
388+ *
389+ * Caller must hold dev->lock.
390+ */
391+static int inotify_dev_put_wd(struct inotify_device *dev, s32 wd)
392+{
393+ if (!dev || wd < 0)
394+ return -1;
395+
396+ atomic_dec(&dev->user->inotify_watches);
397+ idr_remove(&dev->idr, wd);
398+
399+ return 0;
400+}
401+
402+/*
403+ * create_watch - creates a watch on the given device.
404+ *
405+ * Grabs dev->lock, so the caller must not hold it.
406+ */
407+static struct inotify_watch *create_watch(struct inotify_device *dev,
408+ u32 mask, struct inode *inode)
409+{
410+ struct inotify_watch *watch;
411+
412+ watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
413+ if (!watch)
414+ return NULL;
415+
416+ watch->mask = mask;
417+ watch->inode = inode;
418+ watch->dev = dev;
419+ INIT_LIST_HEAD(&watch->d_list);
420+ INIT_LIST_HEAD(&watch->i_list);
421+
422+ if (inotify_dev_get_wd(dev, watch)) {
423+ kmem_cache_free(watch_cachep, watch);
424+ return NULL;
425+ }
426+
427+ return watch;
428+}
429+
430+/*
431+ * delete_watch - removes the given 'watch' from the given 'dev'
432+ *
433+ * Caller must hold dev->lock.
434+ */
435+static void delete_watch(struct inotify_device *dev,
436+ struct inotify_watch *watch)
437+{
438+ inotify_dev_put_wd(dev, watch->wd);
439+ kmem_cache_free(watch_cachep, watch);
440+}
441+
442+/*
443+ * inotify_find_dev - find the watch associated with the given inode and dev
444+ *
445+ * Caller must hold dev->lock.
446+ * FIXME: Needs inotify_data->lock too. Don't need dev->lock, just pin it.
447+ */
448+static struct inotify_watch *inode_find_dev(struct inode *inode,
449+ struct inotify_device *dev)
450+{
451+ struct inotify_watch *watch;
452+
453+ if (!inode->inotify_data)
454+ return NULL;
455+
456+ list_for_each_entry(watch, &inode->inotify_data->watches, i_list) {
457+ if (watch->dev == dev)
458+ return watch;
459+ }
460+
461+ return NULL;
462+}
463+
464+/*
465+ * dev_find_wd - given a (dev,wd) pair, returns the matching inotify_watcher
466+ *
467+ * Returns the results of looking up (dev,wd) in the idr layer. NULL is
468+ * returned on error.
469+ *
470+ * The caller must hold dev->lock.
471+ */
472+static inline struct inotify_watch *dev_find_wd(struct inotify_device *dev,
473+ u32 wd)
474+{
475+ return idr_find(&dev->idr, wd);
476+}
477+
478+static int inotify_dev_is_watching_inode(struct inotify_device *dev,
479+ struct inode *inode)
480+{
481+ struct inotify_watch *watch;
482+
483+ list_for_each_entry(watch, &dev->watches, d_list) {
484+ if (watch->inode == inode)
485+ return 1;
486+ }
487+
488+ return 0;
489+}
490+
491+/*
492+ * inotify_dev_add_watcher - add the given watcher to the given device instance
493+ *
494+ * Caller must hold dev->lock.
495+ */
496+static int inotify_dev_add_watch(struct inotify_device *dev,
497+ struct inotify_watch *watch)
498+{
499+ if (!dev || !watch)
500+ return -EINVAL;
501+
502+ list_add(&watch->d_list, &dev->watches);
503+ return 0;
504+}
505+
506+/*
507+ * inotify_dev_rm_watch - remove the given watch from the given device
508+ *
509+ * Caller must hold dev->lock because we call inotify_dev_queue_event().
510+ */
511+static int inotify_dev_rm_watch(struct inotify_device *dev,
512+ struct inotify_watch *watch)
513+{
514+ if (!watch)
515+ return -EINVAL;
516+
517+ inotify_dev_queue_event(dev, watch, IN_IGNORED, 0, NULL);
518+ list_del_init(&watch->d_list);
519+
520+ return 0;
521+}
522+
523+/*
524+ * inode_update_watch_mask - update inode->inotify_data->watch_mask
525+ *
526+ * Grabs inode->inotify_data->lock.
527+ */
528+static void inode_update_watch_mask(struct inode *inode)
529+{
530+ struct inotify_inode_data *data;
531+ struct inotify_watch *watch;
532+ u32 new_mask;
533+
534+ data = get_inode_data(inode);
535+ if (!data) /* FIXME: this should never happen */
536+ return;
537+ spin_lock(&data->lock);
538+
539+ new_mask = 0;
540+ list_for_each_entry(watch, &data->watches, i_list)
541+ new_mask |= watch->mask;
542+
543+ data->watch_mask = new_mask;
544+
545+ spin_unlock(&data->lock);
546+ put_inode_data(inode);
547+}
548+
549+/*
550+ * inode_add_watch - add a watch to the given inode
551+ *
552+ * Callers must hold dev->lock, because we call inode_find_dev().
553+ */
554+static int inode_add_watch(struct inode *inode, struct inotify_watch *watch)
555+{
556+ int ret;
557+
558+ if (!inode || !watch)
559+ return -EINVAL;
560+
561+ spin_lock(&inode->i_lock);
562+ if (!inode->inotify_data) {
563+ /* inotify_data is not attached to the inode, so add it */
564+ inode->inotify_data = kmem_cache_alloc(inode_data_cachep,
565+ GFP_ATOMIC);
566+ if (!inode->inotify_data) {
567+ ret = -ENOMEM;
568+ goto out_lock;
569+ }
570+
571+ atomic_set(&inode->inotify_data->count, 0);
572+ INIT_LIST_HEAD(&inode->inotify_data->watches);
573+ inode->inotify_data->watch_mask = 0;
574+ spin_lock_init(&inode->inotify_data->lock);
575+ } else if (inode_find_dev(inode, watch->dev)) {
576+ /* a watch is already associated with this (inode,dev) pair */
577+ ret = -EINVAL;
578+ goto out_lock;
579+ }
580+ __get_inode_data(inode->inotify_data);
581+ spin_unlock(&inode->i_lock);
582+
583+ list_add(&watch->i_list, &inode->inotify_data->watches);
584+ inode_update_watch_mask(inode);
585+
586+ return 0;
587+out_lock:
588+ spin_unlock(&inode->i_lock);
589+ return ret;
590+}
591+
592+static int inode_rm_watch(struct inode *inode,
593+ struct inotify_watch *watch)
594+{
595+ if (!inode || !watch || !inode->inotify_data)
596+ return -EINVAL;
597+
598+ list_del_init(&watch->i_list);
599+ inode_update_watch_mask(inode);
600+
601+ /* clean up inode->inotify_data */
602+ put_inode_data(inode);
603+
604+ return 0;
605+}
606+
607+/* Kernel API */
608+
609+/*
610+ * inotify_inode_queue_event - queue an event with the given mask, cookie, and
611+ * filename to any watches associated with the given inode.
612+ *
613+ * inode must be pinned prior to calling.
614+ */
615+void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
616+ const char *filename)
617+{
618+ struct inotify_watch *watch;
619+
620+ if (!inode->inotify_data)
621+ return;
622+
623+ list_for_each_entry(watch, &inode->inotify_data->watches, i_list) {
624+ spin_lock(&watch->dev->lock);
625+ inotify_dev_queue_event(watch->dev, watch, mask, cookie,
626+ filename);
627+ spin_unlock(&watch->dev->lock);
628+ }
629+}
630+EXPORT_SYMBOL_GPL(inotify_inode_queue_event);
631+
632+void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
633+ u32 cookie, const char *filename)
634+{
635+ struct dentry *parent;
636+
637+ parent = dget_parent(dentry);
638+ inotify_inode_queue_event(parent->d_inode, mask, cookie, filename);
639+ dput(parent);
640+}
641+EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event);
642+
643+u32 inotify_get_cookie(void)
644+{
645+ atomic_inc(&inotify_cookie);
646+ return atomic_read(&inotify_cookie);
647+}
648+EXPORT_SYMBOL_GPL(inotify_get_cookie);
649+
650+/*
651+ * watch->inode must be pinned. We drop a reference before returning.
652+ */
653+static void ignore_helper(struct inotify_watch *watch, int event)
654+{
655+ struct inotify_device *dev;
656+ struct inode *inode;
657+
658+ inode = watch->inode;
659+ dev = watch->dev;
660+
661+ spin_lock(&dev->lock);
662+
663+ if (event)
664+ inotify_dev_queue_event(dev, watch, event, 0, NULL);
665+
666+ inode_rm_watch(inode, watch);
667+ inotify_dev_rm_watch(watch->dev, watch);
668+
669+ delete_watch(dev, watch);
670+ spin_unlock(&dev->lock);
671+
672+ iput(inode);
673+}
674+
675+void inotify_super_block_umount(struct super_block *sb)
676+{
677+ struct inode *inode;
678+
679+ spin_lock(&inode_lock);
680+
681+ /*
682+ * We hold the inode_lock, so the inodes are not going anywhere, and
683+ * we grab a reference on inotify_data before walking its list of
684+ * watches.
685+ */
686+ list_for_each_entry(inode, &inode_in_use, i_list) {
687+ struct inotify_inode_data *inode_data;
688+ struct inotify_watch *watch;
689+
690+ if (inode->i_sb != sb)
691+ continue;
692+
693+ inode_data = get_inode_data(inode);
694+ if (!inode_data)
695+ continue;
696+
697+ list_for_each_entry(watch, &inode_data->watches, i_list)
698+ ignore_helper(watch, IN_UNMOUNT);
699+ put_inode_data(inode);
700+ }
701+
702+ spin_unlock(&inode_lock);
703+}
704+EXPORT_SYMBOL_GPL(inotify_super_block_umount);
705+
706+/*
707+ * inotify_inode_is_dead - an inode has been deleted, cleanup any watches
708+ */
709+void inotify_inode_is_dead(struct inode *inode)
710+{
711+ struct inotify_watch *watch, *next;
712+ struct inotify_inode_data *data;
713+
714+ data = get_inode_data(inode);
715+ if (!data)
716+ return;
717+ list_for_each_entry_safe(watch, next, &data->watches, i_list)
718+ ignore_helper(watch, 0);
719+ put_inode_data(inode);
720+}
721+EXPORT_SYMBOL_GPL(inotify_inode_is_dead);
722+
723+/* The driver interface is implemented below */
724+
725+static unsigned int inotify_poll(struct file *file, poll_table *wait)
726+{
727+ struct inotify_device *dev;
728+
729+ dev = file->private_data;
730+
731+ poll_wait(file, &dev->wait, wait);
732+
733+ if (inotify_dev_has_events(dev))
734+ return POLLIN | POLLRDNORM;
735+
736+ return 0;
737+}
738+
739+static ssize_t inotify_read(struct file *file, char __user *buf,
740+ size_t count, loff_t *pos)
741+{
742+ size_t event_size;
743+ struct inotify_device *dev;
744+ char __user *start;
745+ DECLARE_WAITQUEUE(wait, current);
746+
747+ start = buf;
748+ dev = file->private_data;
749+
750+ /* We only hand out full inotify events */
751+ event_size = sizeof(struct inotify_event);
752+ if (count < event_size)
753+ return -EINVAL;
754+
755+ while (1) {
756+ int has_events;
757+
758+ spin_lock(&dev->lock);
759+ has_events = inotify_dev_has_events(dev);
760+ spin_unlock(&dev->lock);
761+ if (has_events)
762+ break;
763+
764+ if (file->f_flags & O_NONBLOCK)
765+ return -EAGAIN;
766+
767+ if (signal_pending(current))
768+ return -ERESTARTSYS;
769+
770+ add_wait_queue(&dev->wait, &wait);
771+ set_current_state(TASK_INTERRUPTIBLE);
772+
773+ schedule();
774+
775+ set_current_state(TASK_RUNNING);
776+ remove_wait_queue(&dev->wait, &wait);
777+ }
778+
779+ while (count >= event_size) {
780+ struct inotify_kernel_event *kevent;
781+
782+ spin_lock(&dev->lock);
783+ if (!inotify_dev_has_events(dev)) {
784+ spin_unlock(&dev->lock);
785+ break;
786+ }
787+ kevent = inotify_dev_get_event(dev);
788+ spin_unlock(&dev->lock);
789+ if (copy_to_user(buf, &kevent->event, event_size))
790+ return -EFAULT;
791+
792+ spin_lock(&dev->lock);
793+ inotify_dev_event_dequeue(dev);
794+ spin_unlock(&dev->lock);
795+ count -= event_size;
796+ buf += event_size;
797+ }
798+
799+ return buf - start;
800+}
801+
802+static int inotify_open(struct inode *inode, struct file *file)
803+{
804+ struct inotify_device *dev;
805+ struct user_struct *user;
806+ int ret;
807+
808+ user = get_uid(current->user);
809+
810+ if (atomic_read(&user->inotify_devs) >= sysfs_attrib_max_user_devices) {
811+ ret = -ENOSPC;
812+ goto out_err;
813+ }
814+
815+ atomic_inc(&current->user->inotify_devs);
816+
817+ dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
818+ if (!dev) {
819+ ret = -ENOMEM;
820+ goto out_err;
821+ }
822+
823+ idr_init(&dev->idr);
824+
825+ INIT_LIST_HEAD(&dev->events);
826+ INIT_LIST_HEAD(&dev->watches);
827+ init_waitqueue_head(&dev->wait);
828+
829+ dev->event_count = 0;
830+ dev->max_events = sysfs_attrib_max_queued_events;
831+ dev->user = user;
832+ spin_lock_init(&dev->lock);
833+
834+ file->private_data = dev;
835+
836+ return 0;
837+out_err:
838+ free_uid(current->user);
839+ return ret;
840+}
841+
842+/*
843+ * inotify_release_all_watches - destroy all watches on a given device
844+ *
845+ * FIXME: Do we want a lock here?
846+ */
847+static void inotify_release_all_watches(struct inotify_device *dev)
848+{
849+ struct inotify_watch *watch, *next;
850+
851+ list_for_each_entry_safe(watch, next, &dev->watches, d_list)
852+ ignore_helper(watch, 0);
853+}
854+
855+/*
856+ * inotify_release_all_events - destroy all of the events on a given device
857+ */
858+static void inotify_release_all_events(struct inotify_device *dev)
859+{
860+ spin_lock(&dev->lock);
861+ while (inotify_dev_has_events(dev))
862+ inotify_dev_event_dequeue(dev);
863+ spin_unlock(&dev->lock);
864+}
865+
866+static int inotify_release(struct inode *inode, struct file *file)
867+{
868+ struct inotify_device *dev;
869+
870+ dev = file->private_data;
871+
872+ inotify_release_all_watches(dev);
873+ inotify_release_all_events(dev);
874+
875+ atomic_dec(&dev->user->inotify_devs);
876+ free_uid(dev->user);
877+
878+ kfree(dev);
879+
880+ return 0;
881+}
882+
883+static int inotify_add_watch(struct inotify_device *dev,
884+ struct inotify_watch_request *request)
885+{
886+ struct inode *inode;
887+ struct inotify_watch *watch;
888+ int ret;
889+
890+ inode = find_inode((const char __user*) request->dirname);
891+ if (IS_ERR(inode))
892+ return PTR_ERR(inode);
893+
894+ spin_lock(&dev->lock);
895+
896+ /*
897+ * This handles the case of re-adding a directory we are already
898+ * watching, we just update the mask and return 0
899+ */
900+ if (inotify_dev_is_watching_inode(dev, inode)) {
901+ struct inotify_watch *owatch; /* the old watch */
902+
903+ owatch = inode_find_dev(inode, dev);
904+ owatch->mask = request->mask;
905+ inode_update_watch_mask(inode);
906+ spin_unlock(&dev->lock);
907+ iput(inode);
908+
909+ return owatch->wd;
910+ }
911+
912+ spin_unlock(&dev->lock);
913+
914+ watch = create_watch(dev, request->mask, inode);
915+ if (!watch) {
916+ iput(inode);
917+ return -ENOSPC;
918+ }
919+
920+ spin_lock(&dev->lock);
921+
922+ /* We can't add anymore watches to this device */
923+ if (inotify_dev_add_watch(dev, watch)) {
924+ delete_watch(dev, watch);
925+ spin_unlock(&dev->lock);
926+ iput(inode);
927+ return -EINVAL;
928+ }
929+
930+ ret = inode_add_watch(inode, watch);
931+ if (ret < 0) {
932+ list_del_init(&watch->d_list);
933+ delete_watch(dev, watch);
934+ spin_unlock(&dev->lock);
935+ iput(inode);
936+ return ret;
937+ }
938+
939+ spin_unlock(&dev->lock);
940+
941+ return watch->wd;
942+}
943+
944+static int inotify_ignore(struct inotify_device *dev, s32 wd)
945+{
946+ struct inotify_watch *watch;
947+
948+ /*
949+ * FIXME: Silly to grab dev->lock here and then drop it, when
950+ * ignore_helper() grabs it anyway a few lines down.
951+ */
952+ spin_lock(&dev->lock);
953+ watch = dev_find_wd(dev, wd);
954+ spin_unlock(&dev->lock);
955+ if (!watch)
956+ return -EINVAL;
957+ ignore_helper(watch, 0);
958+
959+ return 0;
960+}
961+
962+/*
963+ * inotify_ioctl() - our device file's ioctl method
964+ *
965+ * The VFS serializes all of our calls via the BKL and we rely on that. We
966+ * could, alternatively, grab dev->lock. Right now lower levels grab that
967+ * where needed.
968+ */
969+static int inotify_ioctl(struct inode *ip, struct file *fp,
970+ unsigned int cmd, unsigned long arg)
971+{
972+ struct inotify_device *dev;
973+ struct inotify_watch_request request;
974+ void __user *p;
975+ int bytes;
976+ s32 wd;
977+
978+ dev = fp->private_data;
979+ p = (void __user *) arg;
980+
981+ switch (cmd) {
982+ case INOTIFY_WATCH:
983+ if (copy_from_user(&request, p, sizeof (request)))
984+ return -EFAULT;
985+ return inotify_add_watch(dev, &request);
986+ case INOTIFY_IGNORE:
987+ if (copy_from_user(&wd, p, sizeof (wd)))
988+ return -EFAULT;
989+ return inotify_ignore(dev, wd);
990+ case FIONREAD:
991+ bytes = dev->event_count * sizeof(struct inotify_event);
992+ return put_user(bytes, (int __user *) p);
993+ default:
994+ return -ENOTTY;
995+ }
996+}
997+
998+static struct file_operations inotify_fops = {
999+ .owner = THIS_MODULE,
1000+ .poll = inotify_poll,
1001+ .read = inotify_read,
1002+ .open = inotify_open,
1003+ .release = inotify_release,
1004+ .ioctl = inotify_ioctl,
1005+};
1006+
1007+struct miscdevice inotify_device = {
1008+ .minor = MISC_DYNAMIC_MINOR,
1009+ .name = "inotify",
1010+ .fops = &inotify_fops,
1011+};
1012+
1013+static int __init inotify_init(void)
1014+{
1015+ struct class_device *class;
1016+ int ret;
1017+
1018+ ret = misc_register(&inotify_device);
1019+ if (ret)
1020+ return ret;
1021+
1022+ sysfs_attrib_max_queued_events = 512;
1023+ sysfs_attrib_max_user_devices = 64;
1024+ sysfs_attrib_max_user_watches = 16384;
1025+
1026+ class = inotify_device.class;
1027+ class_device_create_file(class, &class_device_attr_max_queued_events);
1028+ class_device_create_file(class, &class_device_attr_max_user_devices);
1029+ class_device_create_file(class, &class_device_attr_max_user_watches);
1030+
1031+ atomic_set(&inotify_cookie, 0);
1032+
1033+ watch_cachep = kmem_cache_create("inotify_watch_cache",
1034+ sizeof(struct inotify_watch), 0, SLAB_PANIC,
1035+ NULL, NULL);
1036+
1037+ event_cachep = kmem_cache_create("inotify_event_cache",
1038+ sizeof(struct inotify_kernel_event), 0,
1039+ SLAB_PANIC, NULL, NULL);
1040+
1041+ inode_data_cachep = kmem_cache_create("inotify_inode_data_cache",
1042+ sizeof(struct inotify_inode_data), 0, SLAB_PANIC,
1043+ NULL, NULL);
1044+
1045+ printk(KERN_INFO "inotify device minor=%d\n", inotify_device.minor);
1046+
1047+ return 0;
1048+}
1049+
1050+module_init(inotify_init);
1051diff -urN linux-2.6.10/drivers/char/Kconfig linux/drivers/char/Kconfig
1052--- linux-2.6.10/drivers/char/Kconfig 2004-12-24 16:33:49.000000000 -0500
1053+++ linux/drivers/char/Kconfig 2005-01-06 15:04:03.741780048 -0500
1054@@ -62,6 +62,19 @@
1055 depends on VT && !S390 && !USERMODE
1056 default y
1057
1058+config INOTIFY
1059+ bool "Inotify file change notification support"
1060+ default y
1061+ ---help---
1062+ Say Y here to enable inotify support and the /dev/inotify character
1063+ device. Inotify is a file change notification system and a
1064+ replacement for dnotify. Inotify fixes numerous shortcomings in
1065+ dnotify and introduces several new features. It allows monitoring
1066+ of both files and directories via a single open fd. Multiple file
1067+ events are supported.
1068+
1069+ If unsure, say Y.
1070+
1071 config SERIAL_NONSTANDARD
1072 bool "Non-standard serial port support"
1073 ---help---
1074diff -urN linux-2.6.10/drivers/char/Makefile linux/drivers/char/Makefile
1075--- linux-2.6.10/drivers/char/Makefile 2004-12-24 16:35:29.000000000 -0500
1076+++ linux/drivers/char/Makefile 2005-01-06 15:04:03.741780048 -0500
1077@@ -9,6 +9,8 @@
1078
1079 obj-y += mem.o random.o tty_io.o n_tty.o tty_ioctl.o
1080
1081+
1082+obj-$(CONFIG_INOTIFY) += inotify.o
1083 obj-$(CONFIG_LEGACY_PTYS) += pty.o
1084 obj-$(CONFIG_UNIX98_PTYS) += pty.o
1085 obj-y += misc.o
1086diff -urN linux-2.6.10/drivers/char/misc.c linux/drivers/char/misc.c
1087--- linux-2.6.10/drivers/char/misc.c 2004-12-24 16:35:28.000000000 -0500
1088+++ linux/drivers/char/misc.c 2005-01-06 15:04:03.742779896 -0500
1089@@ -207,10 +207,9 @@
1090 int misc_register(struct miscdevice * misc)
1091 {
1092 struct miscdevice *c;
1093- struct class_device *class;
1094 dev_t dev;
1095 int err;
1096-
1097+
1098 down(&misc_sem);
1099 list_for_each_entry(c, &misc_list, list) {
1100 if (c->minor == misc->minor) {
1101@@ -224,8 +223,7 @@
1102 while (--i >= 0)
1103 if ( (misc_minors[i>>3] & (1 << (i&7))) == 0)
1104 break;
1105- if (i<0)
1106- {
1107+ if (i<0) {
1108 up(&misc_sem);
1109 return -EBUSY;
1110 }
1111@@ -240,10 +238,10 @@
1112 }
1113 dev = MKDEV(MISC_MAJOR, misc->minor);
1114
1115- class = class_simple_device_add(misc_class, dev,
1116- misc->dev, misc->name);
1117- if (IS_ERR(class)) {
1118- err = PTR_ERR(class);
1119+ misc->class = class_simple_device_add(misc_class, dev,
1120+ misc->dev, misc->name);
1121+ if (IS_ERR(misc->class)) {
1122+ err = PTR_ERR(misc->class);
1123 goto out;
1124 }
1125
1126diff -urN linux-2.6.10/fs/attr.c linux/fs/attr.c
1127--- linux-2.6.10/fs/attr.c 2004-12-24 16:34:00.000000000 -0500
1128+++ linux/fs/attr.c 2005-01-06 15:04:03.743779744 -0500
1129@@ -11,6 +11,7 @@
1130 #include <linux/string.h>
1131 #include <linux/smp_lock.h>
1132 #include <linux/dnotify.h>
1133+#include <linux/inotify.h>
1134 #include <linux/fcntl.h>
1135 #include <linux/quotaops.h>
1136 #include <linux/security.h>
1137@@ -103,29 +104,51 @@
1138 out:
1139 return error;
1140 }
1141-
1142 EXPORT_SYMBOL(inode_setattr);
1143
1144-int setattr_mask(unsigned int ia_valid)
1145+void setattr_mask (unsigned int ia_valid, int *dn_mask, u32 *in_mask)
1146 {
1147- unsigned long dn_mask = 0;
1148+ int dnmask;
1149+ u32 inmask;
1150
1151- if (ia_valid & ATTR_UID)
1152- dn_mask |= DN_ATTRIB;
1153- if (ia_valid & ATTR_GID)
1154- dn_mask |= DN_ATTRIB;
1155- if (ia_valid & ATTR_SIZE)
1156- dn_mask |= DN_MODIFY;
1157- /* both times implies a utime(s) call */
1158- if ((ia_valid & (ATTR_ATIME|ATTR_MTIME)) == (ATTR_ATIME|ATTR_MTIME))
1159- dn_mask |= DN_ATTRIB;
1160- else if (ia_valid & ATTR_ATIME)
1161- dn_mask |= DN_ACCESS;
1162- else if (ia_valid & ATTR_MTIME)
1163- dn_mask |= DN_MODIFY;
1164- if (ia_valid & ATTR_MODE)
1165- dn_mask |= DN_ATTRIB;
1166- return dn_mask;
1167+ inmask = 0;
1168+ dnmask = 0;
1169+
1170+ if (!dn_mask || !in_mask) {
1171+ return;
1172+ }
1173+ if (ia_valid & ATTR_UID) {
1174+ inmask |= IN_ATTRIB;
1175+ dnmask |= DN_ATTRIB;
1176+ }
1177+ if (ia_valid & ATTR_GID) {
1178+ inmask |= IN_ATTRIB;
1179+ dnmask |= DN_ATTRIB;
1180+ }
1181+ if (ia_valid & ATTR_SIZE) {
1182+ inmask |= IN_MODIFY;
1183+ dnmask |= DN_MODIFY;
1184+ }
1185+ /* both times implies a utime(s) call */
1186+ if ((ia_valid & (ATTR_ATIME|ATTR_MTIME)) == (ATTR_ATIME|ATTR_MTIME)) {
1187+ inmask |= IN_ATTRIB;
1188+ dnmask |= DN_ATTRIB;
1189+ }
1190+ else if (ia_valid & ATTR_ATIME) {
1191+ inmask |= IN_ACCESS;
1192+ dnmask |= DN_ACCESS;
1193+ }
1194+ else if (ia_valid & ATTR_MTIME) {
1195+ inmask |= IN_MODIFY;
1196+ dnmask |= DN_MODIFY;
1197+ }
1198+ if (ia_valid & ATTR_MODE) {
1199+ inmask |= IN_ATTRIB;
1200+ dnmask |= DN_ATTRIB;
1201+ }
1202+
1203+ *in_mask = inmask;
1204+ *dn_mask = dnmask;
1205 }
1206
1207 int notify_change(struct dentry * dentry, struct iattr * attr)
1208@@ -184,9 +207,19 @@
1209 }
1210 }
1211 if (!error) {
1212- unsigned long dn_mask = setattr_mask(ia_valid);
1213+ int dn_mask;
1214+ u32 in_mask;
1215+
1216+ setattr_mask (ia_valid, &dn_mask, &in_mask);
1217+
1218 if (dn_mask)
1219 dnotify_parent(dentry, dn_mask);
1220+ if (in_mask) {
1221+ inotify_inode_queue_event(dentry->d_inode, in_mask, 0,
1222+ NULL);
1223+ inotify_dentry_parent_queue_event(dentry, in_mask, 0,
1224+ dentry->d_name.name);
1225+ }
1226 }
1227 return error;
1228 }
1229diff -urN linux-2.6.10/fs/file_table.c linux/fs/file_table.c
1230--- linux-2.6.10/fs/file_table.c 2004-12-24 16:33:50.000000000 -0500
1231+++ linux/fs/file_table.c 2005-01-06 15:04:03.743779744 -0500
1232@@ -16,6 +16,7 @@
1233 #include <linux/eventpoll.h>
1234 #include <linux/mount.h>
1235 #include <linux/cdev.h>
1236+#include <linux/inotify.h>
1237
1238 /* sysctl tunables... */
1239 struct files_stat_struct files_stat = {
1240@@ -120,6 +121,12 @@
1241 struct dentry *dentry = file->f_dentry;
1242 struct vfsmount *mnt = file->f_vfsmnt;
1243 struct inode *inode = dentry->d_inode;
1244+ u32 mask;
1245+
1246+
1247+ mask = (file->f_mode & FMODE_WRITE) ? IN_CLOSE_WRITE : IN_CLOSE_NOWRITE;
1248+ inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
1249+ inotify_inode_queue_event(inode, mask, 0, NULL);
1250
1251 might_sleep();
1252 /*
1253diff -urN linux-2.6.10/fs/inode.c linux/fs/inode.c
1254--- linux-2.6.10/fs/inode.c 2004-12-24 16:35:40.000000000 -0500
1255+++ linux/fs/inode.c 2005-01-06 15:04:03.744779592 -0500
1256@@ -130,6 +130,9 @@
1257 #ifdef CONFIG_QUOTA
1258 memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
1259 #endif
1260+#ifdef CONFIG_INOTIFY
1261+ inode->inotify_data = NULL;
1262+#endif
1263 inode->i_pipe = NULL;
1264 inode->i_bdev = NULL;
1265 inode->i_cdev = NULL;
1266diff -urN linux-2.6.10/fs/namei.c linux/fs/namei.c
1267--- linux-2.6.10/fs/namei.c 2004-12-24 16:34:30.000000000 -0500
1268+++ linux/fs/namei.c 2005-01-06 15:30:13.049209056 -0500
1269@@ -22,6 +22,7 @@
1270 #include <linux/quotaops.h>
1271 #include <linux/pagemap.h>
1272 #include <linux/dnotify.h>
1273+#include <linux/inotify.h>
1274 #include <linux/smp_lock.h>
1275 #include <linux/personality.h>
1276 #include <linux/security.h>
1277@@ -1242,6 +1243,8 @@
1278 error = dir->i_op->create(dir, dentry, mode, nd);
1279 if (!error) {
1280 inode_dir_notify(dir, DN_CREATE);
1281+ inotify_inode_queue_event(dir, IN_CREATE_FILE,
1282+ 0, dentry->d_name.name);
1283 security_inode_post_create(dir, dentry, mode);
1284 }
1285 return error;
1286@@ -1556,6 +1559,8 @@
1287 error = dir->i_op->mknod(dir, dentry, mode, dev);
1288 if (!error) {
1289 inode_dir_notify(dir, DN_CREATE);
1290+ inotify_inode_queue_event(dir, IN_CREATE_FILE, 0,
1291+ dentry->d_name.name);
1292 security_inode_post_mknod(dir, dentry, mode, dev);
1293 }
1294 return error;
1295@@ -1629,6 +1634,8 @@
1296 error = dir->i_op->mkdir(dir, dentry, mode);
1297 if (!error) {
1298 inode_dir_notify(dir, DN_CREATE);
1299+ inotify_inode_queue_event(dir, IN_CREATE_SUBDIR, 0,
1300+ dentry->d_name.name);
1301 security_inode_post_mkdir(dir,dentry, mode);
1302 }
1303 return error;
1304@@ -1724,6 +1731,11 @@
1305 up(&dentry->d_inode->i_sem);
1306 if (!error) {
1307 inode_dir_notify(dir, DN_DELETE);
1308+ inotify_inode_queue_event(dir, IN_DELETE_SUBDIR, 0,
1309+ dentry->d_name.name);
1310+ inotify_inode_queue_event(dentry->d_inode, IN_DELETE_SELF, 0,
1311+ NULL);
1312+ inotify_inode_is_dead (dentry->d_inode);
1313 d_delete(dentry);
1314 }
1315 dput(dentry);
1316@@ -1796,8 +1808,13 @@
1317
1318 /* We don't d_delete() NFS sillyrenamed files--they still exist. */
1319 if (!error && !(dentry->d_flags & DCACHE_NFSFS_RENAMED)) {
1320- d_delete(dentry);
1321 inode_dir_notify(dir, DN_DELETE);
1322+ inotify_inode_queue_event(dir, IN_DELETE_FILE, 0,
1323+ dentry->d_name.name);
1324+ inotify_inode_queue_event(dentry->d_inode, IN_DELETE_SELF, 0,
1325+ NULL);
1326+ inotify_inode_is_dead (dentry->d_inode);
1327+ d_delete(dentry);
1328 }
1329 return error;
1330 }
1331@@ -1873,6 +1890,8 @@
1332 error = dir->i_op->symlink(dir, dentry, oldname);
1333 if (!error) {
1334 inode_dir_notify(dir, DN_CREATE);
1335+ inotify_inode_queue_event(dir, IN_CREATE_FILE, 0,
1336+ dentry->d_name.name);
1337 security_inode_post_symlink(dir, dentry, oldname);
1338 }
1339 return error;
1340@@ -1946,6 +1965,8 @@
1341 up(&old_dentry->d_inode->i_sem);
1342 if (!error) {
1343 inode_dir_notify(dir, DN_CREATE);
1344+ inotify_inode_queue_event(dir, IN_CREATE_FILE, 0,
1345+ new_dentry->d_name.name);
1346 security_inode_post_link(old_dentry, dir, new_dentry);
1347 }
1348 return error;
1349@@ -2109,6 +2130,8 @@
1350 {
1351 int error;
1352 int is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
1353+ char *old_name;
1354+ u32 cookie;
1355
1356 if (old_dentry->d_inode == new_dentry->d_inode)
1357 return 0;
1358@@ -2130,6 +2153,8 @@
1359 DQUOT_INIT(old_dir);
1360 DQUOT_INIT(new_dir);
1361
1362+ old_name = inotify_oldname_init(old_dentry);
1363+
1364 if (is_dir)
1365 error = vfs_rename_dir(old_dir,old_dentry,new_dir,new_dentry);
1366 else
1367@@ -2141,7 +2166,16 @@
1368 inode_dir_notify(old_dir, DN_DELETE);
1369 inode_dir_notify(new_dir, DN_CREATE);
1370 }
1371+
1372+ cookie = inotify_get_cookie();
1373+
1374+ inotify_inode_queue_event(old_dir, IN_MOVED_FROM, cookie,
1375+ old_name);
1376+ inotify_inode_queue_event(new_dir, IN_MOVED_TO, cookie,
1377+ old_dentry->d_name.name);
1378 }
1379+ inotify_oldname_free(old_name);
1380+
1381 return error;
1382 }
1383
1384diff -urN linux-2.6.10/fs/open.c linux/fs/open.c
1385--- linux-2.6.10/fs/open.c 2004-12-24 16:33:50.000000000 -0500
1386+++ linux/fs/open.c 2005-01-06 15:04:03.747779136 -0500
1387@@ -11,6 +11,7 @@
1388 #include <linux/smp_lock.h>
1389 #include <linux/quotaops.h>
1390 #include <linux/dnotify.h>
1391+#include <linux/inotify.h>
1392 #include <linux/module.h>
1393 #include <linux/slab.h>
1394 #include <linux/tty.h>
1395@@ -956,6 +957,10 @@
1396 error = PTR_ERR(f);
1397 if (IS_ERR(f))
1398 goto out_error;
1399+ inotify_inode_queue_event(f->f_dentry->d_inode,
1400+ IN_OPEN, 0, NULL);
1401+ inotify_dentry_parent_queue_event(f->f_dentry, IN_OPEN,
1402+ 0, f->f_dentry->d_name.name);
1403 fd_install(fd, f);
1404 }
1405 out:
1406diff -urN linux-2.6.10/fs/read_write.c linux/fs/read_write.c
1407--- linux-2.6.10/fs/read_write.c 2004-12-24 16:35:00.000000000 -0500
1408+++ linux/fs/read_write.c 2005-01-06 15:04:03.748778984 -0500
1409@@ -11,6 +11,7 @@
1410 #include <linux/uio.h>
1411 #include <linux/smp_lock.h>
1412 #include <linux/dnotify.h>
1413+#include <linux/inotify.h>
1414 #include <linux/security.h>
1415 #include <linux/module.h>
1416 #include <linux/syscalls.h>
1417@@ -216,8 +217,14 @@
1418 ret = file->f_op->read(file, buf, count, pos);
1419 else
1420 ret = do_sync_read(file, buf, count, pos);
1421- if (ret > 0)
1422- dnotify_parent(file->f_dentry, DN_ACCESS);
1423+ if (ret > 0) {
1424+ struct dentry *dentry = file->f_dentry;
1425+ dnotify_parent(dentry, DN_ACCESS);
1426+ inotify_dentry_parent_queue_event(dentry,
1427+ IN_ACCESS, 0, dentry->d_name.name);
1428+ inotify_inode_queue_event(inode, IN_ACCESS, 0,
1429+ NULL);
1430+ }
1431 }
1432 }
1433
1434@@ -260,8 +267,14 @@
1435 ret = file->f_op->write(file, buf, count, pos);
1436 else
1437 ret = do_sync_write(file, buf, count, pos);
1438- if (ret > 0)
1439- dnotify_parent(file->f_dentry, DN_MODIFY);
1440+ if (ret > 0) {
1441+ struct dentry *dentry = file->f_dentry;
1442+ dnotify_parent(dentry, DN_MODIFY);
1443+ inotify_dentry_parent_queue_event(dentry,
1444+ IN_MODIFY, 0, dentry->d_name.name);
1445+ inotify_inode_queue_event(inode, IN_MODIFY, 0,
1446+ NULL);
1447+ }
1448 }
1449 }
1450
1451@@ -493,9 +506,15 @@
1452 out:
1453 if (iov != iovstack)
1454 kfree(iov);
1455- if ((ret + (type == READ)) > 0)
1456- dnotify_parent(file->f_dentry,
1457- (type == READ) ? DN_ACCESS : DN_MODIFY);
1458+ if ((ret + (type == READ)) > 0) {
1459+ struct dentry *dentry = file->f_dentry;
1460+ dnotify_parent(dentry, (type == READ) ? DN_ACCESS : DN_MODIFY);
1461+ inotify_dentry_parent_queue_event(dentry,
1462+ (type == READ) ? IN_ACCESS : IN_MODIFY, 0,
1463+ dentry->d_name.name);
1464+ inotify_inode_queue_event (dentry->d_inode,
1465+ (type == READ) ? IN_ACCESS : IN_MODIFY, 0, NULL);
1466+ }
1467 return ret;
1468 }
1469
1470diff -urN linux-2.6.10/fs/super.c linux/fs/super.c
1471--- linux-2.6.10/fs/super.c 2004-12-24 16:34:33.000000000 -0500
1472+++ linux/fs/super.c 2005-01-06 15:04:03.748778984 -0500
1473@@ -38,6 +38,7 @@
1474 #include <linux/idr.h>
1475 #include <linux/kobject.h>
1476 #include <asm/uaccess.h>
1477+#include <linux/inotify.h>
1478
1479
1480 void get_filesystem(struct file_system_type *fs);
1481@@ -227,6 +228,7 @@
1482
1483 if (root) {
1484 sb->s_root = NULL;
1485+ inotify_super_block_umount(sb);
1486 shrink_dcache_parent(root);
1487 shrink_dcache_anon(&sb->s_anon);
1488 dput(root);
1489diff -urN linux-2.6.10/include/linux/fs.h linux/include/linux/fs.h
1490--- linux-2.6.10/include/linux/fs.h 2004-12-24 16:34:27.000000000 -0500
1491+++ linux/include/linux/fs.h 2005-01-06 15:04:03.750778680 -0500
1492@@ -27,6 +27,7 @@
1493 struct kstatfs;
1494 struct vm_area_struct;
1495 struct vfsmount;
1496+struct inotify_inode_data;
1497
1498 /*
1499 * It's silly to have NR_OPEN bigger than NR_FILE, but you can change
1500@@ -473,6 +474,10 @@
1501 struct dnotify_struct *i_dnotify; /* for directory notifications */
1502 #endif
1503
1504+#ifdef CONFIG_INOTIFY
1505+ struct inotify_inode_data *inotify_data;
1506+#endif
1507+
1508 unsigned long i_state;
1509 unsigned long dirtied_when; /* jiffies of first dirtying */
1510
1511@@ -1353,7 +1358,7 @@
1512 extern int do_remount_sb(struct super_block *sb, int flags,
1513 void *data, int force);
1514 extern sector_t bmap(struct inode *, sector_t);
1515-extern int setattr_mask(unsigned int);
1516+extern void setattr_mask(unsigned int, int *, u32 *);
1517 extern int notify_change(struct dentry *, struct iattr *);
1518 extern int permission(struct inode *, int, struct nameidata *);
1519 extern int generic_permission(struct inode *, int,
1520diff -urN linux-2.6.10/include/linux/inotify.h linux/include/linux/inotify.h
1521--- linux-2.6.10/include/linux/inotify.h 1969-12-31 19:00:00.000000000 -0500
1522+++ linux/include/linux/inotify.h 2005-01-06 15:04:03.751778528 -0500
1523@@ -0,0 +1,155 @@
1524+/*
1525+ * Inode based directory notification for Linux
1526+ *
1527+ * Copyright (C) 2004 John McCutchan
1528+ */
1529+
1530+#ifndef _LINUX_INOTIFY_H
1531+#define _LINUX_INOTIFY_H
1532+
1533+#include <linux/types.h>
1534+#include <linux/limits.h>
1535+
1536+/* this size could limit things, since technically we could need PATH_MAX */
1537+#define INOTIFY_FILENAME_MAX 256
1538+
1539+/*
1540+ * struct inotify_event - structure read from the inotify device for each event
1541+ *
1542+ * When you are watching a directory, you will receive the filename for events
1543+ * such as IN_CREATE, IN_DELETE, IN_OPEN, IN_CLOSE, ...
1544+ *
1545+ * Note: When reading from the device you must provide a buffer that is a
1546+ * multiple of sizeof(struct inotify_event)
1547+ */
1548+struct inotify_event {
1549+ __s32 wd;
1550+ __u32 mask;
1551+ __u32 cookie;
1552+ char filename[INOTIFY_FILENAME_MAX];
1553+};
1554+
1555+/*
1556+ * struct inotify_watch_request - represents a watch request
1557+ *
1558+ * Pass to the inotify device via the INOTIFY_WATCH ioctl
1559+ */
1560+struct inotify_watch_request {
1561+ char *dirname; /* directory name */
1562+ __u32 mask; /* event mask */
1563+};
1564+
1565+/* the following are legal, implemented events */
1566+#define IN_ACCESS 0x00000001 /* File was accessed */
1567+#define IN_MODIFY 0x00000002 /* File was modified */
1568+#define IN_ATTRIB 0x00000004 /* File changed attributes */
1569+#define IN_CLOSE_WRITE 0x00000008 /* Writtable file was closed */
1570+#define IN_CLOSE_NOWRITE 0x00000010 /* Unwrittable file closed */
1571+#define IN_OPEN 0x00000020 /* File was opened */
1572+#define IN_MOVED_FROM 0x00000040 /* File was moved from X */
1573+#define IN_MOVED_TO 0x00000080 /* File was moved to Y */
1574+#define IN_DELETE_SUBDIR 0x00000100 /* Subdir was deleted */
1575+#define IN_DELETE_FILE 0x00000200 /* Subfile was deleted */
1576+#define IN_CREATE_SUBDIR 0x00000400 /* Subdir was created */
1577+#define IN_CREATE_FILE 0x00000800 /* Subfile was created */
1578+#define IN_DELETE_SELF 0x00001000 /* Self was deleted */
1579+#define IN_UNMOUNT 0x00002000 /* Backing fs was unmounted */
1580+#define IN_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
1581+#define IN_IGNORED 0x00008000 /* File was ignored */
1582+
1583+/* special flags */
1584+#define IN_ALL_EVENTS 0xffffffff /* All the events */
1585+#define IN_CLOSE (IN_CLOSE_WRITE | IN_CLOSE_NOWRITE)
1586+
1587+#define INOTIFY_IOCTL_MAGIC 'Q'
1588+#define INOTIFY_IOCTL_MAXNR 2
1589+
1590+#define INOTIFY_WATCH _IOR(INOTIFY_IOCTL_MAGIC, 1, struct inotify_watch_request)
1591+#define INOTIFY_IGNORE _IOR(INOTIFY_IOCTL_MAGIC, 2, int)
1592+
1593+#ifdef __KERNEL__
1594+
1595+#include <linux/dcache.h>
1596+#include <linux/fs.h>
1597+#include <linux/config.h>
1598+
1599+struct inotify_inode_data {
1600+ struct list_head watches;
1601+ __u32 watch_mask;
1602+ spinlock_t lock;
1603+ atomic_t count;
1604+};
1605+
1606+#ifdef CONFIG_INOTIFY
1607+
1608+extern void inotify_inode_queue_event(struct inode *, __u32, __u32,
1609+ const char *);
1610+extern void inotify_dentry_parent_queue_event(struct dentry *, __u32, __u32,
1611+ const char *);
1612+extern void inotify_super_block_umount(struct super_block *);
1613+extern void inotify_inode_is_dead(struct inode *);
1614+extern __u32 inotify_get_cookie(void);
1615+extern __u32 setattr_mask_inotify(unsigned int);
1616+
1617+/* this could be kstrdup if only we could add that to lib/string.c */
1618+static inline char * inotify_oldname_init(struct dentry *old_dentry)
1619+{
1620+ char *old_name;
1621+
1622+ old_name = kmalloc(strlen(old_dentry->d_name.name) + 1, GFP_KERNEL);
1623+ if (old_name)
1624+ strcpy(old_name, old_dentry->d_name.name);
1625+ return old_name;
1626+}
1627+
1628+static inline void inotify_oldname_free(const char *old_name)
1629+{
1630+ kfree(old_name);
1631+}
1632+
1633+#else
1634+
1635+static inline void inotify_inode_queue_event(struct inode *inode,
1636+ __u32 mask, __u32 cookie,
1637+ const char *filename)
1638+{
1639+}
1640+
1641+static inline void inotify_dentry_parent_queue_event(struct dentry *dentry,
1642+ __u32 mask, __u32 cookie,
1643+ const char *filename)
1644+{
1645+}
1646+
1647+static inline void inotify_super_block_umount(struct super_block *sb)
1648+{
1649+}
1650+
1651+static inline void inotify_inode_is_dead(struct inode *inode)
1652+{
1653+}
1654+
1655+static inline char * inotify_oldname_init(struct dentry *old_dentry)
1656+{
1657+ return NULL;
1658+}
1659+
1660+static inline __u32 inotify_get_cookie(void)
1661+{
1662+ return 0;
1663+}
1664+
1665+static inline void inotify_oldname_free(const char *old_name)
1666+{
1667+}
1668+
1669+static inline int setattr_mask_inotify(unsigned int ia_mask)
1670+{
1671+ return 0;
1672+}
1673+
1674+#endif /* CONFIG_INOTIFY */
1675+
1676+#endif /* __KERNEL __ */
1677+
1678+#endif /* _LINUX_INOTIFY_H */
1679diff -urN linux-2.6.10/include/linux/miscdevice.h linux/include/linux/miscdevice.h
1680--- linux-2.6.10/include/linux/miscdevice.h 2004-12-24 16:34:58.000000000 -0500
1681+++ linux/include/linux/miscdevice.h 2005-01-06 15:04:03.751778528 -0500
1682@@ -2,6 +2,7 @@
1683 #define _LINUX_MISCDEVICE_H
1684 #include <linux/module.h>
1685 #include <linux/major.h>
1686+#include <linux/device.h>
1687
1688 #define PSMOUSE_MINOR 1
1689 #define MS_BUSMOUSE_MINOR 2
1690@@ -32,13 +33,13 @@
1691
1692 struct device;
1693
1694-struct miscdevice
1695-{
1696+struct miscdevice {
1697 int minor;
1698 const char *name;
1699 struct file_operations *fops;
1700 struct list_head list;
1701 struct device *dev;
1702+ struct class_device *class;
1703 char devfs_name[64];
1704 };
1705
1706diff -urN linux-2.6.10/include/linux/sched.h linux/include/linux/sched.h
1707--- linux-2.6.10/include/linux/sched.h 2004-12-24 16:33:59.000000000 -0500
1708+++ linux/include/linux/sched.h 2005-01-06 15:04:03.752778376 -0500
1709@@ -353,6 +353,8 @@
1710 atomic_t processes; /* How many processes does this user have? */
1711 atomic_t files; /* How many open files does this user have? */
1712 atomic_t sigpending; /* How many pending signals does this user have? */
1713+ atomic_t inotify_watches; /* How many inotify watches does this user have? */
1714+ atomic_t inotify_devs; /* How many inotify devs does this user have opened? */
1715 /* protected by mq_lock */
1716 unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
1717 unsigned long locked_shm; /* How many pages of mlocked shm ? */
1718diff -urN linux-2.6.10/kernel/user.c linux/kernel/user.c
1719--- linux-2.6.10/kernel/user.c 2004-12-24 16:34:31.000000000 -0500
1720+++ linux/kernel/user.c 2005-01-06 15:04:03.753778224 -0500
1721@@ -119,6 +119,8 @@
1722 atomic_set(&new->processes, 0);
1723 atomic_set(&new->files, 0);
1724 atomic_set(&new->sigpending, 0);
1725+ atomic_set(&new->inotify_watches, 0);
1726+ atomic_set(&new->inotify_devs, 0);
1727
1728 new->mq_bytes = 0;
1729 new->locked_shm = 0;
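
The driver above also implements poll() support (inotify_poll) and a FIONREAD ioctl that reports event_count * sizeof(struct inotify_event), so a consumer can wait for activity and then size a single read to the whole queue. A hedged sketch, assuming the same /dev/inotify fd and header as the earlier example; the helper name and buffer handling are illustrative only.

#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/inotify.h>

/* Drain every queued event from an already-opened, already-watching fd. */
static void drain_events(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		struct inotify_event *evs;
		ssize_t n;
		int pending = 0;

		/* FIONREAD reports the number of queued bytes */
		if (ioctl(fd, FIONREAD, &pending) < 0 || pending <= 0)
			continue;

		evs = malloc(pending);
		if (!evs)
			return;

		n = read(fd, evs, pending);
		for (ssize_t i = 0; (i + 1) * (ssize_t)sizeof(*evs) <= n; i++)
			printf("wd=%d mask=0x%08x name=%s\n",
			       evs[i].wd, evs[i].mask, evs[i].filename);
		free(evs);
	}
}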