Diffstat (limited to 'src/fs/buffer.c')
-rw-r--r--  src/fs/buffer.c | 384
1 file changed, 384 insertions(+), 0 deletions(-)
diff --git a/src/fs/buffer.c b/src/fs/buffer.c
new file mode 100644
index 0000000..89918e8
--- /dev/null
+++ b/src/fs/buffer.c
@@ -0,0 +1,384 @@
/*
 *  linux/fs/buffer.c
 *
 *  (C) 1991  Linus Torvalds
 */

/*
 * 'buffer.c' implements the buffer-cache functions. Race conditions have
 * been avoided by NEVER letting an interrupt change a buffer (except for the
 * data, of course), but instead letting the caller do it. NOTE! As interrupts
 * can wake up a caller, some cli-sti sequences are needed to check for
 * sleep-on calls. These should be extremely quick, though (I hope).
 */

/*
 * NOTE! There is one discordant note here: checking floppies for
 * disk change. This is where it fits best, I think, as it should
 * invalidate changed floppy-disk caches.
 */

#include <stdarg.h>

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/io.h>

extern int end;
extern void put_super(int);
extern void invalidate_inodes(int);

struct buffer_head * start_buffer = (struct buffer_head *) &end;
struct buffer_head * hash_table[NR_HASH];
static struct buffer_head * free_list;
static struct task_struct * buffer_wait = NULL;
int NR_BUFFERS = 0;

/*
 * Sleep until the buffer is unlocked. Interrupts are disabled around the
 * check so that the unlocking interrupt cannot slip in between testing
 * b_lock and going to sleep.
 */
static inline void wait_on_buffer(struct buffer_head * bh)
{
        cli();
        while (bh->b_lock)
                sleep_on(&bh->b_wait);
        sti();
}

int sys_sync(void)
{
        int i;
        struct buffer_head * bh;

        sync_inodes();          /* write out inodes into buffers */
        bh = start_buffer;
        for (i=0 ; i<NR_BUFFERS ; i++,bh++) {
                wait_on_buffer(bh);
                if (bh->b_dirt)
                        ll_rw_block(WRITE,bh);
        }
        return 0;
}

int sync_dev(int dev)
{
        int i;
        struct buffer_head * bh;

        /* first pass: write out the dirty data buffers for this device */
        bh = start_buffer;
        for (i=0 ; i<NR_BUFFERS ; i++,bh++) {
                if (bh->b_dev != dev)
                        continue;
                wait_on_buffer(bh);
                if (bh->b_dev == dev && bh->b_dirt)
                        ll_rw_block(WRITE,bh);
        }
        /* sync_inodes() may dirty more buffers, so go over them once more */
        sync_inodes();
        bh = start_buffer;
        for (i=0 ; i<NR_BUFFERS ; i++,bh++) {
                if (bh->b_dev != dev)
                        continue;
                wait_on_buffer(bh);
                if (bh->b_dev == dev && bh->b_dirt)
                        ll_rw_block(WRITE,bh);
        }
        return 0;
}

static inline void invalidate_buffers(int dev)
{
        int i;
        struct buffer_head * bh;

        bh = start_buffer;
        for (i=0 ; i<NR_BUFFERS ; i++,bh++) {
                if (bh->b_dev != dev)
                        continue;
                wait_on_buffer(bh);
                if (bh->b_dev == dev)
                        bh->b_uptodate = bh->b_dirt = 0;
        }
}

/*
 * This routine checks whether a floppy has been changed, and
 * invalidates all buffer-cache entries in that case. This
 * is a relatively slow routine, so we have to try to minimize using
 * it. Thus it is called only upon a 'mount' or 'open'. This
 * is the best way of combining speed and utility, I think.
 * People changing diskettes in the middle of an operation deserve
 * to lose :-)
 *
 * NOTE! Although currently this is only for floppies, the idea is
 * that any additional removable block-device will use this routine,
 * and that mount/open needn't know that floppies/whatever are
 * special.
 */
void check_disk_change(int dev)
{
        int i;

        if (MAJOR(dev) != 2)            /* major 2 is the floppy driver */
                return;
        if (!floppy_change(dev & 0x03))
                return;
        for (i=0 ; i<NR_SUPER ; i++)
                if (super_block[i].s_dev == dev)
                        put_super(super_block[i].s_dev);
        invalidate_inodes(dev);
        invalidate_buffers(dev);
}

#define _hashfn(dev,block) (((unsigned)(dev^block))%NR_HASH)
#define hash(dev,block) hash_table[_hashfn(dev,block)]
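/*
 * Illustrative example (the dev/block values are assumed, not taken from
 * this file): a buffer for dev 0x300, block 25 lands in bucket
 * (0x300 ^ 25) % NR_HASH, and every buffer hashing to the same bucket is
 * chained through its b_next/b_prev pointers, with hash_table[] holding
 * the head of each chain.
 */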

static inline void remove_from_queues(struct buffer_head * bh)
{
/* remove from hash-queue */
        if (bh->b_next)
                bh->b_next->b_prev = bh->b_prev;
        if (bh->b_prev)
                bh->b_prev->b_next = bh->b_next;
        if (hash(bh->b_dev,bh->b_blocknr) == bh)
                hash(bh->b_dev,bh->b_blocknr) = bh->b_next;
/* remove from free list */
        if (!(bh->b_prev_free) || !(bh->b_next_free))
                panic("Free block list corrupted");
        bh->b_prev_free->b_next_free = bh->b_next_free;
        bh->b_next_free->b_prev_free = bh->b_prev_free;
        if (free_list == bh)
                free_list = bh->b_next_free;
}

static inline void insert_into_queues(struct buffer_head * bh)
{
/* put at end of free list */
        bh->b_next_free = free_list;
        bh->b_prev_free = free_list->b_prev_free;
        free_list->b_prev_free->b_next_free = bh;
        free_list->b_prev_free = bh;
/* put the buffer in new hash-queue if it has a device */
        bh->b_prev = NULL;
        bh->b_next = NULL;
        if (!bh->b_dev)
                return;
        bh->b_next = hash(bh->b_dev,bh->b_blocknr);
        hash(bh->b_dev,bh->b_blocknr) = bh;
        if (bh->b_next)         /* guard against an empty hash chain */
                bh->b_next->b_prev = bh;
}

static struct buffer_head * find_buffer(int dev, int block)
{
        struct buffer_head * tmp;

        for (tmp = hash(dev,block) ; tmp != NULL ; tmp = tmp->b_next)
                if (tmp->b_dev==dev && tmp->b_blocknr==block)
                        return tmp;
        return NULL;
}

/*
 * Why like this, I hear you say... The reason is race-conditions.
 * As we don't lock buffers (unless we are reading them, that is),
 * something might happen to it while we sleep (ie a read-error
 * will force it bad). This shouldn't really happen currently, but
 * the code is ready.
 */
struct buffer_head * get_hash_table(int dev, int block)
{
        struct buffer_head * bh;

        for (;;) {
                if (!(bh=find_buffer(dev,block)))
                        return NULL;
                bh->b_count++;
                wait_on_buffer(bh);
                if (bh->b_dev == dev && bh->b_blocknr == block)
                        return bh;
                bh->b_count--;
        }
}

/*
 * Ok, this is getblk, and it isn't very clear, again to hinder
 * race-conditions. Most of the code is seldom used, (ie repeating),
 * so it should be much more efficient than it looks.
 *
 * The algorithm is changed: hopefully better, and an elusive bug removed.
 */
/* BADNESS ranks reclaim candidates: 0 = clean and unlocked (best),
   1 = locked, 2 = dirty, 3 = dirty and locked (worst) */
#define BADNESS(bh) (((bh)->b_dirt<<1)+(bh)->b_lock)
struct buffer_head * getblk(int dev,int block)
{
        struct buffer_head * tmp, * bh;

repeat:
        if ((bh = get_hash_table(dev,block)))
                return bh;
        tmp = free_list;
        do {
                if (tmp->b_count)
                        continue;
                if (!bh || BADNESS(tmp)<BADNESS(bh)) {
                        bh = tmp;
                        if (!BADNESS(tmp))
                                break;
                }
/* and repeat until we find something good */
        } while ((tmp = tmp->b_next_free) != free_list);
        if (!bh) {
                sleep_on(&buffer_wait);
                goto repeat;
        }
        wait_on_buffer(bh);
        if (bh->b_count)
                goto repeat;
        while (bh->b_dirt) {
                sync_dev(bh->b_dev);
                wait_on_buffer(bh);
                if (bh->b_count)
                        goto repeat;
        }
/* NOTE!! While we slept waiting for this block, somebody else might */
/* already have added "this" block to the cache. check it */
        if (find_buffer(dev,block))
                goto repeat;
/* OK, FINALLY we know that this buffer is the only one of its kind, */
/* and that it's unused (b_count=0), unlocked (b_lock=0), and clean */
        bh->b_count=1;
        bh->b_dirt=0;
        bh->b_uptodate=0;
        remove_from_queues(bh);
        bh->b_dev=dev;
        bh->b_blocknr=block;
        insert_into_queues(bh);
        return bh;
}
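/*
 * Sketch of typical getblk() use for writing (dev/block values assumed for
 * illustration, not code from this file): grab the block without reading it,
 * fill b_data, mark it up to date and dirty, then release it; sync_dev() or
 * sys_sync() will write it back later:
 *
 *      struct buffer_head * bh = getblk(0x300, 25);
 *      ... fill bh->b_data (BLOCK_SIZE bytes) ...
 *      bh->b_uptodate = 1;
 *      bh->b_dirt = 1;
 *      brelse(bh);
 */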

void brelse(struct buffer_head * buf)
{
        if (!buf)
                return;
        wait_on_buffer(buf);
        if (!(buf->b_count--))
                panic("Trying to free free buffer");
        wake_up(&buffer_wait);
}

/*
 * bread() reads a specified block and returns the buffer that contains
 * it. It returns NULL if the block was unreadable.
 */
struct buffer_head * bread(int dev,int block)
{
        struct buffer_head * bh;

        if (!(bh=getblk(dev,block)))
                panic("bread: getblk returned NULL\n");
        if (bh->b_uptodate)
                return bh;
        ll_rw_block(READ,bh);
        wait_on_buffer(bh);
        if (bh->b_uptodate)
                return bh;
        brelse(bh);
        return NULL;
}
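/*
 * Typical usage (a sketch with assumed dev/block values, not code from this
 * file): read a block, use its data, then release it, checking for a read
 * error via the NULL return:
 *
 *      struct buffer_head * bh;
 *      if ((bh = bread(0x300, 25))) {
 *              ... use bh->b_data (BLOCK_SIZE bytes) ...
 *              brelse(bh);
 *      }
 */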

/*
 * Copy one whole block (BLOCK_SIZE bytes) as longwords with rep movsl.
 */
#define COPYBLK(from,to) \
__asm__("cld\n\t" \
        "rep\n\t" \
        "movsl\n\t" \
        ::"c" (BLOCK_SIZE/4),"S" (from),"D" (to) \
        )

/*
 * bread_page reads four buffers into memory at the desired address. It's
 * a function of its own, as there is some speed to be got by reading them
 * all at the same time, not waiting for one to be read, and then another
 * etc.
 */
void bread_page(unsigned long address,int dev,int b[4])
{
        struct buffer_head * bh[4];
        int i;

        for (i=0 ; i<4 ; i++)
                if (b[i]) {
                        if ((bh[i] = getblk(dev,b[i])))
                                if (!bh[i]->b_uptodate)
                                        ll_rw_block(READ,bh[i]);
                } else
                        bh[i] = NULL;
        for (i=0 ; i<4 ; i++,address += BLOCK_SIZE)
                if (bh[i]) {
                        wait_on_buffer(bh[i]);
                        if (bh[i]->b_uptodate)
                                COPYBLK((unsigned long) bh[i]->b_data,address);
                        brelse(bh[i]);
                }
}
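/*
 * Example call (the page address and block numbers are assumed for
 * illustration): read the four 1k blocks that make up one page into
 * memory at 'page':
 *
 *      int nr[4] = { 100, 101, 102, 103 };
 *      bread_page(page, 0x300, nr);
 */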

/*
 * Ok, breada can be used just like bread, but it additionally marks other
 * blocks for reading as well. End the argument list with a negative
 * number.
 */
struct buffer_head * breada(int dev,int first, ...)
{
        va_list args;
        struct buffer_head * bh, *tmp;

        va_start(args,first);
        if (!(bh=getblk(dev,first)))
                panic("breada: getblk returned NULL\n");
        if (!bh->b_uptodate)
                ll_rw_block(READ,bh);
        while ((first=va_arg(args,int))>=0) {
                tmp=getblk(dev,first);
                if (tmp) {
                        if (!tmp->b_uptodate)
                                ll_rw_block(READA,tmp); /* read-ahead the extra block, not bh */
                        tmp->b_count--;
                }
        }
        va_end(args);
        wait_on_buffer(bh);
        if (bh->b_uptodate)
                return bh;
        brelse(bh);
        return NULL;
}
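/*
 * Example call (block numbers made up for illustration): read block 100 now
 * and queue blocks 101 and 102 for read-ahead, terminating the variable
 * argument list with a negative number:
 *
 *      bh = breada(0x300, 100, 101, 102, -1);
 */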

void buffer_init(long buffer_end)
{
        struct buffer_head * h = start_buffer;
        void * b;
        int i;

        if (buffer_end == 1<<20)
                b = (void *) (640*1024);
        else
                b = (void *) buffer_end;
        /* hand out data blocks downwards from buffer_end, skipping the
           640K-1M hole (video RAM and BIOS ROMs), while the buffer_head
           array grows upwards from 'end'; stop when the two would meet */
        while ( (b -= BLOCK_SIZE) >= ((void *) (h+1)) ) {
                h->b_dev = 0;
                h->b_dirt = 0;
                h->b_count = 0;
                h->b_lock = 0;
                h->b_uptodate = 0;
                h->b_wait = NULL;
                h->b_next = NULL;
                h->b_prev = NULL;
                h->b_data = (char *) b;
                h->b_prev_free = h-1;
                h->b_next_free = h+1;
                h++;
                NR_BUFFERS++;
                if (b == (void *) 0x100000)
                        b = (void *) 0xA0000;
        }
        h--;
        free_list = start_buffer;
        free_list->b_prev_free = h;
        h->b_next_free = free_list;
        for (i=0;i<NR_HASH;i++)
                hash_table[i]=NULL;
}