Diffstat (limited to 'src/lib/kernel')
-rw-r--r--  src/lib/kernel/bitmap.c    372
-rw-r--r--  src/lib/kernel/bitmap.h     51
-rw-r--r--  src/lib/kernel/console.c   191
-rw-r--r--  src/lib/kernel/console.h     8
-rw-r--r--  src/lib/kernel/debug.c      48
-rw-r--r--  src/lib/kernel/hash.c      430
-rw-r--r--  src/lib/kernel/hash.h      103
-rw-r--r--  src/lib/kernel/list.c      532
-rw-r--r--  src/lib/kernel/list.h      168
-rw-r--r--  src/lib/kernel/slist.c     153
-rw-r--r--  src/lib/kernel/slist.h      25
-rw-r--r--  src/lib/kernel/stdio.h       6
12 files changed, 2087 insertions, 0 deletions
diff --git a/src/lib/kernel/bitmap.c b/src/lib/kernel/bitmap.c
new file mode 100644
index 0000000..d323b89
--- /dev/null
+++ b/src/lib/kernel/bitmap.c
@@ -0,0 +1,372 @@
+#include "bitmap.h"
+#include <debug.h>
+#include <limits.h>
+#include <round.h>
+#include <stdio.h>
+#include "threads/malloc.h"
+#ifdef FILESYS
+#include "filesys/file.h"
+#endif
+
+/* Element type.
+
+ This must be an unsigned integer type at least as wide as int.
+
+ Each bit represents one bit in the bitmap.
+ If bit 0 in an element represents bit K in the bitmap,
+ then bit 1 in the element represents bit K+1 in the bitmap,
+ and so on. */
+typedef unsigned long elem_type;
+
+/* Number of bits in an element. */
+#define ELEM_BITS (sizeof (elem_type) * CHAR_BIT)
+
+/* From the outside, a bitmap is an array of bits. From the
+ inside, it's an array of elem_type (defined above) that
+ simulates an array of bits. */
+struct bitmap
+ {
+ size_t bit_cnt; /* Number of bits. */
+ elem_type *bits; /* Elements that represent bits. */
+ };
+
+/* Returns the index of the element that contains the bit
+ numbered BIT_IDX. */
+static inline size_t
+elem_idx (size_t bit_idx)
+{
+ return bit_idx / ELEM_BITS;
+}
+
+/* Returns an elem_type where only the bit corresponding to
+ BIT_IDX is turned on. */
+static inline elem_type
+bit_mask (size_t bit_idx)
+{
+ return (elem_type) 1 << (bit_idx % ELEM_BITS);
+}
+
+/* Returns the number of elements required for BIT_CNT bits. */
+static inline size_t
+elem_cnt (size_t bit_cnt)
+{
+ return DIV_ROUND_UP (bit_cnt, ELEM_BITS);
+}
+
+/* Returns the number of bytes required for BIT_CNT bits. */
+static inline size_t
+byte_cnt (size_t bit_cnt)
+{
+ return sizeof (elem_type) * elem_cnt (bit_cnt);
+}
+
+/* Returns a bit mask in which the bits actually used in the last
+ element of B's bits are set to 1 and the rest are set to 0. */
+static inline elem_type
+last_mask (const struct bitmap *b)
+{
+ int last_bits = b->bit_cnt % ELEM_BITS;
+ return last_bits ? ((elem_type) 1 << last_bits) - 1 : (elem_type) -1;
+}
+
+/* Creation and destruction. */
+
+/* Creates and returns a new bitmap of BIT_CNT bits,
+   with all of its bits initially set to false.
+   Returns a null pointer if memory allocation
+   fails. */
+struct bitmap *
+bitmap_create (size_t bit_cnt)
+{
+ struct bitmap *b = malloc (sizeof *b);
+ if (b != NULL)
+ {
+ b->bit_cnt = bit_cnt;
+ b->bits = malloc (byte_cnt (bit_cnt));
+ if (b->bits != NULL || bit_cnt == 0)
+ {
+ bitmap_set_all (b, false);
+ return b;
+ }
+ free (b);
+ }
+ return NULL;
+}
+
+/* Creates and returns a bitmap with BIT_CNT bits in the
+ BLOCK_SIZE bytes of storage preallocated at BLOCK.
+   BLOCK_SIZE must be at least bitmap_buf_size(BIT_CNT). */
+struct bitmap *
+bitmap_create_in_buf (size_t bit_cnt, void *block, size_t block_size UNUSED)
+{
+ struct bitmap *b = block;
+
+ ASSERT (block_size >= bitmap_buf_size (bit_cnt));
+
+ b->bit_cnt = bit_cnt;
+ b->bits = (elem_type *) (b + 1);
+ bitmap_set_all (b, false);
+ return b;
+}
+
+/* Returns the number of bytes required to accommodate a bitmap
+ with BIT_CNT bits (for use with bitmap_create_in_buf()). */
+size_t
+bitmap_buf_size (size_t bit_cnt)
+{
+ return sizeof (struct bitmap) + byte_cnt (bit_cnt);
+}
+
+/* Destroys bitmap B, freeing its storage.
+   Not for use on bitmaps created by
+   bitmap_create_in_buf(). */
+void
+bitmap_destroy (struct bitmap *b)
+{
+ if (b != NULL)
+ {
+ free (b->bits);
+ free (b);
+ }
+}
+
+/* Bitmap size. */
+
+/* Returns the number of bits in B. */
+size_t
+bitmap_size (const struct bitmap *b)
+{
+ return b->bit_cnt;
+}
+
+/* Setting and testing single bits. */
+
+/* Atomically sets the bit numbered IDX in B to VALUE. */
+void
+bitmap_set (struct bitmap *b, size_t idx, bool value)
+{
+ ASSERT (b != NULL);
+ ASSERT (idx < b->bit_cnt);
+ if (value)
+ bitmap_mark (b, idx);
+ else
+ bitmap_reset (b, idx);
+}
+
+/* Atomically sets the bit numbered BIT_IDX in B to true. */
+void
+bitmap_mark (struct bitmap *b, size_t bit_idx)
+{
+ size_t idx = elem_idx (bit_idx);
+ elem_type mask = bit_mask (bit_idx);
+
+ /* This is equivalent to `b->bits[idx] |= mask' except that it
+ is guaranteed to be atomic on a uniprocessor machine. See
+ the description of the OR instruction in [IA32-v2b]. */
+ asm ("orl %1, %0" : "=m" (b->bits[idx]) : "r" (mask) : "cc");
+}
+
+/* Atomically sets the bit numbered BIT_IDX in B to false. */
+void
+bitmap_reset (struct bitmap *b, size_t bit_idx)
+{
+ size_t idx = elem_idx (bit_idx);
+ elem_type mask = bit_mask (bit_idx);
+
+ /* This is equivalent to `b->bits[idx] &= ~mask' except that it
+ is guaranteed to be atomic on a uniprocessor machine. See
+ the description of the AND instruction in [IA32-v2a]. */
+ asm ("andl %1, %0" : "=m" (b->bits[idx]) : "r" (~mask) : "cc");
+}
+
+/* Atomically toggles the bit numbered IDX in B;
+ that is, if it is true, makes it false,
+ and if it is false, makes it true. */
+void
+bitmap_flip (struct bitmap *b, size_t bit_idx)
+{
+ size_t idx = elem_idx (bit_idx);
+ elem_type mask = bit_mask (bit_idx);
+
+ /* This is equivalent to `b->bits[idx] ^= mask' except that it
+ is guaranteed to be atomic on a uniprocessor machine. See
+ the description of the XOR instruction in [IA32-v2b]. */
+ asm ("xorl %1, %0" : "=m" (b->bits[idx]) : "r" (mask) : "cc");
+}
+
+/* Returns the value of the bit numbered IDX in B. */
+bool
+bitmap_test (const struct bitmap *b, size_t idx)
+{
+ ASSERT (b != NULL);
+ ASSERT (idx < b->bit_cnt);
+ return (b->bits[elem_idx (idx)] & bit_mask (idx)) != 0;
+}
+
+/* Setting and testing multiple bits. */
+
+/* Sets all bits in B to VALUE. */
+void
+bitmap_set_all (struct bitmap *b, bool value)
+{
+ ASSERT (b != NULL);
+
+ bitmap_set_multiple (b, 0, bitmap_size (b), value);
+}
+
+/* Sets the CNT bits starting at START in B to VALUE. */
+void
+bitmap_set_multiple (struct bitmap *b, size_t start, size_t cnt, bool value)
+{
+ size_t i;
+
+ ASSERT (b != NULL);
+ ASSERT (start <= b->bit_cnt);
+ ASSERT (start + cnt <= b->bit_cnt);
+
+ for (i = 0; i < cnt; i++)
+ bitmap_set (b, start + i, value);
+}
+
+/* Returns the number of bits in B between START and START + CNT,
+ exclusive, that are set to VALUE. */
+size_t
+bitmap_count (const struct bitmap *b, size_t start, size_t cnt, bool value)
+{
+ size_t i, value_cnt;
+
+ ASSERT (b != NULL);
+ ASSERT (start <= b->bit_cnt);
+ ASSERT (start + cnt <= b->bit_cnt);
+
+ value_cnt = 0;
+ for (i = 0; i < cnt; i++)
+ if (bitmap_test (b, start + i) == value)
+ value_cnt++;
+ return value_cnt;
+}
+
+/* Returns true if any bits in B between START and START + CNT,
+ exclusive, are set to VALUE, and false otherwise. */
+bool
+bitmap_contains (const struct bitmap *b, size_t start, size_t cnt, bool value)
+{
+ size_t i;
+
+ ASSERT (b != NULL);
+ ASSERT (start <= b->bit_cnt);
+ ASSERT (start + cnt <= b->bit_cnt);
+
+ for (i = 0; i < cnt; i++)
+ if (bitmap_test (b, start + i) == value)
+ return true;
+ return false;
+}
+
+/* Returns true if any bits in B between START and START + CNT,
+ exclusive, are set to true, and false otherwise.*/
+bool
+bitmap_any (const struct bitmap *b, size_t start, size_t cnt)
+{
+ return bitmap_contains (b, start, cnt, true);
+}
+
+/* Returns true if no bits in B between START and START + CNT,
+ exclusive, are set to true, and false otherwise.*/
+bool
+bitmap_none (const struct bitmap *b, size_t start, size_t cnt)
+{
+ return !bitmap_contains (b, start, cnt, true);
+}
+
+/* Returns true if every bit in B between START and START + CNT,
+ exclusive, is set to true, and false otherwise. */
+bool
+bitmap_all (const struct bitmap *b, size_t start, size_t cnt)
+{
+ return !bitmap_contains (b, start, cnt, false);
+}
+
+/* Finding set or unset bits. */
+
+/* Finds and returns the starting index of the first group of CNT
+ consecutive bits in B at or after START that are all set to
+ VALUE.
+ If there is no such group, returns BITMAP_ERROR. */
+size_t
+bitmap_scan (const struct bitmap *b, size_t start, size_t cnt, bool value)
+{
+ ASSERT (b != NULL);
+ ASSERT (start <= b->bit_cnt);
+
+ if (cnt <= b->bit_cnt)
+ {
+ size_t last = b->bit_cnt - cnt;
+ size_t i;
+ for (i = start; i <= last; i++)
+ if (!bitmap_contains (b, i, cnt, !value))
+ return i;
+ }
+ return BITMAP_ERROR;
+}
+
+/* Finds the first group of CNT consecutive bits in B at or after
+ START that are all set to VALUE, flips them all to !VALUE,
+ and returns the index of the first bit in the group.
+ If there is no such group, returns BITMAP_ERROR.
+ If CNT is zero, returns 0.
+ Bits are set atomically, but testing bits is not atomic with
+ setting them. */
+size_t
+bitmap_scan_and_flip (struct bitmap *b, size_t start, size_t cnt, bool value)
+{
+ size_t idx = bitmap_scan (b, start, cnt, value);
+ if (idx != BITMAP_ERROR)
+ bitmap_set_multiple (b, idx, cnt, !value);
+ return idx;
+}
+
+/* File input and output. */
+
+#ifdef FILESYS
+/* Returns the number of bytes needed to store B in a file. */
+size_t
+bitmap_file_size (const struct bitmap *b)
+{
+ return byte_cnt (b->bit_cnt);
+}
+
+/* Reads B from FILE. Returns true if successful, false
+ otherwise. */
+bool
+bitmap_read (struct bitmap *b, struct file *file)
+{
+ bool success = true;
+ if (b->bit_cnt > 0)
+ {
+ off_t size = byte_cnt (b->bit_cnt);
+ success = file_read_at (file, b->bits, size, 0) == size;
+ b->bits[elem_cnt (b->bit_cnt) - 1] &= last_mask (b);
+ }
+ return success;
+}
+
+/* Writes B to FILE. Return true if successful, false
+ otherwise. */
+bool
+bitmap_write (const struct bitmap *b, struct file *file)
+{
+ off_t size = byte_cnt (b->bit_cnt);
+ return file_write_at (file, b->bits, size, 0) == size;
+}
+#endif /* FILESYS */
+
+/* Debugging. */
+
+/* Dumps the contents of B to the console as hexadecimal. */
+void
+bitmap_dump (const struct bitmap *b)
+{
+ hex_dump (0, b->bits, byte_cnt (b->bit_cnt), false);
+}
+
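The scan functions above are what turn a bitmap into a simple first-fit allocator over fixed-size slots, much as the page and free-map allocators elsewhere in Pintos use it. A minimal usage sketch, using only the interface defined in this file; the slot count, request sizes, and function names are illustrative, not part of this change:

    #include <bitmap.h>
    #include <debug.h>                  /* ASSERT */

    static struct bitmap *used_map;     /* true = slot in use. */

    void
    slots_init (void)
    {
      used_map = bitmap_create (1024);
      ASSERT (used_map != NULL);
    }

    /* Marks CNT free slots as used and returns the index of the
       first one, or BITMAP_ERROR if no run of CNT free slots
       exists. */
    size_t
    slots_allocate (size_t cnt)
    {
      return bitmap_scan_and_flip (used_map, 0, cnt, false);
    }

    /* Releases the CNT slots starting at SLOT. */
    void
    slots_release (size_t slot, size_t cnt)
    {
      ASSERT (bitmap_all (used_map, slot, cnt));
      bitmap_set_multiple (used_map, slot, cnt, false);
    }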
diff --git a/src/lib/kernel/bitmap.h b/src/lib/kernel/bitmap.h
new file mode 100644
index 0000000..a50593c
--- /dev/null
+++ b/src/lib/kernel/bitmap.h
@@ -0,0 +1,51 @@
+#ifndef __LIB_KERNEL_BITMAP_H
+#define __LIB_KERNEL_BITMAP_H
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <inttypes.h>
+
+/* Bitmap abstract data type. */
+
+/* Creation and destruction. */
+struct bitmap *bitmap_create (size_t bit_cnt);
+struct bitmap *bitmap_create_in_buf (size_t bit_cnt, void *, size_t byte_cnt);
+size_t bitmap_buf_size (size_t bit_cnt);
+void bitmap_destroy (struct bitmap *);
+
+/* Bitmap size. */
+size_t bitmap_size (const struct bitmap *);
+
+/* Setting and testing single bits. */
+void bitmap_set (struct bitmap *, size_t idx, bool);
+void bitmap_mark (struct bitmap *, size_t idx);
+void bitmap_reset (struct bitmap *, size_t idx);
+void bitmap_flip (struct bitmap *, size_t idx);
+bool bitmap_test (const struct bitmap *, size_t idx);
+
+/* Setting and testing multiple bits. */
+void bitmap_set_all (struct bitmap *, bool);
+void bitmap_set_multiple (struct bitmap *, size_t start, size_t cnt, bool);
+size_t bitmap_count (const struct bitmap *, size_t start, size_t cnt, bool);
+bool bitmap_contains (const struct bitmap *, size_t start, size_t cnt, bool);
+bool bitmap_any (const struct bitmap *, size_t start, size_t cnt);
+bool bitmap_none (const struct bitmap *, size_t start, size_t cnt);
+bool bitmap_all (const struct bitmap *, size_t start, size_t cnt);
+
+/* Finding set or unset bits. */
+#define BITMAP_ERROR SIZE_MAX
+size_t bitmap_scan (const struct bitmap *, size_t start, size_t cnt, bool);
+size_t bitmap_scan_and_flip (struct bitmap *, size_t start, size_t cnt, bool);
+
+/* File input and output. */
+#ifdef FILESYS
+struct file;
+size_t bitmap_file_size (const struct bitmap *);
+bool bitmap_read (struct bitmap *, struct file *);
+bool bitmap_write (const struct bitmap *, struct file *);
+#endif
+
+/* Debugging. */
+void bitmap_dump (const struct bitmap *);
+
+#endif /* lib/kernel/bitmap.h */
diff --git a/src/lib/kernel/console.c b/src/lib/kernel/console.c
new file mode 100644
index 0000000..0d031b5
--- /dev/null
+++ b/src/lib/kernel/console.c
@@ -0,0 +1,191 @@
+#include <console.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include "devices/serial.h"
+#include "devices/vga.h"
+#include "threads/init.h"
+#include "threads/interrupt.h"
+#include "threads/synch.h"
+
+static void vprintf_helper (char, void *);
+static void putchar_have_lock (uint8_t c);
+
+/* The console lock.
+ Both the vga and serial layers do their own locking, so it's
+ safe to call them at any time.
+ But this lock is useful to prevent simultaneous printf() calls
+ from mixing their output, which looks confusing. */
+static struct lock console_lock;
+
+/* True in ordinary circumstances: we want to use the console
+ lock to avoid mixing output between threads, as explained
+ above.
+
+ False in early boot before the point that locks are functional
+ or the console lock has been initialized, or after a kernel
+ panics. In the former case, taking the lock would cause an
+ assertion failure, which in turn would cause a panic, turning
+ it into the latter case. In the latter case, if it is a buggy
+ lock_acquire() implementation that caused the panic, we'll
+ likely just recurse. */
+static bool use_console_lock;
+
+/* It's possible, if you add enough debug output to Pintos, to
+ try to recursively grab console_lock from a single thread. As
+ a real example, I added a printf() call to palloc_free().
+ Here's a real backtrace that resulted:
+
+ lock_console()
+ vprintf()
+ printf() - palloc() tries to grab the lock again
+ palloc_free()
+ schedule_tail() - another thread dying as we switch threads
+ schedule()
+ thread_yield()
+ intr_handler() - timer interrupt
+ intr_set_level()
+ serial_putc()
+ putchar_have_lock()
+ putbuf()
+ sys_write() - one process writing to the console
+ syscall_handler()
+ intr_handler()
+
+ This kind of thing is very difficult to debug, so we avoid the
+ problem by simulating a recursive lock with a depth
+ counter. */
+static int console_lock_depth;
+
+/* Number of characters written to console. */
+static int64_t write_cnt;
+
+/* Enable console locking. */
+void
+console_init (void)
+{
+ lock_init (&console_lock);
+ use_console_lock = true;
+}
+
+/* Notifies the console that a kernel panic is underway,
+ which warns it to avoid trying to take the console lock from
+ now on. */
+void
+console_panic (void)
+{
+ use_console_lock = false;
+}
+
+/* Prints console statistics. */
+void
+console_print_stats (void)
+{
+ printf ("Console: %lld characters output\n", write_cnt);
+}
+
+/* Acquires the console lock. */
+static void
+acquire_console (void)
+{
+ if (!intr_context () && use_console_lock)
+ {
+ if (lock_held_by_current_thread (&console_lock))
+ console_lock_depth++;
+ else
+ lock_acquire (&console_lock);
+ }
+}
+
+/* Releases the console lock. */
+static void
+release_console (void)
+{
+ if (!intr_context () && use_console_lock)
+ {
+ if (console_lock_depth > 0)
+ console_lock_depth--;
+ else
+ lock_release (&console_lock);
+ }
+}
+
+/* Returns true if the current thread has the console lock,
+ false otherwise. */
+static bool
+console_locked_by_current_thread (void)
+{
+ return (intr_context ()
+ || !use_console_lock
+ || lock_held_by_current_thread (&console_lock));
+}
+
+/* The standard vprintf() function,
+ which is like printf() but uses a va_list.
+ Writes its output to both vga display and serial port. */
+int
+vprintf (const char *format, va_list args)
+{
+ int char_cnt = 0;
+
+ acquire_console ();
+ __vprintf (format, args, vprintf_helper, &char_cnt);
+ release_console ();
+
+ return char_cnt;
+}
+
+/* Writes string S to the console, followed by a new-line
+ character. */
+int
+puts (const char *s)
+{
+ acquire_console ();
+ while (*s != '\0')
+ putchar_have_lock (*s++);
+ putchar_have_lock ('\n');
+ release_console ();
+
+ return 0;
+}
+
+/* Writes the N characters in BUFFER to the console. */
+void
+putbuf (const char *buffer, size_t n)
+{
+ acquire_console ();
+ while (n-- > 0)
+ putchar_have_lock (*buffer++);
+ release_console ();
+}
+
+/* Writes C to the vga display and serial port. */
+int
+putchar (int c)
+{
+ acquire_console ();
+ putchar_have_lock (c);
+ release_console ();
+
+ return c;
+}
+
+/* Helper function for vprintf(). */
+static void
+vprintf_helper (char c, void *char_cnt_)
+{
+ int *char_cnt = char_cnt_;
+ (*char_cnt)++;
+ putchar_have_lock (c);
+}
+
+/* Writes C to the vga display and serial port.
+ The caller has already acquired the console lock if
+ appropriate. */
+static void
+putchar_have_lock (uint8_t c)
+{
+ ASSERT (console_locked_by_current_thread ());
+ write_cnt++;
+ serial_putc (c);
+ vga_putc (c);
+}
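Of the entry points above, putbuf() is the one intended for writing a counted buffer: it takes the console lock once around the whole buffer, so output from concurrent threads is not interleaved character by character the way repeated putchar() calls could be. A sketch of a typical caller; the surrounding function is hypothetical, not part of this change:

    #include <stdio.h>          /* putbuf(), via lib/kernel/stdio.h. */

    /* Writes SIZE bytes from BUFFER to the console and returns the
       number written.  putbuf() holds the console lock for the whole
       call, so two threads calling this do not interleave mid-line. */
    static int
    console_write (const char *buffer, size_t size)
    {
      putbuf (buffer, size);
      return (int) size;
    }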
diff --git a/src/lib/kernel/console.h b/src/lib/kernel/console.h
new file mode 100644
index 0000000..ab99249
--- /dev/null
+++ b/src/lib/kernel/console.h
@@ -0,0 +1,8 @@
+#ifndef __LIB_KERNEL_CONSOLE_H
+#define __LIB_KERNEL_CONSOLE_H
+
+void console_init (void);
+void console_panic (void);
+void console_print_stats (void);
+
+#endif /* lib/kernel/console.h */
diff --git a/src/lib/kernel/debug.c b/src/lib/kernel/debug.c
new file mode 100644
index 0000000..93c3952
--- /dev/null
+++ b/src/lib/kernel/debug.c
@@ -0,0 +1,48 @@
+#include <debug.h>
+#include <console.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include "threads/init.h"
+#include "threads/interrupt.h"
+#include "devices/serial.h"
+
+/* Halts the OS, printing the source file name, line number, and
+   function name, plus a user-specified message. */
+void
+debug_panic (const char *file, int line, const char *function,
+ const char *message, ...)
+{
+ static int level;
+ va_list args;
+
+ intr_disable ();
+ console_panic ();
+
+ level++;
+ if (level == 1)
+ {
+ printf ("Kernel PANIC at %s:%d in %s(): ", file, line, function);
+
+ va_start (args, message);
+ vprintf (message, args);
+ printf ("\n");
+ va_end (args);
+
+ debug_backtrace ();
+ }
+ else if (level == 2)
+ printf ("Kernel PANIC recursion at %s:%d in %s().\n",
+ file, line, function);
+ else
+ {
+ /* Don't print anything: that's probably why we recursed. */
+ }
+
+ serial_flush ();
+ if (power_off_when_done)
+ power_off ();
+ for (;;);
+}
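debug_panic() never returns: it disables interrupts, tells the console code to stop taking its lock, prints at most one full report (the static `level' counter caps recursion), and then powers off or spins. In the rest of the tree it is normally reached through wrapper macros (PANIC and ASSERT in lib/debug.h, not part of this diff) that supply the location; a direct call is equivalent to what such a macro expands to. The condition and message below are illustrative:

    if (swap_slots == 0)
      debug_panic (__FILE__, __LINE__, __func__,
                   "out of swap slots (%d requested)", (int) cnt);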
diff --git a/src/lib/kernel/hash.c b/src/lib/kernel/hash.c
new file mode 100644
index 0000000..57eed45
--- /dev/null
+++ b/src/lib/kernel/hash.c
@@ -0,0 +1,430 @@
+/* Hash table.
+
+ This data structure is thoroughly documented in the Tour of
+ Pintos for Project 3.
+
+ See hash.h for basic information. */
+
+#include "hash.h"
+#include "../debug.h"
+#include "threads/malloc.h"
+
+#define list_elem_to_hash_elem(LIST_ELEM) \
+ list_entry(LIST_ELEM, struct hash_elem, list_elem)
+
+static struct list *find_bucket (struct hash *, struct hash_elem *);
+static struct hash_elem *find_elem (struct hash *, struct list *,
+ struct hash_elem *);
+static void insert_elem (struct hash *, struct list *, struct hash_elem *);
+static void remove_elem (struct hash *, struct hash_elem *);
+static void rehash (struct hash *);
+
+/* Initializes hash table H to compute hash values using HASH and
+ compare hash elements using LESS, given auxiliary data AUX. */
+bool
+hash_init (struct hash *h,
+ hash_hash_func *hash, hash_less_func *less, void *aux)
+{
+ h->elem_cnt = 0;
+ h->bucket_cnt = 4;
+ h->buckets = malloc (sizeof *h->buckets * h->bucket_cnt);
+ h->hash = hash;
+ h->less = less;
+ h->aux = aux;
+
+ if (h->buckets != NULL)
+ {
+ hash_clear (h, NULL);
+ return true;
+ }
+ else
+ return false;
+}
+
+/* Removes all the elements from H.
+
+ If DESTRUCTOR is non-null, then it is called for each element
+ in the hash. DESTRUCTOR may, if appropriate, deallocate the
+ memory used by the hash element. However, modifying hash
+ table H while hash_clear() is running, using any of the
+ functions hash_clear(), hash_destroy(), hash_insert(),
+ hash_replace(), or hash_delete(), yields undefined behavior,
+ whether done in DESTRUCTOR or elsewhere. */
+void
+hash_clear (struct hash *h, hash_action_func *destructor)
+{
+ size_t i;
+
+ for (i = 0; i < h->bucket_cnt; i++)
+ {
+ struct list *bucket = &h->buckets[i];
+
+ if (destructor != NULL)
+ while (!list_empty (bucket))
+ {
+ struct list_elem *list_elem = list_pop_front (bucket);
+ struct hash_elem *hash_elem = list_elem_to_hash_elem (list_elem);
+ destructor (hash_elem, h->aux);
+ }
+
+ list_init (bucket);
+ }
+
+ h->elem_cnt = 0;
+}
+
+/* Destroys hash table H.
+
+ If DESTRUCTOR is non-null, then it is first called for each
+ element in the hash. DESTRUCTOR may, if appropriate,
+ deallocate the memory used by the hash element. However,
+ modifying hash table H while hash_clear() is running, using
+ any of the functions hash_clear(), hash_destroy(),
+ hash_insert(), hash_replace(), or hash_delete(), yields
+ undefined behavior, whether done in DESTRUCTOR or
+ elsewhere. */
+void
+hash_destroy (struct hash *h, hash_action_func *destructor)
+{
+ if (destructor != NULL)
+ hash_clear (h, destructor);
+ free (h->buckets);
+}
+
+/* Inserts NEW into hash table H and returns a null pointer if
+   no equal element is already in the table.
+ If an equal element is already in the table, returns it
+ without inserting NEW. */
+struct hash_elem *
+hash_insert (struct hash *h, struct hash_elem *new)
+{
+ struct list *bucket = find_bucket (h, new);
+ struct hash_elem *old = find_elem (h, bucket, new);
+
+ if (old == NULL)
+ insert_elem (h, bucket, new);
+
+ rehash (h);
+
+ return old;
+}
+
+/* Inserts NEW into hash table H, replacing any equal element
+ already in the table, which is returned. */
+struct hash_elem *
+hash_replace (struct hash *h, struct hash_elem *new)
+{
+ struct list *bucket = find_bucket (h, new);
+ struct hash_elem *old = find_elem (h, bucket, new);
+
+ if (old != NULL)
+ remove_elem (h, old);
+ insert_elem (h, bucket, new);
+
+ rehash (h);
+
+ return old;
+}
+
+/* Finds and returns an element equal to E in hash table H, or a
+ null pointer if no equal element exists in the table. */
+struct hash_elem *
+hash_find (struct hash *h, struct hash_elem *e)
+{
+ return find_elem (h, find_bucket (h, e), e);
+}
+
+/* Finds, removes, and returns an element equal to E in hash
+ table H. Returns a null pointer if no equal element existed
+ in the table.
+
+ If the elements of the hash table are dynamically allocated,
+ or own resources that are, then it is the caller's
+ responsibility to deallocate them. */
+struct hash_elem *
+hash_delete (struct hash *h, struct hash_elem *e)
+{
+ struct hash_elem *found = find_elem (h, find_bucket (h, e), e);
+ if (found != NULL)
+ {
+ remove_elem (h, found);
+ rehash (h);
+ }
+ return found;
+}
+
+/* Calls ACTION for each element in hash table H in arbitrary
+ order.
+ Modifying hash table H while hash_apply() is running, using
+ any of the functions hash_clear(), hash_destroy(),
+ hash_insert(), hash_replace(), or hash_delete(), yields
+ undefined behavior, whether done from ACTION or elsewhere. */
+void
+hash_apply (struct hash *h, hash_action_func *action)
+{
+ size_t i;
+
+ ASSERT (action != NULL);
+
+ for (i = 0; i < h->bucket_cnt; i++)
+ {
+ struct list *bucket = &h->buckets[i];
+ struct list_elem *elem, *next;
+
+ for (elem = list_begin (bucket); elem != list_end (bucket); elem = next)
+ {
+ next = list_next (elem);
+ action (list_elem_to_hash_elem (elem), h->aux);
+ }
+ }
+}
+
+/* Initializes I for iterating hash table H.
+
+ Iteration idiom:
+
+ struct hash_iterator i;
+
+ hash_first (&i, h);
+ while (hash_next (&i))
+ {
+ struct foo *f = hash_entry (hash_cur (&i), struct foo, elem);
+ ...do something with f...
+ }
+
+ Modifying hash table H during iteration, using any of the
+ functions hash_clear(), hash_destroy(), hash_insert(),
+ hash_replace(), or hash_delete(), invalidates all
+ iterators. */
+void
+hash_first (struct hash_iterator *i, struct hash *h)
+{
+ ASSERT (i != NULL);
+ ASSERT (h != NULL);
+
+ i->hash = h;
+ i->bucket = i->hash->buckets;
+ i->elem = list_elem_to_hash_elem (list_head (i->bucket));
+}
+
+/* Advances I to the next element in the hash table and returns
+ it. Returns a null pointer if no elements are left. Elements
+ are returned in arbitrary order.
+
+ Modifying a hash table H during iteration, using any of the
+ functions hash_clear(), hash_destroy(), hash_insert(),
+ hash_replace(), or hash_delete(), invalidates all
+ iterators. */
+struct hash_elem *
+hash_next (struct hash_iterator *i)
+{
+ ASSERT (i != NULL);
+
+ i->elem = list_elem_to_hash_elem (list_next (&i->elem->list_elem));
+ while (i->elem == list_elem_to_hash_elem (list_end (i->bucket)))
+ {
+ if (++i->bucket >= i->hash->buckets + i->hash->bucket_cnt)
+ {
+ i->elem = NULL;
+ break;
+ }
+ i->elem = list_elem_to_hash_elem (list_begin (i->bucket));
+ }
+
+ return i->elem;
+}
+
+/* Returns the current element in the hash table iteration, or a
+ null pointer at the end of the table. Undefined behavior
+ after calling hash_first() but before hash_next(). */
+struct hash_elem *
+hash_cur (struct hash_iterator *i)
+{
+ return i->elem;
+}
+
+/* Returns the number of elements in H. */
+size_t
+hash_size (struct hash *h)
+{
+ return h->elem_cnt;
+}
+
+/* Returns true if H contains no elements, false otherwise. */
+bool
+hash_empty (struct hash *h)
+{
+ return h->elem_cnt == 0;
+}
+
+/* Fowler-Noll-Vo hash constants, for 32-bit word sizes. */
+#define FNV_32_PRIME 16777619u
+#define FNV_32_BASIS 2166136261u
+
+/* Returns a hash of the SIZE bytes in BUF. */
+unsigned
+hash_bytes (const void *buf_, size_t size)
+{
+ /* Fowler-Noll-Vo 32-bit hash, for bytes. */
+ const unsigned char *buf = buf_;
+ unsigned hash;
+
+ ASSERT (buf != NULL);
+
+ hash = FNV_32_BASIS;
+ while (size-- > 0)
+ hash = (hash * FNV_32_PRIME) ^ *buf++;
+
+ return hash;
+}
+
+/* Returns a hash of string S. */
+unsigned
+hash_string (const char *s_)
+{
+ const unsigned char *s = (const unsigned char *) s_;
+ unsigned hash;
+
+ ASSERT (s != NULL);
+
+ hash = FNV_32_BASIS;
+ while (*s != '\0')
+ hash = (hash * FNV_32_PRIME) ^ *s++;
+
+ return hash;
+}
+
+/* Returns a hash of integer I. */
+unsigned
+hash_int (int i)
+{
+ return hash_bytes (&i, sizeof i);
+}
+
+/* Returns the bucket in H that E belongs in. */
+static struct list *
+find_bucket (struct hash *h, struct hash_elem *e)
+{
+ size_t bucket_idx = h->hash (e, h->aux) & (h->bucket_cnt - 1);
+ return &h->buckets[bucket_idx];
+}
+
+/* Searches BUCKET in H for a hash element equal to E. Returns
+ it if found or a null pointer otherwise. */
+static struct hash_elem *
+find_elem (struct hash *h, struct list *bucket, struct hash_elem *e)
+{
+ struct list_elem *i;
+
+ for (i = list_begin (bucket); i != list_end (bucket); i = list_next (i))
+ {
+ struct hash_elem *hi = list_elem_to_hash_elem (i);
+ if (!h->less (hi, e, h->aux) && !h->less (e, hi, h->aux))
+ return hi;
+ }
+ return NULL;
+}
+
+/* Returns X with its lowest-order 1 bit turned off. */
+static inline size_t
+turn_off_least_1bit (size_t x)
+{
+ return x & (x - 1);
+}
+
+/* Returns true if X is a power of 2, otherwise false. */
+static inline size_t
+is_power_of_2 (size_t x)
+{
+ return x != 0 && turn_off_least_1bit (x) == 0;
+}
+
+/* Element per bucket ratios. */
+#define MIN_ELEMS_PER_BUCKET 1 /* Elems/bucket < 1: reduce # of buckets. */
+#define BEST_ELEMS_PER_BUCKET 2 /* Ideal elems/bucket. */
+#define MAX_ELEMS_PER_BUCKET 4 /* Elems/bucket > 4: increase # of buckets. */
+
+/* Changes the number of buckets in hash table H to match the
+ ideal. This function can fail because of an out-of-memory
+ condition, but that'll just make hash accesses less efficient;
+ we can still continue. */
+static void
+rehash (struct hash *h)
+{
+ size_t old_bucket_cnt, new_bucket_cnt;
+ struct list *new_buckets, *old_buckets;
+ size_t i;
+
+ ASSERT (h != NULL);
+
+ /* Save old bucket info for later use. */
+ old_buckets = h->buckets;
+ old_bucket_cnt = h->bucket_cnt;
+
+ /* Calculate the number of buckets to use now.
+ We want one bucket for about every BEST_ELEMS_PER_BUCKET.
+ We must have at least four buckets, and the number of
+ buckets must be a power of 2. */
+ new_bucket_cnt = h->elem_cnt / BEST_ELEMS_PER_BUCKET;
+ if (new_bucket_cnt < 4)
+ new_bucket_cnt = 4;
+ while (!is_power_of_2 (new_bucket_cnt))
+ new_bucket_cnt = turn_off_least_1bit (new_bucket_cnt);
+
+ /* Don't do anything if the bucket count wouldn't change. */
+ if (new_bucket_cnt == old_bucket_cnt)
+ return;
+
+ /* Allocate new buckets and initialize them as empty. */
+ new_buckets = malloc (sizeof *new_buckets * new_bucket_cnt);
+ if (new_buckets == NULL)
+ {
+ /* Allocation failed. This means that use of the hash table will
+ be less efficient. However, it is still usable, so
+ there's no reason for it to be an error. */
+ return;
+ }
+ for (i = 0; i < new_bucket_cnt; i++)
+ list_init (&new_buckets[i]);
+
+ /* Install new bucket info. */
+ h->buckets = new_buckets;
+ h->bucket_cnt = new_bucket_cnt;
+
+ /* Move each old element into the appropriate new bucket. */
+ for (i = 0; i < old_bucket_cnt; i++)
+ {
+ struct list *old_bucket;
+ struct list_elem *elem, *next;
+
+ old_bucket = &old_buckets[i];
+ for (elem = list_begin (old_bucket);
+ elem != list_end (old_bucket); elem = next)
+ {
+ struct list *new_bucket
+ = find_bucket (h, list_elem_to_hash_elem (elem));
+ next = list_next (elem);
+ list_remove (elem);
+ list_push_front (new_bucket, elem);
+ }
+ }
+
+ free (old_buckets);
+}
+
+/* Inserts E into BUCKET (in hash table H). */
+static void
+insert_elem (struct hash *h, struct list *bucket, struct hash_elem *e)
+{
+ h->elem_cnt++;
+ list_push_front (bucket, &e->list_elem);
+}
+
+/* Removes E from hash table H. */
+static void
+remove_elem (struct hash *h, struct hash_elem *e)
+{
+ h->elem_cnt--;
+ list_remove (&e->list_elem);
+}
+
diff --git a/src/lib/kernel/hash.h b/src/lib/kernel/hash.h
new file mode 100644
index 0000000..db9f674
--- /dev/null
+++ b/src/lib/kernel/hash.h
@@ -0,0 +1,103 @@
+#ifndef __LIB_KERNEL_HASH_H
+#define __LIB_KERNEL_HASH_H
+
+/* Hash table.
+
+ This data structure is thoroughly documented in the Tour of
+ Pintos for Project 3.
+
+ This is a standard hash table with chaining. To locate an
+ element in the table, we compute a hash function over the
+ element's data and use that as an index into an array of
+ doubly linked lists, then linearly search the list.
+
+ The chain lists do not use dynamic allocation. Instead, each
+ structure that can potentially be in a hash must embed a
+ struct hash_elem member. All of the hash functions operate on
+ these `struct hash_elem's. The hash_entry macro allows
+ conversion from a struct hash_elem back to a structure object
+ that contains it. This is the same technique used in the
+ linked list implementation. Refer to lib/kernel/list.h for a
+ detailed explanation. */
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include "list.h"
+
+/* Hash element. */
+struct hash_elem
+ {
+ struct list_elem list_elem;
+ };
+
+/* Converts pointer to hash element HASH_ELEM into a pointer to
+ the structure that HASH_ELEM is embedded inside. Supply the
+ name of the outer structure STRUCT and the member name MEMBER
+ of the hash element. See the big comment at the top of the
+ file for an example. */
+#define hash_entry(HASH_ELEM, STRUCT, MEMBER) \
+ ((STRUCT *) ((uint8_t *) &(HASH_ELEM)->list_elem \
+ - offsetof (STRUCT, MEMBER.list_elem)))
+
+/* Computes and returns the hash value for hash element E, given
+ auxiliary data AUX. */
+typedef unsigned hash_hash_func (const struct hash_elem *e, void *aux);
+
+/* Compares the value of two hash elements A and B, given
+ auxiliary data AUX. Returns true if A is less than B, or
+ false if A is greater than or equal to B. */
+typedef bool hash_less_func (const struct hash_elem *a,
+ const struct hash_elem *b,
+ void *aux);
+
+/* Performs some operation on hash element E, given auxiliary
+ data AUX. */
+typedef void hash_action_func (struct hash_elem *e, void *aux);
+
+/* Hash table. */
+struct hash
+ {
+ size_t elem_cnt; /* Number of elements in table. */
+ size_t bucket_cnt; /* Number of buckets, a power of 2. */
+ struct list *buckets; /* Array of `bucket_cnt' lists. */
+ hash_hash_func *hash; /* Hash function. */
+ hash_less_func *less; /* Comparison function. */
+ void *aux; /* Auxiliary data for `hash' and `less'. */
+ };
+
+/* A hash table iterator. */
+struct hash_iterator
+ {
+ struct hash *hash; /* The hash table. */
+ struct list *bucket; /* Current bucket. */
+ struct hash_elem *elem; /* Current hash element in current bucket. */
+ };
+
+/* Basic life cycle. */
+bool hash_init (struct hash *, hash_hash_func *, hash_less_func *, void *aux);
+void hash_clear (struct hash *, hash_action_func *);
+void hash_destroy (struct hash *, hash_action_func *);
+
+/* Search, insertion, deletion. */
+struct hash_elem *hash_insert (struct hash *, struct hash_elem *);
+struct hash_elem *hash_replace (struct hash *, struct hash_elem *);
+struct hash_elem *hash_find (struct hash *, struct hash_elem *);
+struct hash_elem *hash_delete (struct hash *, struct hash_elem *);
+
+/* Iteration. */
+void hash_apply (struct hash *, hash_action_func *);
+void hash_first (struct hash_iterator *, struct hash *);
+struct hash_elem *hash_next (struct hash_iterator *);
+struct hash_elem *hash_cur (struct hash_iterator *);
+
+/* Information. */
+size_t hash_size (struct hash *);
+bool hash_empty (struct hash *);
+
+/* Sample hash functions. */
+unsigned hash_bytes (const void *, size_t);
+unsigned hash_string (const char *);
+unsigned hash_int (int);
+
+#endif /* lib/kernel/hash.h */
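A client of this table embeds a struct hash_elem in its own structure and derives the hash and ordering functions from the element's key, typically with hash_bytes() or hash_int(). A sketch for a hypothetical `struct page' keyed by virtual address, in the spirit of the Project 3 use mentioned above; the structure and names are illustrative, not part of this change:

    #include <hash.h>
    #include <debug.h>                      /* UNUSED */

    struct page
      {
        struct hash_elem hash_elem;         /* Element in `pages'. */
        void *addr;                         /* Virtual address: the key. */
        /* ...other members... */
      };

    static unsigned
    page_hash (const struct hash_elem *e, void *aux UNUSED)
    {
      const struct page *p = hash_entry (e, struct page, hash_elem);
      return hash_bytes (&p->addr, sizeof p->addr);
    }

    static bool
    page_less (const struct hash_elem *a_, const struct hash_elem *b_,
               void *aux UNUSED)
    {
      const struct page *a = hash_entry (a_, struct page, hash_elem);
      const struct page *b = hash_entry (b_, struct page, hash_elem);
      return a->addr < b->addr;
    }

    static struct hash pages;

    void
    pages_init (void)
    {
      hash_init (&pages, page_hash, page_less, NULL);
    }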
diff --git a/src/lib/kernel/list.c b/src/lib/kernel/list.c
new file mode 100644
index 0000000..e9993cb
--- /dev/null
+++ b/src/lib/kernel/list.c
@@ -0,0 +1,532 @@
+#include "list.h"
+#include "../debug.h"
+
+/* Our doubly linked lists have two header elements: the "head"
+ just before the first element and the "tail" just after the
+ last element. The `prev' link of the front header is null, as
+ is the `next' link of the back header. Their other two links
+ point toward each other via the interior elements of the list.
+
+ An empty list looks like this:
+
+ +------+ +------+
+ <---| head |<--->| tail |--->
+ +------+ +------+
+
+ A list with two elements in it looks like this:
+
+ +------+ +-------+ +-------+ +------+
+ <---| head |<--->| 1 |<--->| 2 |<--->| tail |<--->
+ +------+ +-------+ +-------+ +------+
+
+ The symmetry of this arrangement eliminates lots of special
+ cases in list processing. For example, take a look at
+ list_remove(): it takes only two pointer assignments and no
+ conditionals. That's a lot simpler than the code would be
+ without header elements.
+
+ (Because only one of the pointers in each header element is used,
+ we could in fact combine them into a single header element
+ without sacrificing this simplicity. But using two separate
+ elements allows us to do a little bit of checking on some
+ operations, which can be valuable.) */
+
+static bool is_sorted (struct list_elem *a, struct list_elem *b,
+ list_less_func *less, void *aux) UNUSED;
+
+/* Returns true if ELEM is a head, false otherwise. */
+static inline bool
+is_head (struct list_elem *elem)
+{
+ return elem != NULL && elem->prev == NULL && elem->next != NULL;
+}
+
+/* Returns true if ELEM is an interior element,
+ false otherwise. */
+static inline bool
+is_interior (struct list_elem *elem)
+{
+ return elem != NULL && elem->prev != NULL && elem->next != NULL;
+}
+
+/* Returns true if ELEM is a tail, false otherwise. */
+static inline bool
+is_tail (struct list_elem *elem)
+{
+ return elem != NULL && elem->prev != NULL && elem->next == NULL;
+}
+
+/* Initializes LIST as an empty list. */
+void
+list_init (struct list *list)
+{
+ ASSERT (list != NULL);
+ list->head.prev = NULL;
+ list->head.next = &list->tail;
+ list->tail.prev = &list->head;
+ list->tail.next = NULL;
+}
+
+/* Returns the beginning of LIST. */
+struct list_elem *
+list_begin (struct list *list)
+{
+ ASSERT (list != NULL);
+ return list->head.next;
+}
+
+/* Returns the element after ELEM in its list. If ELEM is the
+ last element in its list, returns the list tail. Results are
+ undefined if ELEM is itself a list tail. */
+struct list_elem *
+list_next (struct list_elem *elem)
+{
+ ASSERT (is_head (elem) || is_interior (elem));
+ return elem->next;
+}
+
+/* Returns LIST's tail.
+
+ list_end() is often used in iterating through a list from
+ front to back. See the big comment at the top of list.h for
+ an example. */
+struct list_elem *
+list_end (struct list *list)
+{
+ ASSERT (list != NULL);
+ return &list->tail;
+}
+
+/* Returns the LIST's reverse beginning, for iterating through
+ LIST in reverse order, from back to front. */
+struct list_elem *
+list_rbegin (struct list *list)
+{
+ ASSERT (list != NULL);
+ return list->tail.prev;
+}
+
+/* Returns the element before ELEM in its list. If ELEM is the
+ first element in its list, returns the list head. Results are
+ undefined if ELEM is itself a list head. */
+struct list_elem *
+list_prev (struct list_elem *elem)
+{
+ ASSERT (is_interior (elem) || is_tail (elem));
+ return elem->prev;
+}
+
+/* Returns LIST's head.
+
+ list_rend() is often used in iterating through a list in
+ reverse order, from back to front. Here's typical usage,
+ following the example from the top of list.h:
+
+ for (e = list_rbegin (&foo_list); e != list_rend (&foo_list);
+ e = list_prev (e))
+ {
+ struct foo *f = list_entry (e, struct foo, elem);
+ ...do something with f...
+ }
+*/
+struct list_elem *
+list_rend (struct list *list)
+{
+ ASSERT (list != NULL);
+ return &list->head;
+}
+
+/* Returns LIST's head.
+
+ list_head() can be used for an alternate style of iterating
+ through a list, e.g.:
+
+ e = list_head (&list);
+ while ((e = list_next (e)) != list_end (&list))
+ {
+ ...
+ }
+*/
+struct list_elem *
+list_head (struct list *list)
+{
+ ASSERT (list != NULL);
+ return &list->head;
+}
+
+/* Returns LIST's tail. */
+struct list_elem *
+list_tail (struct list *list)
+{
+ ASSERT (list != NULL);
+ return &list->tail;
+}
+
+/* Inserts ELEM just before BEFORE, which may be either an
+ interior element or a tail. The latter case is equivalent to
+ list_push_back(). */
+void
+list_insert (struct list_elem *before, struct list_elem *elem)
+{
+ ASSERT (is_interior (before) || is_tail (before));
+ ASSERT (elem != NULL);
+
+ elem->prev = before->prev;
+ elem->next = before;
+ before->prev->next = elem;
+ before->prev = elem;
+}
+
+/* Removes elements FIRST though LAST (exclusive) from their
+ current list, then inserts them just before BEFORE, which may
+ be either an interior element or a tail. */
+void
+list_splice (struct list_elem *before,
+ struct list_elem *first, struct list_elem *last)
+{
+ ASSERT (is_interior (before) || is_tail (before));
+ if (first == last)
+ return;
+ last = list_prev (last);
+
+ ASSERT (is_interior (first));
+ ASSERT (is_interior (last));
+
+ /* Cleanly remove FIRST...LAST from its current list. */
+ first->prev->next = last->next;
+ last->next->prev = first->prev;
+
+ /* Splice FIRST...LAST into new list. */
+ first->prev = before->prev;
+ last->next = before;
+ before->prev->next = first;
+ before->prev = last;
+}
+
+/* Inserts ELEM at the beginning of LIST, so that it becomes the
+ front in LIST. */
+void
+list_push_front (struct list *list, struct list_elem *elem)
+{
+ list_insert (list_begin (list), elem);
+}
+
+/* Inserts ELEM at the end of LIST, so that it becomes the
+ back in LIST. */
+void
+list_push_back (struct list *list, struct list_elem *elem)
+{
+ list_insert (list_end (list), elem);
+}
+
+/* Removes ELEM from its list and returns the element that
+ followed it. Undefined behavior if ELEM is not in a list.
+
+ It's not safe to treat ELEM as an element in a list after
+ removing it. In particular, using list_next() or list_prev()
+ on ELEM after removal yields undefined behavior. This means
+ that a naive loop to remove the elements in a list will fail:
+
+ ** DON'T DO THIS **
+ for (e = list_begin (&list); e != list_end (&list); e = list_next (e))
+ {
+ ...do something with e...
+ list_remove (e);
+ }
+ ** DON'T DO THIS **
+
+ Here is one correct way to iterate and remove elements from a
+ list:
+
+ for (e = list_begin (&list); e != list_end (&list); e = list_remove (e))
+ {
+ ...do something with e...
+ }
+
+ If you need to free() elements of the list then you need to be
+ more conservative. Here's an alternate strategy that works
+ even in that case:
+
+ while (!list_empty (&list))
+ {
+ struct list_elem *e = list_pop_front (&list);
+ ...do something with e...
+ }
+*/
+struct list_elem *
+list_remove (struct list_elem *elem)
+{
+ ASSERT (is_interior (elem));
+ elem->prev->next = elem->next;
+ elem->next->prev = elem->prev;
+ return elem->next;
+}
+
+/* Removes the front element from LIST and returns it.
+ Undefined behavior if LIST is empty before removal. */
+struct list_elem *
+list_pop_front (struct list *list)
+{
+ struct list_elem *front = list_front (list);
+ list_remove (front);
+ return front;
+}
+
+/* Removes the back element from LIST and returns it.
+ Undefined behavior if LIST is empty before removal. */
+struct list_elem *
+list_pop_back (struct list *list)
+{
+ struct list_elem *back = list_back (list);
+ list_remove (back);
+ return back;
+}
+
+/* Returns the front element in LIST.
+ Undefined behavior if LIST is empty. */
+struct list_elem *
+list_front (struct list *list)
+{
+ ASSERT (!list_empty (list));
+ return list->head.next;
+}
+
+/* Returns the back element in LIST.
+ Undefined behavior if LIST is empty. */
+struct list_elem *
+list_back (struct list *list)
+{
+ ASSERT (!list_empty (list));
+ return list->tail.prev;
+}
+
+/* Returns the number of elements in LIST.
+ Runs in O(n) in the number of elements. */
+size_t
+list_size (struct list *list)
+{
+ struct list_elem *e;
+ size_t cnt = 0;
+
+ for (e = list_begin (list); e != list_end (list); e = list_next (e))
+ cnt++;
+ return cnt;
+}
+
+/* Returns true if LIST is empty, false otherwise. */
+bool
+list_empty (struct list *list)
+{
+ return list_begin (list) == list_end (list);
+}
+
+/* Swaps the `struct list_elem *'s that A and B point to. */
+static void
+swap (struct list_elem **a, struct list_elem **b)
+{
+ struct list_elem *t = *a;
+ *a = *b;
+ *b = t;
+}
+
+/* Reverses the order of LIST. */
+void
+list_reverse (struct list *list)
+{
+ if (!list_empty (list))
+ {
+ struct list_elem *e;
+
+ for (e = list_begin (list); e != list_end (list); e = e->prev)
+ swap (&e->prev, &e->next);
+ swap (&list->head.next, &list->tail.prev);
+ swap (&list->head.next->prev, &list->tail.prev->next);
+ }
+}
+
+/* Returns true only if the list elements A through B (exclusive)
+ are in order according to LESS given auxiliary data AUX. */
+static bool
+is_sorted (struct list_elem *a, struct list_elem *b,
+ list_less_func *less, void *aux)
+{
+ if (a != b)
+ while ((a = list_next (a)) != b)
+ if (less (a, list_prev (a), aux))
+ return false;
+ return true;
+}
+
+/* Finds a run, starting at A and ending not after B, of list
+ elements that are in nondecreasing order according to LESS
+ given auxiliary data AUX. Returns the (exclusive) end of the
+ run.
+ A through B (exclusive) must form a non-empty range. */
+static struct list_elem *
+find_end_of_run (struct list_elem *a, struct list_elem *b,
+ list_less_func *less, void *aux)
+{
+ ASSERT (a != NULL);
+ ASSERT (b != NULL);
+ ASSERT (less != NULL);
+ ASSERT (a != b);
+
+ do
+ {
+ a = list_next (a);
+ }
+ while (a != b && !less (a, list_prev (a), aux));
+ return a;
+}
+
+/* Merges A0 through A1B0 (exclusive) with A1B0 through B1
+ (exclusive) to form a combined range also ending at B1
+ (exclusive). Both input ranges must be nonempty and sorted in
+ nondecreasing order according to LESS given auxiliary data
+ AUX. The output range will be sorted the same way. */
+static void
+inplace_merge (struct list_elem *a0, struct list_elem *a1b0,
+ struct list_elem *b1,
+ list_less_func *less, void *aux)
+{
+ ASSERT (a0 != NULL);
+ ASSERT (a1b0 != NULL);
+ ASSERT (b1 != NULL);
+ ASSERT (less != NULL);
+ ASSERT (is_sorted (a0, a1b0, less, aux));
+ ASSERT (is_sorted (a1b0, b1, less, aux));
+
+ while (a0 != a1b0 && a1b0 != b1)
+ if (!less (a1b0, a0, aux))
+ a0 = list_next (a0);
+ else
+ {
+ a1b0 = list_next (a1b0);
+ list_splice (a0, list_prev (a1b0), a1b0);
+ }
+}
+
+/* Sorts LIST according to LESS given auxiliary data AUX, using a
+ natural iterative merge sort that runs in O(n lg n) time and
+ O(1) space in the number of elements in LIST. */
+void
+list_sort (struct list *list, list_less_func *less, void *aux)
+{
+ size_t output_run_cnt; /* Number of runs output in current pass. */
+
+ ASSERT (list != NULL);
+ ASSERT (less != NULL);
+
+ /* Pass over the list repeatedly, merging adjacent runs of
+ nondecreasing elements, until only one run is left. */
+ do
+ {
+ struct list_elem *a0; /* Start of first run. */
+ struct list_elem *a1b0; /* End of first run, start of second. */
+ struct list_elem *b1; /* End of second run. */
+
+ output_run_cnt = 0;
+ for (a0 = list_begin (list); a0 != list_end (list); a0 = b1)
+ {
+ /* Each iteration produces one output run. */
+ output_run_cnt++;
+
+ /* Locate two adjacent runs of nondecreasing elements
+ A0...A1B0 and A1B0...B1. */
+ a1b0 = find_end_of_run (a0, list_end (list), less, aux);
+ if (a1b0 == list_end (list))
+ break;
+ b1 = find_end_of_run (a1b0, list_end (list), less, aux);
+
+ /* Merge the runs. */
+ inplace_merge (a0, a1b0, b1, less, aux);
+ }
+ }
+ while (output_run_cnt > 1);
+
+ ASSERT (is_sorted (list_begin (list), list_end (list), less, aux));
+}
+
+/* Inserts ELEM in the proper position in LIST, which must be
+ sorted according to LESS given auxiliary data AUX.
+ Runs in O(n) average case in the number of elements in LIST. */
+void
+list_insert_ordered (struct list *list, struct list_elem *elem,
+ list_less_func *less, void *aux)
+{
+ struct list_elem *e;
+
+ ASSERT (list != NULL);
+ ASSERT (elem != NULL);
+ ASSERT (less != NULL);
+
+ for (e = list_begin (list); e != list_end (list); e = list_next (e))
+ if (less (elem, e, aux))
+ break;
+  list_insert (e, elem);
+}
+
+/* Iterates through LIST and removes all but the first in each
+ set of adjacent elements that are equal according to LESS
+ given auxiliary data AUX. If DUPLICATES is non-null, then the
+ elements from LIST are appended to DUPLICATES. */
+void
+list_unique (struct list *list, struct list *duplicates,
+ list_less_func *less, void *aux)
+{
+ struct list_elem *elem, *next;
+
+ ASSERT (list != NULL);
+ ASSERT (less != NULL);
+ if (list_empty (list))
+ return;
+
+ elem = list_begin (list);
+ while ((next = list_next (elem)) != list_end (list))
+ if (!less (elem, next, aux) && !less (next, elem, aux))
+ {
+ list_remove (next);
+ if (duplicates != NULL)
+ list_push_back (duplicates, next);
+ }
+ else
+ elem = next;
+}
+
+/* Returns the element in LIST with the largest value according
+ to LESS given auxiliary data AUX. If there is more than one
+ maximum, returns the one that appears earlier in the list. If
+ the list is empty, returns its tail. */
+struct list_elem *
+list_max (struct list *list, list_less_func *less, void *aux)
+{
+ struct list_elem *max = list_begin (list);
+ if (max != list_end (list))
+ {
+ struct list_elem *e;
+
+ for (e = list_next (max); e != list_end (list); e = list_next (e))
+ if (less (max, e, aux))
+ max = e;
+ }
+ return max;
+}
+
+/* Returns the element in LIST with the smallest value according
+ to LESS given auxiliary data AUX. If there is more than one
+ minimum, returns the one that appears earlier in the list. If
+ the list is empty, returns its tail. */
+struct list_elem *
+list_min (struct list *list, list_less_func *less, void *aux)
+{
+ struct list_elem *min = list_begin (list);
+ if (min != list_end (list))
+ {
+ struct list_elem *e;
+
+ for (e = list_next (min); e != list_end (list); e = list_next (e))
+ if (less (e, min, aux))
+ min = e;
+ }
+ return min;
+}
diff --git a/src/lib/kernel/list.h b/src/lib/kernel/list.h
new file mode 100644
index 0000000..2388f9a
--- /dev/null
+++ b/src/lib/kernel/list.h
@@ -0,0 +1,168 @@
+#ifndef __LIB_KERNEL_LIST_H
+#define __LIB_KERNEL_LIST_H
+
+/* Doubly linked list.
+
+ This implementation of a doubly linked list does not require
+ use of dynamically allocated memory. Instead, each structure
+ that is a potential list element must embed a struct list_elem
+ member. All of the list functions operate on these `struct
+ list_elem's. The list_entry macro allows conversion from a
+ struct list_elem back to a structure object that contains it.
+
+   For example, suppose there is a need for a list of `struct
+ foo'. `struct foo' should contain a `struct list_elem'
+ member, like so:
+
+ struct foo
+ {
+ struct list_elem elem;
+ int bar;
+ ...other members...
+ };
+
+   Then a list of `struct foo' can be declared and initialized
+ like so:
+
+ struct list foo_list;
+
+ list_init (&foo_list);
+
+ Iteration is a typical situation where it is necessary to
+ convert from a struct list_elem back to its enclosing
+ structure. Here's an example using foo_list:
+
+ struct list_elem *e;
+
+ for (e = list_begin (&foo_list); e != list_end (&foo_list);
+ e = list_next (e))
+ {
+ struct foo *f = list_entry (e, struct foo, elem);
+ ...do something with f...
+ }
+
+ You can find real examples of list usage throughout the
+ source; for example, malloc.c, palloc.c, and thread.c in the
+ threads directory all use lists.
+
+ The interface for this list is inspired by the list<> template
+ in the C++ STL. If you're familiar with list<>, you should
+ find this easy to use. However, it should be emphasized that
+ these lists do *no* type checking and can't do much other
+ correctness checking. If you screw up, it will bite you.
+
+ Glossary of list terms:
+
+ - "front": The first element in a list. Undefined in an
+ empty list. Returned by list_front().
+
+ - "back": The last element in a list. Undefined in an empty
+ list. Returned by list_back().
+
+ - "tail": The element figuratively just after the last
+ element of a list. Well defined even in an empty list.
+ Returned by list_end(). Used as the end sentinel for an
+ iteration from front to back.
+
+ - "beginning": In a non-empty list, the front. In an empty
+ list, the tail. Returned by list_begin(). Used as the
+ starting point for an iteration from front to back.
+
+ - "head": The element figuratively just before the first
+ element of a list. Well defined even in an empty list.
+ Returned by list_rend(). Used as the end sentinel for an
+ iteration from back to front.
+
+ - "reverse beginning": In a non-empty list, the back. In an
+ empty list, the head. Returned by list_rbegin(). Used as
+ the starting point for an iteration from back to front.
+
+ - "interior element": An element that is not the head or
+ tail, that is, a real list element. An empty list does
+ not have any interior elements.
+*/
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+/* List element. */
+struct list_elem
+ {
+ struct list_elem *prev; /* Previous list element. */
+ struct list_elem *next; /* Next list element. */
+ };
+
+/* List. */
+struct list
+ {
+ struct list_elem head; /* List head. */
+ struct list_elem tail; /* List tail. */
+ };
+
+/* Converts pointer to list element LIST_ELEM into a pointer to
+ the structure that LIST_ELEM is embedded inside. Supply the
+ name of the outer structure STRUCT and the member name MEMBER
+ of the list element. See the big comment at the top of the
+ file for an example. */
+#define list_entry(LIST_ELEM, STRUCT, MEMBER) \
+ ((STRUCT *) ((uint8_t *) &(LIST_ELEM)->next \
+ - offsetof (STRUCT, MEMBER.next)))
+
+void list_init (struct list *);
+
+/* List traversal. */
+struct list_elem *list_begin (struct list *);
+struct list_elem *list_next (struct list_elem *);
+struct list_elem *list_end (struct list *);
+
+struct list_elem *list_rbegin (struct list *);
+struct list_elem *list_prev (struct list_elem *);
+struct list_elem *list_rend (struct list *);
+
+struct list_elem *list_head (struct list *);
+struct list_elem *list_tail (struct list *);
+
+/* List insertion. */
+void list_insert (struct list_elem *, struct list_elem *);
+void list_splice (struct list_elem *before,
+ struct list_elem *first, struct list_elem *last);
+void list_push_front (struct list *, struct list_elem *);
+void list_push_back (struct list *, struct list_elem *);
+
+/* List removal. */
+struct list_elem *list_remove (struct list_elem *);
+struct list_elem *list_pop_front (struct list *);
+struct list_elem *list_pop_back (struct list *);
+
+/* List elements. */
+struct list_elem *list_front (struct list *);
+struct list_elem *list_back (struct list *);
+
+/* List properties. */
+size_t list_size (struct list *);
+bool list_empty (struct list *);
+
+/* Miscellaneous. */
+void list_reverse (struct list *);
+
+/* Compares the value of two list elements A and B, given
+ auxiliary data AUX. Returns true if A is less than B, or
+ false if A is greater than or equal to B. */
+typedef bool list_less_func (const struct list_elem *a,
+ const struct list_elem *b,
+ void *aux);
+
+/* Operations on lists with ordered elements. */
+void list_sort (struct list *,
+ list_less_func *, void *aux);
+void list_insert_ordered (struct list *, struct list_elem *,
+ list_less_func *, void *aux);
+void list_unique (struct list *, struct list *duplicates,
+ list_less_func *, void *aux);
+
+/* Max and min. */
+struct list_elem *list_max (struct list *, list_less_func *, void *aux);
+struct list_elem *list_min (struct list *, list_less_func *, void *aux);
+
+#endif /* lib/kernel/list.h */
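The ordered operations declared near the end of this header all take a list_less_func. Continuing the `struct foo' example from the comment at the top of the file, a comparator and two ways to use it might look like the sketch below (UNUSED comes from lib/debug.h; foo_list and f are assumed to exist in the caller, and nothing here is part of this change):

    /* Orders two `struct foo's by their `bar' member. */
    static bool
    foo_less (const struct list_elem *a_, const struct list_elem *b_,
              void *aux UNUSED)
    {
      const struct foo *a = list_entry (a_, struct foo, elem);
      const struct foo *b = list_entry (b_, struct foo, elem);
      return a->bar < b->bar;
    }

    /* Inside a function that owns foo_list and a `struct foo *f':
       keep the list sorted as elements arrive... */
    list_insert_ordered (&foo_list, &f->elem, foo_less, NULL);

    /* ...or sort it all at once, then drop adjacent duplicates. */
    list_sort (&foo_list, foo_less, NULL);
    list_unique (&foo_list, NULL, foo_less, NULL);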
diff --git a/src/lib/kernel/slist.c b/src/lib/kernel/slist.c
new file mode 100644
index 0000000..392f08a
--- /dev/null
+++ b/src/lib/kernel/slist.c
@@ -0,0 +1,153 @@
+ #include "slist.h"
+ #include "threads/malloc.h"
+ #include <stdio.h>
+
+ /* List structure */
+ struct Node
+ {
+ ListElement Element;
+ Position Next;
+ };
+
+ /* make empty list */
+
+ SList
+ MakeEmpty( SList L )
+ {
+ if( L != NULL )
+ DeleteList( L );
+ L = malloc( sizeof( struct Node ) );
+ if( L == NULL ) {
+ printf( "Out of memory!\n" );
+ return NULL;
+ }
+ L->Next = NULL;
+ return L;
+ }
+
+ /* Return true if L is empty */
+
+ int
+ IsEmpty( SList L )
+ {
+ return L->Next == NULL;
+ }
+
+ /* Return true if P is the last position in SList L */
+ /* Parameter L is unused in this implementation */
+
+ int IsLast( Position P, UNUSED SList L)
+ {
+ return P->Next == NULL;
+ }
+
+ /* Return Position of X in L; NULL if not found */
+
+ Position
+ Find( ListElement X, SList L )
+ {
+ Position P;
+
+ P = L->Next;
+ while( P != NULL && P->Element != X )
+ P = P->Next;
+
+ return P;
+ }
+
+ /* Delete from a SList */
+ /* Cell pointed to by P->Next is wiped out */
+ /* Assume that the position is legal */
+ /* Assume use of a header node */
+
+ void
+ Delete( ListElement X, SList L )
+ {
+ Position P, TmpCell;
+
+ P = FindPrevious( X, L );
+
+ if( !IsLast( P, L ) ) /* Assumption of header use */
+ { /* X is found; delete it */
+ TmpCell = P->Next;
+ P->Next = TmpCell->Next; /* Bypass deleted cell */
+ free( TmpCell );
+ }
+ }
+
+ /* If X is not found, then Next field of returned value is NULL */
+ /* Assumes a header */
+
+ Position
+ FindPrevious( ListElement X, SList L )
+ {
+ Position P;
+
+ P = L;
+ while( P->Next != NULL && P->Next->Element != X )
+ P = P->Next;
+
+ return P;
+ }
+
+ /* Insert (after legal position P) */
+ /* Header implementation assumed */
+ /* Parameter L is unused in this implementation */
+
+ void
+ Insert( ListElement X, UNUSED SList L, Position P )
+ {
+ Position TmpCell;
+
+ TmpCell = malloc( sizeof( struct Node ) );
+ if( TmpCell == NULL ) {
+ printf( "Out of space!!!\n" );
+ return;
+ }
+
+ TmpCell->Element = X;
+ TmpCell->Next = P->Next;
+ P->Next = TmpCell;
+ }
+
+ /* DeleteSList algorithm */
+
+ void
+ DeleteList( SList L )
+ {
+ Position P, Tmp;
+
+ P = L->Next; /* Header assumed */
+ L->Next = NULL;
+ while( P != NULL )
+ {
+ Tmp = P->Next;
+ free( P );
+ P = Tmp;
+ }
+ }
+
+ Position
+ Header( SList L )
+ {
+ return L;
+ }
+
+ Position
+ First( SList L )
+ {
+ return L->Next;
+ }
+
+ Position
+ Advance( Position P )
+ {
+ return P->Next;
+ }
+
+ ListElement
+ Retrieve( Position P )
+ {
+ return P->Element;
+ }
diff --git a/src/lib/kernel/slist.h b/src/lib/kernel/slist.h
new file mode 100644
index 0000000..07b13c5
--- /dev/null
+++ b/src/lib/kernel/slist.h
@@ -0,0 +1,25 @@
+/* SList -- simple list for students */
+ #ifndef _SList_H
+ #define _SList_H
+
+ typedef void * ListElement;
+
+ struct Node;
+ typedef struct Node *PtrToNode;
+ typedef PtrToNode SList;
+ typedef PtrToNode Position;
+
+ SList MakeEmpty( SList L );
+ int IsEmpty( SList L );
+ int IsLast( Position P, SList L);
+ Position Find( ListElement X, SList L );
+ void Delete( ListElement X, SList L );
+ Position FindPrevious( ListElement X, SList L );
+ void Insert( ListElement X, SList L, Position P );
+ void DeleteList( SList L );
+ Position Header( SList L );
+ Position First( SList L );
+ Position Advance( Position P );
+ ListElement Retrieve( Position P );
+
+ #endif /* _SList_H */
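The SList interface always works through the header node returned by MakeEmpty(); Insert() places an element after a given Position, and Find()/Delete() compare elements by pointer, since ListElement is just a void pointer. A short usage sketch; the stored strings and the demo function are illustrative, not part of this change:

     #include "slist.h"
     #include <stdio.h>

     static void
     slist_demo (void)
     {
       char *alpha = "alpha", *beta = "beta";
       SList l = MakeEmpty (NULL);
       Position p;

       Insert (alpha, l, Header (l));             /* List: alpha      */
       Insert (beta, l, Advance (Header (l)));    /* List: alpha beta */

       for (p = First (l); p != NULL; p = Advance (p))
         printf ("%s\n", (char *) Retrieve (p));

       Delete (alpha, l);     /* Matches by pointer, so pass ALPHA itself. */
       DeleteList (l);        /* Frees every cell after the header. */
     }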
diff --git a/src/lib/kernel/stdio.h b/src/lib/kernel/stdio.h
new file mode 100644
index 0000000..3e5bae9
--- /dev/null
+++ b/src/lib/kernel/stdio.h
@@ -0,0 +1,6 @@
+#ifndef __LIB_KERNEL_STDIO_H
+#define __LIB_KERNEL_STDIO_H
+
+void putbuf (const char *, size_t);
+
+#endif /* lib/kernel/stdio.h */