commit d4522b8e9854178473adcea0fbb84f23f6e744bd
tree   fbcf620617c5023154eba3f965b3a982daa64a47 /src/threads
Author:    Felipe Boeira <felipe.boeira@liu.se>  2019-01-08 18:39:03 +0100
Committer: Felipe Boeira <felipe.boeira@liu.se>  2019-01-08 18:39:03 +0100

    Initial commit
Diffstat (limited to 'src/threads')
 src/threads/.cvsignore      |   3 +
 src/threads/Make.vars       |   7 +
 src/threads/Makefile        |   1 +
 src/threads/bochsrc.txt     |  10 +
 src/threads/boundedbuffer.c |  35 +
 src/threads/boundedbuffer.h |  24 +
 src/threads/copyright.h     |  24 +
 src/threads/flags.h         |   8 +
 src/threads/init.c          | 398 +
 src/threads/init.h          |  20 +
 src/threads/interrupt.c     | 419 +
 src/threads/interrupt.h     |  70 +
 src/threads/intr-stubs.S    | 204 +
 src/threads/intr-stubs.h    |  19 +
 src/threads/io.h            | 173 +
 src/threads/kernel.lds.S    |  26 +
 src/threads/loader.S        | 349 +
 src/threads/loader.h        |  38 +
 src/threads/malloc.c        | 294 +
 src/threads/malloc.h        |  13 +
 src/threads/palloc.c        | 189 +
 src/threads/palloc.h        |  23 +
 src/threads/pte.h           | 107 +
 src/threads/start.S         |  16 +
 src/threads/switch.S        |  65 +
 src/threads/switch.h        |  39 +
 src/threads/synch.c         | 338 +
 src/threads/synch.h         |  51 +
 src/threads/synchlist.c     |  98 +
 src/threads/synchlist.h     |  39 +
 src/threads/thread.c        | 553 +
 src/threads/thread.h        | 136 +
 src/threads/vaddr.h         |  89 +
 33 files changed, 3878 insertions(+), 0 deletions(-)
diff --git a/src/threads/.cvsignore b/src/threads/.cvsignore
new file mode 100644
index 0000000..6d5357c
--- /dev/null
+++ b/src/threads/.cvsignore
@@ -0,0 +1,3 @@
+build
+bochsrc.txt
+bochsout.txt
diff --git a/src/threads/Make.vars b/src/threads/Make.vars
new file mode 100644
index 0000000..c9cb81f
--- /dev/null
+++ b/src/threads/Make.vars
@@ -0,0 +1,7 @@
+# -*- makefile -*-
+
+os.dsk: DEFINES =
+KERNEL_SUBDIRS = threads devices lib lib/kernel $(TEST_SUBDIRS)
+TEST_SUBDIRS = tests/threads
+GRADING_FILE = $(SRCDIR)/tests/threads/Grading
+SIMULATOR = --qemu
diff --git a/src/threads/Makefile b/src/threads/Makefile
new file mode 100644
index 0000000..34c10aa
--- /dev/null
+++ b/src/threads/Makefile
@@ -0,0 +1 @@
+include ../Makefile.kernel
diff --git a/src/threads/bochsrc.txt b/src/threads/bochsrc.txt
new file mode 100644
index 0000000..2ac8738
--- /dev/null
+++ b/src/threads/bochsrc.txt
@@ -0,0 +1,10 @@
+romimage: file=$BXSHARE/BIOS-bochs-latest, address=0xf0000
+vgaromimage: file=$BXSHARE/VGABIOS-lgpl-latest
+boot: disk
+cpu: ips=1000000
+megs: 4
+log: bochsout.txt
+panic: action=fatal
+clock: sync=none, time0=0
+ata0-master: type=disk, path=/tmp/c9HlBlQTJq.dsk, mode=flat, cylinders=1, heads=16, spt=63, translation=none
+com1: enabled=1, mode=term, dev=/dev/stdout
diff --git a/src/threads/boundedbuffer.c b/src/threads/boundedbuffer.c
new file mode 100644
index 0000000..36c2e24
--- /dev/null
+++ b/src/threads/boundedbuffer.c
@@ -0,0 +1,35 @@
+// boundedbuffer.c
+// This is a skeleton for the implementation of a bounded buffer, used to
+// test the synchronization primitives (condition variable and lock).
+//
+// Your implementation of the bounded buffer should be thread-safe. For
+// an example, refer to the synchlist implementation.
+//
+// Created by Andrzej Bednarski
+//
+// Modified by Vlad Jahundovics (translation from C++ to C)
+
+#include "threads/boundedbuffer.h"
+#include "threads/malloc.h"
+
+void bb_init(struct bounded_buffer *bb, int _size)
+{
+ bb->size = _size;
+ // write your code here
+}
+
+void bb_destroy(struct bounded_buffer *bb)
+{
+ // write your code here
+}
+
+int bb_read(struct bounded_buffer *bb)
+{
+ // write your code here
+ return 0; // placeholder return so the skeleton compiles
+}
+
+void bb_write(struct bounded_buffer *bb, int value)
+{
+ // write your code here
+}
+
diff --git a/src/threads/boundedbuffer.h b/src/threads/boundedbuffer.h
new file mode 100644
index 0000000..7508d1c
--- /dev/null
+++ b/src/threads/boundedbuffer.h
@@ -0,0 +1,24 @@
+//
+// Skeleton of the bounded buffer for laboratory assignment 1 of Pintos
+//
+// Created by Andrzej Bednarski
+//
+// Modified by Vlad Jahundovics for Pintos (translation from C++ to C)
+
+#ifndef BOUNDEDBUFFER_H
+#define BOUNDEDBUFFER_H
+
+#include "threads/synch.h"
+
+struct bounded_buffer {
+ int size;
+ // You may add other data fields required for the bounded buffer
+
+};
+
+void bb_init(struct bounded_buffer *, int);
+int bb_read(struct bounded_buffer *);
+void bb_write(struct bounded_buffer *, int);
+void bb_destroy(struct bounded_buffer *);
+
+#endif
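
The skeleton above leaves all four operations unimplemented. As a rough sketch of where a solution can go (not the assignment's reference solution), the classic monitor pattern uses one lock and two condition variables from threads/synch.h; the circular-buffer fields below are assumptions that would have to be added to struct bounded_buffer:

    /* Hypothetical extra fields for struct bounded_buffer:
         int *data;                   -- circular storage, `size' slots
         int head, tail, count;       -- read index, write index, occupancy
         struct lock lock;            -- protects every field
         struct condition not_full;   -- signaled when a slot frees up
         struct condition not_empty;  -- signaled when a value arrives */

    void
    bb_init (struct bounded_buffer *bb, int size)
    {
      bb->size = size;
      bb->data = malloc (size * sizeof *bb->data);
      bb->head = bb->tail = bb->count = 0;
      lock_init (&bb->lock);
      cond_init (&bb->not_full);
      cond_init (&bb->not_empty);
    }

    int
    bb_read (struct bounded_buffer *bb)
    {
      int value;

      lock_acquire (&bb->lock);
      while (bb->count == 0)              /* Sleep until a value arrives. */
        cond_wait (&bb->not_empty, &bb->lock);
      value = bb->data[bb->head];
      bb->head = (bb->head + 1) % bb->size;
      bb->count--;
      cond_signal (&bb->not_full, &bb->lock);
      lock_release (&bb->lock);
      return value;
    }

    void
    bb_write (struct bounded_buffer *bb, int value)
    {
      lock_acquire (&bb->lock);
      while (bb->count == bb->size)       /* Sleep until a slot frees up. */
        cond_wait (&bb->not_full, &bb->lock);
      bb->data[bb->tail] = value;
      bb->tail = (bb->tail + 1) % bb->size;
      bb->count++;
      cond_signal (&bb->not_empty, &bb->lock);
      lock_release (&bb->lock);
    }

    void
    bb_destroy (struct bounded_buffer *bb)
    {
      free (bb->data);
    }

The while loops (rather than ifs) matter: Pintos condition variables have Mesa semantics, so a woken thread must re-check its predicate before proceeding.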
diff --git a/src/threads/copyright.h b/src/threads/copyright.h
new file mode 100644
index 0000000..a9a606b
--- /dev/null
+++ b/src/threads/copyright.h
@@ -0,0 +1,24 @@
+/*
+Copyright (c) 1992-1993 The Regents of the University of California.
+All rights reserved.
+
+Permission to use, copy, modify, and distribute this software and its
+documentation for any purpose, without fee, and without written agreement is
+hereby granted, provided that the above copyright notice and the following
+two paragraphs appear in all copies of this software.
+
+IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
+DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
+OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF
+CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
+INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
+AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
+ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
+PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+*/
+
+#ifdef MAIN /* include the copyright message in every executable */
+static char *copyright = "Copyright (c) 1992-1993 The Regents of the University of California. All rights reserved.";
+#endif // MAIN
diff --git a/src/threads/flags.h b/src/threads/flags.h
new file mode 100644
index 0000000..5654ac7
--- /dev/null
+++ b/src/threads/flags.h
@@ -0,0 +1,8 @@
+#ifndef THREADS_FLAGS_H
+#define THREADS_FLAGS_H
+
+/* EFLAGS Register. */
+#define FLAG_MBS 0x00000002 /* Must be set. */
+#define FLAG_IF 0x00000200 /* Interrupt Flag. */
+
+#endif /* threads/flags.h */
diff --git a/src/threads/init.c b/src/threads/init.c
new file mode 100644
index 0000000..0e97c39
--- /dev/null
+++ b/src/threads/init.c
@@ -0,0 +1,398 @@
+#include "threads/init.h"
+#include <console.h>
+#include <debug.h>
+#include <limits.h>
+#include <random.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "devices/kbd.h"
+#include "devices/input.h"
+#include "devices/serial.h"
+#include "devices/timer.h"
+#include "devices/vga.h"
+#include "threads/interrupt.h"
+#include "threads/io.h"
+#include "threads/loader.h"
+#include "threads/malloc.h"
+#include "threads/palloc.h"
+#include "threads/pte.h"
+#include "threads/thread.h"
+#ifdef USERPROG
+#include "userprog/process.h"
+#include "userprog/exception.h"
+#include "userprog/gdt.h"
+#include "userprog/syscall.h"
+#include "userprog/tss.h"
+#else
+#include "tests/threads/tests.h"
+#endif
+#ifdef FILESYS
+#include "devices/disk.h"
+#include "filesys/filesys.h"
+#include "filesys/fsutil.h"
+#endif
+
+/* Amount of physical memory, in 4 kB pages. */
+size_t ram_pages;
+
+/* Page directory with kernel mappings only. */
+uint32_t *base_page_dir;
+
+#ifdef FILESYS
+/* -f: Format the file system? */
+static bool format_filesys;
+#endif
+
+/* -q: Power off after kernel tasks complete? */
+bool power_off_when_done;
+
+static void ram_init (void);
+static void paging_init (void);
+
+static char **read_command_line (void);
+static char **parse_options (char **argv);
+static void run_actions (char **argv);
+static void usage (void);
+
+static void print_stats (void);
+
+
+int main (void) NO_RETURN;
+
+/* Pintos main program. */
+int
+main (void)
+{
+ char **argv;
+
+ /* Clear BSS and get machine's RAM size. */
+ ram_init ();
+
+ /* Break command line into arguments and parse options. */
+ argv = read_command_line ();
+ argv = parse_options (argv);
+
+ /* Initialize ourselves as a thread so we can use locks,
+ then enable console locking. */
+ thread_init ();
+ console_init ();
+
+ /* Greet user. */
+ printf ("Pintos booting with %'zu kB RAM...\n", ram_pages * PGSIZE / 1024);
+
+ /* Initialize memory system. */
+ palloc_init ();
+ malloc_init ();
+ paging_init ();
+
+ /* Segmentation. */
+#ifdef USERPROG
+ tss_init ();
+ gdt_init ();
+#endif
+
+ /* Initialize interrupt handlers. */
+ intr_init ();
+ timer_init ();
+ kbd_init ();
+ input_init ();
+#ifdef USERPROG
+ exception_init ();
+ syscall_init ();
+#endif
+
+ /* Start thread scheduler and enable interrupts. */
+ thread_start ();
+ serial_init_queue ();
+ timer_calibrate ();
+
+#ifdef FILESYS
+ /* Initialize file system. */
+ disk_init ();
+ filesys_init (format_filesys);
+#endif
+
+ printf ("Boot complete.\n");
+
+ /* Run actions specified on kernel command line. */
+ run_actions (argv);
+
+ /* Finish up. */
+ if (power_off_when_done)
+ power_off ();
+ thread_exit ();
+}
+
+/* Clear BSS and obtain RAM size from loader. */
+static void
+ram_init (void)
+{
+ /* The "BSS" is a segment that should be initialized to zeros.
+ It isn't actually stored on disk or zeroed by the kernel
+ loader, so we have to zero it ourselves.
+
+ The start and end of the BSS segment are recorded by the
+ linker as _start_bss and _end_bss. See kernel.lds. */
+ extern char _start_bss, _end_bss;
+ memset (&_start_bss, 0, &_end_bss - &_start_bss);
+
+ /* Get RAM size from loader. See loader.S. */
+ ram_pages = *(uint32_t *) ptov (LOADER_RAM_PGS);
+}
+
+/* Populates the base page directory and page table with the
+ kernel virtual mapping, and then sets up the CPU to use the
+ new page directory. Points base_page_dir to the page
+ directory it creates.
+
+ At the time this function is called, the active page table
+ (set up by loader.S) only maps the first 4 MB of RAM, so we
+ should not try to use extravagant amounts of memory.
+ Fortunately, there is no need to do so. */
+static void
+paging_init (void)
+{
+ uint32_t *pd, *pt;
+ size_t page;
+ extern char _start, _end_kernel_text;
+
+ pd = base_page_dir = palloc_get_page (PAL_ASSERT | PAL_ZERO);
+ pt = NULL;
+ for (page = 0; page < ram_pages; page++)
+ {
+ uintptr_t paddr = page * PGSIZE;
+ char *vaddr = ptov (paddr);
+ size_t pde_idx = pd_no (vaddr);
+ size_t pte_idx = pt_no (vaddr);
+ bool in_kernel_text = &_start <= vaddr && vaddr < &_end_kernel_text;
+
+ if (pd[pde_idx] == 0)
+ {
+ pt = palloc_get_page (PAL_ASSERT | PAL_ZERO);
+ pd[pde_idx] = pde_create (pt);
+ }
+
+ pt[pte_idx] = pte_create_kernel (vaddr, !in_kernel_text);
+ }
+
+ /* Store the physical address of the page directory into CR3
+ aka PDBR (page directory base register). This activates our
+ new page tables immediately. See [IA32-v2a] "MOV--Move
+ to/from Control Registers" and [IA32-v3a] 3.7.5 "Base Address
+ of the Page Directory". */
+ asm volatile ("movl %0, %%cr3" : : "r" (vtop (base_page_dir)));
+}
+
+/* Breaks the kernel command line into words and returns them as
+ an argv-like array. */
+static char **
+read_command_line (void)
+{
+ static char *argv[LOADER_ARGS_LEN / 2 + 1];
+ char *p, *end;
+ int argc;
+ int i;
+
+ argc = *(uint32_t *) ptov (LOADER_ARG_CNT);
+ p = ptov (LOADER_ARGS);
+ end = p + LOADER_ARGS_LEN;
+ for (i = 0; i < argc; i++)
+ {
+ if (p >= end)
+ PANIC ("command line arguments overflow");
+
+ argv[i] = p;
+ p += strnlen (p, end - p) + 1;
+ }
+ argv[argc] = NULL;
+
+ /* Print kernel command line. */
+ printf ("Kernel command line:");
+ for (i = 0; i < argc; i++)
+ if (strchr (argv[i], ' ') == NULL)
+ printf (" %s", argv[i]);
+ else
+ printf (" '%s'", argv[i]);
+ printf ("\n");
+
+ return argv;
+}
+
+/* Parses options in ARGV[]
+ and returns the first non-option argument. */
+static char **
+parse_options (char **argv)
+{
+ for (; *argv != NULL && **argv == '-'; argv++)
+ {
+ char *save_ptr;
+ char *name = strtok_r (*argv, "=", &save_ptr);
+ char *value = strtok_r (NULL, "", &save_ptr);
+
+ if (!strcmp (name, "-h"))
+ usage ();
+ else if (!strcmp (name, "-q"))
+ power_off_when_done = true;
+#ifdef FILESYS
+ else if (!strcmp (name, "-f"))
+ format_filesys = true;
+#endif
+ else if (!strcmp (name, "-rs"))
+ random_init (atoi (value));
+ else if (!strcmp (name, "-mlfqs"))
+ thread_mlfqs = true;
+#ifdef USERPROG
+ else if (!strcmp (name, "-ul"))
+ user_page_limit = atoi (value);
+#endif
+ else
+ PANIC ("unknown option `%s' (use -h for help)", name);
+ }
+
+ return argv;
+}
+
+/* Runs the task specified in ARGV[1]. */
+static void
+run_task (char **argv)
+{
+ const char *task = argv[1];
+
+ printf ("Executing '%s':\n", task);
+#ifdef USERPROG
+ process_wait (process_execute (task));
+#else
+ run_test (task);
+#endif
+ printf ("Execution of '%s' complete.\n", task);
+}
+
+/* Executes all of the actions specified in ARGV[]
+ up to the null pointer sentinel. */
+static void
+run_actions (char **argv)
+{
+ /* An action. */
+ struct action
+ {
+ char *name; /* Action name. */
+ int argc; /* # of args, including action name. */
+ void (*function) (char **argv); /* Function to execute action. */
+ };
+
+ /* Table of supported actions. */
+ static const struct action actions[] =
+ {
+ {"run", 2, run_task},
+#ifdef FILESYS
+ {"ls", 1, fsutil_ls},
+ {"cat", 2, fsutil_cat},
+ {"rm", 2, fsutil_rm},
+ {"put", 2, fsutil_put},
+ {"get", 2, fsutil_get},
+#endif
+ {NULL, 0, NULL},
+ };
+
+ while (*argv != NULL)
+ {
+ const struct action *a;
+ int i;
+
+ /* Find action name. */
+ for (a = actions; ; a++)
+ if (a->name == NULL)
+ PANIC ("unknown action `%s' (use -h for help)", *argv);
+ else if (!strcmp (*argv, a->name))
+ break;
+
+ /* Check for required arguments. */
+ for (i = 1; i < a->argc; i++)
+ if (argv[i] == NULL)
+ PANIC ("action `%s' requires %d argument(s)", *argv, a->argc - 1);
+
+ /* Invoke action and advance. */
+ a->function (argv);
+ argv += a->argc;
+ }
+
+}
+
+/* Prints a kernel command line help message and powers off the
+ machine. */
+static void
+usage (void)
+{
+ printf ("\nCommand line syntax: [OPTION...] [ACTION...]\n"
+ "Options must precede actions.\n"
+ "Actions are executed in the order specified.\n"
+ "\nAvailable actions:\n"
+#ifdef USERPROG
+ " run 'PROG [ARG...]' Run PROG and wait for it to complete.\n"
+#else
+ " run TEST Run TEST.\n"
+#endif
+#ifdef FILESYS
+ " ls List files in the root directory.\n"
+ " cat FILE Print FILE to the console.\n"
+ " rm FILE Delete FILE.\n"
+ "Use these actions indirectly via `pintos' -g and -p options:\n"
+ " put FILE Put FILE into file system from scratch disk.\n"
+ " get FILE Get FILE from file system into scratch disk.\n"
+#endif
+ "\nOptions:\n"
+ " -h Print this help message and power off.\n"
+ " -q Power off VM after actions or on panic.\n"
+ " -f Format file system disk during startup.\n"
+ " -rs=SEED Set random number seed to SEED.\n"
+ " -mlfqs Use multi-level feedback queue scheduler.\n"
+#ifdef USERPROG
+ " -ul=COUNT Limit user memory to COUNT pages.\n"
+#endif
+ );
+ power_off ();
+}
+
+
+/* Powers down the machine we're running on,
+ as long as we're running on Bochs or QEMU. */
+void
+power_off (void)
+{
+ const char s[] = "Shutdown";
+ const char *p;
+
+#ifdef FILESYS
+ filesys_done ();
+#endif
+
+ print_stats ();
+
+ printf ("Powering off...\n");
+ serial_flush ();
+ outw (0xB004, 0x2000);
+ for (p = s; *p != '\0'; p++)
+ outb (0x8900, *p);
+ asm volatile ("cli; hlt" : : : "memory");
+ printf ("still running...\n");
+ for (;;);
+}
+
+/* Print statistics about Pintos execution. */
+static void
+print_stats (void)
+{
+ timer_print_stats ();
+ thread_print_stats ();
+#ifdef FILESYS
+ disk_print_stats ();
+#endif
+ console_print_stats ();
+ kbd_print_stats ();
+#ifdef USERPROG
+ exception_print_stats ();
+#endif
+}
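
To trace a concrete invocation through the functions above: a kernel command line such as "-q run alarm-multiple" (alarm-multiple is one of the tests under tests/threads, used here only as an illustration) is split by read_command_line() into {"-q", "run", "alarm-multiple"}. parse_options() consumes "-q" and sets power_off_when_done, run_actions() matches "run" against the action table and calls run_task() with "alarm-multiple" as ARGV[1], and main() then powers the machine off.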
diff --git a/src/threads/init.h b/src/threads/init.h
new file mode 100644
index 0000000..a6fec05
--- /dev/null
+++ b/src/threads/init.h
@@ -0,0 +1,20 @@
+#ifndef THREADS_INIT_H
+#define THREADS_INIT_H
+
+#include <debug.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+
+/* Physical memory size, in 4 kB pages. */
+extern size_t ram_pages;
+
+/* Page directory with kernel mappings only. */
+extern uint32_t *base_page_dir;
+
+/* -q: Power off when kernel tasks complete? */
+extern bool power_off_when_done;
+
+void power_off (void) NO_RETURN;
+
+#endif /* threads/init.h */
diff --git a/src/threads/interrupt.c b/src/threads/interrupt.c
new file mode 100644
index 0000000..075962f
--- /dev/null
+++ b/src/threads/interrupt.c
@@ -0,0 +1,419 @@
+#include "threads/interrupt.h"
+#include <debug.h>
+#include <inttypes.h>
+#include <stdint.h>
+#include <stdio.h>
+#include "threads/flags.h"
+#include "threads/intr-stubs.h"
+#include "threads/io.h"
+#include "threads/thread.h"
+#include "threads/vaddr.h"
+#include "devices/timer.h"
+
+/* Number of x86 interrupts. */
+#define INTR_CNT 256
+
+/* The Interrupt Descriptor Table (IDT). The format is fixed by
+ the CPU. See [IA32-v3a] sections 5.10 "Interrupt Descriptor
+ Table (IDT)", 5.11 "IDT Descriptors", 5.12.1.2 "Flag Usage By
+ Exception- or Interrupt-Handler Procedure". */
+static uint64_t idt[INTR_CNT];
+
+/* Interrupt handler functions for each interrupt. */
+static intr_handler_func *intr_handlers[INTR_CNT];
+
+/* Names for each interrupt, for debugging purposes. */
+static const char *intr_names[INTR_CNT];
+
+/* External interrupts are those generated by devices outside the
+ CPU, such as the timer. External interrupts run with
+ interrupts turned off, so they never nest, nor are they ever
+ pre-empted. Handlers for external interrupts also may not
+ sleep, although they may invoke intr_yield_on_return() to
+ request that a new process be scheduled just before the
+ interrupt returns. */
+static bool in_external_intr; /* Are we processing an external interrupt? */
+static bool yield_on_return; /* Should we yield on interrupt return? */
+
+/* Programmable Interrupt Controller helpers. */
+static void pic_init (void);
+static void pic_end_of_interrupt (int irq);
+
+/* Interrupt Descriptor Table helpers. */
+static uint64_t make_intr_gate (void (*) (void), int dpl);
+static uint64_t make_trap_gate (void (*) (void), int dpl);
+static inline uint64_t make_idtr_operand (uint16_t limit, void *base);
+
+/* Interrupt handlers. */
+void intr_handler (struct intr_frame *args);
+
+/* Returns the current interrupt status. */
+enum intr_level
+intr_get_level (void)
+{
+ uint32_t flags;
+
+ /* Push the flags register on the processor stack, then pop the
+ value off the stack into `flags'. See [IA32-v2b] "PUSHF"
+ and "POP" and [IA32-v3a] 5.8.1 "Masking Maskable Hardware
+ Interrupts". */
+ asm volatile ("pushfl; popl %0" : "=g" (flags));
+
+ return flags & FLAG_IF ? INTR_ON : INTR_OFF;
+}
+
+/* Enables or disables interrupts as specified by LEVEL and
+ returns the previous interrupt status. */
+enum intr_level
+intr_set_level (enum intr_level level)
+{
+ return level == INTR_ON ? intr_enable () : intr_disable ();
+}
+
+/* Enables interrupts and returns the previous interrupt status. */
+enum intr_level
+intr_enable (void)
+{
+ enum intr_level old_level = intr_get_level ();
+ ASSERT (!intr_context ());
+
+ /* Enable interrupts by setting the interrupt flag.
+
+ See [IA32-v2b] "STI" and [IA32-v3a] 5.8.1 "Masking Maskable
+ Hardware Interrupts". */
+ asm volatile ("sti");
+
+ return old_level;
+}
+
+/* Disables interrupts and returns the previous interrupt status. */
+enum intr_level
+intr_disable (void)
+{
+ enum intr_level old_level = intr_get_level ();
+
+ /* Disable interrupts by clearing the interrupt flag.
+ See [IA32-v2b] "CLI" and [IA32-v3a] 5.8.1 "Masking Maskable
+ Hardware Interrupts". */
+ asm volatile ("cli" : : : "memory");
+
+ return old_level;
+}
+
+/* Initializes the interrupt system. */
+void
+intr_init (void)
+{
+ uint64_t idtr_operand;
+ int i;
+
+ /* Initialize interrupt controller. */
+ pic_init ();
+
+ /* Initialize IDT. */
+ for (i = 0; i < INTR_CNT; i++)
+ idt[i] = make_intr_gate (intr_stubs[i], 0);
+
+ /* Load IDT register.
+ See [IA32-v2a] "LIDT" and [IA32-v3a] 5.10 "Interrupt
+ Descriptor Table (IDT)". */
+ idtr_operand = make_idtr_operand (sizeof idt - 1, idt);
+ asm volatile ("lidt %0" : : "m" (idtr_operand));
+
+ /* Initialize intr_names. */
+ for (i = 0; i < INTR_CNT; i++)
+ intr_names[i] = "unknown";
+ intr_names[0] = "#DE Divide Error";
+ intr_names[1] = "#DB Debug Exception";
+ intr_names[2] = "NMI Interrupt";
+ intr_names[3] = "#BP Breakpoint Exception";
+ intr_names[4] = "#OF Overflow Exception";
+ intr_names[5] = "#BR BOUND Range Exceeded Exception";
+ intr_names[6] = "#UD Invalid Opcode Exception";
+ intr_names[7] = "#NM Device Not Available Exception";
+ intr_names[8] = "#DF Double Fault Exception";
+ intr_names[9] = "Coprocessor Segment Overrun";
+ intr_names[10] = "#TS Invalid TSS Exception";
+ intr_names[11] = "#NP Segment Not Present";
+ intr_names[12] = "#SS Stack Fault Exception";
+ intr_names[13] = "#GP General Protection Exception";
+ intr_names[14] = "#PF Page-Fault Exception";
+ intr_names[16] = "#MF x87 FPU Floating-Point Error";
+ intr_names[17] = "#AC Alignment Check Exception";
+ intr_names[18] = "#MC Machine-Check Exception";
+ intr_names[19] = "#XF SIMD Floating-Point Exception";
+}
+
+/* Registers interrupt VEC_NO to invoke HANDLER with descriptor
+ privilege level DPL. Names the interrupt NAME for debugging
+ purposes. The interrupt handler will be invoked with
+ interrupt status set to LEVEL. */
+static void
+register_handler (uint8_t vec_no, int dpl, enum intr_level level,
+ intr_handler_func *handler, const char *name)
+{
+ ASSERT (intr_handlers[vec_no] == NULL);
+ if (level == INTR_ON)
+ idt[vec_no] = make_trap_gate (intr_stubs[vec_no], dpl);
+ else
+ idt[vec_no] = make_intr_gate (intr_stubs[vec_no], dpl);
+ intr_handlers[vec_no] = handler;
+ intr_names[vec_no] = name;
+}
+
+/* Registers external interrupt VEC_NO to invoke HANDLER, which
+ is named NAME for debugging purposes. The handler will
+ execute with interrupts disabled. */
+void
+intr_register_ext (uint8_t vec_no, intr_handler_func *handler,
+ const char *name)
+{
+ ASSERT (vec_no >= 0x20 && vec_no <= 0x2f);
+ register_handler (vec_no, 0, INTR_OFF, handler, name);
+}
+
+/* Registers internal interrupt VEC_NO to invoke HANDLER, which
+ is named NAME for debugging purposes. The interrupt handler
+ will be invoked with interrupt status LEVEL.
+
+ The handler will have descriptor privilege level DPL, meaning
+ that it can be invoked intentionally when the processor is in
+ the DPL or lower-numbered ring. In practice, DPL==3 allows
+ user mode to invoke the interrupts and DPL==0 prevents such
+ invocation. Faults and exceptions that occur in user mode
+ still cause interrupts with DPL==0 to be invoked. See
+ [IA32-v3a] sections 4.5 "Privilege Levels" and 4.8.1.1
+ "Accessing Nonconforming Code Segments" for further
+ discussion. */
+void
+intr_register_int (uint8_t vec_no, int dpl, enum intr_level level,
+ intr_handler_func *handler, const char *name)
+{
+ ASSERT (vec_no < 0x20 || vec_no > 0x2f);
+ register_handler (vec_no, dpl, level, handler, name);
+}
+
+/* Returns true during processing of an external interrupt
+ and false at all other times. */
+bool
+intr_context (void)
+{
+ return in_external_intr;
+}
+
+/* During processing of an external interrupt, directs the
+ interrupt handler to yield to a new process just before
+ returning from the interrupt. May not be called at any other
+ time. */
+void
+intr_yield_on_return (void)
+{
+ ASSERT (intr_context ());
+ yield_on_return = true;
+}
+
+/* 8259A Programmable Interrupt Controller. */
+
+/* Every PC has two 8259A Programmable Interrupt Controller (PIC)
+ chips. One is a "master" accessible at ports 0x20 and 0x21.
+ The other is a "slave" cascaded onto the master's IRQ 2 line
+ and accessible at ports 0xa0 and 0xa1. Accesses to port 0x20
+ set the A0 line to 0 and accesses to 0x21 set the A1 line to
+ 1. The situation is similar for the slave PIC.
+
+ By default, interrupts 0...15 delivered by the PICs will go to
+ interrupt vectors 0...15. Unfortunately, those vectors are
+ also used for CPU traps and exceptions. We reprogram the PICs
+ so that interrupts 0...15 are delivered to interrupt vectors
+ 32...47 (0x20...0x2f) instead. */
+
+/* Initializes the PICs. Refer to [8259A] for details. */
+static void
+pic_init (void)
+{
+ /* Mask all interrupts on both PICs. */
+ outb (0x21, 0xff);
+ outb (0xa1, 0xff);
+
+ /* Initialize master. */
+ outb (0x20, 0x11); /* ICW1: single mode, edge triggered, expect ICW4. */
+ outb (0x21, 0x20); /* ICW2: line IR0...7 -> irq 0x20...0x27. */
+ outb (0x21, 0x04); /* ICW3: slave PIC on line IR2. */
+ outb (0x21, 0x01); /* ICW4: 8086 mode, normal EOI, non-buffered. */
+
+ /* Initialize slave. */
+ outb (0xa0, 0x11); /* ICW1: single mode, edge triggered, expect ICW4. */
+ outb (0xa1, 0x28); /* ICW2: line IR0...7 -> irq 0x28...0x2f. */
+ outb (0xa1, 0x02); /* ICW3: slave ID is 2. */
+ outb (0xa1, 0x01); /* ICW4: 8086 mode, normal EOI, non-buffered. */
+
+ /* Unmask all interrupts. */
+ outb (0x21, 0x00);
+ outb (0xa1, 0x00);
+}
+
+/* Sends an end-of-interrupt signal to the PIC for the given IRQ.
+ If we don't acknowledge the IRQ, it will never be delivered to
+ us again, so this is important. */
+static void
+pic_end_of_interrupt (int irq)
+{
+ ASSERT (irq >= 0x20 && irq < 0x30);
+
+ /* Acknowledge master PIC. */
+ outb (0x20, 0x20);
+
+ /* Acknowledge slave PIC if this is a slave interrupt. */
+ if (irq >= 0x28)
+ outb (0xa0, 0x20);
+}
+
+/* Creates a gate that invokes FUNCTION.
+
+ The gate has descriptor privilege level DPL, meaning that it
+ can be invoked intentionally when the processor is in the DPL
+ or lower-numbered ring. In practice, DPL==3 allows user mode
+ to call into the gate and DPL==0 prevents such calls. Faults
+ and exceptions that occur in user mode still cause gates with
+ DPL==0 to be invoked. See [IA32-v3a] sections 4.5 "Privilege
+ Levels" and 4.8.1.1 "Accessing Nonconforming Code Segments"
+ for further discussion.
+
+ TYPE must be either 14 (for an interrupt gate) or 15 (for a
+ trap gate). The difference is that entering an interrupt gate
+ disables interrupts, but entering a trap gate does not. See
+ [IA32-v3a] section 5.12.1.2 "Flag Usage By Exception- or
+ Interrupt-Handler Procedure" for discussion. */
+static uint64_t
+make_gate (void (*function) (void), int dpl, int type)
+{
+ uint32_t e0, e1;
+
+ ASSERT (function != NULL);
+ ASSERT (dpl >= 0 && dpl <= 3);
+ ASSERT (type >= 0 && type <= 15);
+
+ e0 = (((uint32_t) function & 0xffff) /* Offset 15:0. */
+ | (SEL_KCSEG << 16)); /* Target code segment. */
+
+ e1 = (((uint32_t) function & 0xffff0000) /* Offset 31:16. */
+ | (1 << 15) /* Present. */
+ | ((uint32_t) dpl << 13) /* Descriptor privilege level. */
+ | (0 << 12) /* System. */
+ | ((uint32_t) type << 8)); /* Gate type. */
+
+ return e0 | ((uint64_t) e1 << 32);
+}
+
+/* Creates an interrupt gate that invokes FUNCTION with the given
+ DPL. */
+static uint64_t
+make_intr_gate (void (*function) (void), int dpl)
+{
+ return make_gate (function, dpl, 14);
+}
+
+/* Creates a trap gate that invokes FUNCTION with the given
+ DPL. */
+static uint64_t
+make_trap_gate (void (*function) (void), int dpl)
+{
+ return make_gate (function, dpl, 15);
+}
+
+/* Returns a descriptor that yields the given LIMIT and BASE when
+ used as an operand for the LIDT instruction. */
+static inline uint64_t
+make_idtr_operand (uint16_t limit, void *base)
+{
+ return limit | ((uint64_t) (uint32_t) base << 16);
+}
+
+/* Interrupt handlers. */
+
+/* Handler for all interrupts, faults, and exceptions. This
+ function is called by the assembly language interrupt stubs in
+ intr-stubs.S. FRAME describes the interrupt and the
+ interrupted thread's registers. */
+void
+intr_handler (struct intr_frame *frame)
+{
+ bool external;
+ intr_handler_func *handler;
+
+ /* External interrupts are special.
+ We only handle one at a time (so interrupts must be off)
+ and they need to be acknowledged on the PIC (see below).
+ An external interrupt handler cannot sleep. */
+ external = frame->vec_no >= 0x20 && frame->vec_no < 0x30;
+ if (external)
+ {
+ ASSERT (intr_get_level () == INTR_OFF);
+ ASSERT (!intr_context ());
+
+ in_external_intr = true;
+ yield_on_return = false;
+ }
+
+ /* Invoke the interrupt's handler. */
+ handler = intr_handlers[frame->vec_no];
+ if (handler != NULL)
+ handler (frame);
+ else if (frame->vec_no == 0x27 || frame->vec_no == 0x2f)
+ {
+ /* There is no handler, but this interrupt can trigger
+ spuriously due to a hardware fault or hardware race
+ condition. Ignore it. */
+ }
+ else
+ {
+ /* No handler and not spurious. Invoke the unexpected
+ interrupt handler. */
+ intr_dump_frame (frame);
+ PANIC ("Unexpected interrupt");
+ }
+
+ /* Complete the processing of an external interrupt. */
+ if (external)
+ {
+ ASSERT (intr_get_level () == INTR_OFF);
+ ASSERT (intr_context ());
+
+ in_external_intr = false;
+ pic_end_of_interrupt (frame->vec_no);
+
+ if (yield_on_return)
+ thread_yield ();
+ }
+}
+
+/* Dumps interrupt frame F to the console, for debugging. */
+void
+intr_dump_frame (const struct intr_frame *f)
+{
+ uint32_t cr2;
+
+ /* Store current value of CR2 into `cr2'.
+ CR2 is the linear address of the last page fault.
+ See [IA32-v2a] "MOV--Move to/from Control Registers" and
+ [IA32-v3a] 5.14 "Interrupt 14--Page Fault Exception
+ (#PF)". */
+ asm ("movl %%cr2, %0" : "=r" (cr2));
+
+ printf ("Interrupt %#04x (%s) at eip=%p\n",
+ f->vec_no, intr_names[f->vec_no], f->eip);
+ printf (" cr2=%08"PRIx32" error=%08"PRIx32"\n", cr2, f->error_code);
+ printf (" eax=%08"PRIx32" ebx=%08"PRIx32" ecx=%08"PRIx32" edx=%08"PRIx32"\n",
+ f->eax, f->ebx, f->ecx, f->edx);
+ printf (" esi=%08"PRIx32" edi=%08"PRIx32" esp=%08"PRIx32" ebp=%08"PRIx32"\n",
+ f->esi, f->edi, (uint32_t) f->esp, f->ebp);
+ printf (" cs=%04"PRIx16" ds=%04"PRIx16" es=%04"PRIx16" ss=%04"PRIx16"\n",
+ f->cs, f->ds, f->es, f->ss);
+}
+
+/* Returns the name of interrupt VEC. */
+const char *
+intr_name (uint8_t vec)
+{
+ return intr_names[vec];
+}
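
For reference, a device driver hooks into this module in two steps: register a handler on one of the external vectors (0x20...0x2f), and, if a reschedule is wanted, request it on return rather than yielding directly. A minimal sketch with hypothetical names (devices/timer.c does essentially this for vector 0x20):

    #include <debug.h>
    #include "threads/interrupt.h"

    /* Illustrative external interrupt handler. It runs with
       interrupts off and must not sleep. */
    static void
    tick_handler (struct intr_frame *f UNUSED)
    {
      /* Ask the kernel to switch threads just before this interrupt
         returns, instead of calling thread_yield() here. */
      intr_yield_on_return ();
    }

    /* Hook the handler onto vector 0x20, the remapped IRQ 0. */
    void
    tick_init (void)
    {
      intr_register_ext (0x20, tick_handler, "8254 Timer");
    }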
diff --git a/src/threads/interrupt.h b/src/threads/interrupt.h
new file mode 100644
index 0000000..d43e06d
--- /dev/null
+++ b/src/threads/interrupt.h
@@ -0,0 +1,70 @@
+#ifndef THREADS_INTERRUPT_H
+#define THREADS_INTERRUPT_H
+
+#include <stdbool.h>
+#include <stdint.h>
+
+/* Interrupts on or off? */
+enum intr_level
+ {
+ INTR_OFF, /* Interrupts disabled. */
+ INTR_ON /* Interrupts enabled. */
+ };
+
+enum intr_level intr_get_level (void);
+enum intr_level intr_set_level (enum intr_level);
+enum intr_level intr_enable (void);
+enum intr_level intr_disable (void);
+
+/* Interrupt stack frame. */
+struct intr_frame
+ {
+ /* Pushed by intr_entry in intr-stubs.S.
+ These are the interrupted task's saved registers. */
+ uint32_t edi; /* Saved EDI. */
+ uint32_t esi; /* Saved ESI. */
+ uint32_t ebp; /* Saved EBP. */
+ uint32_t esp_dummy; /* Not used. */
+ uint32_t ebx; /* Saved EBX. */
+ uint32_t edx; /* Saved EDX. */
+ uint32_t ecx; /* Saved ECX. */
+ uint32_t eax; /* Saved EAX. */
+ uint16_t gs, :16; /* Saved GS segment register. */
+ uint16_t fs, :16; /* Saved FS segment register. */
+ uint16_t es, :16; /* Saved ES segment register. */
+ uint16_t ds, :16; /* Saved DS segment register. */
+
+ /* Pushed by intrNN_stub in intr-stubs.S. */
+ uint32_t vec_no; /* Interrupt vector number. */
+
+ /* Sometimes pushed by the CPU,
+ otherwise for consistency pushed as 0 by intrNN_stub.
+ The CPU puts it just under `eip', but we move it here. */
+ uint32_t error_code; /* Error code. */
+
+ /* Pushed by intrNN_stub in intr-stubs.S.
+ This frame pointer eases interpretation of backtraces. */
+ void *frame_pointer; /* Saved EBP (frame pointer). */
+
+ /* Pushed by the CPU.
+ These are the interrupted task's saved registers. */
+ void (*eip) (void); /* Next instruction to execute. */
+ uint16_t cs, :16; /* Code segment for eip. */
+ uint32_t eflags; /* Saved CPU flags. */
+ void *esp; /* Saved stack pointer. */
+ uint16_t ss, :16; /* Data segment for esp. */
+ };
+
+typedef void intr_handler_func (struct intr_frame *);
+
+void intr_init (void);
+void intr_register_ext (uint8_t vec, intr_handler_func *, const char *name);
+void intr_register_int (uint8_t vec, int dpl, enum intr_level,
+ intr_handler_func *, const char *name);
+bool intr_context (void);
+void intr_yield_on_return (void);
+
+void intr_dump_frame (const struct intr_frame *);
+const char *intr_name (uint8_t vec);
+
+#endif /* threads/interrupt.h */
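
A usage note on the level functions declared above: kernel code that briefly touches data shared with an interrupt handler conventionally saves and restores the previous level rather than unconditionally re-enabling, so the pattern nests safely inside callers that have already disabled interrupts:

    enum intr_level old_level = intr_disable ();
    /* ... short critical section over data shared with a handler ... */
    intr_set_level (old_level);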
diff --git a/src/threads/intr-stubs.S b/src/threads/intr-stubs.S
new file mode 100644
index 0000000..b34c142
--- /dev/null
+++ b/src/threads/intr-stubs.S
@@ -0,0 +1,204 @@
+#include "threads/loader.h"
+
+ .text
+
+/* Main interrupt entry point.
+
+ An internal or external interrupt starts in one of the
+ intrNN_stub routines, which push the `struct intr_frame'
+ frame_pointer, error_code, and vec_no members on the stack,
+ then jump here.
+
+ We save the rest of the `struct intr_frame' members to the
+ stack, set up some registers as needed by the kernel, and then
+ call intr_handler(), which actually handles the interrupt.
+
+ We "fall through" to intr_exit to return from the interrupt.
+*/
+.func intr_entry
+intr_entry:
+ /* Save caller's registers. */
+ pushl %ds
+ pushl %es
+ pushl %fs
+ pushl %gs
+ pushal
+
+ /* Set up kernel environment. */
+ cld /* String instructions go upward. */
+ mov $SEL_KDSEG, %eax /* Initialize segment registers. */
+ mov %eax, %ds
+ mov %eax, %es
+ leal 56(%esp), %ebp /* Set up frame pointer. */
+
+ /* Call interrupt handler. */
+ pushl %esp
+.globl intr_handler
+ call intr_handler
+ addl $4, %esp
+.endfunc
+
+/* Interrupt exit.
+
+ Restores the caller's registers, discards extra data on the
+ stack, and returns to the caller.
+
+ This is a separate function because it is called directly when
+ we launch a new user process (see execute_thread() in
+ userprog/process.c). */
+.globl intr_exit
+.func intr_exit
+intr_exit:
+ /* Restore caller's registers. */
+ popal
+ popl %gs
+ popl %fs
+ popl %es
+ popl %ds
+
+ /* Discard `struct intr_frame' vec_no, error_code,
+ frame_pointer members. */
+ addl $12, %esp
+
+ /* Return to caller. */
+ iret
+.endfunc
+
+/* Interrupt stubs.
+
+ This defines 256 fragments of code, named `intr00_stub'
+ through `intrff_stub', each of which is used as the entry
+ point for the corresponding interrupt vector. It also puts
+ the address of each of these functions in the correct spot in
+ `intr_stubs', an array of function pointers.
+
+ Most of the stubs do this:
+
+ 1. Push %ebp on the stack (frame_pointer in `struct intr_frame').
+
+ 2. Push 0 on the stack (error_code).
+
+ 3. Push the interrupt number on the stack (vec_no).
+
+ The CPU pushes an extra "error code" on the stack for a few
+ interrupts. Because we want %ebp to be where the error code
+ is, we follow a different path:
+
+ 1. Push a duplicate copy of the error code on the stack.
+
+ 2. Replace the original copy of the error code by %ebp.
+
+ 3. Push the interrupt number on the stack. */
+
+ .data
+.globl intr_stubs
+intr_stubs:
+
+/* This implements steps 1 and 2, described above, in the common
+ case where we just push a 0 error code. */
+#define zero \
+ pushl %ebp; \
+ pushl $0
+
+/* This implements steps 1 and 2, described above, in the case
+ where the CPU already pushed an error code. */
+#define REAL \
+ pushl (%esp); \
+ movl %ebp, 4(%esp)
+
+/* Emits a stub for interrupt vector NUMBER.
+ TYPE is `zero', for the case where we push a 0 error code,
+ or `REAL', if the CPU pushes an error code for us. */
+#define STUB(NUMBER, TYPE) \
+ .text; \
+.globl intr##NUMBER##_stub; \
+.func intr##NUMBER##_stub; \
+intr##NUMBER##_stub: \
+ TYPE; \
+ push $0x##NUMBER; \
+ jmp intr_entry; \
+.endfunc; \
+ \
+ .data; \
+ .long intr##NUMBER##_stub;
+
+/* All the stubs. */
+STUB(00, zero) STUB(01, zero) STUB(02, zero) STUB(03, zero)
+STUB(04, zero) STUB(05, zero) STUB(06, zero) STUB(07, zero)
+STUB(08, REAL) STUB(09, zero) STUB(0a, REAL) STUB(0b, REAL)
+STUB(0c, zero) STUB(0d, REAL) STUB(0e, REAL) STUB(0f, zero)
+
+STUB(10, zero) STUB(11, REAL) STUB(12, zero) STUB(13, zero)
+STUB(14, zero) STUB(15, zero) STUB(16, zero) STUB(17, zero)
+STUB(18, REAL) STUB(19, zero) STUB(1a, REAL) STUB(1b, REAL)
+STUB(1c, zero) STUB(1d, REAL) STUB(1e, REAL) STUB(1f, zero)
+
+STUB(20, zero) STUB(21, zero) STUB(22, zero) STUB(23, zero)
+STUB(24, zero) STUB(25, zero) STUB(26, zero) STUB(27, zero)
+STUB(28, zero) STUB(29, zero) STUB(2a, zero) STUB(2b, zero)
+STUB(2c, zero) STUB(2d, zero) STUB(2e, zero) STUB(2f, zero)
+
+STUB(30, zero) STUB(31, zero) STUB(32, zero) STUB(33, zero)
+STUB(34, zero) STUB(35, zero) STUB(36, zero) STUB(37, zero)
+STUB(38, zero) STUB(39, zero) STUB(3a, zero) STUB(3b, zero)
+STUB(3c, zero) STUB(3d, zero) STUB(3e, zero) STUB(3f, zero)
+
+STUB(40, zero) STUB(41, zero) STUB(42, zero) STUB(43, zero)
+STUB(44, zero) STUB(45, zero) STUB(46, zero) STUB(47, zero)
+STUB(48, zero) STUB(49, zero) STUB(4a, zero) STUB(4b, zero)
+STUB(4c, zero) STUB(4d, zero) STUB(4e, zero) STUB(4f, zero)
+
+STUB(50, zero) STUB(51, zero) STUB(52, zero) STUB(53, zero)
+STUB(54, zero) STUB(55, zero) STUB(56, zero) STUB(57, zero)
+STUB(58, zero) STUB(59, zero) STUB(5a, zero) STUB(5b, zero)
+STUB(5c, zero) STUB(5d, zero) STUB(5e, zero) STUB(5f, zero)
+
+STUB(60, zero) STUB(61, zero) STUB(62, zero) STUB(63, zero)
+STUB(64, zero) STUB(65, zero) STUB(66, zero) STUB(67, zero)
+STUB(68, zero) STUB(69, zero) STUB(6a, zero) STUB(6b, zero)
+STUB(6c, zero) STUB(6d, zero) STUB(6e, zero) STUB(6f, zero)
+
+STUB(70, zero) STUB(71, zero) STUB(72, zero) STUB(73, zero)
+STUB(74, zero) STUB(75, zero) STUB(76, zero) STUB(77, zero)
+STUB(78, zero) STUB(79, zero) STUB(7a, zero) STUB(7b, zero)
+STUB(7c, zero) STUB(7d, zero) STUB(7e, zero) STUB(7f, zero)
+
+STUB(80, zero) STUB(81, zero) STUB(82, zero) STUB(83, zero)
+STUB(84, zero) STUB(85, zero) STUB(86, zero) STUB(87, zero)
+STUB(88, zero) STUB(89, zero) STUB(8a, zero) STUB(8b, zero)
+STUB(8c, zero) STUB(8d, zero) STUB(8e, zero) STUB(8f, zero)
+
+STUB(90, zero) STUB(91, zero) STUB(92, zero) STUB(93, zero)
+STUB(94, zero) STUB(95, zero) STUB(96, zero) STUB(97, zero)
+STUB(98, zero) STUB(99, zero) STUB(9a, zero) STUB(9b, zero)
+STUB(9c, zero) STUB(9d, zero) STUB(9e, zero) STUB(9f, zero)
+
+STUB(a0, zero) STUB(a1, zero) STUB(a2, zero) STUB(a3, zero)
+STUB(a4, zero) STUB(a5, zero) STUB(a6, zero) STUB(a7, zero)
+STUB(a8, zero) STUB(a9, zero) STUB(aa, zero) STUB(ab, zero)
+STUB(ac, zero) STUB(ad, zero) STUB(ae, zero) STUB(af, zero)
+
+STUB(b0, zero) STUB(b1, zero) STUB(b2, zero) STUB(b3, zero)
+STUB(b4, zero) STUB(b5, zero) STUB(b6, zero) STUB(b7, zero)
+STUB(b8, zero) STUB(b9, zero) STUB(ba, zero) STUB(bb, zero)
+STUB(bc, zero) STUB(bd, zero) STUB(be, zero) STUB(bf, zero)
+
+STUB(c0, zero) STUB(c1, zero) STUB(c2, zero) STUB(c3, zero)
+STUB(c4, zero) STUB(c5, zero) STUB(c6, zero) STUB(c7, zero)
+STUB(c8, zero) STUB(c9, zero) STUB(ca, zero) STUB(cb, zero)
+STUB(cc, zero) STUB(cd, zero) STUB(ce, zero) STUB(cf, zero)
+
+STUB(d0, zero) STUB(d1, zero) STUB(d2, zero) STUB(d3, zero)
+STUB(d4, zero) STUB(d5, zero) STUB(d6, zero) STUB(d7, zero)
+STUB(d8, zero) STUB(d9, zero) STUB(da, zero) STUB(db, zero)
+STUB(dc, zero) STUB(dd, zero) STUB(de, zero) STUB(df, zero)
+
+STUB(e0, zero) STUB(e1, zero) STUB(e2, zero) STUB(e3, zero)
+STUB(e4, zero) STUB(e5, zero) STUB(e6, zero) STUB(e7, zero)
+STUB(e8, zero) STUB(e9, zero) STUB(ea, zero) STUB(eb, zero)
+STUB(ec, zero) STUB(ed, zero) STUB(ee, zero) STUB(ef, zero)
+
+STUB(f0, zero) STUB(f1, zero) STUB(f2, zero) STUB(f3, zero)
+STUB(f4, zero) STUB(f5, zero) STUB(f6, zero) STUB(f7, zero)
+STUB(f8, zero) STUB(f9, zero) STUB(fa, zero) STUB(fb, zero)
+STUB(fc, zero) STUB(fd, zero) STUB(fe, zero) STUB(ff, zero)
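
To make the STUB macro concrete, STUB(0d, REAL) (vector 0x0d is the general protection fault, one of the few interrupts where the CPU pushes a genuine error code) expands to roughly:

        .text
    .globl intr0d_stub
    intr0d_stub:
        pushl (%esp)        # Duplicate the CPU-pushed error code.
        movl %ebp, 4(%esp)  # Replace the original copy with %ebp.
        push $0x0d          # Interrupt vector number.
        jmp intr_entry

        .data
        .long intr0d_stub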
diff --git a/src/threads/intr-stubs.h b/src/threads/intr-stubs.h
new file mode 100644
index 0000000..9ceba15
--- /dev/null
+++ b/src/threads/intr-stubs.h
@@ -0,0 +1,19 @@
+#ifndef THREADS_INTR_STUBS_H
+#define THREADS_INTR_STUBS_H
+
+/* Interrupt stubs.
+
+ These are little snippets of code in intr-stubs.S, one for
+ each of the 256 possible x86 interrupts. Each one does a
+ little bit of stack manipulation, then jumps to intr_entry().
+ See intr-stubs.S for more information.
+
+ This array points to each of the interrupt stub entry points
+ so that intr_init() can easily find them. */
+typedef void intr_stub_func (void);
+extern intr_stub_func *intr_stubs[256];
+
+/* Interrupt return path. */
+void intr_exit (void);
+
+#endif /* threads/intr-stubs.h */
diff --git a/src/threads/io.h b/src/threads/io.h
new file mode 100644
index 0000000..b493299
--- /dev/null
+++ b/src/threads/io.h
@@ -0,0 +1,173 @@
+/* This file is derived from source code used in MIT's 6.828
+ course. The original copyright notice is reproduced in full
+ below. */
+
+/*
+ * Copyright (C) 1997 Massachusetts Institute of Technology
+ *
+ * This software is being provided by the copyright holders under the
+ * following license. By obtaining, using and/or copying this software,
+ * you agree that you have read, understood, and will comply with the
+ * following terms and conditions:
+ *
+ * Permission to use, copy, modify, distribute, and sell this software
+ * and its documentation for any purpose and without fee or royalty is
+ * hereby granted, provided that the full text of this NOTICE appears on
+ * ALL copies of the software and documentation or portions thereof,
+ * including modifications, that you make.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS," AND COPYRIGHT HOLDERS MAKE NO
+ * REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE,
+ * BUT NOT LIMITATION, COPYRIGHT HOLDERS MAKE NO REPRESENTATIONS OR
+ * WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR
+ * THAT THE USE OF THE SOFTWARE OR DOCUMENTATION WILL NOT INFRINGE ANY
+ * THIRD PARTY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS. COPYRIGHT
+ * HOLDERS WILL BEAR NO LIABILITY FOR ANY USE OF THIS SOFTWARE OR
+ * DOCUMENTATION.
+ *
+ * The name and trademarks of copyright holders may NOT be used in
+ * advertising or publicity pertaining to the software without specific,
+ * written prior permission. Title to copyright in this software and any
+ * associated documentation will at all times remain with copyright
+ * holders. See the file AUTHORS which should have accompanied this software
+ * for a list of all copyright holders.
+ *
+ * This file may be derived from previously copyrighted software. This
+ * copyright applies only to those changes made by the copyright
+ * holders listed in the AUTHORS file. The rest of this file is covered by
+ * the copyright notices, if any, listed below.
+ */
+
+#ifndef THREADS_IO_H
+#define THREADS_IO_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+/* Reads and returns a byte from PORT. */
+static inline uint8_t
+inb (uint16_t port)
+{
+ /* See [IA32-v2a] "IN". */
+ uint8_t data;
+ asm volatile ("inb %w1,%0" : "=a" (data) : "d" (port));
+ return data;
+}
+
+/* Reads CNT bytes from PORT, one after another, and stores them
+ into the buffer starting at ADDR. */
+static inline void
+insb (uint16_t port, void *addr, size_t cnt)
+{
+ /* See [IA32-v2a] "INS". */
+ asm volatile ("cld; repne; insb"
+ : "=D" (addr), "=c" (cnt)
+ : "d" (port), "0" (addr), "1" (cnt)
+ : "memory", "cc");
+}
+
+/* Reads and returns 16 bits from PORT. */
+static inline uint16_t
+inw (uint16_t port)
+{
+ uint16_t data;
+ /* See [IA32-v2a] "IN". */
+ asm volatile ("inw %w1,%0" : "=a" (data) : "d" (port));
+ return data;
+}
+
+/* Reads CNT 16-bit (halfword) units from PORT, one after
+ another, and stores them into the buffer starting at ADDR. */
+static inline void
+insw (uint16_t port, void *addr, size_t cnt)
+{
+ /* See [IA32-v2a] "INS". */
+ asm volatile ("cld; repne; insw"
+ : "=D" (addr), "=c" (cnt)
+ : "d" (port), "0" (addr), "1" (cnt)
+ : "memory", "cc");
+}
+
+/* Reads and returns 32 bits from PORT. */
+static inline uint32_t
+inl (uint16_t port)
+{
+ /* See [IA32-v2a] "IN". */
+ uint32_t data;
+ asm volatile ("inl %w1,%0" : "=a" (data) : "d" (port));
+ return data;
+}
+
+/* Reads CNT 32-bit (word) units from PORT, one after another,
+ and stores them into the buffer starting at ADDR. */
+static inline void
+insl (uint16_t port, void *addr, size_t cnt)
+{
+ /* See [IA32-v2a] "INS". */
+ asm volatile ("cld; repne; insl"
+ : "=D" (addr), "=c" (cnt)
+ : "d" (port), "0" (addr), "1" (cnt)
+ : "memory", "cc");
+}
+
+/* Writes byte DATA to PORT. */
+static inline void
+outb (uint16_t port, uint8_t data)
+{
+ /* See [IA32-v2b] "OUT". */
+ asm volatile ("outb %0,%w1" : : "a" (data), "d" (port));
+}
+
+/* Writes to PORT each byte of data in the CNT-byte buffer
+ starting at ADDR. */
+static inline void
+outsb (uint16_t port, const void *addr, size_t cnt)
+{
+ /* See [IA32-v2b] "OUTS". */
+ asm volatile ("cld; repne; outsb"
+ : "=S" (addr), "=c" (cnt)
+ : "d" (port), "0" (addr), "1" (cnt)
+ : "cc");
+}
+
+/* Writes the 16-bit DATA to PORT. */
+static inline void
+outw (uint16_t port, uint16_t data)
+{
+ /* See [IA32-v2b] "OUT". */
+ asm volatile ("outw %0,%w1" : : "a" (data), "d" (port));
+}
+
+/* Writes to PORT each 16-bit unit (halfword) of data in the
+ CNT-halfword buffer starting at ADDR. */
+static inline void
+outsw (uint16_t port, const void *addr, size_t cnt)
+{
+ /* See [IA32-v2b] "OUTS". */
+ asm volatile ("cld; repne; outsw"
+ : "=S" (addr), "=c" (cnt)
+ : "d" (port), "0" (addr), "1" (cnt)
+ : "cc");
+}
+
+/* Writes the 32-bit DATA to PORT. */
+static inline void
+outl (uint16_t port, uint32_t data)
+{
+ /* See [IA32-v2b] "OUT". */
+ asm volatile ("outl %0,%w1" : : "a" (data), "d" (port));
+}
+
+/* Writes to PORT each 32-bit unit (word) of data in the CNT-word
+ buffer starting at ADDR. */
+static inline void
+outsl (uint16_t port, const void *addr, size_t cnt)
+{
+ /* See [IA32-v2b] "OUTS". */
+ asm volatile ("cld; repne; outsl"
+ : "=S" (addr), "=c" (cnt)
+ : "d" (port), "0" (addr), "1" (cnt)
+ : "cc");
+}
+
+#endif /* threads/io.h */
diff --git a/src/threads/kernel.lds.S b/src/threads/kernel.lds.S
new file mode 100644
index 0000000..6154d08
--- /dev/null
+++ b/src/threads/kernel.lds.S
@@ -0,0 +1,26 @@
+#include "threads/loader.h"
+
+OUTPUT_FORMAT("elf32-i386")
+OUTPUT_ARCH("i386")
+ENTRY(start) /* Kernel starts at "start" symbol. */
+SECTIONS
+{
+ /* Specifies the virtual address for the kernel base. */
+ . = LOADER_PHYS_BASE + LOADER_KERN_BASE;
+
+ _start = .;
+
+ /* Kernel starts with code, followed by read-only data and writable data. */
+ .text : { *(.start) *(.text) } = 0x90
+ .rodata : { *(.rodata) *(.rodata.*)
+ . = ALIGN(0x1000);
+ _end_kernel_text = .; }
+ .data : { *(.data) }
+
+ /* BSS (zero-initialized data) is after everything else. */
+ _start_bss = .;
+ .bss : { *(.bss) }
+ _end_bss = .;
+
+ _end = .;
+}
diff --git a/src/threads/loader.S b/src/threads/loader.S
new file mode 100644
index 0000000..b7842d3
--- /dev/null
+++ b/src/threads/loader.S
@@ -0,0 +1,349 @@
+/* This file is derived from source code used in MIT's 6.828
+ course. The original copyright notice is reproduced in full
+ below. */
+
+/*
+ * Copyright (C) 1997 Massachusetts Institute of Technology
+ *
+ * This software is being provided by the copyright holders under the
+ * following license. By obtaining, using and/or copying this software,
+ * you agree that you have read, understood, and will comply with the
+ * following terms and conditions:
+ *
+ * Permission to use, copy, modify, distribute, and sell this software
+ * and its documentation for any purpose and without fee or royalty is
+ * hereby granted, provided that the full text of this NOTICE appears on
+ * ALL copies of the software and documentation or portions thereof,
+ * including modifications, that you make.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS," AND COPYRIGHT HOLDERS MAKE NO
+ * REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF EXAMPLE,
+ * BUT NOT LIMITATION, COPYRIGHT HOLDERS MAKE NO REPRESENTATIONS OR
+ * WARRANTIES OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR
+ * THAT THE USE OF THE SOFTWARE OR DOCUMENTATION WILL NOT INFRINGE ANY
+ * THIRD PARTY PATENTS, COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS. COPYRIGHT
+ * HOLDERS WILL BEAR NO LIABILITY FOR ANY USE OF THIS SOFTWARE OR
+ * DOCUMENTATION.
+ *
+ * The name and trademarks of copyright holders may NOT be used in
+ * advertising or publicity pertaining to the software without specific,
+ * written prior permission. Title to copyright in this software and any
+ * associated documentation will at all times remain with copyright
+ * holders. See the file AUTHORS which should have accompanied this software
+ * for a list of all copyright holders.
+ *
+ * This file may be derived from previously copyrighted software. This
+ * copyright applies only to those changes made by the copyright
+ * holders listed in the AUTHORS file. The rest of this file is covered by
+ * the copyright notices, if any, listed below.
+ */
+
+#include "threads/loader.h"
+
+#### Kernel loader.
+
+#### This code should be stored in the first sector of the hard disk.
+#### When the BIOS runs, it loads this code at physical address
+#### 0x7c00-0x7e00 (512 bytes). Then it jumps to the beginning of it,
+#### in real mode. This code switches into protected mode (32-bit
+#### mode) so that all of memory can be accessed, loads the kernel into
+#### memory, and jumps to the first byte of the kernel, where start.S
+#### is linked.
+
+/* Flags in control register 0. */
+#define CR0_PE 0x00000001 /* Protection Enable. */
+#define CR0_EM 0x00000004 /* (Floating-point) Emulation. */
+#define CR0_PG 0x80000000 /* Paging. */
+#define CR0_WP 0x00010000 /* Write-Protect enable in kernel mode. */
+
+
+.globl start
+start:
+
+# Code runs in real mode, which is a 16-bit segment.
+ .code16
+
+# Disable interrupts, because we will not be prepared to handle them
+# in protected mode until much later.
+# String instructions go upward (e.g. for "rep stosl" below).
+
+ cli
+ cld
+
+# Set up data segments.
+
+ subw %ax, %ax
+ movw %ax, %es
+ movw %ax, %ds
+
+# Set up stack segment.
+# Stack grows downward starting from us.
+# We don't ever use the stack, but we call into the BIOS,
+# which might.
+
+ movw %ax, %ss
+ movw $0x7c00, %sp
+
+#### Enable A20. Address line 20 is tied to low when the machine
+#### boots, which prevents addressing memory above 1 MB. This code
+#### fixes it.
+
+# Poll status register while busy.
+
+1: inb $0x64, %al
+ testb $0x2, %al
+ jnz 1b
+
+# Send command for writing output port.
+
+ movb $0xd1, %al
+ outb %al, $0x64
+
+# Poll status register while busy.
+
+1: inb $0x64, %al
+ testb $0x2, %al
+ jnz 1b
+
+# Enable A20 line.
+
+ movb $0xdf, %al
+ outb %al, $0x60
+
+#### Get memory size, via interrupt 15h function 88h. Returns CF
+#### clear if successful, with AX = (kB of physical memory) - 1024.
+#### This only works for memory sizes <= 65 MB, which should be fine
+#### for our purposes. We cap memory at 64 MB because that's all we
+#### prepare page tables for, below.
+
+ movb $0x88, %ah
+ int $0x15
+ jc panic
+ cli # BIOS might have enabled interrupts
+ addl $1024, %eax # Total kB memory
+ cmp $0x10000, %eax # Cap at 64 MB
+ jbe 1f
+ mov $0x10000, %eax
+1: shrl $2, %eax # Total 4 kB pages
+ movl %eax, ram_pgs
+
+#### Create temporary page directory and page table and set page
+#### directory base register.
+
+# Create page directory at 64 kB and fill with zeroes.
+ mov $0x1000, %ax
+ mov %ax, %es
+ subl %eax, %eax
+ subl %edi, %edi
+ movl $0x400, %ecx
+ rep stosl
+
+# Add PDEs to point to PTEs for the first 64 MB of RAM.
+# Also add identical PDEs starting at LOADER_PHYS_BASE.
+# See [IA32-v3a] section 3.7.6 "Page-Directory and Page-Table Entries"
+# for a description of the bits in %eax.
+
+
+ movl $0x11007, %eax
+ movl $0x11, %ecx
+ subl %edi, %edi
+1: movl %eax, %es:(%di)
+ movl %eax, %es:LOADER_PHYS_BASE >> 20(%di)
+ addw $4, %di
+ addl $0x1000, %eax
+ loop 1b
+
+# Set up one-to-one linear to physical map for the first 64 MB of RAM.
+# See [IA32-v3a] section 3.7.6 "Page-Directory and Page-Table Entries"
+# for a description of the bits in %eax.
+
+ movw $0x1100, %ax
+ movw %ax, %es
+ movl $0x7, %eax
+ movl $0x4000, %ecx
+ subl %edi, %edi
+1: movl %eax, %es:(%di)
+ addw $4, %di
+ addl $0x1000, %eax
+ loop 1b
+
+# Set page directory base register.
+
+ movl $0x10000, %eax
+ movl %eax, %cr3
+
+#### Switch to protected mode.
+
+# Note that interrupts are still off.
+
+# Point the GDTR to our GDT. Protected mode requires a GDT.
+# We need a data32 prefix to ensure that all 32 bits of the GDT
+# descriptor are loaded (default is to load only 24 bits).
+
+ data32 lgdt gdtdesc
+
+# Then we turn on the following bits in CR0:
+# PE (Protect Enable): this turns on protected mode.
+# PG (Paging): turns on paging.
+# WP (Write Protect): if unset, ring 0 code ignores
+# write-protect bits in page tables (!).
+# EM (Emulation): forces floating-point instructions to trap.
+# We don't support floating point.
+
+ movl %cr0, %eax
+ orl $CR0_PE | CR0_PG | CR0_WP | CR0_EM, %eax
+ movl %eax, %cr0
+
+# We're now in protected mode in a 16-bit segment. The CPU still has
+# the real-mode code segment cached in %cs's segment descriptor. We
+# need to reload %cs, and the easiest way is to use a far jump.
+# Because we're not in a 32-bit segment the data32 prefix is needed to
+# jump to a 32-bit offset.
+
+ data32 ljmp $SEL_KCSEG, $1f + LOADER_PHYS_BASE
+
+# We're now in protected mode in a 32-bit segment.
+
+ .code32
+
+# Reload all the other segment registers and the stack pointer to
+# point into our new GDT.
+
+1: movw $SEL_KDSEG, %ax
+ movw %ax, %ds
+ movw %ax, %es
+ movw %ax, %fs
+ movw %ax, %gs
+ movw %ax, %ss
+ movl $LOADER_PHYS_BASE + 0x30000, %esp
+
+#### Load kernel starting at physical address LOADER_KERN_BASE by
+#### frobbing the IDE controller directly.
+
+ movl $1, %ebx
+ movl $LOADER_KERN_BASE + LOADER_PHYS_BASE, %edi
+
+# Disable interrupt delivery by IDE controller, because we will be
+# polling for data.
+# (If we don't do this, Bochs 2.2.6 will never deliver any IDE
+# interrupt to us later after we reset the interrupt controller during
+# boot, even if we also reset the IDE controller.)
+
+ movw $0x3f6, %dx
+ movb $0x02, %al
+ outb %al, %dx
+
+read_sector:
+
+# Poll status register while controller busy.
+
+ movl $0x1f7, %edx
+1: inb %dx, %al
+ testb $0x80, %al
+ jnz 1b
+
+# Read a single sector.
+
+ movl $0x1f2, %edx
+ movb $1, %al
+ outb %al, %dx
+
+# Sector number to write in low 28 bits.
+# LBA mode, device 0 in top 4 bits.
+
+ movl %ebx, %eax
+ andl $0x0fffffff, %eax
+ orl $0xe0000000, %eax
+
+# Dump %eax to ports 0x1f3...0x1f6.
+
+ movl $4, %ecx
+1: incw %dx
+ outb %al, %dx
+ shrl $8, %eax
+ loop 1b
+
+# READ command to command register.
+
+ incw %dx
+ movb $0x20, %al
+ outb %al, %dx
+
+# Poll status register while controller busy.
+
+1: inb %dx, %al
+ testb $0x80, %al
+ jnz 1b
+
+# Poll status register until data ready.
+
+1: inb %dx, %al
+ testb $0x08, %al
+ jz 1b
+
+# Transfer sector.
+
+ movl $256, %ecx
+ movl $0x1f0, %edx
+ rep insw
+
+# Next sector.
+
+ incl %ebx
+ cmpl $KERNEL_LOAD_PAGES*8 + 1, %ebx
+ jnz read_sector
+
+#### Jump to kernel entry point.
+
+ movl $LOADER_PHYS_BASE + LOADER_KERN_BASE, %eax
+ call *%eax
+ jmp panic
+
+#### GDT
+
+gdt:
+ .quad 0x0000000000000000 # null seg
+ .quad 0x00cf9a000000ffff # code seg
+ .quad 0x00cf92000000ffff # data seg
+
+gdtdesc:
+ .word 0x17 # sizeof (gdt) - 1
+ .long gdt + LOADER_PHYS_BASE # address gdt
+
+#### Fatal error.
+#### Print panic_message (with help from the BIOS) and spin.
+
+panic: .code16 # We only panic in real mode.
+ movw $panic_message, %si
+ movb $0xe, %ah
+ subb %bh, %bh
+1: lodsb
+ test %al, %al
+2: jz 2b # Spin.
+ int $0x10
+ jmp 1b
+
+panic_message:
+ .ascii "Panic!"
+ .byte 0
+
+#### Physical memory size in 4 kB pages.
+#### This is initialized by the loader and read by the kernel.
+ .org LOADER_RAM_PGS - LOADER_BASE
+ram_pgs:
+ .long 0
+
+#### Command-line arguments and their count.
+#### This is written by the `pintos' utility and read by the kernel.
+#### The loader itself does not do anything with the command line.
+ .org LOADER_ARG_CNT - LOADER_BASE
+arg_cnt:
+ .long 0
+ .org LOADER_ARGS - LOADER_BASE
+args:
+ .fill 0x80, 1, 0
+
+#### Boot-sector signature.
+#### The BIOS checks that this is set properly.
+ .org LOADER_SIG - LOADER_BASE
+ .word 0xaa55
diff --git a/src/threads/loader.h b/src/threads/loader.h
new file mode 100644
index 0000000..5bd9813
--- /dev/null
+++ b/src/threads/loader.h
@@ -0,0 +1,38 @@
+#ifndef THREADS_LOADER_H
+#define THREADS_LOADER_H
+
+/* Constants fixed by the PC BIOS. */
+#define LOADER_BASE 0x7c00 /* Physical address of loader's base. */
+#define LOADER_END 0x7e00 /* Physical address of end of loader. */
+
+/* Physical address of kernel base. */
+#define LOADER_KERN_BASE 0x100000 /* 1 MB. */
+
+/* Kernel virtual address at which all physical memory is mapped.
+
+ The loader maps the 4 MB at the bottom of physical memory to
+ this virtual base address. Later, paging_init() adds the rest
+ of physical memory to the mapping.
+
+ This must be aligned on a 4 MB boundary. */
+#define LOADER_PHYS_BASE 0xc0000000 /* 3 GB. */
+
+/* Important loader physical addresses. */
+#define LOADER_SIG (LOADER_END - LOADER_SIG_LEN) /* 0xaa55 BIOS signature. */
+#define LOADER_ARGS (LOADER_SIG - LOADER_ARGS_LEN) /* Command-line args. */
+#define LOADER_ARG_CNT (LOADER_ARGS - LOADER_ARG_CNT_LEN) /* Number of args. */
+#define LOADER_RAM_PGS (LOADER_ARG_CNT - LOADER_RAM_PGS_LEN) /* # RAM pages. */
+
+/* Sizes of loader data structures. */
+#define LOADER_SIG_LEN 2
+#define LOADER_ARGS_LEN 128
+#define LOADER_ARG_CNT_LEN 4
+#define LOADER_RAM_PGS_LEN 4
+
+/* GDT selectors defined by loader.
+ More selectors are defined by userprog/gdt.h. */
+#define SEL_NULL 0x00 /* Null selector. */
+#define SEL_KCSEG 0x08 /* Kernel code selector. */
+#define SEL_KDSEG 0x10 /* Kernel data selector. */
+
+#endif /* threads/loader.h */
diff --git a/src/threads/malloc.c b/src/threads/malloc.c
new file mode 100644
index 0000000..f6f803b
--- /dev/null
+++ b/src/threads/malloc.c
@@ -0,0 +1,294 @@
+#include "threads/malloc.h"
+#include <debug.h>
+#include <list.h>
+#include <round.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include "threads/palloc.h"
+#include "threads/synch.h"
+#include "threads/vaddr.h"
+
+/* A simple implementation of malloc().
+
+ The size of each request, in bytes, is rounded up to a power
+ of 2 and assigned to the "descriptor" that manages blocks of
+ that size. The descriptor keeps a list of free blocks. If
+ the free list is nonempty, one of its blocks is used to
+ satisfy the request.
+
+ Otherwise, a new page of memory, called an "arena", is
+ obtained from the page allocator (if none is available,
+ malloc() returns a null pointer). The new arena is divided
+ into blocks, all of which are added to the descriptor's free
+ list. Then we return one of the new blocks.
+
+ When we free a block, we add it to its descriptor's free list.
+ But if the arena that the block was in now has no in-use
+ blocks, we remove all of the arena's blocks from the free list
+ and give the arena back to the page allocator.
+
+ We can't handle blocks bigger than 2 kB using this scheme,
+ because they're too big to fit in a single page with a
+ descriptor. We handle those by allocating contiguous pages
+ with the page allocator and sticking the allocation size at
+ the beginning of the allocated block's arena header. */
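+
+/* A minimal usage sketch (illustrative only; `small' and `big' are
+   hypothetical).  A 100-byte request is served from the 128-byte
+   descriptor's free list; a 3000-byte request is too big for any
+   descriptor and receives whole pages instead:
+
+     void *small = malloc (100);
+     void *big = malloc (3000);
+     free (small);
+     free (big);
+*/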
+
+/* Descriptor. */
+struct desc
+ {
+ size_t block_size; /* Size of each element in bytes. */
+ size_t blocks_per_arena; /* Number of blocks in an arena. */
+ struct list free_list; /* List of free blocks. */
+ struct lock lock; /* Lock. */
+ };
+
+/* Magic number for detecting arena corruption. */
+#define ARENA_MAGIC 0x9a548eed
+
+/* Arena. */
+struct arena
+ {
+ unsigned magic; /* Always set to ARENA_MAGIC. */
+ struct desc *desc; /* Owning descriptor, null for big block. */
+ size_t free_cnt; /* Free blocks; pages in big block. */
+ };
+
+/* Free block. */
+struct block
+ {
+ struct list_elem free_elem; /* Free list element. */
+ };
+
+/* Our set of descriptors. */
+static struct desc descs[10]; /* Descriptors. */
+static size_t desc_cnt; /* Number of descriptors. */
+
+static struct arena *block_to_arena (struct block *);
+static struct block *arena_to_block (struct arena *, size_t idx);
+
+/* Initializes the malloc() descriptors. */
+void
+malloc_init (void)
+{
+ size_t block_size;
+
+ for (block_size = 16; block_size < PGSIZE / 2; block_size *= 2)
+ {
+ struct desc *d = &descs[desc_cnt++];
+ ASSERT (desc_cnt <= sizeof descs / sizeof *descs);
+ d->block_size = block_size;
+ d->blocks_per_arena = (PGSIZE - sizeof (struct arena)) / block_size;
+ list_init (&d->free_list);
+ lock_init (&d->lock);
+ }
+}
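+
+/* With PGSIZE == 4096, the loop above creates seven descriptors, for
+   block sizes 16, 32, 64, 128, 256, 512, and 1024 bytes; anything
+   larger is treated as a "big block" by malloc() below. */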
+
+/* Obtains and returns a new block of at least SIZE bytes.
+ Returns a null pointer if memory is not available. */
+void *
+malloc (size_t size)
+{
+ struct desc *d;
+ struct block *b;
+ struct arena *a;
+
+ /* A null pointer satisfies a request for 0 bytes. */
+ if (size == 0)
+ return NULL;
+
+ /* Find the smallest descriptor that satisfies a SIZE-byte
+ request. */
+ for (d = descs; d < descs + desc_cnt; d++)
+ if (d->block_size >= size)
+ break;
+ if (d == descs + desc_cnt)
+ {
+ /* SIZE is too big for any descriptor.
+ Allocate enough pages to hold SIZE plus an arena. */
+ size_t page_cnt = DIV_ROUND_UP (size + sizeof *a, PGSIZE);
+ a = palloc_get_multiple (0, page_cnt);
+ if (a == NULL)
+ return NULL;
+
+ /* Initialize the arena to indicate a big block of PAGE_CNT
+ pages, and return it. */
+ a->magic = ARENA_MAGIC;
+ a->desc = NULL;
+ a->free_cnt = page_cnt;
+ return a + 1;
+ }
+
+ lock_acquire (&d->lock);
+
+ /* If the free list is empty, create a new arena. */
+ if (list_empty (&d->free_list))
+ {
+ size_t i;
+
+ /* Allocate a page. */
+ a = palloc_get_page (0);
+ if (a == NULL)
+ {
+ lock_release (&d->lock);
+ return NULL;
+ }
+
+ /* Initialize arena and add its blocks to the free list. */
+ a->magic = ARENA_MAGIC;
+ a->desc = d;
+ a->free_cnt = d->blocks_per_arena;
+ for (i = 0; i < d->blocks_per_arena; i++)
+ {
+ struct block *b = arena_to_block (a, i);
+ list_push_back (&d->free_list, &b->free_elem);
+ }
+ }
+
+ /* Get a block from free list and return it. */
+ b = list_entry (list_pop_front (&d->free_list), struct block, free_elem);
+ a = block_to_arena (b);
+ a->free_cnt--;
+ lock_release (&d->lock);
+ return b;
+}
+
+/* Allocates and returns A times B bytes, initialized to zeroes.
+ Returns a null pointer if memory is not available. */
+void *
+calloc (size_t a, size_t b)
+{
+ void *p;
+ size_t size;
+
+  /* Calculate block size, making sure the multiplication
+     cannot overflow size_t. */
+  if (b != 0 && a > SIZE_MAX / b)
+    return NULL;
+  size = a * b;
+
+ /* Allocate and zero memory. */
+ p = malloc (size);
+ if (p != NULL)
+ memset (p, 0, size);
+
+ return p;
+}
+
+/* Returns the number of bytes allocated for BLOCK. */
+static size_t
+block_size (void *block)
+{
+ struct block *b = block;
+ struct arena *a = block_to_arena (b);
+ struct desc *d = a->desc;
+
+ return d != NULL ? d->block_size : PGSIZE * a->free_cnt - pg_ofs (block);
+}
+
+/* Attempts to resize OLD_BLOCK to NEW_SIZE bytes, possibly
+ moving it in the process.
+ If successful, returns the new block; on failure, returns a
+ null pointer.
+ A call with null OLD_BLOCK is equivalent to malloc(NEW_SIZE).
+ A call with zero NEW_SIZE is equivalent to free(OLD_BLOCK). */
+void *
+realloc (void *old_block, size_t new_size)
+{
+ if (new_size == 0)
+ {
+ free (old_block);
+ return NULL;
+ }
+ else
+ {
+ void *new_block = malloc (new_size);
+ if (old_block != NULL && new_block != NULL)
+ {
+ size_t old_size = block_size (old_block);
+ size_t min_size = new_size < old_size ? new_size : old_size;
+ memcpy (new_block, old_block, min_size);
+ free (old_block);
+ }
+ return new_block;
+ }
+}
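+
+/* Usage sketch (illustrative; `buf' and `new_len' are hypothetical).
+   On failure the old block is left intact, so keep the old pointer
+   until the call succeeds:
+
+     void *tmp = realloc (buf, new_len);
+     if (tmp != NULL || new_len == 0)
+       buf = tmp;
+*/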
+
+/* Frees block P, which must have been previously allocated with
+ malloc(), calloc(), or realloc(). */
+void
+free (void *p)
+{
+ if (p != NULL)
+ {
+ struct block *b = p;
+ struct arena *a = block_to_arena (b);
+ struct desc *d = a->desc;
+
+ if (d != NULL)
+ {
+ /* It's a normal block. We handle it here. */
+
+#ifndef NDEBUG
+ /* Clear the block to help detect use-after-free bugs. */
+ memset (b, 0xcc, d->block_size);
+#endif
+
+ lock_acquire (&d->lock);
+
+ /* Add block to free list. */
+ list_push_front (&d->free_list, &b->free_elem);
+
+ /* If the arena is now entirely unused, free it. */
+ if (++a->free_cnt >= d->blocks_per_arena)
+ {
+ size_t i;
+
+ ASSERT (a->free_cnt == d->blocks_per_arena);
+ for (i = 0; i < d->blocks_per_arena; i++)
+ {
+ struct block *b = arena_to_block (a, i);
+ list_remove (&b->free_elem);
+ }
+ palloc_free_page (a);
+ }
+
+ lock_release (&d->lock);
+ }
+ else
+ {
+ /* It's a big block. Free its pages. */
+ palloc_free_multiple (a, a->free_cnt);
+ return;
+ }
+ }
+}
+
+/* Returns the arena that block B is inside. */
+static struct arena *
+block_to_arena (struct block *b)
+{
+ struct arena *a = pg_round_down (b);
+
+ /* Check that the arena is valid. */
+ ASSERT (a != NULL);
+ ASSERT (a->magic == ARENA_MAGIC);
+
+ /* Check that the block is properly aligned for the arena. */
+ ASSERT (a->desc == NULL
+ || (pg_ofs (b) - sizeof *a) % a->desc->block_size == 0);
+ ASSERT (a->desc != NULL || pg_ofs (b) == sizeof *a);
+
+ return a;
+}
+
+/* Returns block IDX (counting from zero) within arena A. */
+static struct block *
+arena_to_block (struct arena *a, size_t idx)
+{
+ ASSERT (a != NULL);
+ ASSERT (a->magic == ARENA_MAGIC);
+ ASSERT (idx < a->desc->blocks_per_arena);
+ return (struct block *) ((uint8_t *) a
+ + sizeof *a
+ + idx * a->desc->block_size);
+}
diff --git a/src/threads/malloc.h b/src/threads/malloc.h
new file mode 100644
index 0000000..bc55d36
--- /dev/null
+++ b/src/threads/malloc.h
@@ -0,0 +1,13 @@
+#ifndef THREADS_MALLOC_H
+#define THREADS_MALLOC_H
+
+#include <debug.h>
+#include <stddef.h>
+
+void malloc_init (void);
+void *malloc (size_t) __attribute__ ((malloc));
+void *calloc (size_t, size_t) __attribute__ ((malloc));
+void *realloc (void *, size_t);
+void free (void *);
+
+#endif /* threads/malloc.h */
diff --git a/src/threads/palloc.c b/src/threads/palloc.c
new file mode 100644
index 0000000..eab41e4
--- /dev/null
+++ b/src/threads/palloc.c
@@ -0,0 +1,189 @@
+#include "threads/palloc.h"
+#include <bitmap.h>
+#include <debug.h>
+#include <inttypes.h>
+#include <round.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include "threads/init.h"
+#include "threads/loader.h"
+#include "threads/synch.h"
+#include "threads/vaddr.h"
+
+/* Page allocator. Hands out memory in page-size (or
+ page-multiple) chunks. See malloc.h for an allocator that
+ hands out smaller chunks.
+
+ System memory is divided into two "pools" called the kernel
+ and user pools. The user pool is for user (virtual) memory
+ pages, the kernel pool for everything else. The idea here is
+ that the kernel needs to have memory for its own operations
+ even if user processes are swapping like mad.
+
+ By default, half of system RAM is given to the kernel pool and
+ half to the user pool. That should be huge overkill for the
+ kernel pool, but that's just fine for demonstration purposes. */
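+
+/* Usage sketch (illustrative): obtain one zeroed page from the
+   kernel pool and hand it back.
+
+     uint8_t *page = palloc_get_page (PAL_ZERO);
+     if (page != NULL)
+       palloc_free_page (page);
+*/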
+
+/* A memory pool. */
+struct pool
+ {
+ struct lock lock; /* Mutual exclusion. */
+    struct bitmap *used_map;            /* One bit per page; true = in use. */
+ uint8_t *base; /* Base of pool. */
+ };
+
+/* Two pools: one for kernel data, one for user pages. */
+struct pool kernel_pool, user_pool;
+
+/* Maximum number of pages to put in user pool. */
+size_t user_page_limit = SIZE_MAX;
+
+static void init_pool (struct pool *, void *base, size_t page_cnt,
+ const char *name);
+static bool page_from_pool (const struct pool *, void *page);
+
+/* Initializes the page allocator. */
+void
+palloc_init (void)
+{
+ /* End of the kernel as recorded by the linker.
+ See kernel.lds.S. */
+ extern char _end;
+
+ /* Free memory. */
+ uint8_t *free_start = pg_round_up (&_end);
+ uint8_t *free_end = ptov (ram_pages * PGSIZE);
+ size_t free_pages = (free_end - free_start) / PGSIZE;
+ size_t user_pages = free_pages / 2;
+ size_t kernel_pages;
+ if (user_pages > user_page_limit)
+ user_pages = user_page_limit;
+ kernel_pages = free_pages - user_pages;
+
+ /* Give half of memory to kernel, half to user. */
+ init_pool (&kernel_pool, free_start, kernel_pages, "kernel pool");
+ init_pool (&user_pool, free_start + kernel_pages * PGSIZE,
+ user_pages, "user pool");
+}
+
+/* Obtains and returns a group of PAGE_CNT contiguous free pages.
+ If PAL_USER is set, the pages are obtained from the user pool,
+ otherwise from the kernel pool. If PAL_ZERO is set in FLAGS,
+ then the pages are filled with zeros. If too few pages are
+ available, returns a null pointer, unless PAL_ASSERT is set in
+ FLAGS, in which case the kernel panics. */
+void *
+palloc_get_multiple (enum palloc_flags flags, size_t page_cnt)
+{
+ struct pool *pool = flags & PAL_USER ? &user_pool : &kernel_pool;
+ void *pages;
+ size_t page_idx;
+
+ if (page_cnt == 0)
+ return NULL;
+
+ lock_acquire (&pool->lock);
+ page_idx = bitmap_scan_and_flip (pool->used_map, 0, page_cnt, false);
+ lock_release (&pool->lock);
+
+ if (page_idx != BITMAP_ERROR)
+ pages = pool->base + PGSIZE * page_idx;
+ else
+ pages = NULL;
+
+ if (pages != NULL)
+ {
+ if (flags & PAL_ZERO)
+ memset (pages, 0, PGSIZE * page_cnt);
+ }
+ else
+ {
+ if (flags & PAL_ASSERT)
+ PANIC ("palloc_get: out of pages");
+ }
+
+ return pages;
+}
+
+/* Obtains a single free page and returns its kernel virtual
+ address.
+ If PAL_USER is set, the page is obtained from the user pool,
+ otherwise from the kernel pool. If PAL_ZERO is set in FLAGS,
+ then the page is filled with zeros. If no pages are
+ available, returns a null pointer, unless PAL_ASSERT is set in
+ FLAGS, in which case the kernel panics. */
+void *
+palloc_get_page (enum palloc_flags flags)
+{
+ return palloc_get_multiple (flags, 1);
+}
+
+/* Frees the PAGE_CNT pages starting at PAGES. */
+void
+palloc_free_multiple (void *pages, size_t page_cnt)
+{
+ struct pool *pool;
+ size_t page_idx;
+
+ ASSERT (pg_ofs (pages) == 0);
+ if (pages == NULL || page_cnt == 0)
+ return;
+
+ if (page_from_pool (&kernel_pool, pages))
+ pool = &kernel_pool;
+ else if (page_from_pool (&user_pool, pages))
+ pool = &user_pool;
+ else
+ NOT_REACHED ();
+
+ page_idx = pg_no (pages) - pg_no (pool->base);
+
+#ifndef NDEBUG
+ memset (pages, 0xcc, PGSIZE * page_cnt);
+#endif
+
+ ASSERT (bitmap_all (pool->used_map, page_idx, page_cnt));
+ bitmap_set_multiple (pool->used_map, page_idx, page_cnt, false);
+}
+
+/* Frees the page at PAGE. */
+void
+palloc_free_page (void *page)
+{
+ palloc_free_multiple (page, 1);
+}
+
+/* Initializes pool P to hold PAGE_CNT pages starting at BASE,
+   naming it NAME for debugging purposes. */
+static void
+init_pool (struct pool *p, void *base, size_t page_cnt, const char *name)
+{
+ /* We'll put the pool's used_map at its base.
+ Calculate the space needed for the bitmap
+ and subtract it from the pool's size. */
+ size_t bm_pages = DIV_ROUND_UP (bitmap_buf_size (page_cnt), PGSIZE);
+ if (bm_pages > page_cnt)
+ PANIC ("Not enough memory in %s for bitmap.", name);
+ page_cnt -= bm_pages;
+
+ printf ("%zu pages available in %s.\n", page_cnt, name);
+
+ /* Initialize the pool. */
+ lock_init (&p->lock);
+ p->used_map = bitmap_create_in_buf (page_cnt, base, bm_pages * PGSIZE);
+ p->base = base + bm_pages * PGSIZE;
+}
+
+/* Returns true if PAGE was allocated from POOL,
+ false otherwise. */
+static bool
+page_from_pool (const struct pool *pool, void *page)
+{
+ size_t page_no = pg_no (page);
+ size_t start_page = pg_no (pool->base);
+ size_t end_page = start_page + bitmap_size (pool->used_map);
+
+ return page_no >= start_page && page_no < end_page;
+}
diff --git a/src/threads/palloc.h b/src/threads/palloc.h
new file mode 100644
index 0000000..2d41cf6
--- /dev/null
+++ b/src/threads/palloc.h
@@ -0,0 +1,23 @@
+#ifndef THREADS_PALLOC_H
+#define THREADS_PALLOC_H
+
+#include <stddef.h>
+
+/* How to allocate pages. */
+enum palloc_flags
+ {
+ PAL_ASSERT = 001, /* Panic on failure. */
+ PAL_ZERO = 002, /* Zero page contents. */
+ PAL_USER = 004 /* User page. */
+ };
+
+/* Maximum number of pages to put in user pool. */
+extern size_t user_page_limit;
+
+void palloc_init (void);
+void *palloc_get_page (enum palloc_flags);
+void *palloc_get_multiple (enum palloc_flags, size_t page_cnt);
+void palloc_free_page (void *);
+void palloc_free_multiple (void *, size_t page_cnt);
+
+#endif /* threads/palloc.h */
diff --git a/src/threads/pte.h b/src/threads/pte.h
new file mode 100644
index 0000000..1660727
--- /dev/null
+++ b/src/threads/pte.h
@@ -0,0 +1,107 @@
+#ifndef THREADS_PTE_H
+#define THREADS_PTE_H
+
+#include "threads/vaddr.h"
+
+/* Functions and macros for working with x86 hardware page
+ tables.
+
+ See vaddr.h for more generic functions and macros for virtual
+ addresses.
+
+ Virtual addresses are structured as follows:
+
+ 31 22 21 12 11 0
+ +----------------------+----------------------+----------------------+
+ | Page Directory Index | Page Table Index | Page Offset |
+ +----------------------+----------------------+----------------------+
+*/
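+
+/* For example (illustrative), the kernel virtual address 0xc0201234
+   has page directory index 0x300, page table index 0x201, and page
+   offset 0x234. */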
+
+/* Page table index (bits 12:21). */
+#define PTSHIFT PGBITS /* First page table bit. */
+#define PTBITS 10 /* Number of page table bits. */
+#define PTSPAN (1 << PTBITS << PGBITS) /* Bytes covered by a page table. */
+#define PTMASK BITMASK(PTSHIFT, PTBITS) /* Page table bits (12:21). */
+
+/* Page directory index (bits 22:31). */
+#define PDSHIFT (PTSHIFT + PTBITS) /* First page directory bit. */
+#define PDBITS 10 /* Number of page dir bits. */
+#define PDMASK BITMASK(PDSHIFT, PDBITS) /* Page directory bits (22:31). */
+
+/* Obtains page table index from a virtual address. */
+static inline unsigned pt_no (const void *va) {
+ return ((uintptr_t) va & PTMASK) >> PTSHIFT;
+}
+
+/* Obtains page directory index from a virtual address. */
+static inline uintptr_t pd_no (const void *va) {
+ return (uintptr_t) va >> PDSHIFT;
+}
+
+/* Page directory and page table entries.
+
+ For more information see the section on page tables in the
+ Pintos reference guide chapter, or [IA32-v3a] 3.7.6
+ "Page-Directory and Page-Table Entries".
+
+ PDEs and PTEs share a common format:
+
+ 31 12 11 0
+ +------------------------------------+------------------------+
+ | Physical Address | Flags |
+ +------------------------------------+------------------------+
+
+ In a PDE, the physical address points to a page table.
+ In a PTE, the physical address points to a data or code page.
+ The important flags are listed below.
+ When a PDE or PTE is not "present", the other flags are
+ ignored.
+ A PDE or PTE that is initialized to 0 will be interpreted as
+ "not present", which is just fine. */
+#define PTE_FLAGS 0x00000fff /* Flag bits. */
+#define PTE_ADDR 0xfffff000 /* Address bits. */
+#define PTE_AVL 0x00000e00 /* Bits available for OS use. */
+#define PTE_P 0x1 /* 1=present, 0=not present. */
+#define PTE_W 0x2 /* 1=read/write, 0=read-only. */
+#define PTE_U 0x4 /* 1=user/kernel, 0=kernel only. */
+#define PTE_A 0x20 /* 1=accessed, 0=not accessed. */
+#define PTE_D 0x40 /* 1=dirty, 0=not dirty (PTEs only). */
+
+/* Returns a PDE that points to page table PT. */
+static inline uint32_t pde_create (uint32_t *pt) {
+ ASSERT (pg_ofs (pt) == 0);
+ return vtop (pt) | PTE_U | PTE_P | PTE_W;
+}
+
+/* Returns a pointer to the page table that page directory entry
+   PDE, which must be "present", points to. */
+static inline uint32_t *pde_get_pt (uint32_t pde) {
+ ASSERT (pde & PTE_P);
+ return ptov (pde & PTE_ADDR);
+}
+
+/* Returns a PTE that points to PAGE.
+ The PTE's page is readable.
+ If WRITABLE is true then it will be writable as well.
+ The page will be usable only by ring 0 code (the kernel). */
+static inline uint32_t pte_create_kernel (void *page, bool writable) {
+ ASSERT (pg_ofs (page) == 0);
+ return vtop (page) | PTE_P | (writable ? PTE_W : 0);
+}
+
+/* Returns a PTE that points to PAGE.
+ The PTE's page is readable.
+ If WRITABLE is true then it will be writable as well.
+ The page will be usable by both user and kernel code. */
+static inline uint32_t pte_create_user (void *page, bool writable) {
+ return pte_create_kernel (page, writable) | PTE_U;
+}
+
+/* Returns a pointer to the page that page table entry PTE points
+ to. */
+static inline void *pte_get_page (uint32_t pte) {
+ return ptov (pte & PTE_ADDR);
+}
+
+#endif /* threads/pte.h */
+
diff --git a/src/threads/start.S b/src/threads/start.S
new file mode 100644
index 0000000..5a495c4
--- /dev/null
+++ b/src/threads/start.S
@@ -0,0 +1,16 @@
+#### The loader needs to have some way to know the kernel's entry
+#### point, that is, the address to which it should jump to start the
+#### kernel. We handle this by writing the linker script kernel.lds.S
+#### so that this module appears at the very beginning of the kernel
+#### image, and then using that as the entry point.
+
+.section .start
+
+.globl start
+.func start
+ # Call main.
+start: call main
+
+ # main() should not return, but if it does, spin.
+1: jmp 1b
+.endfunc
diff --git a/src/threads/switch.S b/src/threads/switch.S
new file mode 100644
index 0000000..244249e
--- /dev/null
+++ b/src/threads/switch.S
@@ -0,0 +1,65 @@
+#include "threads/switch.h"
+
+#### struct thread *switch_threads (struct thread *cur, struct thread *next);
+####
+#### Switches from CUR, which must be the running thread, to NEXT,
+#### which must also be running switch_threads(), returning CUR in
+#### NEXT's context.
+####
+#### This function works by assuming that the thread we're switching
+#### into is also running switch_threads(). Thus, all it has to do is
+#### preserve a few registers on the stack, then switch stacks and
+#### restore the registers. As part of switching stacks we record the
+#### current stack pointer in CUR's thread structure.
+
+.globl switch_threads
+.func switch_threads
+switch_threads:
+ # Save caller's register state.
+ #
+ # Note that the SVR4 ABI allows us to destroy %eax, %ecx, %edx,
+ # but requires us to preserve %ebx, %ebp, %esi, %edi. See
+ # [SysV-ABI-386] pages 3-11 and 3-12 for details.
+ #
+ # This stack frame must match the one set up by thread_create()
+ # in size.
+ pushl %ebx
+ pushl %ebp
+ pushl %esi
+ pushl %edi
+
+ # Get offsetof (struct thread, stack).
+.globl thread_stack_ofs
+ mov thread_stack_ofs, %edx
+
+ # Save current stack pointer to old thread's stack, if any.
+ movl SWITCH_CUR(%esp), %eax
+ movl %esp, (%eax,%edx,1)
+
+ # Restore stack pointer from new thread's stack.
+ movl SWITCH_NEXT(%esp), %ecx
+ movl (%ecx,%edx,1), %esp
+
+ # Restore caller's register state.
+ popl %edi
+ popl %esi
+ popl %ebp
+ popl %ebx
+ ret
+.endfunc
+
+.globl switch_entry
+.func switch_entry
+switch_entry:
+ # Discard switch_threads() arguments.
+ addl $8, %esp
+
+ # Call schedule_tail(prev).
+ pushl %eax
+.globl schedule_tail
+ call schedule_tail
+ addl $4, %esp
+
+ # Start thread proper.
+ ret
+.endfunc
diff --git a/src/threads/switch.h b/src/threads/switch.h
new file mode 100644
index 0000000..cc156b6
--- /dev/null
+++ b/src/threads/switch.h
@@ -0,0 +1,39 @@
+#ifndef THREADS_SWITCH_H
+#define THREADS_SWITCH_H
+
+#ifndef __ASSEMBLER__
+/* switch_thread()'s stack frame. */
+struct switch_threads_frame
+ {
+ uint32_t edi; /* 0: Saved %edi. */
+ uint32_t esi; /* 4: Saved %esi. */
+ uint32_t ebp; /* 8: Saved %ebp. */
+ uint32_t ebx; /* 12: Saved %ebx. */
+ void (*eip) (void); /* 16: Return address. */
+ struct thread *cur; /* 20: switch_threads()'s CUR argument. */
+ struct thread *next; /* 24: switch_threads()'s NEXT argument. */
+ };
+
+/* Switches from CUR, which must be the running thread, to NEXT,
+ which must also be running switch_threads(), returning CUR in
+ NEXT's context. */
+struct thread *switch_threads (struct thread *cur, struct thread *next);
+
+/* Stack frame for switch_entry(). */
+struct switch_entry_frame
+ {
+ void (*eip) (void);
+ };
+
+void switch_entry (void);
+
+/* Pops the CUR and NEXT arguments off the stack, for use in
+ initializing threads. */
+void switch_thunk (void);
+#endif
+
+/* Offsets used by switch.S. */
+#define SWITCH_CUR 20
+#define SWITCH_NEXT 24
+
+#endif /* threads/switch.h */
diff --git a/src/threads/synch.c b/src/threads/synch.c
new file mode 100644
index 0000000..317c68a
--- /dev/null
+++ b/src/threads/synch.c
@@ -0,0 +1,338 @@
+/* This file is derived from source code for the Nachos
+ instructional operating system. The Nachos copyright notice
+ is reproduced in full below. */
+
+/* Copyright (c) 1992-1996 The Regents of the University of California.
+ All rights reserved.
+
+ Permission to use, copy, modify, and distribute this software
+ and its documentation for any purpose, without fee, and
+ without written agreement is hereby granted, provided that the
+ above copyright notice and the following two paragraphs appear
+ in all copies of this software.
+
+ IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO
+ ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR
+ CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF THIS SOFTWARE
+ AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA
+ HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY
+ WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS"
+ BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
+ PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR
+ MODIFICATIONS.
+*/
+
+#include "threads/synch.h"
+#include <stdio.h>
+#include <string.h>
+#include "threads/interrupt.h"
+#include "threads/thread.h"
+
+/* Initializes semaphore SEMA to VALUE. A semaphore is a
+ nonnegative integer along with two atomic operators for
+ manipulating it:
+
+ - down or "P": wait for the value to become positive, then
+ decrement it.
+
+ - up or "V": increment the value (and wake up one waiting
+ thread, if any). */
+void
+sema_init (struct semaphore *sema, unsigned value)
+{
+ ASSERT (sema != NULL);
+
+ sema->value = value;
+ list_init (&sema->waiters);
+}
+
+/* Down or "P" operation on a semaphore. Waits for SEMA's value
+ to become positive and then atomically decrements it.
+
+ This function may sleep, so it must not be called within an
+ interrupt handler. This function may be called with
+ interrupts disabled, but if it sleeps then the next scheduled
+ thread will probably turn interrupts back on. */
+void
+sema_down (struct semaphore *sema)
+{
+ enum intr_level old_level;
+
+ ASSERT (sema != NULL);
+ ASSERT (!intr_context ());
+
+ old_level = intr_disable ();
+ while (sema->value == 0)
+ {
+ list_push_back (&sema->waiters, &thread_current ()->elem);
+ thread_block ();
+ }
+ sema->value--;
+ intr_set_level (old_level);
+}
+
+/* Down or "P" operation on a semaphore, but only if the
+ semaphore is not already 0. Returns true if the semaphore is
+ decremented, false otherwise.
+
+ This function may be called from an interrupt handler. */
+bool
+sema_try_down (struct semaphore *sema)
+{
+ enum intr_level old_level;
+ bool success;
+
+ ASSERT (sema != NULL);
+
+ old_level = intr_disable ();
+ if (sema->value > 0)
+ {
+ sema->value--;
+ success = true;
+ }
+ else
+ success = false;
+ intr_set_level (old_level);
+
+ return success;
+}
+
+/* Up or "V" operation on a semaphore. Increments SEMA's value
+ and wakes up one thread of those waiting for SEMA, if any.
+
+ This function may be called from an interrupt handler. */
+void
+sema_up (struct semaphore *sema)
+{
+ enum intr_level old_level;
+
+ ASSERT (sema != NULL);
+
+ old_level = intr_disable ();
+ if (!list_empty (&sema->waiters))
+ thread_unblock (list_entry (list_pop_front (&sema->waiters),
+ struct thread, elem));
+ sema->value++;
+ intr_set_level (old_level);
+}
+
+static void sema_test_helper (void *sema_);
+
+/* Self-test for semaphores that makes control "ping-pong"
+ between a pair of threads. Insert calls to printf() to see
+ what's going on. */
+void
+sema_self_test (void)
+{
+ struct semaphore sema[2];
+ int i;
+
+ printf ("Testing semaphores...");
+ sema_init (&sema[0], 0);
+ sema_init (&sema[1], 0);
+ thread_create ("sema-test", PRI_DEFAULT, sema_test_helper, &sema);
+ for (i = 0; i < 10; i++)
+ {
+ sema_up (&sema[0]);
+ sema_down (&sema[1]);
+ }
+ printf ("done.\n");
+}
+
+/* Thread function used by sema_self_test(). */
+static void
+sema_test_helper (void *sema_)
+{
+ struct semaphore *sema = sema_;
+ int i;
+
+ for (i = 0; i < 10; i++)
+ {
+ sema_down (&sema[0]);
+ sema_up (&sema[1]);
+ }
+}
+
+/* Initializes LOCK. A lock can be held by at most a single
+ thread at any given time. Our locks are not "recursive", that
+ is, it is an error for the thread currently holding a lock to
+ try to acquire that lock.
+
+ A lock is a specialization of a semaphore with an initial
+ value of 1. The difference between a lock and such a
+ semaphore is twofold. First, a semaphore can have a value
+ greater than 1, but a lock can only be owned by a single
+ thread at a time. Second, a semaphore does not have an owner,
+ meaning that one thread can "down" the semaphore and then
+ another one "up" it, but with a lock the same thread must both
+ acquire and release it. When these restrictions prove
+ onerous, it's a good sign that a semaphore should be used,
+ instead of a lock. */
+void
+lock_init (struct lock *lock)
+{
+ ASSERT (lock != NULL);
+
+ lock->holder = NULL;
+ sema_init (&lock->semaphore, 1);
+}
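+
+/* Usage sketch (illustrative; `counter' and `counter_lock' are
+   hypothetical, with counter_lock already lock_init'd):
+
+     lock_acquire (&counter_lock);
+     counter++;
+     lock_release (&counter_lock);
+*/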
+
+/* Acquires LOCK, sleeping until it becomes available if
+ necessary. The lock must not already be held by the current
+ thread.
+
+ This function may sleep, so it must not be called within an
+ interrupt handler. This function may be called with
+ interrupts disabled, but interrupts will be turned back on if
+ we need to sleep. */
+void
+lock_acquire (struct lock *lock)
+{
+ ASSERT (lock != NULL);
+ ASSERT (!intr_context ());
+ ASSERT (!lock_held_by_current_thread (lock));
+
+ sema_down (&lock->semaphore);
+ lock->holder = thread_current ();
+}
+
+/* Tries to acquire LOCK and returns true if successful or false
+ on failure. The lock must not already be held by the current
+ thread.
+
+ This function will not sleep, so it may be called within an
+ interrupt handler. */
+bool
+lock_try_acquire (struct lock *lock)
+{
+ bool success;
+
+ ASSERT (lock != NULL);
+ ASSERT (!lock_held_by_current_thread (lock));
+
+ success = sema_try_down (&lock->semaphore);
+ if (success)
+ lock->holder = thread_current ();
+ return success;
+}
+
+/* Releases LOCK, which must be owned by the current thread.
+
+ An interrupt handler cannot acquire a lock, so it does not
+ make sense to try to release a lock within an interrupt
+ handler. */
+void
+lock_release (struct lock *lock)
+{
+ ASSERT (lock != NULL);
+ ASSERT (lock_held_by_current_thread (lock));
+
+ lock->holder = NULL;
+ sema_up (&lock->semaphore);
+}
+
+/* Returns true if the current thread holds LOCK, false
+ otherwise. (Note that testing whether some other thread holds
+ a lock would be racy.) */
+bool
+lock_held_by_current_thread (const struct lock *lock)
+{
+ ASSERT (lock != NULL);
+
+ return lock->holder == thread_current ();
+}
+
+/* One semaphore in a list. */
+struct semaphore_elem
+ {
+ struct list_elem elem; /* List element. */
+ struct semaphore semaphore; /* This semaphore. */
+ };
+
+/* Initializes condition variable COND. A condition variable
+ allows one piece of code to signal a condition and cooperating
+ code to receive the signal and act upon it. */
+void
+cond_init (struct condition *cond)
+{
+ ASSERT (cond != NULL);
+
+ list_init (&cond->waiters);
+}
+
+/* Atomically releases LOCK and waits for COND to be signaled by
+ some other piece of code. After COND is signaled, LOCK is
+ reacquired before returning. LOCK must be held before calling
+ this function.
+
+ The monitor implemented by this function is "Mesa" style, not
+ "Hoare" style, that is, sending and receiving a signal are not
+ an atomic operation. Thus, typically the caller must recheck
+ the condition after the wait completes and, if necessary, wait
+ again.
+
+ A given condition variable is associated with only a single
+ lock, but one lock may be associated with any number of
+ condition variables. That is, there is a one-to-many mapping
+ from locks to condition variables.
+
+ This function may sleep, so it must not be called within an
+ interrupt handler. This function may be called with
+ interrupts disabled, but interrupts will be turned back on if
+ we need to sleep. */
+void
+cond_wait (struct condition *cond, struct lock *lock)
+{
+ struct semaphore_elem waiter;
+
+ ASSERT (cond != NULL);
+ ASSERT (lock != NULL);
+ ASSERT (!intr_context ());
+ ASSERT (lock_held_by_current_thread (lock));
+
+ sema_init (&waiter.semaphore, 0);
+ list_push_back (&cond->waiters, &waiter.elem);
+ lock_release (lock);
+ sema_down (&waiter.semaphore);
+ lock_acquire (lock);
+}
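+
+/* Usage sketch (illustrative; ready() is a hypothetical predicate
+   protected by `lock').  Because the monitor is Mesa style, waiters
+   must re-check the condition in a loop:
+
+     lock_acquire (&lock);
+     while (!ready ())
+       cond_wait (&cond, &lock);
+     ...act on the condition...
+     lock_release (&lock);
+*/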
+
+/* If any threads are waiting on COND (protected by LOCK), then
+ this function signals one of them to wake up from its wait.
+ LOCK must be held before calling this function.
+
+ An interrupt handler cannot acquire a lock, so it does not
+ make sense to try to signal a condition variable within an
+ interrupt handler. */
+void
+cond_signal (struct condition *cond, struct lock *lock UNUSED)
+{
+ ASSERT (cond != NULL);
+ ASSERT (lock != NULL);
+ ASSERT (!intr_context ());
+ ASSERT (lock_held_by_current_thread (lock));
+
+ if (!list_empty (&cond->waiters))
+ sema_up (&list_entry (list_pop_front (&cond->waiters),
+ struct semaphore_elem, elem)->semaphore);
+}
+
+/* Wakes up all threads, if any, waiting on COND (protected by
+ LOCK). LOCK must be held before calling this function.
+
+ An interrupt handler cannot acquire a lock, so it does not
+ make sense to try to signal a condition variable within an
+ interrupt handler. */
+void
+cond_broadcast (struct condition *cond, struct lock *lock)
+{
+ ASSERT (cond != NULL);
+ ASSERT (lock != NULL);
+
+ while (!list_empty (&cond->waiters))
+ cond_signal (cond, lock);
+}
diff --git a/src/threads/synch.h b/src/threads/synch.h
new file mode 100644
index 0000000..a19e88b
--- /dev/null
+++ b/src/threads/synch.h
@@ -0,0 +1,51 @@
+#ifndef THREADS_SYNCH_H
+#define THREADS_SYNCH_H
+
+#include <list.h>
+#include <stdbool.h>
+
+/* A counting semaphore. */
+struct semaphore
+ {
+ unsigned value; /* Current value. */
+ struct list waiters; /* List of waiting threads. */
+ };
+
+void sema_init (struct semaphore *, unsigned value);
+void sema_down (struct semaphore *);
+bool sema_try_down (struct semaphore *);
+void sema_up (struct semaphore *);
+void sema_self_test (void);
+
+/* Lock. */
+struct lock
+ {
+ struct thread *holder; /* Thread holding lock (for debugging). */
+ struct semaphore semaphore; /* Binary semaphore controlling access. */
+ };
+
+void lock_init (struct lock *);
+void lock_acquire (struct lock *);
+bool lock_try_acquire (struct lock *);
+void lock_release (struct lock *);
+bool lock_held_by_current_thread (const struct lock *);
+
+/* Condition variable. */
+struct condition
+ {
+ struct list waiters; /* List of waiting threads. */
+ };
+
+void cond_init (struct condition *);
+void cond_wait (struct condition *, struct lock *);
+void cond_signal (struct condition *, struct lock *);
+void cond_broadcast (struct condition *, struct lock *);
+
+/* Optimization barrier.
+
+ The compiler will not reorder operations across an
+ optimization barrier. See "Optimization Barriers" in the
+ reference guide for more information.*/
+#define barrier() asm volatile ("" : : : "memory")
+
+#endif /* threads/synch.h */
diff --git a/src/threads/synchlist.c b/src/threads/synchlist.c
new file mode 100644
index 0000000..cc1e570
--- /dev/null
+++ b/src/threads/synchlist.c
@@ -0,0 +1,98 @@
+// synchlist.c
+// Routines for synchronized access to a list.
+//
+// Implemented by surrounding the List abstraction
+// with synchronization routines.
+//
+// Implemented in "monitor"-style -- surround each procedure with a
+// lock acquire and release pair, using condition signal and wait for
+// synchronization.
+//
+// Copyright (c) 1992-1993 The Regents of the University of California.
+// All rights reserved. See copyright.h for copyright notice and limitation
+// of liability and disclaimer of warranty provisions.
+//
+// modified by Vlad Jahundovics for Pintos (translation from C++ to C)
+
+
+#include "copyright.h"
+#include "synchlist.h"
+#include "threads/malloc.h"
+
+//----------------------------------------------------------------------
+// SynchList::SynchList
+// Initialize the data structures needed for a
+// synchronized list, empty to start with.
+// Elements can now be added to the list.
+//----------------------------------------------------------------------
+
+void sl_init(struct SynchList *sl)
+{
+ list_init(&sl->sl_list);
+ lock_init(&sl->sl_lock);
+ cond_init(&sl->sl_empty);
+}
+
+
+//----------------------------------------------------------------------
+// SynchList::~SynchList
+// Remove and de-allocate all elements of the synchronized list
+//----------------------------------------------------------------------
+
+void sl_destroy(struct SynchList *sl)
+{
+ struct list_elem *e;
+ struct SL_element *sl_elem;
+ while(!list_empty(&sl->sl_list)){
+ e = list_pop_front(&sl->sl_list);
+ sl_elem = list_entry(e, struct SL_element, elem);
+ free(sl_elem);
+ }
+}
+
+
+//----------------------------------------------------------------------
+// SynchList::Append
+// Append an "item" to the end of the list. Wake up anyone
+// waiting for an element to be appended.
+//
+// "item" is the thing to put on the list, it can be a pointer to
+// anything.
+//----------------------------------------------------------------------
+
+void sl_append(struct SynchList *sl, void *item)
+{
+ lock_acquire(&sl->sl_lock); // enforce mutual exclusive access to the list
+ struct SL_element *sl_elem = malloc(sizeof(struct SL_element));
+ sl_elem->item = item;
+ list_push_back(&sl->sl_list, &sl_elem->elem);
+ cond_signal(&sl->sl_empty,&sl->sl_lock); // wake up a waiter, if any
+ lock_release(&sl->sl_lock);
+ return;
+}
+
+
+//----------------------------------------------------------------------
+// SynchList::Remove
+// Remove an "item" from the beginning of the list. Wait if
+// the list is empty.
+// Returns:
+// The removed item.
+//----------------------------------------------------------------------
+
+void *sl_remove(struct SynchList *sl)
+{
+ struct list_elem *e;
+ void *item;
+ lock_acquire(&sl->sl_lock); // enforce mutual exclusion
+ while(list_empty(&sl->sl_list)){
+ cond_wait(&sl->sl_empty, &sl->sl_lock); // wait until list isn't empty
+ }
+ e = list_pop_front(&sl->sl_list);
+ struct SL_element *sl_elem = list_entry(e, struct SL_element, elem);
+ item = sl_elem->item;
+ free(sl_elem);
+ lock_release(&sl->sl_lock);
+ return item;
+}
+
diff --git a/src/threads/synchlist.h b/src/threads/synchlist.h
new file mode 100644
index 0000000..ec248c0
--- /dev/null
+++ b/src/threads/synchlist.h
@@ -0,0 +1,39 @@
+// synchlist.h
+// Data structures for synchronized access to a list.
+//
+// Implemented by surrounding the List abstraction
+// with synchronization routines.
+//
+// Copyright (c) 1992-1993 The Regents of the University of California.
+// All rights reserved. See copyright.h for copyright notice and limitation
+// of liability and disclaimer of warranty provisions.
+//
+// modified by Vlad Jahundovics for Pintos (translation from C++ to C)
+
+
+#include "copyright.h"
+#include <list.h>
+#include "threads/synch.h"
+
+// The following structure defines a "synchronized list" -- a list for
+// which these constraints hold:
+// 1. Threads trying to remove an item from a list will
+// wait until the list has an element on it.
+// 2. One thread at a time can access the list data structures.
+
+struct SynchList {
+ struct list sl_list;
+ struct lock sl_lock;
+ struct condition sl_empty;
+};
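+
+// Usage sketch (illustrative; `sl' is a hypothetical list on which
+// sl_init() has already been called):
+//
+//   sl_append(&sl, item);          // producer: wakes a waiting consumer
+//   void *got = sl_remove(&sl);    // consumer: sleeps while list is empty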
+
+struct SL_element
+{
+ struct list_elem elem;
+ void *item;
+};
+
+void sl_init(struct SynchList *sl);
+void sl_destroy(struct SynchList *sl);
+void sl_append(struct SynchList *sl, void *item);
+void *sl_remove(struct SynchList *sl);
+
+#endif // THREADS_SYNCHLIST_H
diff --git a/src/threads/thread.c b/src/threads/thread.c
new file mode 100644
index 0000000..92d1aa8
--- /dev/null
+++ b/src/threads/thread.c
@@ -0,0 +1,553 @@
+#include "threads/thread.h"
+#include <debug.h>
+#include <stddef.h>
+#include <random.h>
+#include <stdio.h>
+#include <string.h>
+#include "threads/flags.h"
+#include "threads/interrupt.h"
+#include "threads/intr-stubs.h"
+#include "threads/palloc.h"
+#include "threads/switch.h"
+#include "threads/synch.h"
+#include "threads/vaddr.h"
+#ifdef USERPROG
+#include "userprog/process.h"
+#endif
+
+/* Random value for struct thread's `magic' member.
+ Used to detect stack overflow. See the big comment at the top
+ of thread.h for details. */
+#define THREAD_MAGIC 0xcd6abf4b
+
+/* List of processes in THREAD_READY state, that is, processes
+ that are ready to run but not actually running. */
+static struct list ready_list;
+
+/* Idle thread. */
+static struct thread *idle_thread;
+
+/* Initial thread, the thread running init.c:main(). */
+static struct thread *initial_thread;
+
+/* Lock used by allocate_tid(). */
+static struct lock tid_lock;
+
+/* Stack frame for kernel_thread(). */
+struct kernel_thread_frame
+ {
+ void *eip; /* Return address. */
+ thread_func *function; /* Function to call. */
+ void *aux; /* Auxiliary data for function. */
+ };
+
+/* Statistics. */
+static long long idle_ticks; /* # of timer ticks spent idle. */
+static long long kernel_ticks; /* # of timer ticks in kernel threads. */
+static long long user_ticks; /* # of timer ticks in user programs. */
+
+/* Scheduling. */
+#define TIME_SLICE 4 /* # of timer ticks to give each thread. */
+static unsigned thread_ticks; /* # of timer ticks since last yield. */
+
+/* If false (default), use round-robin scheduler.
+ If true, use multi-level feedback queue scheduler.
+ Controlled by kernel command-line option "-o mlfqs". */
+bool thread_mlfqs;
+
+static void kernel_thread (thread_func *, void *aux);
+
+static void idle (void *aux UNUSED);
+static struct thread *running_thread (void);
+static struct thread *next_thread_to_run (void);
+static void init_thread (struct thread *, const char *name, int priority);
+static bool is_thread (struct thread *) UNUSED;
+static void *alloc_frame (struct thread *, size_t size);
+static void schedule (void);
+void schedule_tail (struct thread *prev);
+static tid_t allocate_tid (void);
+
+/* Initializes the threading system by transforming the code
+ that's currently running into a thread. This can't work in
+ general and it is possible in this case only because loader.S
+ was careful to put the bottom of the stack at a page boundary.
+
+ Also initializes the run queue and the tid lock.
+
+ After calling this function, be sure to initialize the page
+ allocator before trying to create any threads with
+ thread_create().
+
+ It is not safe to call thread_current() until this function
+ finishes. */
+void
+thread_init (void)
+{
+ ASSERT (intr_get_level () == INTR_OFF);
+
+ lock_init (&tid_lock);
+ list_init (&ready_list);
+
+ /* Set up a thread structure for the running thread. */
+ initial_thread = running_thread ();
+ init_thread (initial_thread, "main", PRI_DEFAULT);
+ initial_thread->status = THREAD_RUNNING;
+ initial_thread->tid = allocate_tid ();
+}
+
+/* Starts preemptive thread scheduling by enabling interrupts.
+ Also creates the idle thread. */
+void
+thread_start (void)
+{
+ /* Create the idle thread. */
+ struct semaphore idle_started;
+ sema_init (&idle_started, 0);
+ thread_create ("idle", PRI_MIN, idle, &idle_started);
+
+ /* Start preemptive thread scheduling. */
+ intr_enable ();
+
+ /* Wait for the idle thread to initialize idle_thread. */
+ sema_down (&idle_started);
+}
+
+/* Called by the timer interrupt handler at each timer tick.
+ Thus, this function runs in an external interrupt context. */
+void
+thread_tick (void)
+{
+ struct thread *t = thread_current ();
+
+ /* Update statistics. */
+ if (t == idle_thread)
+ idle_ticks++;
+#ifdef USERPROG
+ else if (t->pagedir != NULL)
+ user_ticks++;
+#endif
+ else
+ kernel_ticks++;
+
+ /* Enforce preemption. */
+ if (++thread_ticks >= TIME_SLICE)
+ intr_yield_on_return ();
+}
+
+/* Prints thread statistics. */
+void
+thread_print_stats (void)
+{
+ printf ("Thread: %lld idle ticks, %lld kernel ticks, %lld user ticks\n",
+ idle_ticks, kernel_ticks, user_ticks);
+}
+
+/* Creates a new kernel thread named NAME with the given initial
+ PRIORITY, which executes FUNCTION passing AUX as the argument,
+ and adds it to the ready queue. Returns the thread identifier
+ for the new thread, or TID_ERROR if creation fails.
+
+ If thread_start() has been called, then the new thread may be
+ scheduled before thread_create() returns. It could even exit
+ before thread_create() returns. Contrariwise, the original
+ thread may run for any amount of time before the new thread is
+ scheduled. Use a semaphore or some other form of
+ synchronization if you need to ensure ordering.
+
+ The code provided sets the new thread's `priority' member to
+ PRIORITY, but no actual priority scheduling is implemented.
+ Priority scheduling is the goal of Problem 1-3. */
+tid_t
+thread_create (const char *name, int priority,
+ thread_func *function, void *aux)
+{
+ struct thread *t;
+ struct kernel_thread_frame *kf;
+ struct switch_entry_frame *ef;
+ struct switch_threads_frame *sf;
+ tid_t tid;
+
+ ASSERT (function != NULL);
+
+ /* Allocate thread. */
+ t = palloc_get_page (PAL_ZERO);
+ if (t == NULL)
+ return TID_ERROR;
+
+ /* Initialize thread. */
+ init_thread (t, name, priority);
+ tid = t->tid = allocate_tid ();
+
+ /* Stack frame for kernel_thread(). */
+ kf = alloc_frame (t, sizeof *kf);
+ kf->eip = NULL;
+ kf->function = function;
+ kf->aux = aux;
+
+ /* Stack frame for switch_entry(). */
+ ef = alloc_frame (t, sizeof *ef);
+ ef->eip = (void (*) (void)) kernel_thread;
+
+ /* Stack frame for switch_threads(). */
+ sf = alloc_frame (t, sizeof *sf);
+ sf->eip = switch_entry;
+
+ /* Add to run queue. */
+ thread_unblock (t);
+
+ return tid;
+}
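+
+/* Usage sketch (illustrative; hello() is a hypothetical thread
+   function):
+
+     static void
+     hello (void *aux UNUSED)
+     {
+       printf ("hello from %s\n", thread_name ());
+     }
+     ...
+     if (thread_create ("hello", PRI_DEFAULT, hello, NULL) == TID_ERROR)
+       PANIC ("couldn't create thread");
+*/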
+
+/* Puts the current thread to sleep. It will not be scheduled
+ again until awoken by thread_unblock().
+
+ This function must be called with interrupts turned off. It
+ is usually a better idea to use one of the synchronization
+ primitives in synch.h. */
+void
+thread_block (void)
+{
+ ASSERT (!intr_context ());
+ ASSERT (intr_get_level () == INTR_OFF);
+
+ thread_current ()->status = THREAD_BLOCKED;
+ schedule ();
+}
+
+/* Transitions a blocked thread T to the ready-to-run state.
+ This is an error if T is not blocked. (Use thread_yield() to
+ make the running thread ready.)
+
+ This function does not preempt the running thread. This can
+ be important: if the caller had disabled interrupts itself,
+ it may expect that it can atomically unblock a thread and
+ update other data. */
+void
+thread_unblock (struct thread *t)
+{
+ enum intr_level old_level;
+
+ ASSERT (is_thread (t));
+
+ old_level = intr_disable ();
+ ASSERT (t->status == THREAD_BLOCKED);
+ list_push_back (&ready_list, &t->elem);
+ t->status = THREAD_READY;
+ intr_set_level (old_level);
+}
+
+/* Returns the name of the running thread. */
+const char *
+thread_name (void)
+{
+ return thread_current ()->name;
+}
+
+/* Returns the running thread.
+ This is running_thread() plus a couple of sanity checks.
+ See the big comment at the top of thread.h for details. */
+struct thread *
+thread_current (void)
+{
+ struct thread *t = running_thread ();
+
+ /* Make sure T is really a thread.
+ If either of these assertions fire, then your thread may
+ have overflowed its stack. Each thread has less than 4 kB
+ of stack, so a few big automatic arrays or moderate
+ recursion can cause stack overflow. */
+ ASSERT (is_thread (t));
+ ASSERT (t->status == THREAD_RUNNING);
+
+ return t;
+}
+
+/* Returns the running thread's tid. */
+tid_t
+thread_tid (void)
+{
+ return thread_current ()->tid;
+}
+
+/* Deschedules the current thread and destroys it. Never
+ returns to the caller. */
+void
+thread_exit (void)
+{
+ ASSERT (!intr_context ());
+
+#ifdef USERPROG
+ process_exit ();
+#endif
+
+ /* Just set our status to dying and schedule another process.
+ We will be destroyed during the call to schedule_tail(). */
+ intr_disable ();
+ thread_current ()->status = THREAD_DYING;
+ schedule ();
+ NOT_REACHED ();
+}
+
+/* Yields the CPU. The current thread is not put to sleep and
+ may be scheduled again immediately at the scheduler's whim. */
+void
+thread_yield (void)
+{
+ struct thread *cur = thread_current ();
+ enum intr_level old_level;
+
+ ASSERT (!intr_context ());
+
+ old_level = intr_disable ();
+ if (cur != idle_thread)
+ list_push_back (&ready_list, &cur->elem);
+ cur->status = THREAD_READY;
+ schedule ();
+ intr_set_level (old_level);
+}
+
+/* Sets the current thread's priority to NEW_PRIORITY. */
+void
+thread_set_priority (int new_priority)
+{
+ thread_current ()->priority = new_priority;
+}
+
+/* Returns the current thread's priority. */
+int
+thread_get_priority (void)
+{
+ return thread_current ()->priority;
+}
+
+/* Sets the current thread's nice value to NICE. */
+void
+thread_set_nice (int nice UNUSED)
+{
+ /* Not yet implemented. */
+}
+
+/* Returns the current thread's nice value. */
+int
+thread_get_nice (void)
+{
+ /* Not yet implemented. */
+ return 0;
+}
+
+/* Returns 100 times the system load average. */
+int
+thread_get_load_avg (void)
+{
+ /* Not yet implemented. */
+ return 0;
+}
+
+/* Returns 100 times the current thread's recent_cpu value. */
+int
+thread_get_recent_cpu (void)
+{
+ /* Not yet implemented. */
+ return 0;
+}
+
+/* Idle thread. Executes when no other thread is ready to run.
+
+ The idle thread is initially put on the ready list by
+ thread_start(). It will be scheduled once initially, at which
+ point it initializes idle_thread, "up"s the semaphore passed
+ to it to enable thread_start() to continue, and immediately
+ blocks. After that, the idle thread never appears in the
+ ready list. It is returned by next_thread_to_run() as a
+ special case when the ready list is empty. */
+static void
+idle (void *idle_started_ UNUSED)
+{
+ struct semaphore *idle_started = idle_started_;
+ idle_thread = thread_current ();
+ sema_up (idle_started);
+
+ for (;;)
+ {
+ /* Let someone else run. */
+ intr_disable ();
+ thread_block ();
+
+ /* Re-enable interrupts and wait for the next one.
+
+ The `sti' instruction disables interrupts until the
+ completion of the next instruction, so these two
+ instructions are executed atomically. This atomicity is
+ important; otherwise, an interrupt could be handled
+ between re-enabling interrupts and waiting for the next
+ one to occur, wasting as much as one clock tick worth of
+ time.
+
+ See [IA32-v2a] "HLT", [IA32-v2b] "STI", and [IA32-v3a]
+ 7.11.1 "HLT Instruction". */
+ asm volatile ("sti; hlt" : : : "memory");
+ }
+}
+
+/* Function used as the basis for a kernel thread. */
+static void
+kernel_thread (thread_func *function, void *aux)
+{
+ ASSERT (function != NULL);
+
+ intr_enable (); /* The scheduler runs with interrupts off. */
+ function (aux); /* Execute the thread function. */
+ thread_exit (); /* If function() returns, kill the thread. */
+}
+
+/* Returns the running thread. */
+struct thread *
+running_thread (void)
+{
+ uint32_t *esp;
+
+ /* Copy the CPU's stack pointer into `esp', and then round that
+ down to the start of a page. Because `struct thread' is
+ always at the beginning of a page and the stack pointer is
+     somewhere in the middle, this locates the current thread. */
+ asm ("mov %%esp, %0" : "=g" (esp));
+ return pg_round_down (esp);
+}
+
+/* Returns true if T appears to point to a valid thread. */
+static bool
+is_thread (struct thread *t)
+{
+ return t != NULL && t->magic == THREAD_MAGIC;
+}
+
+/* Does basic initialization of T as a blocked thread named
+ NAME. */
+static void
+init_thread (struct thread *t, const char *name, int priority)
+{
+ ASSERT (t != NULL);
+ ASSERT (PRI_MIN <= priority && priority <= PRI_MAX);
+ ASSERT (name != NULL);
+
+ memset (t, 0, sizeof *t);
+ t->status = THREAD_BLOCKED;
+ strlcpy (t->name, name, sizeof t->name);
+ t->stack = (uint8_t *) t + PGSIZE;
+ t->priority = priority;
+ t->magic = THREAD_MAGIC;
+}
+
+/* Allocates a SIZE-byte frame at the top of thread T's stack and
+ returns a pointer to the frame's base. */
+static void *
+alloc_frame (struct thread *t, size_t size)
+{
+ /* Stack data is always allocated in word-size units. */
+ ASSERT (is_thread (t));
+ ASSERT (size % sizeof (uint32_t) == 0);
+
+ t->stack -= size;
+ return t->stack;
+}
+
+/* Chooses and returns the next thread to be scheduled. Should
+ return a thread from the run queue, unless the run queue is
+ empty. (If the running thread can continue running, then it
+ will be in the run queue.) If the run queue is empty, return
+ idle_thread. */
+static struct thread *
+next_thread_to_run (void)
+{
+ if (list_empty (&ready_list))
+ return idle_thread;
+ else
+ return list_entry (list_pop_front (&ready_list), struct thread, elem);
+}
+
+/* Completes a thread switch by activating the new thread's page
+ tables, and, if the previous thread is dying, destroying it.
+
+ At this function's invocation, we just switched from thread
+ PREV, the new thread is already running, and interrupts are
+ still disabled. This function is normally invoked by
+   schedule() as its final action before returning, but
+ the first time a thread is scheduled it is called by
+ switch_entry() (see switch.S).
+
+ It's not safe to call printf() until the thread switch is
+ complete. In practice that means that printf()s should be
+ added at the end of the function.
+
+   After this function and its caller return, the thread switch
+ is complete. */
+void
+schedule_tail (struct thread *prev)
+{
+ struct thread *cur = running_thread ();
+
+ ASSERT (intr_get_level () == INTR_OFF);
+
+ /* Mark us as running. */
+ cur->status = THREAD_RUNNING;
+
+ /* Start new time slice. */
+ thread_ticks = 0;
+
+#ifdef USERPROG
+ /* Activate the new address space. */
+ process_activate ();
+#endif
+
+ /* If the thread we switched from is dying, destroy its struct
+ thread. This must happen late so that thread_exit() doesn't
+ pull out the rug under itself. (We don't free
+ initial_thread because its memory was not obtained via
+ palloc().) */
+ if (prev != NULL && prev->status == THREAD_DYING && prev != initial_thread)
+ {
+ ASSERT (prev != cur);
+ palloc_free_page (prev);
+ }
+}
+
+/* Schedules a new process. At entry, interrupts must be off and
+ the running process's state must have been changed from
+ running to some other state. This function finds another
+ thread to run and switches to it.
+
+ It's not safe to call printf() until schedule_tail() has
+ completed. */
+static void
+schedule (void)
+{
+ struct thread *cur = running_thread ();
+ struct thread *next = next_thread_to_run ();
+ struct thread *prev = NULL;
+
+ ASSERT (intr_get_level () == INTR_OFF);
+ ASSERT (cur->status != THREAD_RUNNING);
+ ASSERT (is_thread (next));
+
+ if (cur != next)
+ prev = switch_threads (cur, next);
+ schedule_tail (prev);
+}
+
+/* Returns a tid to use for a new thread. */
+static tid_t
+allocate_tid (void)
+{
+ static tid_t next_tid = 1;
+ tid_t tid;
+
+ lock_acquire (&tid_lock);
+ tid = next_tid++;
+ lock_release (&tid_lock);
+
+ return tid;
+}
+
+/* Offset of `stack' member within `struct thread'.
+ Used by switch.S, which can't figure it out on its own. */
+uint32_t thread_stack_ofs = offsetof (struct thread, stack);
diff --git a/src/threads/thread.h b/src/threads/thread.h
new file mode 100644
index 0000000..0039560
--- /dev/null
+++ b/src/threads/thread.h
@@ -0,0 +1,136 @@
+#ifndef THREADS_THREAD_H
+#define THREADS_THREAD_H
+
+#include <debug.h>
+#include <list.h>
+#include <stdint.h>
+
+/* States in a thread's life cycle. */
+enum thread_status
+ {
+ THREAD_RUNNING, /* Running thread. */
+ THREAD_READY, /* Not running but ready to run. */
+ THREAD_BLOCKED, /* Waiting for an event to trigger. */
+ THREAD_DYING /* About to be destroyed. */
+ };
+
+/* Thread identifier type.
+ You can redefine this to whatever type you like. */
+typedef int tid_t;
+#define TID_ERROR ((tid_t) -1) /* Error value for tid_t. */
+
+/* Thread priorities. */
+#define PRI_MIN 0 /* Lowest priority. */
+#define PRI_DEFAULT 31 /* Default priority. */
+#define PRI_MAX 63 /* Highest priority. */
+
+/* A kernel thread or user process.
+
+ Each thread structure is stored in its own 4 kB page. The
+ thread structure itself sits at the very bottom of the page
+ (at offset 0). The rest of the page is reserved for the
+ thread's kernel stack, which grows downward from the top of
+ the page (at offset 4 kB). Here's an illustration:
+
+ 4 kB +---------------------------------+
+ | kernel stack |
+ | | |
+ | | |
+ | V |
+ | grows downward |
+ | |
+ | |
+ | |
+ | |
+ | |
+ | |
+ | |
+ | |
+ +---------------------------------+
+ | magic |
+ | : |
+ | : |
+ | name |
+ | status |
+ 0 kB +---------------------------------+
+
+ The upshot of this is twofold:
+
+ 1. First, `struct thread' must not be allowed to grow too
+ big. If it does, then there will not be enough room for
+ the kernel stack. Our base `struct thread' is only a
+ few bytes in size. It probably should stay well under 1
+ kB.
+
+ 2. Kernel stacks must not be allowed to grow too
+ large. If a stack overflows, it will corrupt the thread
+ state. Thus, kernel functions should not allocate large
+ structures or arrays as non-static local variables. Use
+ dynamic allocation with malloc() or palloc_get_page()
+ instead.
+
+ The first symptom of either of these problems will probably be
+ an assertion failure in thread_current(), which checks that
+ the `magic' member of the running thread's `struct thread' is
+ set to THREAD_MAGIC. Stack overflow will normally change this
+ value, triggering the assertion. */
+/* The `elem' member has a dual purpose. It can be an element in
+ the run queue (thread.c), or it can be an element in a
+ semaphore wait list (synch.c). It can be used these two ways
+ only because they are mutually exclusive: only a thread in the
+ ready state is on the run queue, whereas only a thread in the
+ blocked state is on a semaphore wait list. */
+struct thread
+ {
+ /* Owned by thread.c. */
+ tid_t tid; /* Thread identifier. */
+ enum thread_status status; /* Thread state. */
+ char name[16]; /* Name (for debugging purposes). */
+ uint8_t *stack; /* Saved stack pointer. */
+ int priority; /* Priority. */
+
+ /* Shared between thread.c and synch.c. */
+ struct list_elem elem; /* List element. */
+
+#ifdef USERPROG
+ /* Owned by userprog/process.c. */
+ uint32_t *pagedir; /* Page directory. */
+#endif
+
+ /* Owned by thread.c. */
+ unsigned magic; /* Detects stack overflow. */
+ };
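+
+/* A sketch of how the layout above is exploited to find the
+   running thread (the real code is running_thread() and
+   thread_current() in thread.c; THREAD_MAGIC is private to that
+   file):
+
+     uint32_t *esp;
+     asm ("mov %%esp, %0" : "=g" (esp));
+     struct thread *t = pg_round_down (esp);
+     ASSERT (t->magic == THREAD_MAGIC);
+
+   Rounding the stack pointer down to a page boundary lands on the
+   struct thread at the bottom of the page, and the magic check
+   catches stack overflow that has clobbered it. */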
+
+/* If false (default), use round-robin scheduler.
+ If true, use multi-level feedback queue scheduler.
+ Controlled by kernel command-line option "-o mlfqs". */
+extern bool thread_mlfqs;
+
+void thread_init (void);
+void thread_start (void);
+
+void thread_tick (void);
+void thread_print_stats (void);
+
+typedef void thread_func (void *aux);
+tid_t thread_create (const char *name, int priority, thread_func *, void *);
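+
+/* Example use (hypothetical code, for illustration only):
+
+     static void
+     greeter (void *aux UNUSED)
+     {
+       printf ("hello from %s\n", thread_name ());
+     }
+
+     ...
+     tid_t tid = thread_create ("greeter", PRI_DEFAULT, greeter, NULL);
+     if (tid == TID_ERROR)
+       PANIC ("thread_create failed");
+*/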
+
+void thread_block (void);
+void thread_unblock (struct thread *);
+
+struct thread *thread_current (void);
+tid_t thread_tid (void);
+const char *thread_name (void);
+
+void thread_exit (void) NO_RETURN;
+void thread_yield (void);
+
+int thread_get_priority (void);
+void thread_set_priority (int);
+
+int thread_get_nice (void);
+void thread_set_nice (int);
+int thread_get_recent_cpu (void);
+int thread_get_load_avg (void);
+
+#endif /* threads/thread.h */
diff --git a/src/threads/vaddr.h b/src/threads/vaddr.h
new file mode 100644
index 0000000..184c824
--- /dev/null
+++ b/src/threads/vaddr.h
@@ -0,0 +1,89 @@
+#ifndef THREADS_VADDR_H
+#define THREADS_VADDR_H
+
+#include <debug.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#include "threads/loader.h"
+
+/* Functions and macros for working with virtual addresses.
+
+ See pte.h for functions and macros specifically for x86
+ hardware page tables. */
+
+#define BITMASK(SHIFT, CNT) (((1ul << (CNT)) - 1) << (SHIFT))
+
+/* Page offset (bits 0:12). */
+#define PGSHIFT 0 /* Index of first offset bit. */
+#define PGBITS 12 /* Number of offset bits. */
+#define PGSIZE (1 << PGBITS) /* Bytes in a page. */
+#define PGMASK BITMASK(PGSHIFT, PGBITS) /* Page offset bits (0:12). */
+
+/* Offset within a page. */
+static inline unsigned pg_ofs (const void *va) {
+ return (uintptr_t) va & PGMASK;
+}
+
+/* Virtual page number. */
+static inline uintptr_t pg_no (const void *va) {
+ return (uintptr_t) va >> PGBITS;
+}
+
+/* Round up to nearest page boundary. */
+static inline void *pg_round_up (const void *va) {
+ return (void *) (((uintptr_t) va + PGSIZE - 1) & ~PGMASK);
+}
+
+/* Round down to nearest page boundary. */
+static inline void *pg_round_down (const void *va) {
+ return (void *) ((uintptr_t) va & ~PGMASK);
+}
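+
+/* Worked example: with PGBITS == 12, PGSIZE == 4096 and
+   PGMASK == 0xfff, so for va == (void *) 0x3214:
+
+     pg_ofs (va)        == 0x214
+     pg_no (va)         == 0x3
+     pg_round_down (va) == (void *) 0x3000
+     pg_round_up (va)   == (void *) 0x4000 */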
+
+/* Base address of the 1:1 physical-to-virtual mapping. Physical
+ memory is mapped starting at this virtual address. Thus,
+ physical address 0 is accessible at PHYS_BASE, physical
+ address 0x1234 at (uint8_t *) PHYS_BASE + 0x1234, and
+ so on.
+
+ This address also marks the end of user programs' address
+ space. Up to this point in memory, user programs are allowed
+ to map whatever they like. At this point and above, the
+ virtual address space belongs to the kernel. */
+#define PHYS_BASE ((void *) LOADER_PHYS_BASE)
+
+/* Returns true if VADDR is a user virtual address. */
+static inline bool
+is_user_vaddr (const void *vaddr)
+{
+ return vaddr < PHYS_BASE;
+}
+
+/* Returns true if VADDR is a kernel virtual address. */
+static inline bool
+is_kernel_vaddr (const void *vaddr)
+{
+ return vaddr >= PHYS_BASE;
+}
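+
+/* A typical use, sketched: a system call handler can reject a
+   user-supplied pointer before dereferencing it (`uaddr' here is
+   a hypothetical argument name):
+
+     if (uaddr == NULL || !is_user_vaddr (uaddr))
+       thread_exit ();
+
+   Note that this checks only the address range; whether the page
+   is actually mapped must be verified separately. */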
+
+/* Returns kernel virtual address at which physical address PADDR
+ is mapped. */
+static inline void *
+ptov (uintptr_t paddr)
+{
+ ASSERT ((void *) paddr < PHYS_BASE);
+
+ return (void *) (paddr + (uintptr_t) PHYS_BASE);
+}
+
+/* Returns physical address at which kernel virtual address VADDR
+ is mapped. */
+static inline uintptr_t
+vtop (const void *vaddr)
+{
+ ASSERT (is_kernel_vaddr (vaddr));
+
+ return (uintptr_t) vaddr - (uintptr_t) PHYS_BASE;
+}
+
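+/* For example, with LOADER_PHYS_BASE == 0xc0000000 (its value in
+   loader.h):
+
+     ptov (0x1234)              == (void *) 0xc0001234
+     vtop ((void *) 0xc0001234) == 0x1234
+
+   so vtop (ptov (P)) == P for any physical address P below
+   PHYS_BASE. */
+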
+#endif /* threads/vaddr.h */