<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en">
	<id>https://homeostasis.scs.carleton.ca/wiki/api.php?action=feedcontributions&amp;feedformat=atom&amp;user=Dan</id>
	<title>Soma-notes - User contributions [en]</title>
	<link rel="self" type="application/atom+xml" href="https://homeostasis.scs.carleton.ca/wiki/api.php?action=feedcontributions&amp;feedformat=atom&amp;user=Dan"/>
	<link rel="alternate" type="text/html" href="https://homeostasis.scs.carleton.ca/wiki/index.php/Special:Contributions/Dan"/>
	<updated>2026-05-12T18:48:49Z</updated>
	<subtitle>User contributions</subtitle>
	<generator>MediaWiki 1.42.1</generator>
	<entry>
		<id>https://homeostasis.scs.carleton.ca/wiki/index.php?title=Proposal_Meeting_Schedule&amp;diff=1544</id>
		<title>Proposal Meeting Schedule</title>
		<link rel="alternate" type="text/html" href="https://homeostasis.scs.carleton.ca/wiki/index.php?title=Proposal_Meeting_Schedule&amp;diff=1544"/>
		<updated>2007-10-26T04:41:01Z</updated>

		<summary type="html">&lt;p&gt;Dan: /* Wednesday, Nov. 7th */&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;==Tuesday, Oct. 30th==&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
| 10:00 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:10 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:20 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:30 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:40 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:50 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 2:30 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 2:40 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 2:50 PM&lt;br /&gt;
|&lt;br /&gt;
|-&lt;br /&gt;
| 3:00 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 3:10 PM&lt;br /&gt;
|&lt;br /&gt;
|-&lt;br /&gt;
| 3:20 PM&lt;br /&gt;
|&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
==Wednesday, Oct. 31st==&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
| 10:30 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:40 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:50 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 11:00 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 11:10 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 11:20 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 11:30 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 11:40 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 11:50 AM&lt;br /&gt;
|&lt;br /&gt;
|-&lt;br /&gt;
| 12:00 PM&lt;br /&gt;
| Neil Dickson&lt;br /&gt;
|-&lt;br /&gt;
| 12:10 PM&lt;br /&gt;
|&lt;br /&gt;
|-&lt;br /&gt;
| 12:20 PM&lt;br /&gt;
| Richard Gould&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
==Thursday, Nov. 1st==&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
| 1:30 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 1:40 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 1:50 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 2:00 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 2:10 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 2:20 PM&lt;br /&gt;
| &lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
==Tuesday, Nov. 6th==&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
| 10:00 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:10 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:20 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:30 AM&lt;br /&gt;
| David Tremayne&lt;br /&gt;
|-&lt;br /&gt;
| 10:40 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:50 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 2:30 PM&lt;br /&gt;
| Maria Krol&lt;br /&gt;
|-&lt;br /&gt;
| 2:40 PM&lt;br /&gt;
| Adam Becevello&lt;br /&gt;
|-&lt;br /&gt;
| 2:50 PM&lt;br /&gt;
| Jeff Snell&lt;br /&gt;
|-&lt;br /&gt;
| 3:00 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 3:10 PM&lt;br /&gt;
|&lt;br /&gt;
|-&lt;br /&gt;
| 3:20 PM&lt;br /&gt;
|&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
==Wednesday, Nov. 7th==&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
| 10:30 AM&lt;br /&gt;
| Kenneth Chan&lt;br /&gt;
|-&lt;br /&gt;
| 10:40 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:50 AM&lt;br /&gt;
| Adrian Batos Parac&lt;br /&gt;
|-&lt;br /&gt;
| 11:00 AM&lt;br /&gt;
| Dan Edmunds&lt;br /&gt;
|-&lt;br /&gt;
| 11:10 AM&lt;br /&gt;
| Adam McNamara&lt;br /&gt;
|-&lt;br /&gt;
| 11:20 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 11:30 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 11:40 AM&lt;br /&gt;
| Geoffrey Johnson&lt;br /&gt;
|-&lt;br /&gt;
| 11:50 AM&lt;br /&gt;
|&lt;br /&gt;
|-&lt;br /&gt;
| 12:00 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 12:10 PM&lt;br /&gt;
|&lt;br /&gt;
|-&lt;br /&gt;
| 12:20 PM&lt;br /&gt;
| Roy Hooper&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
==Thursday, Nov. 8th==&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
| 1:30 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 1:40 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 1:50 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 2:00 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 2:10 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 2:20 PM&lt;br /&gt;
| &lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Dan</name></author>
	</entry>
	<entry>
		<id>https://homeostasis.scs.carleton.ca/wiki/index.php?title=Proposal_Meeting_Schedule&amp;diff=1543</id>
		<title>Proposal Meeting Schedule</title>
		<link rel="alternate" type="text/html" href="https://homeostasis.scs.carleton.ca/wiki/index.php?title=Proposal_Meeting_Schedule&amp;diff=1543"/>
		<updated>2007-10-26T04:33:10Z</updated>

		<summary type="html">&lt;p&gt;Dan: /* Wednesday, Nov. 7th */&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;==Tuesday, Oct. 30th==&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
| 10:00 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:10 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:20 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:30 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:40 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:50 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 2:30 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 2:40 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 2:50 PM&lt;br /&gt;
|&lt;br /&gt;
|-&lt;br /&gt;
| 3:00 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 3:10 PM&lt;br /&gt;
|&lt;br /&gt;
|-&lt;br /&gt;
| 3:20 PM&lt;br /&gt;
|&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
==Wednesday, Oct. 31st==&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
| 10:30 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:40 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:50 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 11:00 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 11:10 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 11:20 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 11:30 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 11:40 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 11:50 AM&lt;br /&gt;
|&lt;br /&gt;
|-&lt;br /&gt;
| 12:00 PM&lt;br /&gt;
| Neil Dickson&lt;br /&gt;
|-&lt;br /&gt;
| 12:10 PM&lt;br /&gt;
|&lt;br /&gt;
|-&lt;br /&gt;
| 12:20 PM&lt;br /&gt;
| Richard Gould&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
==Thursday, Nov. 1st==&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
| 1:30 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 1:40 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 1:50 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 2:00 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 2:10 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 2:20 PM&lt;br /&gt;
| &lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
==Tuesday, Nov. 6th==&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
| 10:00 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:10 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:20 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:30 AM&lt;br /&gt;
| David Tremayne&lt;br /&gt;
|-&lt;br /&gt;
| 10:40 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:50 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 2:30 PM&lt;br /&gt;
| Maria Krol&lt;br /&gt;
|-&lt;br /&gt;
| 2:40 PM&lt;br /&gt;
| Adam Becevello&lt;br /&gt;
|-&lt;br /&gt;
| 2:50 PM&lt;br /&gt;
| Jeff Snell&lt;br /&gt;
|-&lt;br /&gt;
| 3:00 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 3:10 PM&lt;br /&gt;
|&lt;br /&gt;
|-&lt;br /&gt;
| 3:20 PM&lt;br /&gt;
|&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
==Wednesday, Nov. 7th==&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
| 10:30 AM&lt;br /&gt;
| Kenneth Chan&lt;br /&gt;
|-&lt;br /&gt;
| 10:40 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 10:50 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 11:00 AM&lt;br /&gt;
| Dan Edmunds&lt;br /&gt;
|-&lt;br /&gt;
| 11:10 AM&lt;br /&gt;
| Adam McNamara&lt;br /&gt;
|-&lt;br /&gt;
| 11:20 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 11:30 AM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 11:40 AM&lt;br /&gt;
| Geoffrey Johnson&lt;br /&gt;
|-&lt;br /&gt;
| 11:50 AM&lt;br /&gt;
|&lt;br /&gt;
|-&lt;br /&gt;
| 12:00 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 12:10 PM&lt;br /&gt;
|&lt;br /&gt;
|-&lt;br /&gt;
| 12:20 PM&lt;br /&gt;
| Roy Hooper&lt;br /&gt;
|}&lt;br /&gt;
&lt;br /&gt;
==Thursday, Nov. 8th==&lt;br /&gt;
&lt;br /&gt;
{| class=&amp;quot;wikitable&amp;quot;&lt;br /&gt;
| 1:30 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 1:40 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 1:50 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 2:00 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 2:10 PM&lt;br /&gt;
| &lt;br /&gt;
|-&lt;br /&gt;
| 2:20 PM&lt;br /&gt;
| &lt;br /&gt;
|}&lt;/div&gt;</summary>
		<author><name>Dan</name></author>
	</entry>
	<entry>
		<id>https://homeostasis.scs.carleton.ca/wiki/index.php?title=High-level_Synchronization_and_IPC&amp;diff=1524</id>
		<title>High-level Synchronization and IPC</title>
		<link rel="alternate" type="text/html" href="https://homeostasis.scs.carleton.ca/wiki/index.php?title=High-level_Synchronization_and_IPC&amp;diff=1524"/>
		<updated>2007-10-21T23:30:12Z</updated>

		<summary type="html">&lt;p&gt;Dan: /* Limitations of Semaphores */  fix small formating problem&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;==Dining Philosophers Problem==&lt;br /&gt;
&lt;br /&gt;
[[Image:DiningPhilosophers.jpg]]&lt;br /&gt;
&lt;br /&gt;
* Thought experiment used to understand synchronization primitives&lt;br /&gt;
* Five philosophers are dining at a frugal Chinese restaurant where they are only provided with one chopstick each. They need two chopsticks to pick up the noodles in order to eat.&lt;br /&gt;
** Whenever they are hungry they pick up two chopsticks to eat.&lt;br /&gt;
** When they are done eating they put down the chopsticks.&lt;br /&gt;
** The philosophers do not talk to each other about eating, only high-minded ideas.&lt;br /&gt;
* Have to define a strategy to make sure that no philosopher starves to death.&lt;br /&gt;
* We can think of each chopstick as a semaphore.&lt;br /&gt;
&lt;br /&gt;
===Problems===&lt;br /&gt;
* &#039;&#039;&#039;Starvation&#039;&#039;&#039;&lt;br /&gt;
** One (or more) philosopher is never able to get two chopsticks and dies.&lt;br /&gt;
* &#039;&#039;&#039;Deadlock&#039;&#039;&#039;&lt;br /&gt;
** All philosophers have one chopstick and are waiting for another philosopher to put one down and they all starve to death.&lt;br /&gt;
&lt;br /&gt;
===Possible Solutions===&lt;br /&gt;
* Could use a scheme involving timeouts, but we want a &#039;&#039;PERFECT&#039;&#039; solution&lt;br /&gt;
* Could use synchronization; philosophers either grab one chopstick or none, this illustrates &#039;&#039;&#039;AND synchronization&#039;&#039;&#039;&lt;br /&gt;
&lt;br /&gt;
How could AND synchronization be implemented?&lt;br /&gt;
* Could turn multiple semaphores into one by abstracting the semaphores into 1 class.&lt;br /&gt;
* In general this is done by encapsulating the resources and using another process/thread/program to hand them out&lt;br /&gt;
&lt;br /&gt;
==Limitations of Semaphores==&lt;br /&gt;
&lt;br /&gt;
Semaphores are hard to use...&lt;br /&gt;
* If the programmer forgets to grab the semaphore this can lead to corruption.&lt;br /&gt;
* If the programmer never releases the semaphore this can lead to the program locking.&lt;br /&gt;
&lt;br /&gt;
The underlying problem is that the down and up need to be matched, with non-standard exits this can be difficult.&lt;br /&gt;
&lt;br /&gt;
But in C it&#039;s all you&#039;ve got...&lt;br /&gt;
* Solution: write a program to check the code (eg: Stanford Checker)&lt;br /&gt;
** Problem: not reliable, can produce many false positives&lt;br /&gt;
&lt;br /&gt;
How about building synchronization into the language as a base level construct?&lt;br /&gt;
* Java&#039;s &#039;&#039;synchronized&#039;&#039;&lt;br /&gt;
** Hides semaphores from developer&lt;br /&gt;
** Developer still has to decide where to synchronize&lt;br /&gt;
** Don&#039;t want to  synchronize all your code, this leads to single threaded behaviour&lt;br /&gt;
&lt;br /&gt;
General  idea: Monitor Object&lt;br /&gt;
* An object instance where only one thread can be executing certain pieces of code at once.&lt;br /&gt;
* Provides exclusive access to variables&lt;br /&gt;
&lt;br /&gt;
From review lecture Oct 17th:&lt;br /&gt;
&lt;br /&gt;
A monitor is a language construct for doing mutual exclusion.  In Java this is known as synchronized.&lt;br /&gt;
&lt;br /&gt;
==Events and Message Passing==&lt;br /&gt;
&lt;br /&gt;
[[image:Structs.jpg]]&lt;br /&gt;
&lt;br /&gt;
===Message Passing===&lt;br /&gt;
* Uses buffers&lt;br /&gt;
* Message passing is used in large contexts&lt;br /&gt;
&lt;br /&gt;
===Events===&lt;br /&gt;
* Message queue&lt;br /&gt;
* When do they occur?&lt;br /&gt;
** User does something&lt;br /&gt;
** Device does something&lt;br /&gt;
* Synchronization can be based around waiting for an event to occur.&lt;br /&gt;
&lt;br /&gt;
When an event occurs:&lt;br /&gt;
* It has to be stored (in a buffer)&lt;br /&gt;
* It might not be processed right away&lt;br /&gt;
** What if the event is no longer relevant?&lt;br /&gt;
*** Could implement events with timeouts&lt;br /&gt;
** What if no one&#039;s listening for the event?&lt;br /&gt;
*** Could drop the event&lt;br /&gt;
** The challenge is determining which events should be dropped and which should be queued.&lt;br /&gt;
*** In the case of a file open if the event is dropped there will be a leak&lt;br /&gt;
*** In the case of mouse movement when no application is listening we want to drop it so that if an application starts listening to mouse events the mouse doesn&#039;t go crazy&lt;br /&gt;
&lt;br /&gt;
==Message passing in a WAN==&lt;br /&gt;
&lt;br /&gt;
Message passing in a WAN is unreliable. Packets can be dropped.&lt;br /&gt;
&lt;br /&gt;
In some cases it is acceptable to lose the data&lt;br /&gt;
* Streaming video&lt;br /&gt;
* VOIP&lt;br /&gt;
* Cell phones&lt;br /&gt;
&lt;br /&gt;
In others we need a reliable transmission&lt;br /&gt;
* Downloading content&lt;br /&gt;
&lt;br /&gt;
===TCP/IP===&lt;br /&gt;
* A mechanism to resend dropped packets&lt;br /&gt;
* If we build on top of TCP/IP we can assume reliable transmission&lt;br /&gt;
&lt;br /&gt;
===NAT (Network Address Translation)===&lt;br /&gt;
* Used in local networks, to the outside world the whole network appears as a single address, NAT devices remember outgoing requests to route the response to the correct party.&lt;br /&gt;
* Firewalls will automatically drop messages that do not have a corresponding outgoing request&lt;br /&gt;
** Firewalls will often only let certain types of traffic through, often only TCP/IP&lt;br /&gt;
*** Application developers may want to use UDP but may be forced to use TCP/IP to get through the firewall&lt;/div&gt;</summary>
		<author><name>Dan</name></author>
	</entry>
	<entry>
		<id>https://homeostasis.scs.carleton.ca/wiki/index.php?title=Computer_Organization&amp;diff=1522</id>
		<title>Computer Organization</title>
		<link rel="alternate" type="text/html" href="https://homeostasis.scs.carleton.ca/wiki/index.php?title=Computer_Organization&amp;diff=1522"/>
		<updated>2007-10-21T17:09:47Z</updated>

		<summary type="html">&lt;p&gt;Dan: /* SIMD Processors */  fix a small mistake, wrote &amp;quot;introduction&amp;quot; instead of instruction&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;==Computer Organization==&lt;br /&gt;
&lt;br /&gt;
===Introduction===&lt;br /&gt;
&lt;br /&gt;
* Information has been posted on the [http://homeostasis.scs.carleton.ca/os/index.php/Main_Page class website] regarding running a Linux distribution on your own home machine (or a virtual desktop environment).  Students are encouraged to familiarize themselves with the linux/unix environment as it is far less restricted and more robust than Windows/OSX.  &lt;br /&gt;
* Our research paper (ie. Operating Systems) should be well on its way.  Students should have key words and topics narrowed down.    &lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
===Stored Program Computer===&lt;br /&gt;
&lt;br /&gt;
The concept of a stored program computer began in the early 1800s with Charles Babbage and his &amp;quot;&#039;&#039;Difference Engine&#039;&#039;&amp;quot;.  Since then it has evolved into what we now consider &#039;&#039;&#039;Von Neumann Architecture&#039;&#039;&#039;.  Although Von Neumann was only a piece of the puzzle of modern day computer architecture, we still refer to the architecture as being named after him.  In more recent times, the overall Von Neumann model has changed, but conceptually, it is still the same.  Several different components talk to each other (via buses) all at the same time - this scenario can be very complicated at times. &lt;br /&gt;
&lt;br /&gt;
[[Image:Neumann.jpg]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
===Central Processing Unit===&lt;br /&gt;
&lt;br /&gt;
The &#039;&#039;&#039;central processing unit&#039;&#039;&#039; (CPU) is made up of an Arithmetic Logic Unit (ALU) and Control Unit (CU).  &lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
====Arithmetic Logic Unit====&lt;br /&gt;
&lt;br /&gt;
The &#039;&#039;&#039;ALU&#039;&#039;&#039; can perform all arithmetic (addition, subtraction, multiplication, division) and logical (AND, OR, NOT ... ) calculations at a very rapid rate. &lt;br /&gt;
 &lt;br /&gt;
&lt;br /&gt;
====Control Unit====&lt;br /&gt;
&lt;br /&gt;
The &#039;&#039;&#039;CU&#039;&#039;&#039; is made up of several components and duties :&lt;br /&gt;
&lt;br /&gt;
* &#039;&#039;Program counter&#039;&#039; or PC is the address of the currently executing instruction.  &lt;br /&gt;
* &#039;&#039;Status word&#039;&#039; is used to store information regarding the current state of the control unit.  Information such as overflows and execution status can be stored.  A word is typically what the processor natively deals with in terms of data size.  Most modern processors are 32 or 64 - bits.  That is, their registers store data segments that are 32 or 64 bits wide - which we collectively call a word.  The size of the word will vary from CPU to CPU.  &lt;br /&gt;
* Responsible for the &#039;&#039;fetch-decode cycle&#039;&#039; which is an algorithm that grabs information from main memory, reads it, decodes it and then hands it off to the specific part of the processor to handle the instruction (oftentimes involving the ALU).&lt;br /&gt;
&lt;br /&gt;
The above description of the control unit is very simplified.  It is actually a reflection of a CPU in the early to mid 1980s.  For instance, &#039;&#039;&#039;Moore&#039;s Law&#039;&#039;&#039; states that every 18 months, the number of transistors that sit on a CPU chip will double.  This &amp;quot;law&amp;quot; was introduced in the 1960s and currently still holds true.  &#039;&#039;&#039;Why?&#039;&#039;&#039;  Mainly because the industry targets themselves towards this figure.  Given &#039;&#039;&#039;Moore&#039;s Law&#039;&#039;&#039;, you can imagine how much CPU architecture has changed since the 1980s.  &lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
====Transistors====&lt;br /&gt;
&lt;br /&gt;
With all of these extra transistors, what can we do with all of them?  One (or both) of two things :&lt;br /&gt;
&lt;br /&gt;
=====Integration=====&lt;br /&gt;
&lt;br /&gt;
&#039;&#039;&#039;Integration&#039;&#039;&#039; refers to more and more components getting packed into the CPU.  Devices such as :&lt;br /&gt;
&lt;br /&gt;
* &#039;&#039;Memory Controller&#039;&#039; (circuitry needed to drive the RAM)&lt;br /&gt;
* &#039;&#039;Math Co-Processor&#039;&#039; (handles floating point values)&lt;br /&gt;
* &#039;&#039;Memory&#039;&#039; Management Unit (MMU - used for virtualization)&lt;br /&gt;
* &#039;&#039;L1 &amp;amp; L2 Cache&#039;&#039; (used for fast memory access when we don&#039;t want to go all the way to main memory.  L1 cache is small but fast.  L2 cache is large but slower).&lt;br /&gt;
* &#039;&#039;Systems on a Chip&#039;&#039; : systems that have all transistors incorporated into a single chip.  These currently exist, but are fairly slow in regards to overall performance.  &lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
=====Duplication=====&lt;br /&gt;
&lt;br /&gt;
&#039;&#039;&#039;Why have one ALU when we can have 10?&#039;&#039;&#039; Modern processors use this technology quite a bit.  Certain components can only be duplicated so much.  &#039;&#039;&#039;Why?&#039;&#039;&#039;  Aside from hardware constraints, we also have bottlenecks.  For instance, a CPU has only one PC (program counter).  What is the point of having several ALUs if the program counter can not keep up?&lt;br /&gt;
&lt;br /&gt;
The most common forms of &#039;&#039;&#039;duplication&#039;&#039;&#039; include :&lt;br /&gt;
&lt;br /&gt;
* &#039;&#039;Pipelining&#039;&#039; : In pipelined CPUs a specific function unit of a CPU is partitioned into K smaller parts, called &#039;&#039;&#039;pipelined stages&#039;&#039;&#039;.  When an operation is to be executed, it goes through the K stages of execution.  The stages can execute independently and in parallel because they are separate units of hardware.  The operation can be broken up into pieces where each stage can work on a particular piece so no part of the CPU is left idle.  What happens if two instructions need the same piece of hardware? The CPU can guess which way the instruction will go afterwards and continue on its way.  If it later learns that the path it took was incorrect, it can roll back its changes and carry on with the correct path.  If the guesses are constantly wrong, the system is slowed down to a crawl.  This entire process of &#039;guessing&#039; is known as &#039;&#039;&#039;speculative execution&#039;&#039;&#039;.  &lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[Image:Pipeline.jpg]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
* &#039;&#039;Thread Level Parallelism&#039;&#039; : if we can&#039;t get a single instruction stream parallelized, we can just have several separate threads.  This means we can share all of the address space information such as cache because the threads will be running in the same process. &lt;br /&gt;
&lt;br /&gt;
* &#039;&#039;Multi core&#039;&#039; : the most current technology in practice.  After several years of packing CPUs with more and more features, bottlenecks began to arise. &#039;&#039;&#039;Solution?&#039;&#039;&#039;  Duplicate the CPU!  As the industry currently shows, more and more computers are being released with &#039;&#039;&#039;dual&#039;&#039;&#039; or even &#039;&#039;&#039;quad core&#039;&#039;&#039; processors.  What are some bottlenecks that can arise with &#039;&#039;&#039;multi core processors&#039;&#039;&#039;?  The bus is one obvious answer.  If we increase the amount of information the buses can handle, we can accommodate multiple CPUs.  &lt;br /&gt;
&lt;br /&gt;
* &#039;&#039;CPU cache&#039;&#039; : Why not get rid of RAM altogether and simply put all of it on the CPU as L1 or L2 cache?  Cache memory is known as &#039;&#039;&#039;static memory&#039;&#039;&#039;, whereas RAM is known as &#039;&#039;&#039;dynamic memory&#039;&#039;&#039; or &#039;&#039;&#039;DRAM&#039;&#039;&#039;.  DRAM can store information using about 1/4 of the amount of transistors as static memory.  This is why there is only around 4MB of CPU cache available in most modern day processors.  What RAM lacks in speed it makes up in size and parallelism.  &lt;br /&gt;
&lt;br /&gt;
The book &amp;quot; [http://books.google.ca/books?id=R7Frpn3g9AEC&amp;amp;dq=&amp;amp;pg=PP1&amp;amp;ots=f0kVd8_wKX&amp;amp;sig=66x6OTPMjfzJneY-mOHyQJOC1iM&amp;amp;prev=http://www.google.ca/search%3Fhl%3Den%26client%3Dfirefox-a%26rls%3Dorg.mozilla:en-US:official%26hs%3DdDa%26sa%3DX%26oi%3Dspell%26resnum%3D0%26ct%3Dresult%26cd%3D1%26q%3DComputer%2BArchitecture%2BHennessy%2BPAtterson%26spell%3D1&amp;amp;sa=X&amp;amp;oi=print&amp;amp;ct=title#PPP1,M1 Computer Architecture : A Quantitative Approach] &amp;quot;  by Hennessy and Patterson is an excellent book on CPU Architecture.&lt;br /&gt;
&lt;br /&gt;
What does all of this have to do with Operating Systems?  If the OS does not understand resources (underlying hardware), then the system is doomed.  We have to design the OS around the architecture (especially important with multi-core systems).  &lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
===Latency &amp;amp; Bandwidth===&lt;br /&gt;
&lt;br /&gt;
Imagine the following situation : We have two facilities, A &amp;amp; B.  A very large amount of information has to be transferred between the locations.  We can do it in one of two ways : &lt;br /&gt;
&lt;br /&gt;
* Load up a semi-truck full of hard drives that are full of the information needed.&lt;br /&gt;
* Connect the locations together via fiber optical cable to transfer the information.&lt;br /&gt;
&lt;br /&gt;
&#039;&#039;&#039;Q&#039;&#039;&#039; :  What method will allow the most information to be transported? &lt;br /&gt;
&lt;br /&gt;
&#039;&#039;&#039;A&#039;&#039;&#039; :  Obviously the truck full of hard drives, especially with modern day drives being capable of storing up to a Terabyte of information.  The amount of information to be transferred is known as &#039;&#039;&#039;bandwidth&#039;&#039;&#039;.  &lt;br /&gt;
&lt;br /&gt;
&#039;&#039;&#039;Q&#039;&#039;&#039; :  What is the quickest way to get one single bit of information from location B to location A?&lt;br /&gt;
&lt;br /&gt;
&#039;&#039;&#039;A&#039;&#039;&#039; :  This time, the answer is the fiber optic cable.  How quickly information is transferred is known as &#039;&#039;&#039;latency&#039;&#039;&#039;.  &lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
[[Image:Hds.jpg]]&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
The above situation is also apparent when dealing with RAM.  RAM is known as high-latency, high bandwidth.  Moving large amounts of data to the CPU can sometimes be sped up using the cache.  This issue is a very commonly studied topic in the world of RAM - CPU connections.  &lt;br /&gt;
&lt;br /&gt;
Other devices and their latency :&lt;br /&gt;
&lt;br /&gt;
* CPU registers : lowest latency&lt;br /&gt;
* L2 &amp;amp; L1 Cache : low latency&lt;br /&gt;
* Hard disk : highest latency&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
====I/O====&lt;br /&gt;
&lt;br /&gt;
There are several different ways &#039;&#039;&#039;Input/Output&#039;&#039;&#039; devices can communicate with the CPU and RAM :&lt;br /&gt;
&lt;br /&gt;
* &#039;&#039;Polled I/O&#039;&#039; : Think of it as giving a person a task.  You can poke the person every 5 minutes to see if they have completed the task.  If the person has finished the task, they have to respond with a &#039;yes&#039;, otherwise &#039;no&#039;.  In this scenario, one party has to poke every 5 minutes and another party has to respond every 5 minutes as they get poked.  Obviously, not the most efficient solution.  &lt;br /&gt;
&lt;br /&gt;
* &#039;&#039;Interrupt I/O&#039;&#039; : In this situation, when giving the person the task, we simply ask them to tell us when they have completed it.  Much more efficient than  polling.  &lt;br /&gt;
&lt;br /&gt;
If we have a &#039;&#039;&#039;small&#039;&#039;&#039; amount of data being processed by the I/O device, then our system runs relatively efficient.  If the data is large in size, the communication involved between the I/O device, the CPU and the RAM is frequent.  There are two solutions to this problem:&lt;br /&gt;
&lt;br /&gt;
* &#039;&#039;Memory Mapped I/O&#039;&#039; : devices are associated with logical primary memory addresses rather than having a specialized device address table.  &lt;br /&gt;
&lt;br /&gt;
* &#039;&#039;Direct Memory Access (DMA)&#039;&#039; is when devices and their controllers are able to read and write information directly from the primary memory without any CPU getting in the way.  The DMA contains the same algorithms the CPU uses, except conceptually it does not need any registers.  This direct access can increase the speed significantly.  &lt;br /&gt;
&lt;br /&gt;
* &#039;&#039;Intelligent Devices&#039;&#039; : a final alternative is to have memory and chip sets that act in a way the CPU does on the I/O device itself.  This means we do not need to talk to the CPU or RAM at all (it can all be done on the device itself).&lt;br /&gt;
&lt;br /&gt;
===SIMD Processors===&lt;br /&gt;
&lt;br /&gt;
&#039;&#039;&#039;Single Instruction Multiple Data (SIMD)&#039;&#039;&#039; processors handle one instruction accompanied with an entire array of data.  This type of architecture is quite efficient at processing large amounts of data that all need to be operated on using the same instruction.  Processing speeds are increased significantly because the processor does not have to decode every single instruction (since all pieces of data are only using one single instruction).  Multimedia devices like MP3 players and gaming consoles all take advantage of this technology; these machines are also known as vector machines.  &lt;br /&gt;
&lt;br /&gt;
The Sony PS3 has one MIMD (multiple instruction, multiple data) processor and several SIMD processors.  Compilers need to be adjusted and game programmers need to adapt their writing skills in favor of this new technology.  Someone eventually has to slave over a library of machine language instructions in order to optimize the particular application to SIMD technology (not the most exciting task).&lt;br /&gt;
&lt;br /&gt;
===Boot Process===&lt;br /&gt;
&lt;br /&gt;
How is the OS started?  How does the computer go from powering on to running the Operating Systems (Windows, OSX ...).  This process takes a really long time when you consider how fast computers have become, &#039;&#039;&#039;why&#039;&#039;&#039; is this?  This occurs because the system must determine all of the hardware present in the machine, each hardware&#039;s manufacturer, specifications, requirements etc.  It can become a real mess!  Before the OS can even begin to start, all of the hardware in the system must be properly configured.  This all begins with the BIOS.&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
====BIOS====&lt;br /&gt;
&lt;br /&gt;
&#039;&#039;&#039;BIOS (basic input/output system)&#039;&#039;&#039; is the code that runs before any other code runs.  It is built into the system.  In older times, the BIOS was burned into the machine and never changed.  Nowadays, it can be flashed and reprogrammed.  This can occur when bugs in the BIOS are so troublesome, that an update is needed (typically when the OS does not work well with the hardware).  The BIOS configures all the hardware by running diagnostics etc.  After hardware is configured, how is the OS launched?  This process is not direct, mainly because the BIOS has limited capabilities because of legacy issues.  The BIOS is configured in a way that it believes your computer&#039;s hardware is reflective of the year 1982.  This means it believes  you only have several megabytes of RAM, an 8086 processor, limited disk space and so on.  It does however, know how to load information of up to 512 bytes.  This is where bootstrapping comes into play. &lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
====Bootstrapping====&lt;br /&gt;
&lt;br /&gt;
After hardware is configured, the BIOS runs a small amount of code (approx 400 bytes, while the remaining amount is used to store partition tables) known as the &#039;&#039;&#039;boot block&#039;&#039;&#039;.  The BIOS loads this algorithm into memory where it is executed.  The &#039;&#039;&#039;boot block&#039;&#039;&#039; is just enough code to load another algorithm that makes the machine reflect more of what it really is (not an 8086).  From here we can initialize RAM, hard disk and eventually start the OS.  &lt;br /&gt;
&lt;br /&gt;
Macintosh computers run a next generation, more sophisticated BIOS called &#039;&#039;&#039;EFI&#039;&#039;&#039;.  The EFI allows the computer to go from BIOS to OS startup more easily and directly.  This is because the EFI recognizes the hardware of the computer for what it truly is.  Older macs use &#039;&#039;&#039;open boot&#039;&#039;&#039;, which is a BIOS that allows the user to enter commands via a prompt.&lt;/div&gt;</summary>
		<author><name>Dan</name></author>
	</entry>
	<entry>
		<id>https://homeostasis.scs.carleton.ca/wiki/index.php?title=High-level_Synchronization_and_IPC&amp;diff=1510</id>
		<title>High-level Synchronization and IPC</title>
		<link rel="alternate" type="text/html" href="https://homeostasis.scs.carleton.ca/wiki/index.php?title=High-level_Synchronization_and_IPC&amp;diff=1510"/>
		<updated>2007-10-13T04:33:30Z</updated>

		<summary type="html">&lt;p&gt;Dan: some quick formatting fixes&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;==Dining Philosophers Problem==&lt;br /&gt;
&lt;br /&gt;
[[Image:DiningPhilosophers.jpg]]&lt;br /&gt;
&lt;br /&gt;
* Thought experiment used to understand synchronization primitives&lt;br /&gt;
* Five philosophers are dining at a frugal Chinese restaurant where they are only provided with one chopstick each. They need two chopsticks to pick up the noodles in order to eat.&lt;br /&gt;
** Whenever they are hungry they pick up two chopsticks to eat.&lt;br /&gt;
** When they are done eating they put down the chopsticks.&lt;br /&gt;
** The philosophers do not talk to each other about eating, only high-minded ideas.&lt;br /&gt;
* Have to define a strategy to make sure that no philosopher starves to death.&lt;br /&gt;
* We can think of each chopstick as a semaphore.&lt;br /&gt;
&lt;br /&gt;
===Problems===&lt;br /&gt;
* &#039;&#039;&#039;Starvation&#039;&#039;&#039;&lt;br /&gt;
** One (or more) philosopher is never able to get two chopsticks and dies.&lt;br /&gt;
* &#039;&#039;&#039;Deadlock&#039;&#039;&#039;&lt;br /&gt;
** All philosophers have one chopstick and are waiting for another philosopher to put one down and they all starve to death.&lt;br /&gt;
&lt;br /&gt;
===Possible Solutions===&lt;br /&gt;
* Could use a scheme involving timeouts, but we want a &#039;&#039;PERFECT&#039;&#039; solution&lt;br /&gt;
* Could use synchronization; philosophers either grab one chopstick or none, this illustrates &#039;&#039;&#039;AND synchronization&#039;&#039;&#039;&lt;br /&gt;
&lt;br /&gt;
How could AND synchronization be implemented?&lt;br /&gt;
* Could turn multiple semaphores into one by abstracting the semaphores into 1 class.&lt;br /&gt;
* In general this is done by encapsulating the resources and using another process/thread/program to hand them out&lt;br /&gt;
&lt;br /&gt;
==Limitations of Semaphores==&lt;br /&gt;
&lt;br /&gt;
Semaphores are hard to use...&lt;br /&gt;
** If the programmer forgets to grab the semaphore this can lead to corruption.&lt;br /&gt;
** If the programmer never releases the semaphore this can lead to the program locking.&lt;br /&gt;
&lt;br /&gt;
The underlying problem is that the down and up need to be matched, with non-standard exits this can be difficult.&lt;br /&gt;
&lt;br /&gt;
But in C it&#039;s all you&#039;ve got...&lt;br /&gt;
* Solution: write a program to check the code (eg: Stanford Checker)&lt;br /&gt;
** Problem: not reliable, can produce many false positives&lt;br /&gt;
&lt;br /&gt;
How about building synchronization into the language as a base level construct?&lt;br /&gt;
* Java&#039;s &#039;&#039;synchronized&#039;&#039;&lt;br /&gt;
** Hides semaphores from developer&lt;br /&gt;
** Developer still has to decide where to synchronize&lt;br /&gt;
** Don&#039;t want to  synchronize all your code, this leads to single threaded behaviour&lt;br /&gt;
&lt;br /&gt;
General  idea: Monitor Object&lt;br /&gt;
* An object instance where only one thread can be executing certain pieces of code at once.&lt;br /&gt;
* Provides exclusive access to variables&lt;br /&gt;
&lt;br /&gt;
==Events and Message Passing==&lt;br /&gt;
&lt;br /&gt;
[[image:Structs.jpg]]&lt;br /&gt;
&lt;br /&gt;
===Message Passing===&lt;br /&gt;
* Uses buffers&lt;br /&gt;
* Message passing is used in large contexts&lt;br /&gt;
&lt;br /&gt;
===Events===&lt;br /&gt;
* Message queue&lt;br /&gt;
* When do they occur?&lt;br /&gt;
** User does something&lt;br /&gt;
** Device does something&lt;br /&gt;
* Synchronization can be based around waiting for an event to occur.&lt;br /&gt;
&lt;br /&gt;
When an event occurs:&lt;br /&gt;
* It has to be stored (in a buffer)&lt;br /&gt;
* It might not be processed right away&lt;br /&gt;
** What if the event is no longer relevant?&lt;br /&gt;
*** Could implement events with timeouts&lt;br /&gt;
** What if no one&#039;s listening for the event?&lt;br /&gt;
*** Could drop the event&lt;br /&gt;
** The challenge is determining which events should be dropped and which should be queued.&lt;br /&gt;
*** In the case of a file open if the event is dropped there will be a leak&lt;br /&gt;
*** In the case of mouse movement when no application is listening we want to drop it so that if an application starts listening to mouse events the mouse doesn&#039;t go crazy&lt;br /&gt;
&lt;br /&gt;
==Message passing in a WAN==&lt;br /&gt;
&lt;br /&gt;
Message passing in a WAN is unreliable. Packets can be dropped.&lt;br /&gt;
&lt;br /&gt;
In some cases it is acceptable to lose the data&lt;br /&gt;
* Streaming video&lt;br /&gt;
* VOIP&lt;br /&gt;
* Cell phones&lt;br /&gt;
&lt;br /&gt;
In others we need a reliable transmission&lt;br /&gt;
* Downloading content&lt;br /&gt;
&lt;br /&gt;
===TCP/IP===&lt;br /&gt;
* A mechanism to resend dropped packets&lt;br /&gt;
* If we build on top of TCP/IP we can assume reliable transmission&lt;br /&gt;
&lt;br /&gt;
===NAT (Network Address Translation)===&lt;br /&gt;
* Used in local networks, to the outside world the whole network appears as a single address, NAT devices remember outgoing requests to route the response to the correct party.&lt;br /&gt;
* Firewalls will automatically drop messages that do not have a corresponding outgoing request&lt;br /&gt;
** Firewalls will often only let certain types of traffic through, often only TCP/IP&lt;br /&gt;
*** Application developers may want to use UDP but may be forced to use TCP/IP to get through the firewall&lt;/div&gt;</summary>
		<author><name>Dan</name></author>
	</entry>
	<entry>
		<id>https://homeostasis.scs.carleton.ca/wiki/index.php?title=High-level_Synchronization_and_IPC&amp;diff=1509</id>
		<title>High-level Synchronization and IPC</title>
		<link rel="alternate" type="text/html" href="https://homeostasis.scs.carleton.ca/wiki/index.php?title=High-level_Synchronization_and_IPC&amp;diff=1509"/>
		<updated>2007-10-13T04:31:58Z</updated>

		<summary type="html">&lt;p&gt;Dan: finished &amp;quot;wiki-izing&amp;quot;&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;=High-Level Synchronization and IPC=&lt;br /&gt;
&lt;br /&gt;
==Dining Philosophers Problem==&lt;br /&gt;
&lt;br /&gt;
[[Image:DiningPhilosophers.jpg]]&lt;br /&gt;
&lt;br /&gt;
* Thought experiment used to understand synchronization primitives&lt;br /&gt;
* Five philosophers are dining at a frugal Chinese restaurant where they are only provided with one chopstick each. They need two chopsticks to pick up the noodles in order to eat.&lt;br /&gt;
** Whenever they are hungry they pick up two chopsticks to eat.&lt;br /&gt;
** When they are done eating they put down the chopsticks.&lt;br /&gt;
** The philosophers do not talk to each other about eating, only high-minded ideas.&lt;br /&gt;
* Have to define a strategy to make sure that no philosopher starves to death.&lt;br /&gt;
* We can think of each chopstick as a semaphore.&lt;br /&gt;
&lt;br /&gt;
===Problems===&lt;br /&gt;
* &#039;&#039;&#039;Starvation&#039;&#039;&#039;&lt;br /&gt;
** One (or more) philosopher is never able to get two chopsticks and dies.&lt;br /&gt;
* &#039;&#039;&#039;Deadlock&#039;&#039;&#039;&lt;br /&gt;
** All philosophers have one chopstick and are waiting for another philosopher to put one down and they all starve to death.&lt;br /&gt;
&lt;br /&gt;
===Possible Solutions===&lt;br /&gt;
* Could use a scheme involving timeouts, but we want a &#039;&#039;PERFECT&#039;&#039; solution&lt;br /&gt;
* Could use synchronization; philosophers either grab one chopstick or none, this illustrates &#039;&#039;&#039;AND synchronization&#039;&#039;&#039;&lt;br /&gt;
&lt;br /&gt;
How could AND synchronization be implemented?&lt;br /&gt;
* Could turn multiple semaphores into one by abstracting the semaphores into 1 class.&lt;br /&gt;
* In general this is done by encapsulating the resources and using another process/thread/program to hand them out&lt;br /&gt;
&lt;br /&gt;
==Limitations of Semaphores==&lt;br /&gt;
&lt;br /&gt;
Semaphores are hard to use...&lt;br /&gt;
** If the programmer forgets to grab the semaphore this can lead to corruption.&lt;br /&gt;
** If the programmer never releases the semaphore this can lead to the program locking.&lt;br /&gt;
&lt;br /&gt;
The underlying problem is that the down and up need to be matched, with non-standard exits this can be difficult.&lt;br /&gt;
&lt;br /&gt;
But in C it&#039;s all you&#039;ve got...&lt;br /&gt;
* Solution: write a program to check the code (eg: Stanford Checker)&lt;br /&gt;
** Problem: not reliable, can produce many false positives&lt;br /&gt;
&lt;br /&gt;
How about building synchronization into the language as a base level construct?&lt;br /&gt;
* Java&#039;s &#039;&#039;synchronized&#039;&#039;&lt;br /&gt;
** Hides semaphores from developer&lt;br /&gt;
** Developer still has to decide where to synchronize&lt;br /&gt;
** Don&#039;t want to  synchronize all your code, this leads to single threaded behaviour&lt;br /&gt;
&lt;br /&gt;
General  idea: Monitor Object&lt;br /&gt;
* An object instance where only one thread can be executing certain pieces of code at once.&lt;br /&gt;
* Provides exclusive access to variables&lt;br /&gt;
&lt;br /&gt;
==Events and Message Passing==&lt;br /&gt;
&lt;br /&gt;
===Message Passing===&lt;br /&gt;
* Uses buffers&lt;br /&gt;
* Message passing is used in large contexts&lt;br /&gt;
&lt;br /&gt;
===Events===&lt;br /&gt;
* Message queue&lt;br /&gt;
* When do they occur?&lt;br /&gt;
** User does something&lt;br /&gt;
** Device does something&lt;br /&gt;
* Synchronization can be based around waiting for an event to occur.&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
When an event occurs:&lt;br /&gt;
* It has to be stored (in a buffer)&lt;br /&gt;
* It might not be processed right away&lt;br /&gt;
** What if the event is no longer relevant?&lt;br /&gt;
*** Could implement events with timeouts&lt;br /&gt;
** What if no one&#039;s listening for the event?&lt;br /&gt;
*** Could drop the event&lt;br /&gt;
** The challenge is determining which events should be dropped and which should be queued.&lt;br /&gt;
*** In the case of a file open if the event is dropped there will be a leak&lt;br /&gt;
*** In the case of mouse movement when no application is listening we want to drop it so that if an application starts listening to mouse events the mouse doesn&#039;t go crazy&lt;br /&gt;
&lt;br /&gt;
==Message passing in a WAN==&lt;br /&gt;
&lt;br /&gt;
Message passing in a WAN is unreliable. Packets can be dropped.&lt;br /&gt;
&lt;br /&gt;
In some cases it is acceptable to lose the data&lt;br /&gt;
* Streaming video&lt;br /&gt;
* VOIP&lt;br /&gt;
* Cell phones&lt;br /&gt;
&lt;br /&gt;
In others we need a reliable transmission&lt;br /&gt;
* Downloading content&lt;br /&gt;
&lt;br /&gt;
===TCP/IP===&lt;br /&gt;
* A mechanism to resent dropped packets&lt;br /&gt;
* If we build on top of TCP/IP we can assume reliable transmission&lt;br /&gt;
&lt;br /&gt;
===NAT (Network Address Translation)===&lt;br /&gt;
* Used in local networks, to the outside world the whole network appears as a single address, NAT devices remember outgoing requests to route the response to the correct party.&lt;br /&gt;
* Firewalls will automatically drop messages that do not have a corresponding outgoing request&lt;br /&gt;
** Firewalls will often only let certain types of traffic through, often only TCP/IP&lt;br /&gt;
*** Application developers may want to use UDP but may be forced to use TCP/IP to get through the firewall&lt;/div&gt;</summary>
		<author><name>Dan</name></author>
	</entry>
	<entry>
		<id>https://homeostasis.scs.carleton.ca/wiki/index.php?title=High-level_Synchronization_and_IPC&amp;diff=1508</id>
		<title>High-level Synchronization and IPC</title>
		<link rel="alternate" type="text/html" href="https://homeostasis.scs.carleton.ca/wiki/index.php?title=High-level_Synchronization_and_IPC&amp;diff=1508"/>
		<updated>2007-10-13T04:04:33Z</updated>

		<summary type="html">&lt;p&gt;Dan: first edit, will &amp;quot;wiki-ize&amp;quot; on preceeding edits&lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;Dining Philosophers Problem:&lt;br /&gt;
- Thought experiment used to understand synchronization primitives&lt;br /&gt;
&lt;br /&gt;
Five philosophers are dining at a frugal Chinese restaurant where they are only provided with one chopstick each. They need two chopsticks to pick up the noodles.&lt;br /&gt;
Whenever they are hungry they pick up two chopsticks to eat.&lt;br /&gt;
When they are done eating they put down the chopsticks.&lt;br /&gt;
The philosophers do not talk to each other about eating, only high-minded ideas.&lt;br /&gt;
Have to define a strategy to make sure that no philosopher starves to death.&lt;br /&gt;
We can think of each chopstick as a semaphore.&lt;br /&gt;
&lt;br /&gt;
Problems:&lt;br /&gt;
&lt;br /&gt;
Starvation:&lt;br /&gt;
- one (or more) philosopher is never able to get two chopsticks and dies.&lt;br /&gt;
&lt;br /&gt;
Deadlock&lt;br /&gt;
- All philosophers have one chopstick and are waiting for another philosopher to put one down&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
Possible Solutions:&lt;br /&gt;
- Could use a scheme involving timeouts, but we want a PERFECT solution&lt;br /&gt;
- Could use synchronization; philosophers either grab one chopstick or none, this illustrates AND synchronization&lt;br /&gt;
&lt;br /&gt;
How could AND synchronization be implemented?&lt;br /&gt;
- could turn multiple semaphores into one by abstracting the semaphores into 1 class.&lt;br /&gt;
- In general this is done by encapsulating the resources and using another process/thread/program to hand them out&lt;br /&gt;
&lt;br /&gt;
Limitations of Semaphores:&lt;br /&gt;
- Semaphores are hard to use&lt;br /&gt;
    - if the programmer forgets to grab the semaphore this can lead to corruption&lt;br /&gt;
    - if the programmer never releases the semaphore this can lead to the program locking&lt;br /&gt;
&lt;br /&gt;
The underlying problem is that the down and up need to be matched, with non-standard exits this can be difficult.&lt;br /&gt;
&lt;br /&gt;
But in C it&#039;s all you&#039;ve got...&lt;br /&gt;
&lt;br /&gt;
Solution: write a program to check the code (eg: Stanford Checker)&lt;br /&gt;
Problem: not reliable, can produce many false positives&lt;br /&gt;
&lt;br /&gt;
How about building synchronization into the language as a base level construct?&lt;br /&gt;
- Java&#039;s synchronized takes care of semaphores&lt;br /&gt;
    - hides semaphores from developer&lt;br /&gt;
    - developer still has to decide where to synchronize&lt;br /&gt;
    - Don&#039;t want to  synchronize all your code, this leads to single threaded behaviour&lt;br /&gt;
&lt;br /&gt;
General  idea: Monitor Object&lt;br /&gt;
- An object instance where only one thread can be executing certain pieces of code at once.&lt;br /&gt;
- Provides exclusive access to variables&lt;br /&gt;
&lt;br /&gt;
Events:&lt;br /&gt;
- message queue&lt;br /&gt;
- when do they occur?&lt;br /&gt;
    - user does something&lt;br /&gt;
    - device does something&lt;br /&gt;
- synchronization based on waiting for an event&lt;br /&gt;
&lt;br /&gt;
Message Passing&lt;br /&gt;
- uses buffers&lt;br /&gt;
- message passing is used in large contexts&lt;br /&gt;
&lt;br /&gt;
When an event occurs:&lt;br /&gt;
- it has to be stored (in a buffer)&lt;br /&gt;
- might not be processed right away&lt;br /&gt;
    - what if the event is no longer relevant?&lt;br /&gt;
       - Could implement events with timeouts&lt;br /&gt;
    - what if no one&#039;s listening for the event?&lt;br /&gt;
        - Could drop the event&lt;br /&gt;
             - In the case of a file open if the event is dropped there will be a leak&lt;br /&gt;
&lt;br /&gt;
Message passing in a WAN&lt;br /&gt;
&lt;br /&gt;
Message passing in a WAN is unreliable. Packets can be dropped.&lt;br /&gt;
&lt;br /&gt;
In some cases we can lose the data&lt;br /&gt;
- streaming video&lt;br /&gt;
- VOIP&lt;br /&gt;
- cell phones&lt;br /&gt;
&lt;br /&gt;
In others we need a reliable transmission&lt;br /&gt;
- downloading content&lt;br /&gt;
&lt;br /&gt;
TCP/IP&lt;br /&gt;
- a mechanism to resend dropped packets&lt;br /&gt;
- if we build on top of TCP/IP we can assume reliable transmission&lt;br /&gt;
&lt;br /&gt;
NAT (network address translation)&lt;br /&gt;
- Used in local networks, to the outside world the whole network appears as a single address, NAT devices remember outgoing requests to route the response to the correct party.&lt;br /&gt;
- Firewalls will automatically drop messages that do not have a corresponding outgoing request&lt;br /&gt;
    - Firewalls will often only let certain types of traffic through, often only TCP/IP&lt;br /&gt;
       - Application developers may want to use UDP but may be forced to use TCP/IP to get through the firewall&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
&lt;br /&gt;
Further Reading:&lt;br /&gt;
http://en.wikipedia.org/wiki/Dining_philosophers_problem&lt;br /&gt;
http://en.wikipedia.org/wiki/Monitor_(synchronization)&lt;/div&gt;</summary>
		<author><name>Dan</name></author>
	</entry>
	<entry>
		<id>https://homeostasis.scs.carleton.ca/wiki/index.php?title=File:Structs.jpg&amp;diff=1507</id>
		<title>File:Structs.jpg</title>
		<link rel="alternate" type="text/html" href="https://homeostasis.scs.carleton.ca/wiki/index.php?title=File:Structs.jpg&amp;diff=1507"/>
		<updated>2007-10-13T04:03:28Z</updated>

		<summary type="html">&lt;p&gt;Dan: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Dan</name></author>
	</entry>
	<entry>
		<id>https://homeostasis.scs.carleton.ca/wiki/index.php?title=File:DiningPhilosophers.jpg&amp;diff=1506</id>
		<title>File:DiningPhilosophers.jpg</title>
		<link rel="alternate" type="text/html" href="https://homeostasis.scs.carleton.ca/wiki/index.php?title=File:DiningPhilosophers.jpg&amp;diff=1506"/>
		<updated>2007-10-13T04:01:37Z</updated>

		<summary type="html">&lt;p&gt;Dan: &lt;/p&gt;
&lt;hr /&gt;
&lt;div&gt;&lt;/div&gt;</summary>
		<author><name>Dan</name></author>
	</entry>
</feed>