<?xml version="1.0" encoding="ISO-8859-1"?>

<!-- OPML generated by OPML Editor v10.1a16 on Sun, 14 Apr 2013 15:40:29 GMT -->

<opml version="2.0">

	<head>

		<title>What Adam Curry is reading</title>

		<dateCreated>Sun, 14 Apr 2013 03:53:38 GMT</dateCreated>

		<dateModified>Sun, 14 Apr 2013 15:40:29 GMT</dateModified>

		<ownerName>Adam Curry</ownerName>

		<ownerEmail>adam@curry.com</ownerEmail>

		<expansionState></expansionState>

		<vertScrollState>17</vertScrollState>

		<windowTop>69</windowTop>

		<windowLeft>-477</windowLeft>

		<windowBottom>391</windowBottom>

		<windowRight>-24</windowRight>

		</head>

	<body>

		<outline text="Opening script">

			<outline text="I'm your keyboard cowboy!"/>

			</outline>

		<outline text="GVS"/>

		<outline text="Ex-Military Bio-Environmental Engineer | Kristen Meghan | Blows Whistle On" name="exmilitaryBioenvironmentalEngineerKristenMeghanBlowsWhistleOn" type="link" url="http://www.youtube.com/watch?v=rAxXyMAmBMs&amp;feature=youtu.be"/>

		<outline text="Bieber: Hopelijk was Anne Frank een fan">

			<outline text="Link to Article" type="link" url="http://www.telegraaf.nl/feed/21477359/__Bieber__Hopelijk_was_Anne_Frank_een_fan__.html?cid=rss"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365949930_zV2XHGhh.html"/>

			<outline text="Source: Telegraaf.nl - prive" type="link" url="http://www.telegraaf.nl/rss/prive.xml"/>

			<outline text="Sun, 14 Apr 2013 09:32"/>

			<outline text=""/>

			<outline text="Kritiek op Justin Bieber om bericht Anne Frank Huis - Nieuws | Altijd op de hoogte van het laatste nieuws met Telegraaf.nl [Feed]Uw browser ondersteunt geen javascript of javascript staat uitgeschakeld. Hierdoor kunnen er cookies geplaatst worden waar u geen toestemming voor heeft gegeven."/>

			<outline text="Uw browser ondersteunt geen javascript of javascript staat uitgeschakeld. Hierdoor kunnen er cookies geplaatst worden waar u geen toestemming voor heeft gegeven."/>

			</outline>

		<outline text="Dr Quantum - Double Slit Experiment - YouTube">

			<outline text="Link to Article" type="link" url="http://www.youtube.com/watch?v=DfPeprQ7oGc"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365942712_MQyLcmCv.html"/>

			<outline text="Sun, 14 Apr 2013 07:31"/>

			<outline text=""/>

			</outline>

		<outline text="Bush Sr Laughs at JFK Shooting YouTube freecorder com">

			<outline text="Link to Article" name="linkToArticle" type="link" url="https://www.youtube.com/watch?v=VkfhE_MfnPA"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365920282_W2X6hxmT.html"/>

			<outline text="Source: pg.chrys news feed" type="link" url="http://s3.amazonaws.com/radio2/pg.chrys/linkblog.xml"/>

			<outline text="Sun, 14 Apr 2013 01:18"/>

			<outline text=""/>

			</outline>

		<outline text="Are you ready for Mayor Anthony Weiner?">

			<outline text="Link to Article" name="linkToArticle" type="link" url="http://usfollowme.blogspot.com/2013/04/are-you-ready-for-mayor-anthony-weiner.html"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365920215_DcZ66k3f.html"/>

			<outline text="Source: usfollowme" type="link" url="http://usfollowme.blogspot.com/feeds/posts/default?alt=rss"/>

			<outline text="Sun, 14 Apr 2013 01:16"/>

			<outline text=""/>

			<outline text="http://hotair.com/archives/2013/04/10/are-you-ready-for-mayor-anthony-weiner/"/>

			</outline>

		<outline text="MSNBC host: I'm not backing off my point that children belong to whole communities">

			<outline text="Link to Article" name="linkToArticle" type="link" url="http://usfollowme.blogspot.com/2013/04/msnbc-host-im-not-backing-off-my-point.html"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365920208_FBJw9GrQ.html"/>

			<outline text="Source: usfollowme" type="link" url="http://usfollowme.blogspot.com/feeds/posts/default?alt=rss"/>

			<outline text="Sun, 14 Apr 2013 01:16"/>

			<outline text=""/>

			<outline text="http://hotair.com/archives/2013/04/09/msnbc-host-im-not-backing-off-my-point-that-children-belong-to-whole-communities/"/>

			</outline>

		<outline text="DC joins several states classifying smoking as a &quot;pre-existing medical condition&quot; under ObamaCare">

			<outline text="Link to Article" name="linkToArticle" type="link" url="http://usfollowme.blogspot.com/2013/04/dc-joins-several-states-classifying.html"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365920182_ppGsP4Jg.html"/>

			<outline text="Source: usfollowme" type="link" url="http://usfollowme.blogspot.com/feeds/posts/default?alt=rss"/>

			<outline text="Sun, 14 Apr 2013 01:16"/>

			<outline text=""/>

			<outline text="http://hotair.com/archives/2013/04/10/dc-joins-several-states-classifying-smoking-as-a-pre-existing-medical-condition-under-obamacare/"/>

			</outline>

		<outline text="LISTEN: Jay-Z responds to critics of his Cuba trip with a new rap">

			<outline text="Link to Article" type="link" url="http://usfollowme.blogspot.com/2013/04/listen-jay-z-responds-to-critics-of-his.html"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365920036_62Y6LaRR.html"/>

			<outline text="Source: usfollowme" type="link" url="http://usfollowme.blogspot.com/feeds/posts/default?alt=rss"/>

			<outline text="Sun, 14 Apr 2013 01:13"/>

			<outline text=""/>

			<outline text="http://theweek.com/article/index/242667/listen-jay-z-responds-to-critics-of-his-cuba-trip-with-a-new-rap"/>

			</outline>

		<outline text="U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.">

			<outline text="Link to Article" name="linkToArticle" type="link" url="http://www.godlikeproductions.com/forum1/message2195001/pg1"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365919905_wHH2UXJn.html"/>

			<outline text="Sun, 14 Apr 2013 01:11"/>

			<outline text=""/>

			<outline text="Previous Page"/>

			<outline text=" Anonymous CowardUser ID: 37637438 United States04/07/2013 10:13 PMReport Abusive PostReport Copyright ViolationU.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.(CNN) -- Anne Smedinghoff lived inside a heavily secured compound.But the public diplomacy officer for the U.S. Embassy in Kabul was always pushing to get out."/>

			<outline text="&quot;She particularly enjoyed the opportunity to work directly with the Afghan people,&quot; her parents said in a statement, &quot;and was always looking for opportunities to reach out and help make a difference in the lives of those living in a country ravaged by war.&quot;"/>

			<outline text="This weekend, the 25-year-old was trying to do just that -- delivering books to a school in southern Afghanistan -- when a suicide bomber smashed into her convoy Saturday. She is believed to be the first U.S. diplomat killed since the September attack in Benghazi, Libya."/>

			<outline text="..."/>

			<outline text="--------------------"/>

			<outline text="[link to cnsnews.com]"/>

			<outline text="--------------------..."/>

			<outline text="Her father said they knew the assignments were dangerous, though she spent most of her time at the U.S. Embassy compound. Trips outside were in heavily armored convoys -- as was Saturday's trip that killed five Americans, including Smedinghoff. The U.S. Department of Defense did not release the names of the others who died: three soldiers and one employee."/>

			<outline text="&quot;It's like a nightmare, you think will go away and it's not,&quot; he said. &quot;We keep saying to ourselves, we're just so proud of her, we take consolation in the fact that she was doing what she loved.&quot;"/>

			<outline text="Friends remembered her Sunday for her charity work too."/>

			<outline text="Smedinghoff participated in a 2009 cross-country bike ride for The 4K for Cancer -- part of the Ulman Cancer Fund for Young Adults -- according to the group. She served on the group's board of directors after the ride from Baltimore to San Francisco."/>

			<outline text="&quot;She was an incredible young woman. She was always optimistic,&quot; said Ryan Hanley, a founder of the group. &quot;She always had a smile on her face and incredible devotion to serving others.&quot;"/>

			<outline text="..."/>

			<outline text="--------------------"/>

			<outline text="[link to cnsnews.com]"/>

			<outline text="Anonymous Coward (OP)User ID: 37637438 United States04/07/2013 10:19 PMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.JreeeegsUser ID: 34368782 United States04/07/2013 10:19 PMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.Anonymous CowardUser ID: 37536239 Canada04/07/2013 10:22 PMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.Payback for US Drone Strike that killed 11 kids the day before.Live by the drone die by the bomb"/>

			<outline text="Anonymous CowardUser ID: 37536239 Canada04/07/2013 10:23 PMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.Payback for US Drone Strike that killed 11 kids the day before.Live by the drone die by the bomb"/>

			<outline text=" Quoting: Anonymous Coward 37536239[link to www.bbc.co.uk]"/>

			<outline text="Anonymous CowardUser ID: 1445809 Czech Republic04/07/2013 10:25 PMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.SHHHHhhhhhhhh, dont make a fuss, Dear Leader may slice worse than normal!!"/>

			<outline text="Anonymous CowardUser ID: 3998821 Canada04/07/2013 10:27 PMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.It's just a drop in the bucket but stories like these make me like there's still hope."/>

			<outline text="Anonymous CowardUser ID: 3998821 Canada04/07/2013 10:27 PMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.It's just a drop in the bucket but stories like these make me feel like there's still hope."/>

			<outline text="Anonymous Coward (OP)User ID: 37637438 United States04/07/2013 10:28 PMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.----------The friends and family of Anne Smedinghoff are mourning the 25-year-old Foreign Service Officer killed in a car bomb blast in southern Afghanistan whom they describe as vivacious and loving."/>

			<outline text="Smedinghoff was one of five Americans killed in a suicide bomb attack in Qalat, Zabul."/>

			<outline text="Working as a press officer for the U.S. embassy in Kabul, she was helping Afghan journalists cover an event at a boys school where the local U.S. Provincial Reconstruction Team was to donate math and science books"/>

			<outline text="The other Americans killed in the attack were three military service members and a civilian working for the Defense Department. Four State Department officials, including one described as critically injured, were among the 10 injured in the attack."/>

			<outline text="&quot;The world lost a truly beautiful soul,&quot; Tom and Mary Beth Smedinghoff said in a statement. &quot;Anne absolutely loved the work she was doing&quot; as a press officer at the U.S. embassy in Kabul, they said."/>

			<outline text="Having served in the Foreign Service for only three years, Smedinghoff volunteered to serve in Afghanistan and arrived last July."/>

			<outline text="----------"/>

			<outline text="[link to abcnews.go.com]"/>

			<outline text="Anonymous CowardUser ID: 3998821 Canada04/07/2013 10:30 PMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.How many children did they murder in her name."/>

			<outline text="Anonymous CowardUser ID: 23317409 United States04/07/2013 10:36 PMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.Payback for US Drone Strike that killed 11 kids the day before.Live by the drone die by the bomb"/>

			<outline text=" Quoting: Anonymous Coward 37536239Can't really get behind the hand clapping part, but otherwise, yeah, that's the way it goes."/>

			<outline text="Anonymous CowardUser ID: 35118214 Germany04/07/2013 10:36 PMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.she is so gorgeous! I am really sad they did not get to gangrape her before they killed her."/>

			<outline text="Anonymous CowardUser ID: 8656872 United States04/07/2013 10:46 PMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.shhhhh...........don't break oblahs concentration, he's trying to hit a golf balll"/>

			<outline text="JreeeegsUser ID: 34368782 United States04/07/2013 10:46 PMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.she is so gorgeous! I am really sad they did not get to gangrape her before they killed her."/>

			<outline text=" Quoting: Anonymous Coward 35118214"/>

			<outline text="Anonymous CowardUser ID: 37643276 United States04/07/2013 10:58 PMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.--------State Department diplomat with Chicago ties killed in Afghanistan"/>

			<outline text="Anne Smedinghoff, the young American diplomat who was killed Saturday while delivering textbooks to children in Afghanistan, left behind a long trail of people she had impressed with her passion, intelligence and poise, both in her career and while she was growing up near Chicago, in River Forest."/>

			<outline text="They included Secretary of State John Kerry, whom Smedinghoff shepherded around Afghanistan on a recent visit, a plum assignment that underscored her achievements and promise. &quot;A selfless, idealistic young woman,&quot; Kerry said."/>

			<outline text="Her parents grieved for the &quot;beautiful soul&quot; they lost. &quot;She was doing what she loved, and she was doing great things,&quot; said her father, Tom Smedinghoff. &quot;We're just in total shock.&quot;"/>

			<outline text="Smedinghoff, 25, a Fenwick High School graduate who grew up in River Forest, died along with four other Americans in what the State Department said was a Taliban attack in Zabul province in southern Afghanistan. Smedinghoff and the other victims were traveling in a convoy of vehicles when a bomb exploded, according to the State Department."/>

			<outline text="--------"/>

			<outline text="[link to www.chicagotribune.com]"/>

			<outline text="LunaticFringeUser ID: 11502006 United States04/07/2013 11:00 PMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.Meanwhile...Thread: Meanwhile: Obama Spends Second Week In A Row On Golf Course."/>

			<outline text="&quot;You ain't nuffin! You a punk faggot! Now come n' do sumpin!&quot; - The Reverend, Alfred Charles Sharpton - circa 2010."/>

			<outline text="Anonymous CowardUser ID: 17373378 Canada04/07/2013 11:10 PMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.What did she expect? Shes on the side of an occupation that invaded a country, setup shop to steal resources, fortified herself in a base, comes out do deliver books to school children acting all nice and human now? yea right.. but whatever lets say she did, what was she expecting? She got what she deserved."/>

			<outline text="Anonymous CowardUser ID: 3571320 United States04/07/2013 11:22 PMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.Who is the idiot sending a little girl to a combat zone to be the next victim? She has no business in that country. She is a liability for the troops there. They Taliban will not even accept a thing she has to say. Being politically correct in a middle eastern country is suicide for the young girl. When will our dear leader learn.It would also help if we stop butchering they're children.Just a thought..."/>

			<outline text="Anonymous CowardUser ID: 37643276 United States04/07/2013 11:33 PMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.shhhhh...........don't break oblahs concentration, he's trying to hit a golf balll"/>

			<outline text=" Quoting: Anonymous Coward 8656872Barack Obama is always too busy vacationing, campaigning, partying in the white house, or golfing when the worst stuff happens."/>

			<outline text="Anonymous CowardUser ID: 27112816 United States04/07/2013 11:51 PMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.From what I see on the TV everyone of these dead CIA / state department operatives sounds like the nicest people you'd ever want to meet! They are really doing God's work over there!"/>

			<outline text="Anonymous CowardUser ID: 37643276 United States04/08/2013 12:07 AMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.Does anyone know if one of the books she was giving out was the Bible?Maybe she was handing out copies or a copy of the Bible privately on her own?"/>

			<outline text="They are not telling us everything, like Benghazi."/>

			<outline text="I am trying to find some information on this."/>

			<outline text="PsyOpUser ID: 8994816 United States04/08/2013 12:20 AMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.She and her entourage were intelligence operatives, think about it really, war zone handing out books c'mon, read between the lines."/>

			<outline text="s. d. butlerUser ID: 974819 United States04/08/2013 12:37 AMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.From what I see on the TV everyone of these dead CIA / state department operatives sounds like the nicest people you'd ever want to meet! They are really doing God's work over there!"/>

			<outline text=" Quoting: Anonymous Coward 27112816yeah isn't that something? Functionaries of the empire are always canonized."/>

			<outline text="Just an aside about the three soldiers and one civilian killed, they must not count for much."/>

			<outline text="Last Edited by s. d. butler on 04/08/2013 03:24 AM"/>

			<outline text="Anonymous CowardUser ID: 322602 United States04/08/2013 12:51 AMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.another jewish spy posing as a do=gooder.bruatally murder tens of thousands with drones F16s, then give em a consolation prize of a paperback or some teddy bears.call it the Palestine model"/>

			<outline text="Anonymous CowardUser ID: 37641682 United States04/08/2013 12:52 AMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.Being a CIA agent isn't fun &amp; games. Should have been a cam whore."/>

			<outline text="Anonymous CowardUser ID: 37641682 United States04/08/2013 12:54 AMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.another jewish spy posing as a do=gooder.bruatally murder tens of thousands with drones F16s, then give em a consolation prize of a paperback or some teddy bears.call it the Palestine model"/>

			<outline text=" Quoting: Anonymous Coward 322602No. Just a dumb goy vermin.SpiderJonesUser ID: 1540313 United States04/08/2013 12:58 AMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.watching and waiting patiently..."/>

			<outline text="Anonymous CowardUser ID: 37643276 United States04/08/2013 01:07 AMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.Anonymous CowardUser ID: 10743044 United States04/08/2013 01:11 AMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.Payback for US Drone Strike that killed 11 kids the day before.Live by the drone die by the bomb"/>

			<outline text=" Quoting: Anonymous Coward 37536239[link to www.bbc.co.uk]"/>

			<outline text=" Quoting: Anonymous Coward 37536239Anonymous CowardUser ID: 27988379 United States04/08/2013 01:15 AMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.s.jUser ID: 37649864 United States04/08/2013 01:23 AMReport Abusive PostReport Copyright ViolationRe: U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In Afghanistan. Worst Attack Since The Disaster Benghazi, Libya.smells like psyop mind fucking - wish it twern't so"/>

			<outline text="Previous Page"/>

			</outline>

		<outline text="Anne SmedinghoffGATE">

			<outline text="Link to Article" type="link" url="http://lamecherry.blogspot.com/2013/04/anne-smedinghoffgate.html"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365919766_qGuHmw9E.html"/>

			<outline text="Source: Lame Cherry" type="link" url="http://lamecherry.blogspot.com/feeds/posts/default"/>

			<outline text="Sun, 14 Apr 2013 01:09"/>

			<outline text=""/>

			<outline text="In a most troubling criminal cover up, concerning the mass murder of more Americans, this time in Afghanistan, the heart tugger, Anne Smedinghoff, age 25, was somehow in the lead on this diplomatic mission, in a war zone, a female who could be barely out of university, having no combat status nor status in comprehending security, was prettily in charge of this &quot;donating books to an Afghani school&quot;."/>

			<outline text="There is a huge problem in this, besides a girl charge in a combat zone, because the Obama regime was caught lying again just like at Benghazi."/>

			<outline text="Anne Smedinghoff left a secure column, took US Soldiers out into the open, for a 200 yard walk in which all got lost in thinking a Ministry building was a school.Then they walked back in this exposed combat zone to the target base, where a suicide bomber was waiting and then blew himself up murdering and injuring the party."/>

			<outline text="This party included an injured Afghani reporter, LOST in his own city."/>

			<outline text="Instead of stating the reality of this, the Obama regime and all media instead stated this woman was in an armoured column and was rammed by a suicide bomber in another vehicle."/>

			<outline text="The Obama regime got caught in another flat ass bold faced lie, and they told all the families the same lie."/>

			<outline text="For the reality, Anne Smedinghoff, was in over her head. No diplomat worth anything ever exposes themselves to the kinds of danger involved in combat zones, as high priority targets are targeted. THAT is what was wrong with Chris Stevens at Benghazi in Obama is sending in diplomats to unsecure zones they have no business being in."/>

			<outline text="What on earth the Soldiers were doing is beyond belief, as the commanding officer there knew damn well not to do this, and yet he or she must have been a political officer who allowed this carnage to take place."/>

			<outline text="This blog noted in Obama on the Korean border looking through binoculars at North Korea exposed, is something no officer would every allow, and it was allowed."/>

			<outline text="At this point I do not know what was taking place in Afghanistan with Anne Smedinghoff, but the reality is she was either murdered for something she knew or this was such incompetence that the military officers and the Obama regime are guilty of murder."/>

			<outline text="This Germanic hottie was a target in this. There is a cover up involved in this, and that is the entire story in this.There is not any way that a military officer would have allowed this, no more than a combat crew would have condoned this as one gets a court martial over this stuff. Any commander is going to raise holy hell on an Soldier exposing themselves to danger, and yet I will repeat the Germanic hottie part in this..........as she has a German name, but this woman was JEWISH."/>

			<outline text="U.S. Diplomat Anne Smedinghoff and Others Killed In A Bombing In ...www.godlikeproductions.com/forum1/message2195001/pg131 posts - 4 authors - 5 days ago(CNN) -- Anne Smedinghoff lived inside a heavily secured compound. But the public ... another jewish spy posing as a do=gooder. bruatally ...Anne Smedinghoff versus Rachel Corrielaconics.forumotion.com/t393-anne-smedinghoff-versus-rachel-corrie12 posts - 4 authors - 4 days agoAnne Smedinghoff a young jewess in afghanistan - Praise as a heroine Rachel Corrie is ... Marines in Helmud Province protecting Jew Heroin ...I will point this out and make this point again.Barack Hussein Obama stationed a homosexual in Chris Stevens into Muslim Brotherhood territory and he was brutally raped and murdered, costing others lives."/>

			<outline text="Barack Hussein Obama stationed a Jewish woman in Anne Smedinghoff in into al Qaeda and Taliban country, and it caused her murder and the lives of others."/>

			<outline text="She was a target in being Jewish and now the question is was she an Obama Muslim prize to his al Qaeda operatives to murder a Jew?"/>

			<outline text="This woman had no sense or she would not be walking around and exposing her escort to danger in a war zone. So why did not the military have sense to protect her?"/>

			<outline text="Who gave the orders to expose this diplomat and how was it that conveniently there was a suicide bomber waiting there to assassinate her as the prime target...............and once again like ANALGATE, there was the Obama regime with a cover story that was an absolute lie again to cover up an operation apparently once again directed from 1600 Penn Avenue."/>

			<outline text="The very idea of sending a Jewess into a Muslim war zone is a death sentence. This is the work of Hillary Clinton and John Kerry in exposing these diplomats to apparently murder trophy status for Islamists, and that now means that JEWGATE is the match for ANALGATE in the Obama regime guilty of the murder of US diplomats in exposing and not keeping them secure."/>

			<outline text="There are reports in this orginally that the convoy crossed route with the provincial Governor's convoy, but that is not what is currently being reported in the revised reality."/>

			<outline text="There is though this interesting bit of information:"/>

			<outline text="Her first assignment for the foreign service was in Caracas, Venezuela, and she volunteered for the Afghanistan assignment after that."/>

			<outline text="This Jewess from Obama Chicago, fresh out of university ended up in Hugo Chavez's Venezuela, hot bed of counter Chavez operations and the operation to cancer innoculate Chavez.What did Smedinghoff really do in Venezuela, and what did she know about Chavez's murder, that questions were now surfacing around the globe calling for investigations into how Chavez was given cancer?"/>

			<outline text="What better way to end an operatives life with too much information, that was in reality flowing directly back to Mosaad, by offering her up as a trophy to Obama's al Qaeda in ending the Chavez connection."/>

			<outline text="Exactly like Benghazi in ANALGATE as broken only here in matter anti matter exclusive, there was the Obama regime cover story of Governors and convoys, but now the reality appears in Smedinghoff left the convoy, and the regime's real operation was a car bomb ramming into Smedinghoff's convoy, but the operation was changed when she left the convoy and any approaching vehicle would have been strafed and ended before it approached.That left this back up plan of a walking bomber who closed and murdered this woman to silence her in what she knew about Hugo Chavez's assassination that she had been giving intelligence on to Mosaad."/>

			<outline text="The Obama regime caught flat footed in another Muslim al Qaeda change of plan, published the car bomb story as they did not consider confirmation as the plans were in place and after Benghazi no more alternatives were to allowed."/>

			<outline text="This though then is SMEDINGHOFF GATE in the Ghost of Hugo Chavez."/>

			<outline text="The regime wanted her dead and offered her up for a trophy."/>

			<outline text="This woman did not have to perish with others if ANALGATE had simply been exposed."/>

			<outline text="nuff said"/>

			<outline text="agtG 217"/>

			</outline>

		<outline text="Russia Announces American Officials Connected To Torture Are Banned From Entering Their Country">

			<outline text="Link to Article" name="linkToArticle" type="link" url="http://www.youtube.com/watch?v=nIjA-jkxf3Q&amp;feature=youtube_gdata"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365918873_pDUF3pbn.html"/>

			<outline text="Source: Uploads by MOXNEWSd0tC0M" type="link" url="http://gdata.youtube.com/feeds/base/users/MOXNEWSd0tC0M/uploads?alt=rss&amp;amp;v=2&amp;amp;orderby=published&amp;amp;client=ytapi-youtube-profile"/>

			<outline text="Sun, 14 Apr 2013 00:54"/>

			<outline text=""/>

			</outline>

		<outline text="House to vote on CISPA next week despite being horribly flawed after secret committee markup">

			<outline text="Link to Article" type="link" url="http://EndtheLie.com/2013/04/12/house-to-vote-on-cispa-next-week-despite-being-horribly-flawed-after-secret-committee-markup/?utm_source=feedburner&amp;utm_medium=feed&amp;utm_campaign=Feed%3A+EndTheLie+%28End+the+Lie%29"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365803371_Vkdn9u42.html"/>

			<outline text="Source: End the Lie - Independent News" type="link" url="http://feeds.feedburner.com/EndTheLie"/>

			<outline text="Fri, 12 Apr 2013 16:49"/>

			<outline text=""/>

			<outline text="By Madison Ruppert"/>

			<outline text="Editor of End the Lie"/>

			<outline text="(Image credit: DonkeyHotey)"/>

			<outline text="Despite the fact that the secret markup of the Cyber Intelligence Sharing and Protection Act (CISPA) left most of the problems unsolved, the House is still slated to vote on the flawed legislation next week, potentially allowing the total destruction of what little online privacy remains."/>

			<outline text="Unfortunately this is precisely the same situation we were presented with almost a year ago when it was similarly dangerous. Of course CISPA still enjoys massive corporate support as it did last year, according to the International Business Times."/>

			<outline text="Today Michelle Richardson, Legislative Counsel for the American Civil Liberties Union's Washington Legislative Office pointed out that while CISPA's sponsors and corporate backers claim ''that all privacy and civil liberties problems have been solved,'' this is nowhere near the truth."/>

			<outline text="During the markup process, only three of the four major categories of problems with CISPA were addressed, according to the ACLU."/>

			<outline text="The fix for the potential of widespread sharing of information under CISPA offered by Rep. Jan Schakowsky failed."/>

			<outline text="The limiting of the personal information that can be shared under CISPA offered by Rep. Adam Schiff also failed."/>

			<outline text="While a partial fix for the unlimited immunity offered for ''hack backs'' under CISPA was offered by Rep. Jim Langevin, it still leaves a lot to be desired."/>

			<outline text="The only real major problem solved was the lack of protections offered for information after it is shared under CISPA. This was fixed to a large degree by solutions offered by Reps. Jim Himes and Terri Sewell."/>

			<outline text="Still, the most major problem with CISPA is that it allows far too much highly sensitive personal information to be shared with far too many entities including none other than the National Security Agency. This glaring issue has not been fixed in the slightest."/>

			<outline text="''Two provisions in the manager's amendment actually removed protections added to CISPA last year,'' according to the ACLU."/>

			<outline text="''First, Rep. Justin Amash's (R-Mich.) ban on collecting library, tax, gun, and other records was reversed so that these records are collected pursuant to the privacy and minimization procedures that will apply to all records under the new draft,'' Richardson writes."/>

			<outline text="''Second, the government is no longer banned from 'affirmatively searching' through the information collected through CISPA, so long as it does so for an authorized purpose,'' according to the ACLU."/>

			<outline text="The simple fact is that the new CISPA is far too much like the old CISPA, to the point where there is no real meaningful distinction between the two."/>

			<outline text="As such, some 34 civil liberties groups, including the ACLU and Electronic Frontier Foundation, penned a letter in March expressing their opposition to the bill."/>

			<outline text="Even after the markup, the ACLU states that they continue to stand against this ''overbroad privacy-eviscerating bill.''"/>

			<outline text="''We will support floor amendments to fix the remaining problems and will be looking for your support,'' the ACLU states."/>

			<outline text="Currently, the Obama administration has hinted that they cannot fully support the bill as is, but have not actually threatened a veto as they did in the past."/>

			<outline text="''We continue to believe that information sharing improvements are essential to effective legislation, but they must include privacy and civil liberties protections, reinforce the roles of civilian and intelligence agencies, and include targeted liability protections,'' the National Security Council said, according to the Hill."/>

			<outline text="The council added that they are seeking to continue to work with the House on CISPA, but a deadline of next Tuesday for members to file amendments to the bill has been set by the House Rules Committee."/>

			<outline text="''The committee will also meet Tuesday to approve rule for the bill; once a rule is approved, the House will be able to consider the bill on the floor as early as the next day,'' according to the Hill."/>

			<outline text="Since the House is expected to vote on the bill next week, there is little time left to contact members of Congress. Please take a few minutes of your day to contact your Congressional representatives to tell them to vote no on CISPA until the aforementioned problems are actually resolved."/>

			<outline text="The ACLU is also encouraging Americans to tell Obama to veto CISPA, though it seems that any promise from the Obama administration is just about completely and totally worthless at this point."/>

			<outline text="If CISPA is signed into law as is, the very little privacy protections enjoyed by Americans online will be completely eliminated."/>

			<outline text="Did I forget anything or miss any errors? Would you like to make me aware of a story or subject to cover? Or perhaps you want to bring your writing to a wider audience? Feel free to contact me at [email protected] with your concerns, tips, questions, original writings, insults or just about anything that may strike your fancy."/>

			<outline text="Please support our work and help us start to pay contributors by doing your shopping through our Amazon link or check out some must-have products at our store."/>

			<outline text="Help Spread Alternative News"/>

			</outline>

		<outline text="Evidence of Revision is a 9 hour long documentary series whose...">

			<outline text="Link to Article" type="link" url="http://oswaldofguadalupe.tumblr.com/post/47792408664/evidence-of-revision-is-a-9-hour-long-documentary"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365793694_w5nFDNcL.html"/>

			<outline text="Source: oswald of guadalupe" type="link" url="http://oswaldofguadalupe.tumblr.com/rss"/>

			<outline text="Fri, 12 Apr 2013 14:08"/>

			<outline text=""/>

			<outline text="Evidence of Revision is a 9 hour long documentary series whose purpose is to present the publicly unavailable and even suppressed historical audio, video, and film recordings largely unseen by the American public relating to the assassination of the Kennedy brothers, the little known classified Black Ops actually used to intentionally create the massive war in Viet Nam, the CIA ''mind control'' programs and their involvement in the RFK assassination and the Jonestown massacre and other important truths of our post-modern time."/>

			<outline text="The U.S. Government's Orwellian Office of Public Diplomacy has been in existence in various forms and under various names since World War ONE. The union of American governance and American corporate interests began in Abraham Lincoln's day and the massaging of public truth began even before the Roman Empire."/>

			<outline text="The more you know about real history versus official history, the better equipped you are to see behind the lies of our times, even as they are told to you. Some of us knew what was really happening even before the second plane hit the tower."/>

			<outline text="Episodes included:"/>

			<outline text="The Assassinations of Kennedy and Oswald,The Why of it all referenced to Viet Nam and LBJ,LBJ, Hoover and Others. What So Few Know Even Today,The RFK Assassination As Never Seen Before,The RFK Assassination Continued, MK ULTRA and the Jonestown Massacre,MLK Conspiratus."/>

			</outline>

		<outline text="Climate disruption April 12: Icebreaker stuck fast in frozen Saimaa canal.">

			<outline text="Link to Article" type="link" url="http://yle.fi/uutiset/video_icebreaker_stuck_fast_in_frozen_saimaa_canal/6576790"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365793621_bPFmxXs5.html"/>

			<outline text="Source: TheCandyman's news feed" type="link" url="http://s3.amazonaws.com/radio2/wonderhelm/linkblog.xml"/>

			<outline text="Fri, 12 Apr 2013 14:07"/>

			<outline text=""/>

			<outline text="J&amp;#164;&amp;#164;nmurtajat Protector ja Artemis avasivat Saimaan kanavaa 10. huhtikuuta.Video: Yle.The vessel, named 'Protector', began clearing the ice on Wednesday. The intention was to have it clear all the way to Savonlinna by Thuesday, but things are now several days behind schedule."/>

			<outline text="Jukka V&amp;#164;is&amp;#164;nen of the Transport Agency said that the problem was not so much the thickness of the ice, which was expected, but its strength. On Friday morning the vessel was stuck in Joutseno, halfway between Lappeenranta and Imatra."/>

			<outline text="The Finnish football league has seen some brutal cost-cutting in recent years, and that shines through in the clubs expected to compete at the top of the table. The austerity league could still provide thrills and spills, however, as clubs battle to get the most out of their precious resources. 17:26"/>

			<outline text="Veikkausliiga starts this weekend with high expectations for last season's champions. Mikael Forssell is one big-name player frustrated by the delay, but glad to be back on home turf. He told Yle News why he wanted to return, how he hopes to persuade glory-hunters to start supporting football clubs, and whether he will finally return to Twitter. 17:26"/>

			<outline text="The Independence day ball will be held in Tampere this year. The Presidential Palace in Helsinki is closed for renovations. 14:38"/>

			<outline text="On Friday the South-Savo district court heard a case involving the robbery and assault of a couple as they were sleeping in their Haukivuori home. 13:51"/>

			<outline text="The Saimaa canal, which connects eastern Finland to Vyborg, remains closed thanks to unusually thick ice cover. The canal is currently topped with more than half a metre of strong ice, causing one icebreaker to get stuck when it tried to clear the waterway on Wednesday. 12:04"/>

			<outline text="The Finnish pensions firm Ilmarinen has sold its stake in Talvivaara, the troubled mining firm. Ilmarinen based its decision on Talvivaara's weak financial position and continuing environmental difficulties. 11:42"/>

			<outline text="Finnish companies owned by the state are to be required to declare their tax arrangements. The requirement comes in following an Yle report on the state-owned postal firm Itella's use of offshore-domiciled subsidiaries. 11:29"/>

			<outline text="Fiskars says the move is essential to secure glass production in Finland. 11.4."/>

			<outline text="The head of STX Europe made a rare public appearance in Helsinki for the launch of a new type of Arctic vessel. 11.4."/>

			<outline text="Late and potentially high spring floods are in the offing as heavy ice remains on rivers even in southernmost Finland. In Oulu, the sea ice is still strong enough to drive on. 11.4."/>

			</outline>

		<outline text="The Daily Bell - New Book on EU Corruption Gets Author Investigated">

			<outline text="Link to Article" name="linkToArticle" type="link" url="http://www.thedailybell.com/28970/New-Book-on-EU-Corruption-Gets-Author-Investigated"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365793566_Rxt3W44B.html"/>

			<outline text="Fri, 12 Apr 2013 14:06"/>

			<outline text=""/>

			<outline text="Publish Book '' Get Turned Over by Police ... No, I am not making this up. My memoirs A Mote in Brussels' Eye describing my five years in the Brussels lunatic asylum came out as an eBook at the end of January. Today, March 5, nine policemen arrived unannounced at my front door armed with a warrant to search our home. Much of my book details my efforts on behalf of the taxpayers of SE England to expose the gross misuse of public funds by the EU, and hold them accountable. Such an unusually fast reaction by Brussels tells me they are both very angry and terrified. My book is obviously causing them trouble. '' Ashley Mote's blog"/>

			<outline text="Dominant Social Theme: Only criminals criticize the state."/>

			<outline text="Free-Market Analysis: Ashley Mote is a convicted felon, but also a prolific freedom-author, and he seems to have written an interesting book. The book is based on his experiences from 2004 to 2009 as an independent member of the European Parliament. Once in Brussels, Ashley Mote described his experiences thus:"/>

			<outline text="&quot;When you are on the field of battle, you use every weapon at your disposal. Turning my back on the EU like many of my former colleagues in UKIP would not defeat it. I found myself a fifth-columnist within the city walls. But unlike most fifth-columnists, my foes knew I was there '' a quite extraordinary situation, like much else in the EU."/>

			<outline text="&quot;Being a member at least gave me access to information, and to people. I now had the ability to take the concerns of ordinary people into boardrooms and ministerial offices as never before, and have them taken seriously."/>

			<outline text="&quot;I also had access to funds which could be used to help fight for the restoration of British sovereignty. The EU does not create wealth. It takes it from taxpayers all over the member states. It is a financial leech on the body politic. Bringing it back to the UK to finance our fight was the best possible use of British taxpayers' money."/>

			<outline text="&quot;It also helped finance a research team in Brussels and the UK who were able to dig into the detail of EU regulations, directives and future plans. We had access to information and officials, and the power to call the Commission to account."/>

			<outline text="A Mote in Brussels' Eye was published January 23, 2013; we were made aware of it via a private press release. The subtitle is &quot;The diary of a Member of the European Parliament,&quot; and information provided by Mote features the following points:"/>

			<outline text="- A full, frank and controversial account of five years fighting the EU from within the castle walls."/>

			<outline text="- The first ever blow-by-blow memoirs of a British MEP."/>

			<outline text="- Sensational new evidence wrung out of the EU revealing:"/>

			<outline text="Industrial-scale institutionalised looting of British taxpayers' money."/>

			<outline text="Indisputable evidence of endemic EU corruption and fraud."/>

			<outline text="'Diversity' the claim, 'regulation and standardisation' the reality."/>

			<outline text="Huge hidden cash piles as the EU demands ever more."/>

			<outline text="Uncontrolled migration across EU's eastern borders totally ignored."/>

			<outline text="Illegal seizure of power and control from nation states."/>

			<outline text="Dilution of national identities by mass migration and imported criminality."/>

			<outline text="3000 secret committees endlessly planning new EU 'law'."/>

			<outline text="Refusals by the Serious Fraud Office and Scotland Yard to examine unequivocal evidence of illegal payments to Brussels."/>

			<outline text="EU officials deliberately misleading the House of Lords."/>

			<outline text="Millions in soft loans to the BBC to buy editorial support."/>

			<outline text="The European Central Bank authorising a flood of new 500-euro banknotes, used mainly by drug barons for money laundering."/>

			<outline text="&quot;Nobody is responsible, everybody else is to blame, and who cares anyway?&quot; '' EU bureaucrat."/>

			<outline text="It is well known that the EU is corrupt but books like this one (we have not read it) may provide historical perspectives that will be useful in sorting through the way this continental disaster was put together and peddled."/>

			<outline text="What is clear is that there are emergent parallel justice systems in Europe, as in the US: One for those cooperating with the current globalist scheme and another for those who do not cooperate or speak out against it."/>

			<outline text="The globalist justice system actually encourages illegalities and corruption and seeks no prosecution for those involved in them. The second system, run aggressively '' almost pathologically '' by the same globalists, wiretaps, scrutinizes and otherwise investigates those deemed to be enemies of the current state of affairs."/>

			<outline text="Mote obviously falls into the second category, which certainly explains his book and might explain his prosecution, as well."/>

			<outline text="If the book delivers on its various promises in a credible way, it would certainly add to a growing collection of exposes about how the EU is run and by whom and for what purposes."/>

			<outline text="Of course, we have a pretty good idea, but eyewitness accounts can help buttress those conclusions and historical records are indeed important."/>

			<outline text="Conclusion: We would invite anyone who reads the book to report back."/>

			</outline>

		<outline text="Free Range Kids &gt;&gt; Who Saw the ''School Shooting'' Episode of Glee?">

			<outline text="Link to Article" type="link" url="http://www.freerangekids.com/who-saw-the-school-shooting-episode-of-glee/"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365783689_sH9gj4EY.html"/>

			<outline text="Source: Dave says..." type="link" url="http://dave.sobr.org/microblog.rss"/>

			<outline text="Fri, 12 Apr 2013 11:21"/>

			<outline text=""/>

			<outline text="Hi Readers '-- I didn't see the ''Shooting Star'' episode of Glee last night that featured a lockdown after shots rang out at the uber-musical school. (And I can't find it on the web without going to some very scary sites that seem ready to infect my computer with high-tech herpes.) But some of you have alerted me to the plot, which is why it's today's topic. So, spoiler alert: Turns out there's no mad gunman, but for a while the audience doesn't realize this, and neither, of course, do the characters."/>

			<outline text="The script was written before the Sandy Hook tragedy, so it's not like the Glee writers have gone Law &amp; Order on us. And I really do not worry that as goes  Glee, so goes America. (Except for the good part, that as goes Glee, so has gone a huge upswing in glee clubs and musicals.) So is there anything significant about having a scary lockdown on the show? Kenny Felder sums up the caveat:"/>

			<outline text="The writers of ''Glee'' have to come up with a new plot every week, following the same basic formula but yet different from any other plot they've done. They have to make it more exciting, at every minute of every episode, than all the other options you can click to. And who wouldn't rather watch a high school  lockdown '-- along with cheerleaders in short skirts, famous and near-famous guest stars, and that amazing soundtrack '-- than read about the latest crisis at the Federal Reserve? Given all that, I certainly can't fault them for milking this one for all it's worth. I might do the same in their place."/>

			<outline text="On the other hand, the real stakes here are emotional. The problem isn't just that people think, ''I saw a lot of school shootings on TV, so they are probably pretty common.'' It's that every time they send their kids to school they imagine a gunman, and Glee feeds into that, the same way the news does."/>

			<outline text="Now, the typical cynical thing to say about Hollywood is that they don't care about principles and just want to make a buck. But in many ways I think the makers of Glee are very principled. They have certainly gone out of their way to showcase gay characters, handicapped characters, and so on. So imagine an episode that really shows the difference between a school that is run like a prison, and a school that isn't in the grips of the kind of panic we're seeing in a lot of schools these days. Imagine showing viewers the value of allowing high school students more freedom, and ignoring that ''What if'' voice that increasingly shouts in our heads. Imagine episodes in which some students go off campus, unsupervised, to work in the community, or take biology field trips into the woods, all while singing and dancing of course. And instead of terrible consequences happening, they all learn and grow in a heartwarming way."/>

			<outline text="A show like that could make more difference than all the statistics in the world."/>

			<outline text="Agreed! Probably even more difference than a blog that talks about how pop culture's obsession with kids in danger ends up influencing our parenting, our politics and our school policing. Speaking of which, here's a great NY Times piece on how having more cops in the schools for ''safety'' ends up funneling more kids into the prison pipeline."/>

			<outline text="Nope, I'm not blaming Glee for that. '' L."/>

			<outline text=" "/>

			<outline text="Filed under: Media Madness, Miscellaneous"/>

			</outline>

		<outline text=""/>

		<outline text="Anti-Gun Fanatic Dem Sen. Dianne Feinstein: Private Gun Sales Play Into Terrorists' Hands'...">

			<outline text="Link to Article" type="link" url="http://weaselzippers.us/2013/04/12/anti-gun-fanatic-dem-sen-dianne-feinstein-private-gun-sales-play-into-terrorists-hands/"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365783528_HuHep2UL.html"/>

			<outline text="Source: Weasel Zippers" type="link" url="http://weaselzippers.us/feed/"/>

			<outline text="Fri, 12 Apr 2013 11:18"/>

			<outline text=""/>

			<outline text="No shame in DiFi's game."/>

			<outline text="(Washington DC) - During an appearance on MSNBC's Morning Joe, Senator Dianne Feinstein (D-CA) said expanded background checks are ''not enough,'' and private gun sales cannot continue because they allow terrorists to buy guns in the United States."/>

			<outline text="Feinstein said private gun sales allow felons to buy firearms as well."/>

			<outline text="In addition, Feinstein believes the failure of the filibuster threat against Senate gun control opens the door to her ''assault weapons'' ban once again:"/>

			<outline text="''Do you need a 30 round clip and an AR-15 to hunt? '...I don't understand, and I never have, why this nation is better, how we protect our nation better, with these weapons available.''"/>

			<outline text="''So the bill I have, which is close [to what they just instituted in Connecticut], really doesn't take a weapon away from anybody. If they sell that weapon there's a background check. If they keep that weapon, it has to be kept with a trigger lock and in a safe. And it exempts 2,258 weapons in 96 pages of bill language by make and model. What it does do, is dry up of the supply [of these weapons] over time.''"/>

			<outline text="Keep reading'..."/>

			</outline>

		<outline text="Rick Warren: Son Committed Suicide With Unregistered Gun">

			<outline text="Link to Article" type="link" url="http://talkingpointsmemo.com/news/rick-warren-son-committed-suicide-with-unregistered-gun.php"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365780121_wPTvCxA3.html"/>

			<outline text="Source: TPM News" type="link" url="http://feeds.feedburner.com/tpm-news"/>

			<outline text="Fri, 12 Apr 2013 10:22"/>

			<outline text=""/>

			<outline text="GILLIAN FLACCUS April 12, 2013, 9:03 AM — TUSTIN, Calif. (AP) '-- Pastor Rick Warren said his son killed himself with an unregistered gun he purchased through the Internet."/>

			<outline text="Warren sent a tweet Thursday saying he forgives whoever sold the weapon to his 27-year-old son Matthew, who committed suicide last Friday."/>

			<outline text="The Orange County Sheriff's Department is trying to find the seller but it won't be easy. The gun's serial number was scratched off, making it impossible to trace, spokesman Jim Amormino said."/>

			<outline text="''We can't tell if it's registered or not because the serial number is scratched off,'' he said. ''At one point in time, it may have been, but it's going to be impossible to find out.''"/>

			<outline text="It's illegal in California to buy a gun without a background check and purchasers are supposed to register their firearms. Defacing a gun's serial number is a federal offense."/>

			<outline text="Rick Warren is pastor of Saddleback Church in Orange County and author of ''The Purpose-Driven Life,'' a bestseller."/>

			<outline text="He and his wife, Kay, said in an email on Saturday that their son struggled for much of his life with severe depression and suicidal thoughts. They have set up a mental health fund in Matthew's memory."/>

			<outline text="Warren has tweeted daily since his son's death, including items about seeing his son's body at the coroner's office for the first time, combating the stigma of mental illness and the challenges of grieving when a public figure."/>

			</outline>

		<outline text="The Yalu Line">

			<outline text="Link to Article" name="linkToArticle" type="link" url="http://lamecherry.blogspot.com/2013/04/the-yalu-line.html"/>

			<outline text="Archived Version" name="archivedVersion" type="link" url="http://adam.curry.com/art/1365774659_Xn7c6j49.html"/>

			<outline text="Source: Lame Cherry" name="sourceLameCherry" type="link" url="http://lamecherry.blogspot.com/feeds/posts/default"/>

			<outline text="Fri, 12 Apr 2013 08:50"/>

			<outline text=""/>

			<outline text="When the Lame Cherry produced the matter anti matter exclusive concerning the set up which was coming aimed at Kim Jong Un of North Korea in launching &quot;a nuke&quot; at Okinawa, the proof of what was being set up, was released in a doctored Pentagon report from Obama's chief military minder, feeding this operation that Kerry and Biden were in on."/>

			<outline text="The quotes are telling in:"/>

			<outline text="Representative Lamborn read from the report toward the end of a defense budget hearing."/>

			<outline text="''They say, 'DIA assess with moderate confidence the North currently has nuclear weapons capable of delivery by ballistic missiles. However, the reliability will be low.' General, would you agree with that assessment by DIA?'' he asked"/>

			<outline text="In a plausible denial, the military minder, Gen. Dempsey, stated that the report officially released that the GOP Congressman bit on and quoted from, but not officially made public would not be commented on.''I can't touch that one,'' Dempsey answered."/>

			<outline text="The story further laid the groundwork in: North Korea is expected to launch a missile soon as a show of defiance against the West. The administration said Thursday there is no indication that the missiles readied for launch are nuclear-armed, media reports said."/>

			<outline text="That is the reality in North Korea is not a group of idiots. Madam Auntie who has been guiding Kim Jong Un, has been flawless in the tactical maneuvering in this operation. North Korea does not have an infinite supply of nuclear warheads, which this blog assesses are constructed around the Dr. Khan cone shaped warhead which Iran is creating in quantity. Kim Jong Un is not going to launch a nuclear payload for an attack, as he does not have the numbers required to fend off an American landing and it is too expensive in resources for America to continue a protracted war. The North Koreans do have a scorched earth scenario of actually nuking &quot;invaders&quot; on their own soil, to contaminate it, to keep the Americans from trying a second wave invasion."/>

			<outline text="That is another Lame Cherry exclusive in this."/>

			<outline text="This all dovetails now in the groundwork being laid in this blog, of Kim has in this match, loaded two missiles to launch, which as this blog has the Obama regime flat footed, as the regime was planning on only one missile hitting Okinawa, which would be salted with &quot;nuclear warhead&quot; which Obama had constructed in order to force a public opinion change in Peking and Moscow in this machination of Obama."/>

			<outline text="The Pentagon has created amplified assessments for an Obama operation that Chuck Hagel inherited out of the Pentagon in dealing with this situation in an Obama sleight of hand."/>

			<outline text="This blog is exposing all of this, as this is not the correct way to carry out foreign policy as this Obama regime manufactures stories constantly and it has been a disaster for the world and only benefits the feudal few."/>

			<outline text="This blog has been proven right again and again, it is currently attempting to stop this very bad operation of manipulating the American people again and causing more murderous death. This regime has not diverged yet from the first October One O story and is pressing on, with a now deliberate manipulation of Congressional oversight."/>

			<outline text="What has just been carried out by the Pentagon should bring the immediate arrest of Birther Hussein Obama and his cohorts of Kerry, Hagel and Biden by civilian authorities, with military arrest of Gen. Dempsey in this deliberate deception of Congress."/>

			<outline text="Kim Jong Un in delaying past April 10, and showing two missiles has given the regime of Birther Obama time now to build on the story first posted here in &quot;one nuclear missile and one decoy&quot; landing in Okinawa."/>

			<outline text="Birther Obama has squabbling in the military minders wanting to claim that the missile was &quot;neutralized&quot; and shot down, but that is being overridden by the handicap that now that the missiles were &quot;known&quot; to be nuclear capable, why did Obama wait for Okinawa to be nuked."/>

			<outline text="This fraud policy from fraud Birther Hussein Obama run by his minders is full of holes that examination will expose, but like ANALGATE, all of these sleights of hand are not being reported to a brain dead public which follows whatever it is told."/>

			<outline text="Another Lame Cherry matter anti matter exclusive."/>

			<outline text="agtG 308Y"/>

			</outline>

		<outline text="Iranian 'time machine' can 'predict' oil prices and wars">

			<outline text="Link to Article" type="link" url="http://rt.com/news/iranian-time-machine-prediction-692/"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365774333_yqML8cBh.html"/>

			<outline text="Source: News RSS" type="link" url="http://rt.com/rss/news/"/>

			<outline text="Thu, 11 Apr 2013 08:08"/>

			<outline text=""/>

			<outline text="An Iranian inventor claims to have created a 'time machine' that can predict a person's future. He boasts that the device is relatively cheap, but says he has not built one yet because he fears that the Chinese will steal his idea."/>

			<outline text="Ali Razeghi, 27, has submitted his invention to the state-run Centre for Strategic Inventions for registration."/>

			<outline text="The device is called ''The Aryayek Time Traveling Machine,'' FARS news agency reported. Razeghi said he worked on his creation for the last 10 years, resulting in a desktop-computer-sized machine that can &quot;predict five to eight years of the future life of any individual, with 98 percent accuracy.&quot;"/>

			<outline text="The man, who has 179 other inventions under his belt, eyes governmental applications for his prediction device with uses both civilian and military."/>

			<outline text="&quot;Naturally a government that can see five years into the future would be able to prepare itself for challenges that might destabilize it,&quot; he explained. &quot;As such we expect to market this invention among states as well as individuals once we reach a mass-production stage.&quot;"/>

			<outline text="Razeghi also claimed to have beaten competitors working on similar devices: ''The Americans are trying to make this invention by spending millions of dollars on it where I have already achieved it by a fraction of the cost.&quot;"/>

			<outline text="He added that he is concerned about industrial espionage, as other nations will be eager to learn his secrets. &quot;The reason that we are not launching our prototype at this stage is that the Chinese will steal the idea and produce it in millions overnight,&quot; he said."/>

			<outline text="Predicting the future, even on relatively narrow issues, is a notoriously complex task. It usually requires creating an accurate computer model of a system that takes into account numerous factors, and often requires plenty of computational power. Predicting a future event in its entirety is virtually impossible with existing technology."/>

			</outline>

		<outline text="Federal Register | Federal Employee Pay Schedules and Rates That Are Set by Administrative Discretion">

			<outline text="Link to Article" name="linkToArticle" type="link" url="https://www.federalregister.gov/articles/2013/04/10/2013-08523/federal-employee-pay-schedules-and-rates-that-are-set-by-administrative-discretion"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365774301_CC7k4hft.html"/>

			<outline text="Thu, 11 Apr 2013 08:50"/>

			<outline text=""/>

			<outline text=" "/>

			<outline text="Section 1112 of the Consolidated and Further Continuing Appropriations Act, 2013 (Public Law 113-6), reflects the Congress's decision to continue to deny statutory adjustments to any pay systems or pay schedules covering executive branch employees. In light of the Congress's action, I am instructing heads of executive departments and agencies to continue through December 31, 2013, to adhere to the policy set forth in my memoranda of December 22, 2010, and December 21, 2012, regarding general increases in pay schedules and employees' rates of pay that might otherwise take effect as a result of the exercise of administrative discretion."/>

			<outline text="This memorandum shall be carried out to the extent permitted by law and consistent with executive departments' and agencies' legal authorities. This memorandum is not intended to, and does not, create any right or benefit, substantive or procedural, enforceable at law or in equity by any party against the United States, its departments, agencies, or entities, its officers, employees, or agents, or any other person."/>

			<outline text="The Director of the Office of Personnel Management shall issue any necessary guidance on implementing this memorandum, and is also hereby authorized and directed to publish this memorandum in the Federal Register."/>

			<outline text="THE WHITE HOUSE, Washington, April 5, 2013"/>

			<outline text="[FR Doc. 2013-08523 Filed 4-9-13; 8:45 am]"/>

			<outline text="Billing code 6325-01"/>

			</outline>

		<outline text="Sequestration Order for Fiscal Year 2014">

			<outline text="Link to Article" type="link" url="http://www.whitehouse.gov/the-press-office/2013/04/10/sequestration-order-fiscal-year-2014"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365771307_rUNgZyTV.html"/>

			<outline text="Source: White House.gov Press Office Feed" type="link" url="http://www.whitehouse.gov/feed/press"/>

			<outline text="Fri, 12 Apr 2013 07:55"/>

			<outline text=""/>

			<outline text="The White House"/>

			<outline text="Office of the Press Secretary"/>

			<outline text="For Immediate Release"/>

			<outline text="April 10, 2013"/>

			<outline text="By the authority vested in me as President by the laws of the United States of America, and in accordance with section 251A of the Balanced Budget and Emergency Deficit Control Act (the &quot;Act&quot;), as amended, 2 U.S.C. 901a, I hereby order that, on October 1, 2013, direct spending budgetary resources for fiscal year 2014 in each non-exempt budget account be reduced by the amount calculated by the Office of Management and Budget in its report to the Congress of April 10, 2013."/>

			<outline text="All sequestrations shall be made in strict accordance with the requirements of section 251A of the Act and the specifications of the Office of Management and Budget's report of April 10, 2013, prepared pursuant to section 251A(11) of the Act."/>

			<outline text="BARACK OBAMA"/>

			</outline>

		<outline text="Are DDoS attacks being used to fix Bitcoin rates? &#8212; RT USA">

			<outline text="Link to Article" type="link" url="http://rt.com/usa/users-gox-ddos-bitcoin-707/"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365771050_xvy54Vak.html"/>

			<outline text="Source: BadChad's ThoughtPile" type="link" url="http://cartusers.curry.com/chad.christiandgk2/badchad"/>

			<outline text="Fri, 12 Apr 2013 07:50"/>

			<outline text=""/>

			<outline text="A massive surge in the number of Bitcoin users and reports of hacking attempts are being blamed for a worldwide panic that caused the cryptocurrency's worth to fluctuate drastically in recent days."/>

			<outline text="The value of Bitcoin, the unregulated and practically untraceable digital currency that exists only on computers, has gone on a roller coaster ride as of late, quintupling in price during the last month before losing more than half of its worth during a Wednesday crash."/>

			<outline text="Mt. Gox, the leading Bitcoin exchange on the Web, confirmed Wednesday evening that the number of trades executed during the previous 24 hours tripled beyond their expectations. The surge, they said, could be blamed on the number of new accounts, which climbed to roughly 20,000 per day so far in April."/>

			<outline text="But while those statistics are good for Bitcoin traders hoping to open up the currency to a wider market, the influx of users has created a crippling effect on the online exchanges where BTCs are bought and sold. Now after an exponential rise in value and an equally intensive drop, Bitcoin experts are asking questions about the future of the currency."/>

			<outline text="As trading pinnacled on Wednesday as more and more users signed up to create digital wallets, the surge in traffic on sites like Mt. Gox became too much for the BTC exchange to handle. Mt. Gox has since gone offline in order to add servers to its system so it can try to process the rampant buying and selling it was unprepared for, but for many users the downtime caused by the crippling traffic surge was enough to scare them away from these services and Bitcoin in general."/>

			<outline text="&quot;The big slowdown was initially pegged as a 'distributed denial of service' (DDoS) attack, in which hackers use large groups of computers to flood a website with connections, such that no one else can connect to it,&quot; Matthew Boesler writes for Business Insider."/>

			<outline text="On their official Facebook page, Mt. Gox says &quot;we were not last night victim of a DDoS but instead victim of our own success.&quot;"/>

			<outline text="''Indeed the rather astonishing amount of new account opened in the last few days added to the existing one plus the number of trade made a huge impact on the overall system that started to lag. As expected in such situation people started to panic, started to sell Bitcoin in mass (Panic Sale) resulting in an increase of trade that ultimately froze the trade engine!''"/>

			<outline text="But while Mt. Gox says that a DDoS attack wasn't to blame, that type of cyber assault wasn't unexpected either: one week before BTC sold at its all-time high, hackers hit the exchange with an assault that shut down service and brought Bitcoin trading to a halt. At the time, the company said the impact was to blame for ''its worst trading lag ever.''"/>

			<outline text="Should BTC sites like Mt. Gox be hit with further cyberattacks, new users looking to trade the cryptocurrency and use it to buy goods and services over the Web might be deterred from jumping on a bandwagon that has taken on an unexpected number of fresh accounts in recent weeks. After last week's attack, Mt. Gox explained how hitting their servers with a DDoS attack could do wonders for veteran users looking to make some extra money:"/>

			<outline text="''Attackers wait until the price of Bitcoins reaches a certain value, sell, destabilize the exchange, wait for everybody to panic-sell their Bitcoins, wait for the price to drop to a certain amount, then stop the attack and start buying as much as they can.''"/>

			<outline text="''Repeat this two or three times like we saw over the past few days and they profit,'' the company says."/>

			<outline text="Mt. Gox says they are now &quot;working around the clock&quot; to improve stability, but warns that another DDoS attack wouldn't be unexpected."/>

			<outline text="''Even though we are using one of the best companies to help us fight against these DDoS attacks, we are still being affected,'' they announced after last week's cyberattack."/>

			</outline>

		<outline text="No Panic in Koreas Despite Talk of Missile Launch">

			<outline text="Link to Article" type="link" url="http://www.military.com/daily-news/2013/04/10/no-panic-in-koreas-despite-talk-of-missile-launch.html?ESRC=topstories.RSS"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365770734_u5m2LUun.html"/>

			<outline text="Thu, 11 Apr 2013 18:56"/>

			<outline text=""/>

			<outline text="PYONGYANG, North Korea -- The prospect of a North Korean missile launch is &quot;considerably high,&quot; South Korea's foreign minister told lawmakers Wednesday as Pyongyang calmly prepared to mark the April 15 birthday of its founder, historically a time when it seeks to draw the world's attention with dramatic displays of military power."/>

			<outline text="The missile is expected to be a medium-range missile with a range of 3,500 kilometers (2,180 miles) capable of flying over Japan, Foreign Minister Yun Byung-se told lawmakers in Seoul. Earlier, a Defense Ministry official said preparations appeared to be complete, and that the launch could take place at any time."/>

			<outline text="Yun said Seoul was bracing for the test-fire of a ballistic missile dubbed &quot;Musudan&quot; by foreign experts after the name of the northeastern village where North Korea has a launch pad. Experts said the Musudan is built to reach the U.S. territory of Guam as well as U.S. military installations in Japan."/>

			<outline text="North Korean officials have not announced plans to launch a missile, but have told foreign diplomats in Pyongyang that they will not be able to guarantee their safety starting Wednesday. Officials also have urged tourists in South Korea to take cover, warning that a nuclear war is imminent. However, most diplomats and foreign residents appeared to be staying put."/>

			<outline text="The threats are largely seen as rhetoric and an attempt by North Korea to scare foreigners into pressing their governments to pressure Washington and Seoul to change their policies toward Pyongyang, as well as to boost the military credentials of North Korea's young leader, Kim Jong Un. North Korea does not have diplomatic relations with the U.S. and South Korea, its foes during the Korean War of the 1950s."/>

			<outline text="On the streets of Pyongyang, the focus was less on preparing for war and more on beautifying the city ahead of the nation's biggest holiday. Soldiers laid blankets of sod to liven up a city still coming out of a long, cold winter; gardeners got down on their knees to plant flowers and trees, and students marched off to school -- ordinary springtime activities belying the high tensions."/>

			<outline text="Downtown, schoolchildren headed toward the towering statues of the two late leaders, Kim Il Sung and Kim Jong Il, dragging brooms behind them. The brooms are used to sweep the plaza where the bronze statues stand on a hilltop overlooking Pyongyang. A group of women with coats thrown over traditional dresses rushed through the spring chill after leaving a rehearsal for a dance set to take place for Kim Il Sung's birthday celebrations."/>

			<outline text="At the base of Mansu Hill, a group of young people held a small rally to pledge their loyalty to Kim Jong Un and to sing the Kim ode &quot;We Will Defend the Marshal With Our Lives.&quot;"/>

			<outline text="Kim Un Chol, the 40-year-old head of a political unit at Pyongyang's tobacco factory, said he had been discharged from the military but was willing to re-enlist if war breaks out. He said North Koreans were resolute."/>

			<outline text="&quot;The people of Pyongyang are confident. They know we can win any war,&quot; he told The Associated Press. &quot;We now have nuclear weapons. So you won't see any worry on people's faces, even if the situation is tense.&quot;"/>

			<outline text="North Korea sporadically holds civil air raid drills during which citizens practice blacking out their windows and seeking shelter. But no such drills have been held in recent months, local residents said."/>

			<outline text="Last year, the days surrounding the centennial of the birth of Kim Il Sung, grandfather of the current ruler, was marked by parades of tanks, goose-stepping soldiers and missiles, as well as the failed launch of a satellite-carrying rocket widely believed by the U.S. and its allies in the West to be a test of ballistic missile technology. A subsequent test in December went off successfully, and that was followed by the country's third underground nuclear test on Feb. 12 this year, possibly taking the regime closer to mastering the technology for mounting an atomic bomb on a missile."/>

			<outline text="The resulting U.N. sanctions and this spring's annual U.S.-South Korean military drills have been met with an unending string of threats and provocations from the North."/>

			<outline text="Adm. Samuel Locklear, commander of U.S. Pacific Command, told the Senate Armed Services Committee in Washington on Tuesday that North Korea's persistent nuclear and missile programs and threats have created &quot;an environment marked by the potential for miscalculation.&quot;"/>

			<outline text="He said the U.S. military and its allies would be ready if North Korea tries to strike."/>

			<outline text="North Korea has been escalating tensions with the U.S. and South Korea, its wartime foes, for months. The tightened U.N. sanctions that followed the nuclear test drew the ire of North Korea, which accused Washington and Seoul of leading the campaign against it. Annual U.S.-South Korean military drills south of the border have further incensed Pyongyang, which sees them as practice for an invasion."/>

			<outline text="Last week, Kim Jong Un enshrined the pursuit of nuclear weapons -- which the North characterizes as a defense against the U.S. -- as a national goal, along with improving the economy. North Korea also declared it would restart a mothballed nuclear complex."/>

			<outline text="Citing the tensions with Seoul, North Korea on Monday pulled more than 50,000 workers from the Kaesong industrial park, which combines South Korean technology and know-how with cheap North Korean labor. It was the first time that production was stopped at the decade-old factory park, the only remaining symbol of economic cooperation between the Koreas."/>

			<outline text="A test-fire of the Musudan missile would violate U.N. Security Council resolutions banning North Korea from nuclear and missile activity, and escalate tensions with the U.S., South Korea and Japan."/>

			<outline text="Japan has deployed PAC-3 missile interceptors in key locations around Tokyo."/>

			<outline text="The South Korean and U.S. militaries also have raised their surveillance level, called Watch Condition, a South Korean Defense Ministry official said. He refused to confirm a Yonhap News Agency report in Seoul saying it had been raised to 2, the second-highest level. He spoke on condition of anonymity, saying he wasn't authorized to speak to media."/>

			<outline text="One historian, James Person, noted that it isn't the first time North Korea has warned that a war was imminent."/>

			<outline text="He said that in 1968, following North Korea's seizure of an American ship, the USS Pueblo, Pyongyang persistently advised foreign diplomats to prepare for a U.S. counterattack. Cables from the Romanian mission in Pyongyang showed embassies were instructed to build anti-air bunkers &quot;to protect foreigners against air attacks,&quot; he said."/>

			<outline text="The cables were obtained and posted online by the Wilson Center's North Korea International Documentation Project."/>

			<outline text="Person called it one of North Korea's first forays into what he dubs &quot;military adventurism.&quot;"/>

			<outline text="&quot;In 1968, there was some concern there would be an attack, but (the North Koreans) certainly were building it up to be more than it was in hopes of getting more assistance from their allies at the time,&quot; Person said by phone from Alexandria, Virginia."/>

			<outline text="&quot;I think much of it was hot air then. Today, I think again, it's more hot air,&quot; he said. &quot;The idea is to scare people into pressuring the United States to return to negotiations with North Korea. That's the bottom line.&quot;"/>

			<outline text="-- Kim Kwang Hyon and David Guttenfelder in Pyongyang; Hyung-jin Kim in Seoul, South Korea; Matthew Pennington, and Donna Cassata and Richard Lardner in Washington contributed to this report."/>

			<outline text="(C) Copyright 2013 Associated Press. All rights reserved. This material may not be published, broadcast, rewritten or redistributed."/>

			</outline>

		<outline text="Railguns remain in Navy's future plans">

			<outline text="Link to Article" type="link" url="http://defensetech.org/2013/04/10/railguns-remain-in-navys-future-plans/"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365770724_26uxXsqp.html"/>

			<outline text="Thu, 11 Apr 2013 19:03"/>

			<outline text=""/>

			<outline text="They may not be ready to punch holes in Decepticons, but the Navy is banking on electromagnetic railguns one day arming its newest ships."/>

			<outline text="Directed energy weapons have created quite a buzz at this year's Sea Air Space Expo at National Harbor, Md. They have always been a popular prop in science fiction movies; now the Navy is deploying the first directed energy &quot;laser&quot; weapon early next year aboard the amphibious transport dock Ponce."/>

			<outline text="While not as popular as lasers, high-energy rail guns are also generating plenty of excitement at this year's show. The Navy has studied arming its new DDG 1000-class destroyer and Littoral Combat Ships with just such a weapon."/>

			<outline text="&quot;There are feasible sizes of railguns we think we can put on these ships,&quot; Robin White, director of Surface Ship Design and Systems Engineering, said Wednesday."/>

			<outline text="The Navy has spent the past eight years testing rail guns, most notably rolling out the first weaponized railgun in January 2012. The technology stands to offer additional range for land strikes as well as added capabilities in ballistic and cruise missile defense."/>

			<outline text="Railguns are capable of launching high-speed projectiles at targets out to 100 miles without explosive propellants. They also can be guided on target, said Rear Adm. Matthew Klunder, chief of Naval Research and director of Innovation, Technology Requirements and Test &amp; Evaluation."/>

			<outline text="These highly-advanced weapons, however, won't come without challenges to overcome, White said. There will be power-storage issues to deal with as well as weight impacts for shipboard use. Heat dissipation and cooling will also have to be addressed."/>

			<outline text="''There will be many things to work through as we go forward, but the good news is these weapons provide tremendous advantage for shipboard use,'' White said."/>

			<outline text="What type of shipboard use is still to be seen. Former Navy Under Secretary Robert Work said in January that he'd recommend delaying the decision on railguns until the Navy can decide how these weapons fit into the fleet design."/>

			<outline text="&quot;Naval to naval exchanges just aren't our thing right now. What it is is about projecting power in theaters where these land based anti-access aerial denial networks with guided weapons that can be thrown at range in salvos is a very, very difficult problem and the Navy is very focused on,&quot; Work said in January."/>

			<outline text="April 10th, 2013 | Sea, Sea Services, Ships and Subs | Comments | By Matt Cox"/>

			</outline>

		<outline text="Bitcoin: Out with a POP!">

			<outline text="Link to Article" type="link" url="http://www.surlytrader.com/bitcoin-out-with-a-pop/#utm_source=feed&amp;utm_medium=feed&amp;utm_campaign=feed"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365769168_Es8n7qqv.html"/>

			<outline text="Source: SurlyTrader" type="link" url="http://feeds.feedburner.com/Surlytrader"/>

			<outline text="Fri, 12 Apr 2013 07:19"/>

			<outline text=""/>

			<outline text="When it comes to behavioral finance, there is nothing like a good bubble to prove the concept:"/>

			<outline text=" "/>

			<outline text=" "/>

			<outline text="The popping of the bubble will continue, because of the same behavioral finance that created it in the first place. It is possible that Bitcoin has some lasting power as an alternative, but definitely not if it is thinly traded with no ability to short it. Make it have a completely free exchange and I might not bet against it'...but I also would not be hoping for 1,000% returns."/>

			<outline text="You can read some of the pain that it has created at reddit.com"/>

			<outline text="or just browse the subgroup."/>

			<outline text="Related posts:"/>

			<outline text="Bitcoin Mania! | Bitcoin Hysteria | Pray for Higher Rates | Tail Risk Hedging &#8211; James Montier | Loss of Cabin Pressure"/>

			</outline>

		<outline text="CWSkimmer-Ericsson Labs | Beyond HTML5 - Audio Capture in Web Browsers">

			<outline text="Link to Article" type="link" url="https://labs.ericsson.com/developer-community/blog/beyond-html5-audio-capture-web-browsers"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365735080_4WVgaFZx.html"/>

			<outline text="Thu, 11 Apr 2013 21:51"/>

			<outline text=""/>

			<outline text="HTML5 - the next generation standard for web browsers is in last call in WHATWG and is currently being implemented by browser vendors. The work to complete the specification has taken over five years and is still in progress. HTML5 takes a great leap into making the browser a more powerful application platform by adding several new features. To name a few: WebSockets and Server-sent events open up for more flexible networked applications and the media elements and canvas element can be used to seamlessly embed new types of content into web pages. So what is the next step in HTML?"/>

			<outline text="One answer to the question above is the ability to control new types of input devices. The device element represents a device selector which allows the user to grant a web page access to input devices such as a microphone or web camera. The code snippets below demonstrates how the device element and the Stream API can be used to record a short audio clip. Note that the device element and the related APIs are not available in any browser yet, and the APIs may change before this happens. But anyway - here is the code."/>

			<outline text="Select device: The page layout content simply consists of the a device selector, represented by the device tag, and a button. The type attribute of the device element has been specified to &quot;audio_capture&quot; to narrow down the list of devices to only include the ones capable of recording audio."/>

			<outline text="// in window.onloaddocument.getElementById(&quot;media_device&quot;).onchange = function(){// ready to record audioStream = this.data; recordCtlBut.disabled = false;};Upon loading the page, we will attach a listener to the device element to monitor changes. When a device is selected, i.e. the change event is triggered, the data property represents the Stream object that is connected to the selected device."/>

			<outline text="// in window.onloadrecordCtlBut = document.getElementById(&quot;record_ctl_but&quot;); recordCtlBut.onclick = function(){if(!recorder){// start recording recordCtlBut.value = &quot;Stop&quot;; recorder = audioStream.record(); // set the maximum audio clip length to 10 seconds recordTimer = setTimeout(stopRecording, 10000); }else stopRecording();};A single button is used to both start and stop the recording and the label alternates to display the recording state. Calling the record method on the Stream object starts the recording and returns a StreamRecorder object. A timer is started to limit the maximum length of a recorded audio clip to 10 seconds."/>

			<outline text="function stopRecording(){ clearTimeout(recordTimer); var audioFile = recorder.stop(); useAudioFile(audioFile); // reset to allow new recording session recorder = null; recordCtlBut.value = &quot;Record&quot;;}The recording is stopped when the stop button is clicked, or the 10-second timer times out. The recorded audio data is retrieved from the StreamRecorder object by calling the stop method. The recorded audio data is represented as a File object (W3C File API). It is then up to you what to do with the recorded clip; perhaps publish it on a web server."/>

			<outline text="As mentioned above, the device element is not limited to audio devices. It could in a similar manner be used to select other types of devices, such as a web camera, and use the video element to display what the camera is seeing. The next step could be to share live audio and video with others in a web based video conferencing system."/>

			<outline text="To conclude, the device element and related APIs will open up for bringing new devices into the web experience that previously only could be accessed by using browser plug-ins. The browser is really becoming a powerful application platform while still having the advantage of superior application portability. It will be interesting to see the specification evolve."/>

			</outline>

		<outline text="CWSKimmer Project-Web Audio API">

			<outline text="Link to Article" type="link" url="https://dvcs.w3.org/hg/audio/raw-file/tip/webaudio/specification.html"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365734821_mMyVnSLk.html"/>

			<outline text="Thu, 11 Apr 2013 21:47"/>

			<outline text=""/>

			<outline text="AbstractThis specification describes a high-level JavaScript API for processing and synthesizing audio in web applications. The primary paradigm is of an audio routing graph, where a number of AudioNode objects are connected together to define the overall audio rendering. The actual processing will primarily take place in the underlying implementation (typically optimized Assembly / C / C++ code), but direct JavaScript processing and synthesis is also supported."/>

			<outline text="The introductory section covers the motivation behind this specification."/>

			<outline text="This API is designed to be used in conjunction with other APIs and elements on the web platform, notably: XMLHttpRequest (using the responseType and response attributes). For games and interactive applications, it is anticipated to be used with the canvas 2D and WebGL 3D graphics APIs."/>

			<outline text="Status of this DocumentThis section describes the status of this document at the time of its publication. Other documents may supersede this document. A list of current W3C publications and the latest revision of this technical report can be found in the W3C technical reports index at http://www.w3.org/TR/."/>

			<outline text="This is the Editor's Draft of the Web Audio API specification. It has been produced by the W3C Audio Working Group , which is part of the W3C WebApps Activity."/>

			<outline text="Please send comments about this document to  (public archives of the W3C audio mailing list). Web content and browser developers are encouraged to review this draft."/>

			<outline text="Publication as a Working Draft does not imply endorsement by the W3C Membership. This is a draft document and may be updated, replaced or obsoleted by other documents at any time. It is inappropriate to cite this document as other than work in progress."/>

			<outline text="This document was produced by a group operating under the 5 February 2004 W3C Patent Policy. W3C maintains a public list of any patent disclosures made in connection with the deliverables of the group; that page also includes instructions for disclosing a patent. An individual who has actual knowledge of a patent which the individual believes contains Essential Claim(s) must disclose the information in accordance with section 6 of the W3C Patent Policy."/>

			<outline text="2. ConformanceEverything in this specification is normative except for examples and sections marked as being informative."/>

			<outline text="The keywords ''MUST'', ''MUST NOT'', ''REQUIRED'', ''SHALL'', ''SHALL NOT'', ''RECOMMENDED'', ''MAY'' and ''OPTIONAL'' in this document are to be interpreted as described in Key words for use in RFCs to Indicate Requirement Levels[RFC2119]."/>

			<outline text="The following conformance classes are defined by this specification:"/>

			<outline text="conforming implementationA user agent is considered to be a conforming implementation if it satisfies all of the MUST-, REQUIRED- and SHALL-level criteria in this specification that apply to implementations."/>

			<outline text="4. The Audio API"/>

			<outline text="4.1. The AudioContext InterfaceThis interface represents a set of AudioNode objects and their connections. It allows for arbitrary routing of signals to the AudioDestinationNode (what the user ultimately hears). Nodes are created from the context and are then connected together. In most use cases, only a single AudioContext is used per document."/>

			<outline text="Web IDL"/>

			<outline text="callback DecodeSuccessCallback = void (AudioBuffer decodedData);callback DecodeErrorCallback = void ();[Constructor]interface AudioContext : EventTarget { readonly attribute AudioDestinationNode destination; readonly attribute float sampleRate; readonly attribute double currentTime; readonly attribute AudioListener listener; readonly attribute unsigned long activeSourceCount; AudioBuffer createBuffer(unsigned long numberOfChannels, unsigned long length, float sampleRate); AudioBuffer createBuffer(ArrayBuffer buffer, boolean mixToMono); void decodeAudioData(ArrayBuffer audioData, DecodeSuccessCallback successCallback, optional DecodeErrorCallback errorCallback); AudioBufferSourceNode createBufferSource(); MediaElementAudioSourceNode createMediaElementSource(HTMLMediaElement mediaElement); MediaStreamAudioSourceNode createMediaStreamSource(MediaStream mediaStream); MediaStreamAudioDestinationNode createMediaStreamDestination(); ScriptProcessorNode createScriptProcessor(unsigned long bufferSize, optional unsigned long numberOfInputChannels = 2, optional unsigned long numberOfOutputChannels = 2); AnalyserNode createAnalyser(); GainNode createGain(); DelayNode createDelay(optional double maxDelayTime = 1.0); BiquadFilterNode createBiquadFilter(); WaveShaperNode createWaveShaper(); PannerNode createPanner(); ConvolverNode createConvolver(); ChannelSplitterNode createChannelSplitter(optional unsigned long numberOfOutputs = 6); ChannelMergerNode createChannelMerger(optional unsigned long numberOfInputs = 6); DynamicsCompressorNode createDynamicsCompressor(); OscillatorNode createOscillator(); WaveTable createWaveTable(Float32Array real, Float32Array imag);};4.1.1. AttributesdestinationAn AudioDestinationNode with a single input representing the final destination for all audio. Usually this will represent the actual audio hardware. All AudioNodes actively rendering audio will directly or indirectly connect to destination."/>

			<outline text="sampleRateThe sample rate (in sample-frames per second) at which the AudioContext handles audio. It is assumed that all AudioNodes in the context run at this rate. In making this assumption, sample-rate converters or &quot;varispeed&quot; processors are not supported in real-time processing."/>

			<outline text="currentTimeThis is a time in seconds which starts at zero when the context is created and increases in real-time. All scheduled times are relative to it. This is not a &quot;transport&quot; time which can be started, paused, and re-positioned. It is always moving forward. A GarageBand-like timeline transport system can be very easily built on top of this (in JavaScript). This time corresponds to an ever-increasing hardware timestamp."/>

			<outline text="listenerAn AudioListener which is used for 3D spatialization."/>

			<outline text="activeSourceCountThe number of AudioBufferSourceNodes that are currently playing."/>

			<outline text="4.1.2. Methods and ParametersThe createBuffer methodCreates an AudioBuffer of the given size. The audio data in the buffer will be zero-initialized (silent). An exception will be thrown if the numberOfChannels or sampleRate are out-of-bounds."/>

			<outline text="The numberOfChannels parameter determines how many channels the buffer will have. An implementation must support at least 32 channels."/>

			<outline text="The length parameter determines the size of the buffer in sample-frames."/>

			<outline text="The sampleRate parameter describes the sample-rate of the linear PCM audio data in the buffer in sample-frames per second. An implementation must support sample-rates in at least the range 22050 to 96000."/>

			<outline text="The createBuffer from ArrayBuffer methodCreates an AudioBuffer given the audio file data contained in the ArrayBuffer. The ArrayBuffer can, for example, be loaded from an XMLHttpRequest's response attribute after setting the responseType to &quot;arraybuffer&quot;. Audio file data can be in any of the formats supported by the audio element."/>

			<outline text="The buffer parameter contains the audio file data (for example from a .wav file)."/>

			<outline text="The mixToMono parameter determines if a mixdown to mono will be performed. Normally, this would not be set."/>

			<outline text="The following steps must be performed:"/>

			<outline text="Decode the encoded buffer from the AudioBuffer into linear PCM. If a decoding error is encountered due to the audio format not being recognized or supported, or because of corrupted/unexpected/inconsistent data then return NULL (and these steps will be terminated).If mixToMono is true, then mixdown the decoded linear PCM data to mono.Take the decoded (possibly mixed-down) linear PCM audio data, and resample it to the sample-rate of the AudioContext if it is different from the sample-rate of buffer. The final result will be stored in an AudioBuffer and returned as the result of this method.The decodeAudioData methodAsynchronously decodes the audio file data contained in the ArrayBuffer. The ArrayBuffer can, for example, be loaded from an XMLHttpRequest's response attribute after setting the responseType to &quot;arraybuffer&quot;. Audio file data can be in any of the formats supported by the audio element."/>

			<outline text="The decodeAudioData() method is preferred over the createBuffer() from ArrayBuffer method because it is asynchronous and does not block the main JavaScript thread."/>

			<outline text="audioData is an ArrayBuffer containing audio file data."/>

			<outline text="successCallback is a callback function which will be invoked when the decoding is finished. The single argument to this callback is an AudioBuffer representing the decoded PCM audio data."/>

			<outline text="errorCallback is a callback function which will be invoked if there is an error decoding the audio file data."/>

			<outline text="The following steps must be performed:"/>

			<outline text="Temporarily neuter the audioData ArrayBuffer in such a way that JavaScript code may not access or modify the data.Queue a decoding operation to be performed on another thread.The decoding thread will attempt to decode the encoded audioData into linear PCM. If a decoding error is encountered due to the audio format not being recognized or supported, or because of corrupted/unexpected/inconsistent data then the audioData neutered state will be restored to normal and the errorCallback will be scheduled to run on the main thread's event loop and these steps will be terminated.The decoding thread will take the result, representing the decoded linear PCM audio data, and resample it to the sample-rate of the AudioContext if it is different from the sample-rate of audioData. The final result (after possibly sample-rate converting) will be stored in an AudioBuffer.The audioData neutered state will be restored to normalThe successCallback function will be scheduled to run on the main thread's event loop given the AudioBuffer from step (4) as an argument.The createBufferSource methodCreates an AudioBufferSourceNode."/>

			<outline text="The createMediaElementSource methodCreates a MediaElementAudioSourceNode given an HTMLMediaElement. As a consequence of calling this method, audio playback from the HTMLMediaElement will be re-routed into the processing graph of the AudioContext."/>

			<outline text="The createMediaStreamSource methodCreates a MediaStreamAudioSourceNode given a MediaStream. As a consequence of calling this method, audio playback from the MediaStream will be re-routed into the processing graph of the AudioContext."/>

			<outline text="The createMediaStreamDestination methodCreates a MediaStreamAudioDestinationNode."/>

			<outline text="The createScriptProcessor method (please also see the alternate names section)Creates a ScriptProcessorNode for direct audio processing using JavaScript. An exception will be thrown if bufferSize or numberOfInputChannels or numberOfOutputChannels are outside the valid range."/>

			<outline text="The bufferSize parameter determines the buffer size in units of sample-frames. It must be one of the following values: 256, 512, 1024, 2048, 4096, 8192, 16384. This value controls how frequently the audioprocess event is dispatched and how many sample-frames need to be processed each call. Lower values for bufferSize will result in a lower (better) latency. Higher values will be necessary to avoid audio breakup and glitches. The value chosen must carefully balance between latency and audio quality."/>

			<outline text="The numberOfInputChannels parameter (defaults to 2) determines the number of channels for this node's input. Values of up to 32 must be supported."/>

			<outline text="The numberOfOutputChannels parameter (defaults to 2) determines the number of channels for this node's output. Values of up to 32 must be supported."/>

			<outline text="It is invalid for both numberOfInputChannels and numberOfOutputChannels to be zero."/>

			<outline text="The createAnalyser methodCreates an AnalyserNode."/>

			<outline text="The createGain method (please also see the alternate names section)Creates a GainNode."/>

			<outline text="The createDelay method (please also see the alternate names section)Creates a DelayNode representing a variable delay line. The initial default delay time will be 0 seconds."/>

			<outline text="The maxDelayTime parameter is optional and specifies the maximum delay time in seconds allowed for the delay line. If specified, this value MUST be greater than zero and less than three minutes or a NOT_SUPPORTED_ERR exception will be thrown."/>

			<outline text="The createBiquadFilter methodCreates a BiquadFilterNode representing a second order filter which can be configured as one of several common filter types."/>

			<outline text="The createWaveShaper methodCreates a WaveShaperNode representing a non-linear distortion."/>

			<outline text="The createPanner methodCreates a PannerNode."/>

			<outline text="The createConvolver methodCreates a ConvolverNode."/>

			<outline text="The createChannelSplitter methodCreates a ChannelSplitterNode representing a channel splitter. An exception will be thrown for invalid parameter values."/>

			<outline text="The numberOfOutputs parameter determines the number of outputs. Values of up to 32 must be supported. If not specified, then 6 will be used."/>

			<outline text="The createChannelMerger methodCreates a ChannelMergerNode representing a channel merger. An exception will be thrown for invalid parameter values."/>

			<outline text="The numberOfInputs parameter determines the number of inputs. Values of up to 32 must be supported. If not specified, then 6 will be used."/>

			<outline text="The createDynamicsCompressor methodCreates a DynamicsCompressorNode."/>

			<outline text="The createOscillator methodCreates an OscillatorNode."/>

			<outline text="The createWaveTable methodCreates a WaveTable representing a waveform containing arbitrary harmonic content. The real and imag parameters must be of type Float32Array of equal lengths greater than zero and less than or equal to 4096 or an exception will be thrown. These parameters specify the Fourier coefficients of a Fourier series representing the partials of a periodic waveform. The created WaveTable will be used with an OscillatorNode and will represent a normalized time-domain waveform having maximum absolute peak value of 1. Another way of saying this is that the generated waveform of an OscillatorNode will have maximum peak value at 0dBFS. Conveniently, this corresponds to the full-range of the signal values used by the Web Audio API. Because the WaveTable will be normalized on creation, the real and imag parameters represent relative values."/>

			<outline text="The real parameter represents an array of cosine terms (traditionally the A terms). In audio terminology, the first element (index 0) is the DC-offset of the periodic waveform and is usually set to zero. The second element (index 1) represents the fundamental frequency. The third element represents the first overtone, and so on."/>

			<outline text="The imag parameter represents an array of sine terms (traditionally the B terms). The first element (index 0) should be set to zero (and will be ignored) since this term does not exist in the Fourier series. The second element (index 1) represents the fundamental frequency. The third element represents the first overtone, and so on."/>

			<outline text="4.1.3. LifetimeThis section is informative."/>

			<outline text="Once created, an AudioContext will continue to play sound until it has no more sound to play, or the page goes away."/>

			<outline text="4.1b. The OfflineAudioContext InterfaceOfflineAudioContext is a particular type of AudioContext for rendering/mixing-down (potentially) faster than real-time. It does not render to the audio hardware, but instead renders as quickly as possible, calling a completion event handler with the result provided as an AudioBuffer."/>

			<outline text="Web IDL"/>

			<outline text="[Constructor(unsigned long numberOfChannels, unsigned long length, float sampleRate)]interface OfflineAudioContext : AudioContext { void startRendering(); attribute EventHandler oncomplete;};4.1b.1. AttributesoncompleteAn EventHandler of type OfflineAudioCompletionEvent."/>

			<outline text="4.1b.2. Methods and ParametersThe startRendering methodGiven the current connections and scheduled changes, starts rendering audio. The oncomplete handler will be called once the rendering has finished. This method must only be called one time or an exception will be thrown."/>

			<outline text="4.1c. The OfflineAudioCompletionEvent InterfaceThis is an Event object which is dispatched to OfflineAudioContext."/>

			<outline text="Web IDL"/>

			<outline text="interface OfflineAudioCompletionEvent : Event { readonly attribute AudioBuffer renderedBuffer;};4.1c.1. AttributesrenderedBufferAn AudioBuffer containing the rendered audio data once an OfflineAudioContext has finished rendering. It will have a number of channels equal to the numberOfChannels parameter of the OfflineAudioContext constructor."/>

			<outline text="4.2. The AudioNode InterfaceAudioNodes are the building blocks of an AudioContext. This interface represents audio sources, the audio destination, and intermediate processing modules. These modules can be connected together to form processing graphs for rendering audio to the audio hardware. Each node can have inputs and/or outputs. A source node has no inputs and a single output. An AudioDestinationNode has one input and no outputs and represents the final destination to the audio hardware. Most processing nodes such as filters will have one input and one output. Each type of AudioNode differs in the details of how it processes or synthesizes audio. But, in general, AudioNodes will process its inputs (if it has any), and generate audio for its outputs (if it has any)."/>

			<outline text="Each output has one or more channels. The exact number of channels depends on the details of the specific AudioNode."/>

			<outline text="An output may connect to one or more AudioNode inputs, thus fan-out is supported. An input initially has no connections, but may be connected from one or more AudioNode outputs, thus fan-in is supported. When the connect() method is called to connect an output of an AudioNode to an input of an AudioNode, we call that a connection to the input."/>

			<outline text="Each AudioNode input has a specific number of channels at any given time. This number can change depending on the connection(s) made to the input. If the input has no connections then it has one channel which is silent."/>

			<outline text="For each input, an AudioNode performs a mixing (usually an up-mixing) of all connections to that input. Please see Mixer Gain Structure for more informative details, and the Channel up-mixing and down-mixing section for normative requirements."/>

			<outline text="For performance reasons, practical implementations will need to use block processing, with each AudioNode processing a fixed number of sample-frames of size block-size. In order to get uniform behavior across implementations, we will define this value explicitly. block-size is defined to be 128 sample-frames which corresponds to roughly 3ms at a sample-rate of 44.1KHz."/>

			<outline text="AudioNodes are EventTargets, as described in DOM[DOM]. This means that it is possible to dispatch events to AudioNodes the same way that other EventTargets accept events."/>

			<outline text="Web IDL"/>

			<outline text="enum ChannelCountMode { &quot;max&quot;, &quot;clamped-max&quot;, &quot;explicit&quot;};enum ChannelInterpretation { &quot;speakers&quot;, &quot;discrete&quot;};interface AudioNode : EventTarget { void connect(AudioNode destination, optional unsigned long output = 0, optional unsigned long input = 0); void connect(AudioParam destination, optional unsigned long output = 0); void disconnect(optional unsigned long output = 0); readonly attribute AudioContext context; readonly attribute unsigned long numberOfInputs; readonly attribute unsigned long numberOfOutputs; // Channel up-mixing and down-mixing rules for all inputs. attribute unsigned long channelCount; attribute ChannelCountMode channelCountMode; attribute ChannelInterpretation channelInterpretation;};4.2.1. AttributescontextThe AudioContext which owns this AudioNode."/>

			<outline text="numberOfInputsThe number of inputs feeding into the AudioNode. For source nodes, this will be 0."/>

			<outline text="numberOfOutputsThe number of outputs coming out of the AudioNode. This will be 0 for an AudioDestinationNode."/>

			<outline text="channelCountThe number of channels for the node. The default value is 2."/>

			<outline text="See the Channel up-mixing and down-mixing section for more information on this attribute."/>

			<outline text="channelCountModeDetermines how the channel count used for up-mixing and down-mixing is computed."/>

			<outline text="See the Channel up-mixing and down-mixing section for more information on this attribute."/>

			<outline text="channelInterpretationDetermines how individual channels will be treated."/>

			<outline text="See the Channel up-mixing and down-mixing section for more information on this attribute."/>

			<outline text="4.2.2. Methods and ParametersThe connect to AudioNode methodConnects the AudioNode to another AudioNode."/>

			<outline text="The destination parameter is the AudioNode to connect to."/>

			<outline text="The output parameter is an index describing which output of the AudioNode from which to connect. An out-of-bound value throws an exception."/>

			<outline text="The input parameter is an index describing which input of the destination AudioNode to connect to. An out-of-bound value throws an exception."/>

			<outline text="It is possible to connect an AudioNode output to more than one input with multiple calls to connect(). Thus, &quot;fan-out&quot; is supported."/>

			<outline text="It is possible to connect an AudioNode to another AudioNode which creates a cycle. In other words, an AudioNode may connect to another AudioNode, which in turn connects back to the first AudioNode. This is allowed only if there is at least one DelayNode in the cycle or an exception will be thrown."/>

			<outline text="There can only be one connection between a given output of one specific node and a given input of another specific node. Multiple connections with the same termini are ignored. For example:"/>

			<outline text="nodeA.connect(nodeB); nodeA.connect(nodeB); will have the same effect as nodeA.connect(nodeB); The connect to AudioParam methodConnects the AudioNode to an AudioParam, controlling the parameter value with an audio-rate signal."/>

			<outline text="The destination parameter is the AudioParam to connect to."/>

			<outline text="The output parameter is an index describing which output of the AudioNode from which to connect. An out-of-bound value throws an exception."/>

			<outline text="It is possible to connect an AudioNode output to more than one AudioParam with multiple calls to connect(). Thus, &quot;fan-out&quot; is supported."/>

			<outline text="It is possible to connect more than one AudioNode output to a single AudioParam with multiple calls to connect(). Thus, &quot;fan-in&quot; is supported."/>

			<outline text="An AudioParam will take the rendered audio data from any AudioNode output connected to it and convert it to mono by down-mixing if it is not already mono, then mix it together with other such outputs and finally will mix with the intrinsic parameter value (the value the AudioParam would normally have without any audio connections), including any timeline changes scheduled for the parameter."/>

			<outline text="There can only be one connection between a given output of one specific node and a specific AudioParam. Multiple connections with the same termini are ignored. For example:"/>

			<outline text="nodeA.connect(param); nodeA.connect(param); will have the same effect as nodeA.connect(param); The disconnect methodDisconnects an AudioNode's output."/>

			<outline text="The output parameter is an index describing which output of the AudioNode to disconnect. An out-of-bound value throws an exception."/>

			<outline text="4.2.3. LifetimeThis section is informative."/>

			<outline text="An implementation may choose any method to avoid unnecessary resource usage and unbounded memory growth of unused/finished nodes. The following is a description to help guide the general expectation of how node lifetime would be managed."/>

			<outline text="An AudioNode will live as long as there are any references to it. There are several types of references:"/>

			<outline text="A normal JavaScript reference obeying normal garbage collection rules.A playing reference for both AudioBufferSourceNodes and OscillatorNodes. These nodes maintain a playing reference to themselves while they are currently playing.A connection reference which occurs if another AudioNode is connected to it.A tail-time reference which an AudioNode maintains on itself as long as it has any internal processing state which has not yet been emitted. For example, a ConvolverNode has a tail which continues to play even after receiving silent input (think about clapping your hands in a large concert hall and continuing to hear the sound reverberate throughout the hall). Some AudioNodes have this property. Please see details for specific nodes.Any AudioNodes which are connected in a cycle and are directly or indirectly connected to the AudioDestinationNode of the AudioContext will stay alive as long as the AudioContext is alive."/>

			<outline text="When an AudioNode has no references it will be deleted. But before it is deleted, it will disconnect itself from any other AudioNodes which it is connected to. In this way it releases all connection references (3) it has to other nodes."/>

			<outline text="Regardless of any of the above references, it can be assumed that the AudioNode will be deleted when its AudioContext is deleted."/>

			<outline text="4.4. The AudioDestinationNode InterfaceThis is an AudioNode representing the final audio destination and is what the user will ultimately hear. It can often be considered as an audio output device which is connected to speakers. All rendered audio to be heard will be routed to this node, a &quot;terminal&quot; node in the AudioContext's routing graph. There is only a single AudioDestinationNode per AudioContext, provided through the destination attribute of AudioContext."/>

			<outline text="numberOfInputs : 1 numberOfOutputs : 0 channelCount = 2; channelCountMode = &quot;explicit&quot;; channelInterpretation = &quot;speakers&quot;;Web IDL"/>

			<outline text="interface AudioDestinationNode : AudioNode { readonly attribute unsigned long maxChannelCount;};4.4.1. AttributesmaxChannelCountThe maximum number of channels that the channelCount attribute can be set to. An AudioDestinationNode representing the audio hardware end-point (the normal case) can potentially output more than 2 channels of audio if the audio hardware is multi-channel. maxChannelCount is the maximum number of channels that this hardware is capable of supporting. If this value is 0, then this indicates that channelCount may not be changed. This will be the case for an AudioDestinationNode in an OfflineAudioContext and also for basic implementations with hardware support for stereo output only."/>

			<outline text="channelCount defaults to 2 for a destination in a normal AudioContext, and may be set to any non-zero value less than or equal to maxChannelCount. An exception will be thrown if this value is not within the valid range. Giving a concrete example, if the audio hardware supports 8-channel output, then we may set channelCount to 8, and render 8-channels of output."/>

			<outline text="For an AudioDestinationNode in an OfflineAudioContext, the channelCount is determined when the offline context is created and this value may not be changed."/>

			<outline text="4.5. The AudioParam InterfaceAudioParam controls an individual aspect of an AudioNode's functioning, such as volume. The parameter can be set immediately to a particular value using the &quot;value&quot; attribute. Or, value changes can be scheduled to happen at very precise times (in the coordinate system of AudioContext.currentTime), for envelopes, volume fades, LFOs, filter sweeps, grain windows, etc. In this way, arbitrary timeline-based automation curves can be set on any AudioParam. Additionally, audio signals from the outputs of AudioNodes can be connected to an AudioParam, summing with the intrinsic parameter value."/>

			<outline text="Some synthesis and processing AudioNodes have AudioParams as attributes whose values must be taken into account on a per-audio-sample basis. For other AudioParams, sample-accuracy is not important and the value changes can be sampled more coarsely. Each individual AudioParam will specify that it is either an a-rate parameter which means that its values must be taken into account on a per-audio-sample basis, or it is a k-rate parameter."/>

			<outline text="Implementations must use block processing, with each AudioNode processing 128 sample-frames in each block."/>

			<outline text="For each 128 sample-frame block, the value of a k-rate parameter must be sampled at the time of the very first sample-frame, and that value must be used for the entire block. a-rate parameters must be sampled for each sample-frame of the block."/>

			<outline text="Web IDL"/>

			<outline text="interface AudioParam { attribute float value; readonly attribute float defaultValue; void setValueAtTime(float value, double startTime); void linearRampToValueAtTime(float value, double endTime); void exponentialRampToValueAtTime(float value, double endTime); void setTargetAtTime(float target, double startTime, double timeConstant); void setValueCurveAtTime(Float32Array values, double startTime, double duration); void cancelScheduledValues(double startTime);};4.5.1. AttributesvalueThe parameter's floating-point value. This attribute is initialized to the defaultValue. If a value is set during a time when there are any automation events scheduled then it will be ignored and no exception will be thrown."/>

			<outline text="defaultValueInitial value for the value attribute"/>

			<outline text="4.5.2. Methods and ParametersAn AudioParam maintains a time-ordered event list which is initially empty. The times are in the time coordinate system of AudioContext.currentTime. The events define a mapping from time to value. The following methods can change the event list by adding a new event into the list of a type specific to the method. Each event has a time associated with it, and the events will always be kept in time-order in the list. These methods will be called automation methods:"/>

			<outline text="setValueAtTime() - SetValuelinearRampToValueAtTime() - LinearRampToValueexponentialRampToValueAtTime() - ExponentialRampToValuesetTargetAtTime() - SetTargetsetValueCurveAtTime() - SetValueCurveThe following rules will apply when calling these methods:"/>

			<outline text="If one of these events is added at a time where there is already an event of the exact same type, then the new event will replace the old one.If one of these events is added at a time where there is already one or more events of a different type, then it will be placed in the list after them, but before events whose times are after the event.If setValueCurveAtTime() is called for time T and duration D and there are any events having a time greater than T, but less than T + D, then an exception will be thrown. In other words, it's not ok to schedule a value curve during a time period containing other events.Similarly an exception will be thrown if any automation method is called at a time which is inside of the time interval of a SetValueCurve event at time T and duration D.The setValueAtTime methodSchedules a parameter value change at the given time."/>

			<outline text="The value parameter is the value the parameter will change to at the given time."/>

			<outline text="The startTime parameter is the time in the same time coordinate system as AudioContext.currentTime."/>

			<outline text="If there are no more events after this SetValue event, then for t &gt;= startTime, v(t) = value. In other words, the value will remain constant."/>

			<outline text="If the next event (having time T1) after this SetValue event is not of type LinearRampToValue or ExponentialRampToValue, then, for t: startTime &lt;= t &lt; T1, v(t) = value. In other words, the value will remain constant during this interval. [A span of the archived source text is missing here — the unescaped &lt; character truncated the remainder of section 4.5.2 through the opening of section 4.9, The AudioBuffer Interface, whose non-interleaved IEEE 32-bit linear PCM data has a nominal range of -1 to] +1. It can contain one or more channels. Typically, it would be expected that the length of the PCM data would be fairly short (usually somewhat less than a minute). For longer sounds, such as music soundtracks, streaming should be used with the audio element and MediaElementAudioSourceNode."/>

			<outline text="An AudioBuffer may be used by one or more AudioContexts."/>

			<outline text="Web IDL"/>

			<outline text="interface AudioBuffer { readonly attribute float sampleRate; readonly attribute long length; readonly attribute double duration; readonly attribute long numberOfChannels; Float32Array getChannelData(unsigned long channel);};4.9.1. AttributessampleRateThe sample-rate for the PCM audio data in samples per second."/>

			<outline text="lengthLength of the PCM audio data in sample-frames."/>

			<outline text="durationDuration of the PCM audio data in seconds."/>

			<outline text="numberOfChannelsThe number of discrete audio channels."/>

			<outline text="4.9.2. Methods and ParametersThe getChannelData methodReturns the Float32Array representing the PCM audio data for the specific channel."/>

			<outline text="The channel parameter is an index representing the particular channel to get data for. An index value of 0 represents the first channel. This index value MUST be less than numberOfChannels or an exception will be thrown."/>

			<outline text="4.10. The AudioBufferSourceNode InterfaceThis interface represents an audio source from an in-memory audio asset in an AudioBuffer. It generally will be used for short audio assets which require a high degree of scheduling flexibility (can playback in rhythmically perfect ways). The playback state of an AudioBufferSourceNode goes through distinct stages during its lifetime in this order: UNSCHEDULED_STATE, SCHEDULED_STATE, PLAYING_STATE, FINISHED_STATE. The start() method causes a transition from the UNSCHEDULED_STATE to SCHEDULED_STATE. Depending on the time argument passed to start(), a transition is made from the SCHEDULED_STATE to PLAYING_STATE, at which time sound is first generated. Following this, a transition from the PLAYING_STATE to FINISHED_STATE happens when either the buffer's audio data has been completely played (if the loop attribute is false), or when the stop() method has been called and the specified time has been reached. Please see more details in the start() and stop() description. Once an AudioBufferSourceNode has reached the FINISHED state it will no longer emit any sound. Thus start() and stop() may not be issued multiple times for a given AudioBufferSourceNode."/>

			<outline text="numberOfInputs : 0 numberOfOutputs : 1 The number of channels of the output always equals the number of channels of the AudioBuffer assigned to the .buffer attribute, or is one channel of silence if .buffer is NULL."/>

			<outline text="Web IDL"/>

			<outline text="interface AudioBufferSourceNode : AudioNode { const unsigned short UNSCHEDULED_STATE = 0; const unsigned short SCHEDULED_STATE = 1; const unsigned short PLAYING_STATE = 2; const unsigned short FINISHED_STATE = 3; readonly attribute unsigned short playbackState; attribute AudioBuffer? buffer; readonly attribute AudioParam playbackRate; attribute boolean loop; attribute double loopStart; attribute double loopEnd; void start(double when, optional double offset = 0, optional double duration); void stop(double when);};4.10.1. AttributesplaybackStateThe playback state, initialized to UNSCHEDULED_STATE."/>

			<outline text="bufferRepresents the audio asset to be played."/>

			<outline text="playbackRateThe speed at which to render the audio stream. The default playbackRate.value is 1. This parameter is a-rate."/>

			<outline text="loopIndicates if the audio data should play in a loop. The default value is false."/>

			<outline text="loopStartAn optional value in seconds where looping should begin if the loop attribute is true. Its default value is 0, and it may usefully be set to any value between 0 and the duration of the buffer."/>

			<outline text="loopEndAn optional value in seconds where looping should end if the loop attribute is true. Its default value is 0, and it may usefully be set to any value between 0 and the duration of the buffer."/>

			<outline text="4.10.2. Methods and ParametersThe start method (please also see the alternate names section)Schedules a sound to playback at an exact time."/>

			<outline text="The when parameter describes at what time (in seconds) the sound should start playing. It is in the same time coordinate system as AudioContext.currentTime. If 0 is passed in for this value or if the value is less than currentTime, then the sound will start playing immediately. start may only be called one time and must be called before stop is called or an exception will be thrown."/>

			<outline text="The offset parameter describes the offset time in the buffer (in seconds) where playback will begin. This parameter is optional with a default value of 0 (playing back from the beginning of the buffer)."/>

			<outline text="The duration parameter describes the duration of the portion (in seconds) to be played. This parameter is optional, with the default value equal to the total duration of the AudioBuffer minus the offset parameter. Thus if neither offset nor duration are specified then the implied duration is the total duration of the AudioBuffer."/>

			<outline text="The stop method (please also see the alternate names section)Schedules a sound to stop playback at an exact time. Please see deprecation section for the old method name."/>

			<outline text="The when parameter describes at what time (in seconds) the sound should stop playing. It is in the same time coordinate system as AudioContext.currentTime. If 0 is passed in for this value or if the value is less than currentTime, then the sound will stop playing immediately. stop must only be called one time and only after a call to start or stop, or an exception will be thrown."/>

			<outline text="4.10.3. LoopingIf the loop attribute is true when start() is called, then playback will continue indefinitely until stop() is called and the stop time is reached. We'll call this &quot;loop&quot; mode. Playback always starts at the point in the buffer indicated by the offset argument of start(), and in loop mode will continue playing until it reaches the actualLoopEnd position in the buffer (or the end of the buffer), at which point it will wrap back around to the actualLoopStart position in the buffer, and continue playing according to this pattern."/>

			<outline text="In loop mode then the actual loop points are calculated as follows from the loopStart and loopEnd attributes:"/>

			<outline text="if ((loopStart || loopEnd) &amp;&amp; loopStart &gt;= 0 &amp;&amp; loopEnd &gt; 0 &amp;&amp; loopStart &lt; loopEnd) { actualLoopStart = loopStart; actualLoopEnd = min(loopEnd, buffer.duration); } else { actualLoopStart = 0; actualLoopEnd = buffer.duration; } Note that the default values for loopStart and loopEnd are both 0, which indicates that looping should occur from the very start to the very end of the buffer."/>

			<outline text="Please note that as a low-level implementation detail, the AudioBuffer is at a specific sample-rate (usually the same as the AudioContext sample-rate), and that the loop times (in seconds) must be converted to the appropriate sample-frame positions in the buffer according to this sample-rate."/>

			<outline text="A MediaElementAudioSourceNode is created given an HTMLMediaElement using the AudioContext createMediaElementSource() method."/>

			<outline text="The number of channels of the single output equals the number of channels of the audio referenced by the HTMLMediaElement passed in as the argument to createMediaElementSource(), or is 1 if the HTMLMediaElement has no audio."/>

			<outline text="The HTMLMediaElement must behave in an identical fashion after the MediaElementAudioSourceNode has been created, except that the rendered audio will no longer be heard directly, but instead will be heard as a consequence of the MediaElementAudioSourceNode being connected through the routing graph. Thus pausing, seeking, volume, .src attribute changes, and other aspects of the HTMLMediaElement must behave as they normally would if not used with a MediaElementAudioSourceNode."/>

			<outline text="ECMAScript"/>

			<outline text="var mediaElement = document.getElementById('mediaElementID');var sourceNode = context.createMediaElementSource(mediaElement);sourceNode.connect(filterNode); 4.12. The ScriptProcessorNode InterfaceThis interface is an AudioNode which can generate, process, or analyse audio directly using JavaScript."/>

			<outline text="numberOfInputs : 1 numberOfOutputs : 1 channelCountMode = &quot;max&quot;; channelInterpretation = &quot;speakers&quot;;The ScriptProcessorNode is constructed with a bufferSize which must be one of the following values: 256, 512, 1024, 2048, 4096, 8192, 16384. This value controls how frequently the audioprocess event is dispatched and how many sample-frames need to be processed each call. Lower numbers for bufferSize will result in a lower (better) latency. Higher numbers will be necessary to avoid audio breakup and glitches. The value chosen must carefully balance between latency and audio quality."/>

			<outline text="numberOfInputChannels and numberOfOutputChannels determine the number of input and output channels. It is invalid for both numberOfInputChannels and numberOfOutputChannels to be zero."/>

			<outline text="var node = context.createScriptProcessor(bufferSize, numberOfInputChannels, numberOfOutputChannels); Web IDL"/>

			<outline text="interface ScriptProcessorNode : AudioNode { attribute EventHandler onaudioprocess; readonly attribute long bufferSize;};4.12.1. AttributesonaudioprocessA property used to set the EventHandler (described in HTML) for the audioprocess event that is dispatched to ScriptProcessorNode node types. An event of type AudioProcessingEvent will be dispatched to the event handler."/>

			<outline text="bufferSizeThe size of the buffer (in sample-frames) which needs to be processed each time onaudioprocess is called. Legal values are (256, 512, 1024, 2048, 4096, 8192, 16384)."/>

			<outline text="4.13. The AudioProcessingEvent InterfaceThis is an Event object which is dispatched to ScriptProcessorNode nodes."/>

			<outline text="The event handler processes audio from the input (if any) by accessing the audio data from the inputBuffer attribute. The audio data which is the result of the processing (or the synthesized data if there are no inputs) is then placed into the outputBuffer."/>

			<outline text="Web IDL"/>

			<outline text="interface AudioProcessingEvent : Event { attribute ScriptProcessorNode node; readonly attribute double playbackTime; readonly attribute AudioBuffer inputBuffer; readonly attribute AudioBuffer outputBuffer;};4.13.1. AttributesnodeThe ScriptProcessorNode associated with this processing event."/>

			<outline text="playbackTimeThe time when the audio will be played in the same time coordinate system as AudioContext.currentTime. playbackTime allows for very tight synchronization between processing directly in JavaScript with the other events in the context's rendering graph."/>

			<outline text="inputBufferAn AudioBuffer containing the input audio data. It will have a number of channels equal to the numberOfInputChannels parameter of the createScriptProcessor() method. This AudioBuffer is only valid while in the scope of the onaudioprocess function. Its values will be meaningless outside of this scope."/>

			<outline text="outputBufferAn AudioBuffer where the output audio data should be written. It will have a number of channels equal to the numberOfOutputChannels parameter of the createScriptProcessor() method. Script code within the scope of the onaudioprocess function is expected to modify the Float32Array arrays representing channel data in this AudioBuffer. Any script modifications to this AudioBuffer outside of this scope will not produce any audible effects."/>

			<outline text="4.14. The PannerNode InterfaceThis interface represents a processing node which positions / spatializes an incoming audio stream in three-dimensional space. The spatialization is in relation to the AudioContext's AudioListener (listener attribute)."/>

			<outline text="numberOfInputs : 1 numberOfOutputs : 1 channelCount = 2; channelCountMode = &quot;clamped-max&quot;; channelInterpretation = &quot;speakers&quot;;The audio stream from the input will be either mono or stereo, depending on the connection(s) to the input."/>

			<outline text="The output of this node is hard-coded to stereo (2 channels) and currently cannot be configured."/>

			<outline text="Web IDL"/>

			<outline text="enum PanningModelType { &quot;equalpower&quot;, &quot;HRTF&quot;, &quot;soundfield&quot;};enum DistanceModelType { &quot;linear&quot;, &quot;inverse&quot;, &quot;exponential&quot;};interface PannerNode : AudioNode { attribute PanningModelType panningModel; void setPosition(double x, double y, double z); void setOrientation(double x, double y, double z); void setVelocity(double x, double y, double z); attribute DistanceModelType distanceModel; attribute double refDistance; attribute double maxDistance; attribute double rolloffFactor; attribute double coneInnerAngle; attribute double coneOuterAngle; attribute double coneOuterGain;};4.14.2. AttributespanningModelDetermines which spatialization algorithm will be used to position the audio in 3D space. The default is &quot;HRTF&quot;."/>

			<outline text="&quot;equalpower&quot;A simple and efficient spatialization algorithm using equal-power panning."/>

			<outline text="&quot;HRTF&quot;A higher quality spatialization algorithm using a convolution with measured impulse responses from human subjects. This panning method renders stereo output."/>

			<outline text="&quot;soundfield&quot;An algorithm which spatializes multi-channel audio using sound field algorithms."/>

			<outline text="distanceModelDetermines which algorithm will be used to reduce the volume of an audio source as it moves away from the listener. The default is &quot;inverse&quot;."/>

			<outline text="&quot;linear&quot;A linear distance model which calculates distanceGain according to:"/>

			<outline text="1 - rolloffFactor * (distance - refDistance) / (maxDistance - refDistance) &quot;inverse&quot;An inverse distance model which calculates distanceGain according to:"/>

			<outline text="refDistance / (refDistance + rolloffFactor * (distance - refDistance)) &quot;exponential&quot;An exponential distance model which calculates distanceGain according to:"/>

			<outline text="pow(distance / refDistance, -rolloffFactor) refDistanceA reference distance for reducing volume as the source moves further from the listener. The default value is 1."/>

			<outline text="maxDistanceThe maximum distance between source and listener, after which the volume will not be reduced any further. The default value is 10000."/>

			<outline text="rolloffFactorDescribes how quickly the volume is reduced as source moves away from listener. The default value is 1."/>

			<outline text="coneInnerAngleA parameter for directional audio sources, this is an angle, inside of which there will be no volume reduction. The default value is 360."/>

			<outline text="coneOuterAngleA parameter for directional audio sources, this is an angle, outside of which the volume will be reduced to a constant value of coneOuterGain. The default value is 360."/>

			<outline text="coneOuterGainA parameter for directional audio sources, this is the amount of volume reduction outside of the coneOuterAngle. The default value is 0."/>

			<outline text="4.14.3. Methods and ParametersThe setPosition methodSets the position of the audio source relative to the listener attribute. A 3D cartesian coordinate system is used."/>

			<outline text="The x, y, z parameters represent the coordinates in 3D space."/>

			<outline text="The default value is (0,0,0)"/>

			<outline text="The setOrientation methodDescribes which direction the audio source is pointing in the 3D cartesian coordinate space. Depending on how directional the sound is (controlled by the cone attributes), a sound pointing away from the listener can be very quiet or completely silent."/>

			<outline text="The x, y, z parameters represent a direction vector in 3D space."/>

			<outline text="The default value is (1,0,0)"/>

			<outline text="The setVelocity methodSets the velocity vector of the audio source. This vector controls both the direction of travel and the speed in 3D space. This velocity relative to the listener's velocity is used to determine how much doppler shift (pitch change) to apply. The units used for this vector is meters / second and is independent of the units used for position and orientation vectors."/>

			<outline text="The x, y, z parameters describe a direction vector indicating direction of travel and intensity."/>

			<outline text="The default value is (0,0,0)"/>

			<outline text="4.15. The AudioListener InterfaceThis interface represents the position and orientation of the person listening to the audio scene. All PannerNode objects spatialize in relation to the AudioContext's listener. See this section for more details about spatialization."/>

			<outline text="Web IDL"/>

			<outline text="interface AudioListener { attribute double dopplerFactor; attribute double speedOfSound; void setPosition(double x, double y, double z); void setOrientation(double x, double y, double z, double xUp, double yUp, double zUp); void setVelocity(double x, double y, double z);};4.15.1. AttributesdopplerFactorA constant used to determine the amount of pitch shift to use when rendering a doppler effect. The default value is 1."/>

			<outline text="speedOfSoundThe speed of sound used for calculating doppler shift. The default value is 343.3."/>

			<outline text="4.15.2. Methods and ParametersThe setPosition methodSets the position of the listener in a 3D cartesian coordinate space. PannerNode objects use this position relative to individual audio sources for spatialization."/>

			<outline text="The x, y, z parameters represent the coordinates in 3D space."/>

			<outline text="The default value is (0,0,0)"/>

			<outline text="The setOrientation methodDescribes which direction the listener is pointing in the 3D cartesian coordinate space. Both a front vector and an up vector are provided. In simple human terms, the front vector represents which direction the person's nose is pointing. The up vector represents the direction the top of a person's head is pointing. These values are expected to be linearly independent (at right angles to each other). For normative requirements of how these values are to be interpreted, see the spatialization section."/>

			<outline text="The x, y, z parameters represent a front direction vector in 3D space, with the default value being (0,0,-1)"/>

			<outline text="The xUp, yUp, zUp parameters represent an up direction vector in 3D space, with the default value being (0,1,0)"/>

			<outline text="The setVelocity methodSets the velocity vector of the listener. This vector controls both the direction of travel and the speed in 3D space. This velocity relative to an audio source's velocity is used to determine how much doppler shift (pitch change) to apply. The units used for this vector is meters / second and is independent of the units used for position and orientation vectors."/>

			<outline text="The x, y, z parameters describe a direction vector indicating direction of travel and intensity."/>

			<outline text="The default value is (0,0,0)"/>

			<outline text="4.16. The ConvolverNode InterfaceThis interface represents a processing node which applies a linear convolution effect given an impulse response. Normative requirements for multi-channel convolution matrixing are described here."/>

			<outline text="numberOfInputs : 1 numberOfOutputs : 1 channelCount = 2; channelCountMode = &quot;clamped-max&quot;; channelInterpretation = &quot;speakers&quot;;Web IDL"/>

			<outline text="interface ConvolverNode : AudioNode { attribute AudioBuffer? buffer; attribute boolean normalize;};4.16.1. AttributesbufferA mono, stereo, or 4-channel AudioBuffer containing the (possibly multi-channel) impulse response used by the ConvolverNode. This AudioBuffer must be of the same sample-rate as the AudioContext or an exception will be thrown. At the time when this attribute is set, the buffer and the state of the normalize attribute will be used to configure the ConvolverNode with this impulse response having the given normalization. The initial value of this attribute is null."/>

			<outline text="normalizeControls whether the impulse response from the buffer will be scaled by an equal-power normalization when the buffer attribute is set. Its default value is true in order to achieve a more uniform output level from the convolver when loaded with diverse impulse responses. If normalize is set to false, then the convolution will be rendered with no pre-processing/scaling of the impulse response. Changes to this value do not take effect until the next time the buffer attribute is set."/>

			<outline text="If the normalize attribute is false when the buffer attribute is set then the ConvolverNode will perform a linear convolution given the exact impulse response contained within the buffer."/>

			<outline text="Otherwise, if the normalize attribute is true when the buffer attribute is set then the ConvolverNode will first perform a scaled RMS-power analysis of the audio data contained within buffer to calculate a normalizationScale given this algorithm:"/>

			<outline text="float calculateNormalizationScale(buffer){ const float GainCalibration = 0.00125; const float GainCalibrationSampleRate = 44100; const float MinPower = 0.000125; // Normalize by RMS power. size_t numberOfChannels = buffer-&gt;numberOfChannels(); size_t length = buffer-&gt;length(); float power = 0; for (size_t i = 0; i &lt; numberOfChannels; ++i) { float* sourceP = buffer-&gt;channel(i)-&gt;data(); float channelPower = 0; int n = length; while (n--) { float sample = *sourceP++; channelPower += sample * sample; } power += channelPower; } power = sqrt(power / (numberOfChannels * length)); // Protect against accidental overload. if (isinf(power) || isnan(power) || power &lt; MinPower) power = MinPower; float scale = 1 / power; // Calibrate to make perceived volume same as unprocessed. scale *= GainCalibration; // Scale depends on sample-rate. if (buffer-&gt;sampleRate()) scale *= GainCalibrationSampleRate / buffer-&gt;sampleRate(); // True-stereo compensation. if (buffer-&gt;numberOfChannels() == 4) scale *= 0.5; return scale;} During processing, the ConvolverNode will then take this calculated normalizationScale value and multiply it by the result of the linear convolution resulting from processing the input with the impulse response (represented by the buffer) to produce the final output. Or any mathematically equivalent operation may be used, such as pre-multiplying the input by normalizationScale, or pre-multiplying a version of the impulse-response by normalizationScale."/>

			<outline text="4.17. The AnalyserNode InterfaceThis interface represents a node which is able to provide real-time frequency and time-domain analysis information. The audio stream will be passed un-processed from input to output."/>

			<outline text="numberOfInputs : 1 numberOfOutputs : 1 Note that this output may be left unconnected. channelCount = 1; channelCountMode = &quot;explicit&quot;; channelInterpretation = &quot;speakers&quot;;Web IDL"/>

			<outline text="interface AnalyserNode : AudioNode { void getFloatFrequencyData(Float32Array array); void getByteFrequencyData(Uint8Array array); void getByteTimeDomainData(Uint8Array array); attribute unsigned long fftSize; readonly attribute unsigned long frequencyBinCount; attribute double minDecibels; attribute double maxDecibels; attribute double smoothingTimeConstant;};4.17.1. AttributesfftSizeThe size of the FFT used for frequency-domain analysis. This must be a non-zero power of two in the range 32 to 2048, otherwise an INDEX_SIZE_ERR exception MUST be thrown. The default value is 2048."/>

			<outline text="frequencyBinCountHalf the FFT size."/>

			<outline text="minDecibelsThe minimum power value in the scaling range for the FFT analysis data for conversion to unsigned byte values. The default value is -100. If the value of this attribute is set to a value more than or equal to maxDecibels, an INDEX_SIZE_ERR exception MUST be thrown."/>

			<outline text="maxDecibelsThe maximum power value in the scaling range for the FFT analysis data for conversion to unsigned byte values. The default value is -30. If the value of this attribute is set to a value less than or equal to minDecibels, an INDEX_SIZE_ERR exception MUST be thrown."/>

			<outline text="smoothingTimeConstantA value from 0 -&gt; 1 where 0 represents no time averaging with the last analysis frame. The default value is 0.8. If the value of this attribute is set to a value less than 0 or more than 1, an INDEX_SIZE_ERR exception MUST be thrown."/>

			<outline text="4.17.2. Methods and ParametersThe getFloatFrequencyData methodCopies the current frequency data into the passed floating-point array. If the array has fewer elements than the frequencyBinCount, the excess elements will be dropped. If the array has more elements than the frequencyBinCount, the excess elements will be ignored."/>

			<outline text="The array parameter is where frequency-domain analysis data will be copied."/>

			<outline text="The getByteFrequencyData methodCopies the current frequency data into the passed unsigned byte array. If the array has fewer elements than the frequencyBinCount, the excess elements will be dropped. If the array has more elements than the frequencyBinCount, the excess elements will be ignored."/>

			<outline text="The array parameter is where frequency-domain analysis data will be copied."/>

			<outline text="The getByteTimeDomainData methodCopies the current time-domain (waveform) data into the passed unsigned byte array. If the array has fewer elements than the fftSize, the excess elements will be dropped. If the array has more elements than fftSize, the excess elements will be ignored."/>

			<outline text="The array parameter is where time-domain analysis data will be copied."/>

			<outline text="4.18. The ChannelSplitterNode InterfaceThe ChannelSplitterNode is for use in more advanced applications and would often be used in conjunction with ChannelMergerNode."/>

			<outline text="numberOfInputs : 1 numberOfOutputs : Variable N (defaults to 6) // number of &quot;active&quot; (non-silent) outputs is determined by number of channels in the input channelCountMode = &quot;max&quot;; channelInterpretation = &quot;speakers&quot;;This interface represents an AudioNode for accessing the individual channels of an audio stream in the routing graph. It has a single input, and a number of &quot;active&quot; outputs which equals the number of channels in the input audio stream. For example, if a stereo input is connected to an ChannelSplitterNode then the number of active outputs will be two (one from the left channel and one from the right). There are always a total number of N outputs (determined by the numberOfOutputs parameter to the AudioContext method createChannelSplitter()), The default number is 6 if this value is not provided. Any outputs which are not &quot;active&quot; will output silence and would typically not be connected to anything."/>

			<outline text="Example:Please note that in this example, the splitter does not interpret the channel identities (such as left, right, etc.), but simply splits out channels in the order that they are input."/>

			<outline text="One application for ChannelSplitterNode is for doing &quot;matrix mixing&quot; where individual gain control of each channel is desired."/>

			<outline text="Web IDL"/>

			<outline text="interface ChannelSplitterNode : AudioNode {};4.19. The ChannelMergerNode InterfaceThe ChannelMergerNode is for use in more advanced applications and would often be used in conjunction with ChannelSplitterNode."/>

			<outline text="numberOfInputs : Variable N (default to 6) // number of connected inputs may be less than this numberOfOutputs : 1 channelCountMode = &quot;max&quot;; channelInterpretation = &quot;speakers&quot;;This interface represents an AudioNode for combining channels from multiple audio streams into a single audio stream. It has a variable number of inputs (defaulting to 6), but not all of them need be connected. There is a single output whose audio stream has a number of channels equal to the sum of the numbers of channels of all the connected inputs. For example, if an ChannelMergerNode has two connected inputs (both stereo), then the output will be four channels, the first two from the first input and the second two from the second input. In another example with two connected inputs (both mono), the output will be two channels (stereo), with the left channel coming from the first input and the right channel coming from the second input."/>

			<outline text="Example:Please note that in this example, the merger does not interpret the channel identities (such as left, right, etc.), but simply combines channels in the order that they are input."/>

			<outline text="Be aware that it is possible to connect an ChannelMergerNode in such a way that it outputs an audio stream with a large number of channels greater than the maximum supported by the audio hardware. In this case where such an output is connected to the AudioContext .destination (the audio hardware), then the extra channels will be ignored. Thus, the ChannelMergerNode should be used in situations where the number of channels is well understood."/>

			<outline text="Web IDL"/>

			<outline text="interface ChannelMergerNode : AudioNode {};4.20. The DynamicsCompressorNode InterfaceDynamicsCompressorNode is an AudioNode processor implementing a dynamics compression effect."/>

			<outline text="Dynamics compression is very commonly used in musical production and game audio. It lowers the volume of the loudest parts of the signal and raises the volume of the softest parts. Overall, a louder, richer, and fuller sound can be achieved. It is especially important in games and musical applications where large numbers of individual sounds are played simultaneous to control the overall signal level and help avoid clipping (distorting) the audio output to the speakers."/>

			<outline text="numberOfInputs : 1 numberOfOutputs : 1 channelCount = 2; channelCountMode = &quot;explicit&quot;; channelInterpretation = &quot;speakers&quot;;Web IDL"/>

			<outline text="interface DynamicsCompressorNode : AudioNode { readonly attribute AudioParam threshold; // in Decibels readonly attribute AudioParam knee; // in Decibels readonly attribute AudioParam ratio; // unit-less readonly attribute AudioParam reduction; // in Decibels readonly attribute AudioParam attack; // in Seconds readonly attribute AudioParam release; // in Seconds};4.20.1. AttributesAll parameters are k-rate"/>

			<outline text="thresholdThe decibel value above which the compression will start taking effect. Its default value is -24, with a nominal range of -100 to 0."/>

			<outline text="kneeA decibel value representing the range above the threshold where the curve smoothly transitions to the &quot;ratio&quot; portion. Its default value is 30, with a nominal range of 0 to 40."/>

			<outline text="ratioThe amount of dB change in input for a 1 dB change in output. Its default value is 12, with a nominal range of 1 to 20."/>

			<outline text="reductionA read-only decibel value for metering purposes, representing the current amount of gain reduction that the compressor is applying to the signal. If fed no signal the value will be 0 (no gain reduction). The nominal range is -20 to 0."/>

			<outline text="attackThe amount of time (in seconds) to reduce the gain by 10dB. Its default value is 0.003, with a nominal range of 0 to 1."/>

			<outline text="releaseThe amount of time (in seconds) to increase the gain by 10dB. Its default value is 0.250, with a nominal range of 0 to 1."/>

			<outline text="4.21. The BiquadFilterNode InterfaceBiquadFilterNode is an AudioNode processor implementing very common low-order filters."/>

			<outline text="Low-order filters are the building blocks of basic tone controls (bass, mid, treble), graphic equalizers, and more advanced filters. Multiple BiquadFilterNode filters can be combined to form more complex filters. The filter parameters such as &quot;frequency&quot; can be changed over time for filter sweeps, etc. Each BiquadFilterNode can be configured as one of a number of common filter types as shown in the IDL below. The default filter type is &quot;lowpass&quot;."/>

			<outline text="numberOfInputs : 1 numberOfOutputs : 1 channelCountMode = &quot;max&quot;; channelInterpretation = &quot;speakers&quot;;The number of channels of the output always equals the number of channels of the input."/>

			<outline text="Web IDL"/>

			<outline text="enum BiquadFilterType { &quot;lowpass&quot;, &quot;highpass&quot;, &quot;bandpass&quot;, &quot;lowshelf&quot;, &quot;highshelf&quot;, &quot;peaking&quot;, &quot;notch&quot;, &quot;allpass&quot;};interface BiquadFilterNode : AudioNode { attribute BiquadFilterType type; readonly attribute AudioParam frequency; // in Hertz readonly attribute AudioParam detune; // in Cents readonly attribute AudioParam Q; // Quality factor readonly attribute AudioParam gain; // in Decibels void getFrequencyResponse(Float32Array frequencyHz, Float32Array magResponse, Float32Array phaseResponse);};The filter types are briefly described below. We note that all of these filters are very commonly used in audio processing. In terms of implementation, they have all been derived from standard analog filter prototypes. For more technical details, we refer the reader to the excellent reference by Robert Bristow-Johnson."/>

			<outline text="All parameters are k-rate with the following default parameter values:"/>

			<outline text="frequency350Hz, with a nominal range of 10 to the Nyquist frequency (half the sample-rate).Q1, with a nominal range of 0.0001 to 1000.gain0, with a nominal range of -40 to 40.4.21.1 &quot;lowpass&quot;A lowpass filter allows frequencies below the cutoff frequency to pass through and attenuates frequencies above the cutoff. It implements a standard second-order resonant lowpass filter with 12dB/octave rolloff."/>

			<outline text="frequencyThe cutoff frequencyQControls how peaked the response will be at the cutoff frequency. A large value makes the response more peaked. Please note that for this filter type, this value is not a traditional Q, but is a resonance value in decibels.gainNot used in this filter type4.21.2 &quot;highpass&quot;A highpass filter is the opposite of a lowpass filter. Frequencies above the cutoff frequency are passed through, but frequencies below the cutoff are attenuated. It implements a standard second-order resonant highpass filter with 12dB/octave rolloff."/>

			<outline text="frequencyThe cutoff frequency below which the frequencies are attenuatedQControls how peaked the response will be at the cutoff frequency. A large value makes the response more peaked. Please note that for this filter type, this value is not a traditional Q, but is a resonance value in decibels.gainNot used in this filter type4.21.3 &quot;bandpass&quot;A bandpass filter allows a range of frequencies to pass through and attenuates the frequencies below and above this frequency range. It implements a second-order bandpass filter."/>

			<outline text="frequencyThe center of the frequency bandQControls the width of the band. The width becomes narrower as the Q value increases.gainNot used in this filter type4.21.4 &quot;lowshelf&quot;The lowshelf filter allows all frequencies through, but adds a boost (or attenuation) to the lower frequencies. It implements a second-order lowshelf filter."/>

			<outline text="frequencyThe upper limit of the frequencies where the boost (or attenuation) is applied.QNot used in this filter type.gainThe boost, in dB, to be applied. If the value is negative, the frequencies are attenuated.4.21.5 &quot;highshelf&quot;The highshelf filter is the opposite of the lowshelf filter and allows all frequencies through, but adds a boost to the higher frequencies. It implements a second-order highshelf filter."/>

			<outline text="frequencyThe lower limit of the frequencies where the boost (or attenuation) is applied.QNot used in this filter type.gainThe boost, in dB, to be applied. If the value is negative, the frequencies are attenuated.4.21.6 &quot;peaking&quot;The peaking filter allows all frequencies through, but adds a boost (or attenuation) to a range of frequencies."/>

			<outline text="frequencyThe center frequency of where the boost is applied.QControls the width of the band of frequencies that are boosted. A large value implies a narrow width.gainThe boost, in dB, to be applied. If the value is negative, the frequencies are attenuated.4.21.7 &quot;notch&quot;The notch filter (also known as a band-stop or band-rejection filter) is the opposite of a bandpass filter. It allows all frequencies through, except for a set of frequencies."/>

			<outline text="frequencyThe center frequency of where the notch is applied.QControls the width of the band of frequencies that are attenuated. A large value implies a narrow width.gainNot used in this filter type.4.21.8 &quot;allpass&quot;An allpass filter allows all frequencies through, but changes the phase relationship between the various frequencies. It implements a second-order allpass filter"/>

			<outline text="frequencyThe frequency where the center of the phase transition occurs. Viewed another way, this is the frequency with maximal group delay.QControls how sharp the phase transition is at the center frequency. A larger value implies a sharper transition and a larger group delay.gainNot used in this filter type.4.21.9. MethodsThe getFrequencyResponse methodGiven the current filter parameter settings, calculates the frequency response for the specified frequencies."/>

			<outline text="The frequencyHz parameter specifies an array of frequencies at which the response values will be calculated."/>

			<outline text="The magResponse parameter specifies an output array receiving the linear magnitude response values."/>

			<outline text="The phaseResponse parameter specifies an output array receiving the phase response values in radians."/>

			<outline text="4.22. The WaveShaperNode InterfaceWaveShaperNode is an AudioNode processor implementing non-linear distortion effects."/>

			<outline text="Non-linear waveshaping distortion is commonly used for both subtle non-linear warming, or more obvious distortion effects. Arbitrary non-linear shaping curves may be specified."/>

			<outline text="numberOfInputs : 1 numberOfOutputs : 1 channelCountMode = &quot;max&quot;; channelInterpretation = &quot;speakers&quot;;The number of channels of the output always equals the number of channels of the input."/>

			<outline text="Web IDL"/>

			<outline text="interface WaveShaperNode : AudioNode { attribute Float32Array curve;};4.22.1. AttributescurveThe shaping curve used for the waveshaping effect. The input signal is nominally within the range -1 -&gt; +1. Each input sample within this range will index into the shaping curve with a signal level of zero corresponding to the center value of the curve array. Any sample value less than -1 will correspond to the first value in the curve array. Any sample value greater than +1 will correspond to the last value in the curve array."/>

			<outline text="4.23. The OscillatorNode InterfaceOscillatorNode represents an audio source generating a periodic waveform. It can be set to a few commonly used waveforms. Additionally, it can be set to an arbitrary periodic waveform through the use of a WaveTable object."/>

			<outline text="Oscillators are common foundational building blocks in audio synthesis. An OscillatorNode will start emitting sound at the time specified by the start() method."/>

			<outline text="Mathematically speaking, a continuous-time periodic waveform can have very high (or infinitely high) frequency information when considered in the frequency domain. When this waveform is sampled as a discrete-time digital audio signal at a particular sample-rate, then care must be taken to discard (filter out) the high-frequency information higher than the Nyquist frequency (half the sample-rate) before converting the waveform to a digital form. If this is not done, then aliasing of higher frequencies (than the Nyquist frequency) will fold back as mirror images into frequencies lower than the Nyquist frequency. In many cases this will cause audibly objectionable artifacts. This is a basic and well understood principle of audio DSP."/>

			<outline text="There are several practical approaches that an implementation may take to avoid this aliasing. But regardless of approach, the idealized discrete-time digital audio signal is well defined mathematically. The trade-off for the implementation is a matter of implementation cost (in terms of CPU usage) versus fidelity to achieving this ideal."/>

			<outline text="It is expected that an implementation will take some care in achieving this ideal, but it is reasonable to consider lower-quality, less-costly approaches on lower-end hardware."/>

			<outline text="Both .frequency and .detune are a-rate parameters and are used together to determine a computedFrequency value:"/>

			<outline text="computedFrequency(t) = frequency(t) * pow(2, detune(t) / 1200)The OscillatorNode's instantaneous phase at each time is the time integral of computedFrequency."/>

			<outline text="numberOfInputs : 0 numberOfOutputs : 1 (mono output) Web IDL"/>

			<outline text="enum OscillatorType { &quot;sine&quot;, &quot;square&quot;, &quot;sawtooth&quot;, &quot;triangle&quot;, &quot;custom&quot;};interface OscillatorNode : AudioNode { attribute OscillatorType type; const unsigned short UNSCHEDULED_STATE = 0; const unsigned short SCHEDULED_STATE = 1; const unsigned short PLAYING_STATE = 2; const unsigned short FINISHED_STATE = 3; readonly attribute unsigned short playbackState; readonly attribute AudioParam frequency; // in Hertz readonly attribute AudioParam detune; // in Cents void start(double when); void stop(double when); void setWaveTable(WaveTable waveTable);};4.23.1. AttributestypeThe shape of the periodic waveform. It may directly be set to any of the type constant values except for &quot;custom&quot;. The setWaveTable() method can be used to set a custom waveform, which results in this attribute being set to &quot;custom&quot;. The default value is &quot;sine&quot;."/>

			<outline text="playbackStatedefined as in AudioBufferSourceNode."/>

			<outline text="frequencyThe frequency (in Hertz) of the periodic waveform. This parameter is a-rate"/>

			<outline text="detuneA detuning value (in Cents) which will offset the frequency by the given amount. This parameter is a-rate"/>

			<outline text="6. Mixer Gain StructureThis section is informative."/>

			<outline text="BackgroundOne of the most important considerations when dealing with audio processing graphs is how to adjust the gain (volume) at various points. For example, in a standard mixing board model, each input bus has pre-gain, post-gain, and send-gains. Submix and master out busses also have gain control. The gain control described here can be used to implement standard mixing boards as well as other architectures."/>

			<outline text="Summing Inputs"/>

			<outline text="The inputs to AudioNodes have the ability to accept connections from multiple outputs. The input then acts as a unity gain summing junction with each output signal being added with the others:"/>

			<outline text="In cases where the channel layouts of the outputs do not match, a mix (usually up-mix) will occur according to the mixing rules."/>

			<outline text="Gain ControlBut many times, it's important to be able to control the gain for each of the output signals. The GainNode gives this control:"/>

			<outline text="Using these two concepts of unity gain summing junctions and GainNodes, it's possible to construct simple or complex mixing scenarios."/>

			<outline text="Example: Mixer with Send BussesIn a routing scenario involving multiple sends and submixes, explicit control is needed over the volume or &quot;gain&quot; of each connection to a mixer. Such routing topologies are very common and exist in even the simplest of electronic gear sitting around in a basic recording studio."/>

			<outline text="Here's an example with two send mixers and a main mixer. Although possible, for simplicity's sake, pre-gain control and insert effects are not illustrated:"/>

			<outline text="This diagram is using a shorthand notation where &quot;send 1&quot;, &quot;send 2&quot;, and &quot;main bus&quot; are actually inputs to AudioNodes, but here are represented as summing busses, where the intersections g2_1, g3_1, etc. represent the &quot;gain&quot; or volume for the given source on the given mixer. In order to expose this gain, a GainNode is used:"/>

			<outline text="Here's how the above diagram could be constructed in JavaScript:"/>

			<outline text="ECMAScript"/>

			<outline text="var context = 0;var compressor = 0;var reverb = 0;var delay = 0;var s1 = 0;var s2 = 0;var source1 = 0;var source2 = 0;var g1_1 = 0;var g2_1 = 0;var g3_1 = 0;var g1_2 = 0;var g2_2 = 0;var g3_2 = 0;function setupRoutingGraph() { context = new AudioContext(); compressor = context.createDynamicsCompressor(); reverb = context.createConvolver(); delay = context.createDelay(); compressor.connect(context.destination); s1 = context.createGain(); reverb.connect(s1); s1.connect(compressor); s2 = context.createGain(); delay.connect(s2); s2.connect(compressor); source1 = context.createBufferSource(); source2 = context.createBufferSource(); source1.buffer = manTalkingBuffer; source2.buffer = footstepsBuffer; g1_1 = context.createGain(); g2_1 = context.createGain(); g3_1 = context.createGain(); source1.connect(g1_1); source1.connect(g2_1); source1.connect(g3_1); g1_1.connect(compressor); g2_1.connect(reverb); g3_1.connect(delay); g1_2 = context.createGain(); g2_2 = context.createGain(); g3_2 = context.createGain(); source2.connect(g1_2); source2.connect(g2_2); source2.connect(g3_2); g1_2.connect(compressor); g2_2.connect(reverb); g3_2.connect(delay); g2_1.gain.value = 0.2; } 7. Dynamic LifetimeBackgroundThis section is informative. Please see AudioContext lifetime and AudioNode lifetime for normative requirements"/>

			<outline text="In addition to allowing the creation of static routing configurations, it should also be possible to do custom effect routing on dynamically allocated voices which have a limited lifetime. For the purposes of this discussion, let's call these short-lived voices &quot;notes&quot;. Many audio applications incorporate the ideas of notes, examples being drum machines, sequencers, and 3D games with many one-shot sounds being triggered according to game play."/>

			<outline text="In a traditional software synthesizer, notes are dynamically allocated and released from a pool of available resources. The note is allocated when a MIDI note-on message is received. It is released when the note has finished playing either due to it having reached the end of its sample-data (if non-looping), it having reached a sustain phase of its envelope which is zero, or due to a MIDI note-off message putting it into the release phase of its envelope. In the MIDI note-off case, the note is not released immediately, but only when the release envelope phase has finished. At any given time, there can be a large number of notes playing but the set of notes is constantly changing as new notes are added into the routing graph, and old ones are released."/>

			<outline text="The audio system automatically deals with tearing-down the part of the routing graph for individual &quot;note&quot; events. A &quot;note&quot; is represented by an AudioBufferSourceNode, which can be directly connected to other processing nodes. When the note has finished playing, the context will automatically release the reference to the AudioBufferSourceNode, which in turn will release references to any nodes it is connected to, and so on. The nodes will automatically get disconnected from the graph and will be deleted when they have no more references. Nodes in the graph which are long-lived and shared between dynamic voices can be managed explicitly. Although it sounds complicated, this all happens automatically with no extra JavaScript handling required."/>

			<outline text="ExampleThe low-pass filter, panner, and second gain nodes are directly connected from the one-shot sound. So when it has finished playing the context will automatically release them (everything within the dotted line). If there are no longer any JavaScript references to the one-shot sound and connected nodes, then they will be immediately removed from the graph and deleted. The streaming source, has a global reference and will remain connected until it is explicitly disconnected. Here's how it might look in JavaScript:"/>

			<outline text="ECMAScript"/>

			<outline text="var context = 0;var compressor = 0;var gainNode1 = 0;var streamingAudioSource = 0; function setupAudioContext() { context = new AudioContext(); compressor = context.createDynamicsCompressor(); gainNode1 = context.createGain(); // Create a streaming audio source. var audioElement = document.getElementById('audioTagID'); streamingAudioSource = context.createMediaElementSource(audioElement); streamingAudioSource.connect(gainNode1); gainNode1.connect(compressor); compressor.connect(context.destination);}function playSound() { var oneShotSound = context.createBufferSource(); oneShotSound.buffer = dogBarkingBuffer; var lowpass = context.createBiquadFilter(); var panner = context.createPanner(); var gainNode2 = context.createGain(); oneShotSound.connect(lowpass); lowpass.connect(panner); panner.connect(gainNode2); gainNode2.connect(compressor); oneShotSound.start(context.currentTime + 0.75);}9. Channel up-mixing and down-mixingThis section is normative."/>

			<outline text="Mixer Gain Structure describes how an input to an AudioNode can be connected from one or more outputs of an AudioNode. Each of these connections from an output represents a stream with a specific non-zero number of channels. An input has mixing rules for combining the channels from all of the connections to it. As a simple example, if an input is connected from a mono output and a stereo output, then the mono connection will usually be up-mixed to stereo and summed with the stereo connection. But, of course, it's important to define the exact mixing rules for every input to every AudioNode. The default mixing rules for all of the inputs have been chosen so that things &quot;just work&quot; without worrying too much about the details, especially in the very common case of mono and stereo streams. But the rules can be changed for advanced use cases, especially multi-channel."/>

			<outline text="To define some terms, up-mixing refers to the process of taking a stream with a smaller number of channels and converting it to a stream with a larger number of channels. down-mixing refers to the process of taking a stream with a larger number of channels and converting it to a stream with a smaller number of channels."/>

			<outline text="An AudioNode input uses three basic pieces of information to determine how to mix all the outputs connected to it. As part of this process it computes an internal value computedNumberOfChannels representing the actual number of channels of the input at any given time:"/>

			<outline text="The AudioNode attributes involved in channel up-mixing and down-mixing rules are defined above. The following is a more precise specification on what each of them mean."/>

			<outline text="channelCount is used to help compute computedNumberOfChannels.channelCountMode determines how computedNumberOfChannels will be computed. Once this number is computed, all of the connections will be up or down-mixed to that many channels. For most nodes, the default value is &quot;max&quot;.''max'': computedNumberOfChannels is computed as the maximum of the number of channels of all connections. In this mode channelCount is ignored.''clamped-max'': same as ''max'' up to a limit of the channelCount''explicit'': computedNumberOfChannels is the exact value as specified in channelCountchannelInterpretation determines how the individual channels will be treated. For example, will they be treated as speakers having a specific layout, or will they be treated as simple discrete channels? This value influences exactly how the up and down mixing is performed. The default value is &quot;speakers&quot;.''speakers'': use up-down-mix equations for mono/stereo/quad/5.1. In cases where the number of channels do not match any of these basic speaker layouts, revert to &quot;discrete&quot;.''discrete'': up-mix by filling channels until they run out then zero out remaining channels. down-mix by filling as many channels as possible, then dropping remaining channelsFor each input of an AudioNode, an implementation must:"/>

			<outline text="Compute computedNumberOfChannels.For each connection to the input:up-mix or down-mix the connection to computedNumberOfChannels according to channelInterpretation.Mix it together with all of the other mixed streams (from other connections). This is a straight-forward mixing together of each of the corresponding channels from each connection.9.1. Speaker Channel LayoutsThis section is normative."/>

			<outline text="When channelInterpretation is &quot;speakers&quot; then the up-mixing and down-mixing is defined for specific channel layouts."/>

			<outline text="It's important to define the channel ordering (and define some abbreviations) for these speaker layouts."/>

			<outline text="For now, only considers cases for mono, stereo, quad, 5.1. Later other channel layouts can be defined."/>

			<outline text="9.1.1. Channel ordering Mono 0: M: mono Stereo 0: L: left 1: R: right Quad 0: L: left 1: R: right 2: SL: surround left 3: SR: surround right 5.1 0: L: left 1: R: right 2: C: center 3: LFE: subwoofer 4: SL: surround left 5: SR: surround right 9.1.2. Up Mixing speaker layoutsMono up-mix: 1 -&gt; 2 : up-mix from mono to stereo output.L = input; output.R = input; 1 -&gt; 4 : up-mix from mono to quad output.L = input; output.R = input; output.SL = 0; output.SR = 0; 1 -&gt; 5.1 : up-mix from mono to 5.1 output.L = 0; output.R = 0; output.C = input; // put in center channel output.LFE = 0; output.SL = 0; output.SR = 0;Stereo up-mix: 2 -&gt; 4 : up-mix from stereo to quad output.L = input.L; output.R = input.R; output.SL = 0; output.SR = 0; 2 -&gt; 5.1 : up-mix from stereo to 5.1 output.L = input.L; output.R = input.R; output.C = 0; output.LFE = 0; output.SL = 0; output.SR = 0;Quad up-mix: 4 -&gt; 5.1 : up-mix from stereo to 5.1 output.L = input.L; output.R = input.R; output.C = 0; output.LFE = 0; output.SL = input.SL; output.SR = input.SR;9.1.3. Down Mixing speaker layoutsA down-mix will be necessary, for example, if processing 5.1 source material, but playing back stereo."/>

			<outline text="Mono down-mix: 2 -&gt; 1 : stereo to mono output = 0.5 * (input.L + input.R); 4 -&gt; 1 : quad to mono output = 0.25 * (input.L + input.R + input.SL + input.SR); 5.1 -&gt; 1 : 5.1 to mono output = 0.7071 * (input.L + input.R) + input.C + 0.5 * (input.SL + input.SR)Stereo down-mix: 4 -&gt; 2 : quad to stereo output.L = 0.5 * (input.L + input.SL); output.R = 0.5 * (input.R + input.SR); 5.1 -&gt; 2 : 5.1 to stereo output.L = L + 0.7071 * (input.C + input.SL) output.R = R + 0.7071 * (input.C + input.SR)Quad down-mix: 5.1 -&gt; 4 : 5.1 to quad output.L = L + 0.7071 * input.C output.R = R + 0.7071 * input.C output.SL = input.SL output.SR = input.SR9.2. Channel Rules ExamplesThis section is informative."/>

			<outline text="// Set gain node to explicit 2-channels (stereo).gain.channelCount = 2;gain.channelCountMode = &quot;explicit&quot;;gain.channelInterpretation = &quot;speakers&quot;;// Set ''hardware output'' to 4-channels for DJ-app with two stereo output busses.context.destination.channelCount = 4;context.destination.channelCountMode = &quot;explicit&quot;;context.destination.channelInterpretation = &quot;discrete&quot;;// Set ''hardware output'' to 8-channels for custom multi-channel speaker array// with custom matrix mixing.context.destination.channelCount = 8;context.destination.channelCountMode = &quot;explicit&quot;;context.destination.channelInterpretation = &quot;discrete&quot;;// Set ''hardware output'' to 5.1 to play an HTMLAudioElement.context.destination.channelCount = 6;context.destination.channelCountMode = &quot;explicit&quot;;context.destination.channelInterpretation = &quot;speakers&quot;;// Explicitly down-mix to mono.gain.channelCount = 1;gain.channelCountMode = &quot;explicit&quot;;gain.channelInterpretation = &quot;speakers&quot;;11. Spatialization / PanningBackgroundA common feature requirement for modern 3D games is the ability to dynamically spatialize and move multiple audio sources in 3D space. Game audio engines such as OpenAL, FMOD, Creative's EAX, Microsoft's XACT Audio, etc. have this ability."/>

			<outline text="Using an PannerNode, an audio stream can be spatialized or positioned in space relative to an AudioListener. An AudioContext will contain a single AudioListener. Both panners and listeners have a position in 3D space using a right-handed cartesian coordinate system. The units used in the coordinate system are not defined, and do not need to be because the effects calculated with these coordinates are independent/invariant of any particular units such as meters or feet. PannerNode objects (representing the source stream) have an orientation vector representing in which direction the sound is projecting. Additionally, they have a sound cone representing how directional the sound is. For example, the sound could be omnidirectional, in which case it would be heard anywhere regardless of its orientation, or it can be more directional and heard only if it is facing the listener. AudioListener objects (representing a person's ears) have an orientation and up vector representing in which direction the person is facing. Because both the source stream and the listener can be moving, they both have a velocity vector representing both the speed and direction of movement. Taken together, these two velocities can be used to generate a doppler shift effect which changes the pitch."/>

			<outline text="During rendering, the PannerNode calculates an azimuth and elevation. These values are used internally by the implementation in order to render the spatialization effect. See the Panning Algorithm section for details of how these values are used."/>

			<outline text="The following algorithm must be used to calculate the azimuth and elevation:"/>

			<outline text="// Calculate the source-listener vector.vec3 sourceListener = source.position - listener.position;if (sourceListener.isZero()) { // Handle degenerate case if source and listener are at the same point. azimuth = 0; elevation = 0; return;}sourceListener.normalize();// Align axes.vec3 listenerFront = listener.orientation;vec3 listenerUp = listener.up;vec3 listenerRight = listenerFront.cross(listenerUp);listenerRight.normalize();vec3 listenerFrontNorm = listenerFront;listenerFrontNorm.normalize();vec3 up = listenerRight.cross(listenerFrontNorm);float upProjection = sourceListener.dot(up);vec3 projectedSource = sourceListener - upProjection * up;projectedSource.normalize();azimuth = 180 * acos(projectedSource.dot(listenerRight)) / PI;// Source in front or behind the listener.double frontBack = projectedSource.dot(listenerFrontNorm);if (frontBack &lt; 0) azimuth = 360 - azimuth;// Make azimuth relative to &quot;front&quot; and not &quot;right&quot; listener vector.if ((azimuth &gt;= 0) &amp;&amp; (azimuth &lt;= 270)) azimuth = 90 - azimuth;else azimuth = 450 - azimuth;// Elevation.elevation = 90 - 180 * acos(sourceListener.dot(up)) / PI;if (elevation &gt; 90) elevation = 180 - elevation;else if (elevation &lt; -90) elevation = -180 - elevation;Panning Algorithmmono-&gt;stereo and stereo-&gt;stereo panning must be supported. mono-&gt;stereo processing is used when all connections to the input are mono. Otherwise stereo-&gt;stereo processing is used."/>

			<outline text="The following algorithms must be implemented:"/>

			<outline text="Equal-power (Vector-based) panningThis is a simple and relatively inexpensive algorithm which provides basic, but reasonable results. It is commonly used when panning musical sources."/>

			<outline text="The elevation value is ignored in this panning algorithm.The following steps are used for processing:"/>

			<outline text="The azimuth value is first contained to be within the range -90 -&gt; +90. if (azimuth &lt; -90) azimuth = -180 - azimuth; else if (azimuth &gt; 90) azimuth = 180 - azimuth; A 0 -&gt; 1 normalized value x is calculated from azimuth for mono-&gt;stereo as:"/>

			<outline text="x = (azimuth + 90) / 180 Or for stereo-&gt;stereo as:"/>

			<outline text="if (azimuth &lt;= 0) { // from -90 -&gt; 0 // inputL -&gt; outputL and &quot;equal-power pan&quot; inputR as in mono case // by transforming the &quot;azimuth&quot; value from -90 -&gt; 0 degrees into the range -90 -&gt; +90. x = (azimuth + 90) / 90; } else { // from 0 -&gt; +90 // inputR -&gt; outputR and &quot;equal-power pan&quot; inputL as in mono case // by transforming the &quot;azimuth&quot; value from 0 -&gt; +90 degrees into the range -90 -&gt; +90. x = azimuth / 90; } Left and right gain values are then calculated:"/>

			<outline text="gainL = cos(0.5 * PI * x); gainR = sin(0.5 * PI * x); For mono-&gt;stereo, the output is calculated as:"/>

			<outline text="outputL = input * gainL outputR = input * gainR Else for stereo-&gt;stereo, the output is calculated as:"/>

			<outline text="if (azimuth &lt;= 0) { // from -90 -&gt; 0 outputL = inputL + inputR * gainL; outputR = inputR * gainR; } else { // from 0 -&gt; +90 outputL = inputL * gainL; outputR = inputR + inputL * gainR; } HRTF panning (stereo only)This requires a set of HRTF impulse responses recorded at a variety of azimuths and elevations. There are a small number of open/free impulse responses available. The implementation requires a highly optimized convolution function. It is somewhat more costly than &quot;equal-power&quot;, but provides a more spatialized sound."/>

			<outline text="Distance EffectsSounds which are closer are louder, while sounds further away are quieter. Exactly how a sound's volume changes according to distance from the listener depends on the distanceModel attribute."/>

			<outline text="During audio rendering, a distance value will be calculated based on the panner and listener positions according to:"/>

			<outline text="v = panner.position - listener.positiondistance = sqrt(dot(v, v))distance will then be used to calculate distanceGain which depends on the distanceModel attribute. See the distanceModel section for details of how this is calculated for each distance model."/>

			<outline text="As part of its processing, the PannerNode scales/multiplies the input audio signal by distanceGain to make distant sounds quieter and nearer ones louder."/>

			<outline text="Sound ConesThe listener and each sound source have an orientation vector describing which way they are facing. Each sound source's sound projection characteristics are described by an inner and outer &quot;cone&quot; describing the sound intensity as a function of the source/listener angle from the source's orientation vector. Thus, a sound source pointing directly at the listener will be louder than if it is pointed off-axis. Sound sources can also be omni-directional."/>

			<outline text="The following algorithm must be used to calculate the gain contribution due to the cone effect, given the source (the PannerNode) and the listener:"/>

			<outline text="if (source.orientation.isZero() || ((source.coneInnerAngle == 360) &amp;&amp; (source.coneOuterAngle == 360))) return 1; // no cone specified - unity gain// Normalized source-listener vectorvec3 sourceToListener = listener.position - source.position;sourceToListener.normalize();vec3 normalizedSourceOrientation = source.orientation;normalizedSourceOrientation.normalize();// Angle between the source orientation vector and the source-listener vectordouble dotProduct = sourceToListener.dot(normalizedSourceOrientation);double angle = 180 * acos(dotProduct) / PI;double absAngle = fabs(angle);// Divide by 2 here since API is entire angle (not half-angle)double absInnerAngle = fabs(source.coneInnerAngle) / 2;double absOuterAngle = fabs(source.coneOuterAngle) / 2;double gain = 1;if (absAngle &lt;= absInnerAngle) // No attenuation gain = 1;else if (absAngle &gt;= absOuterAngle) // Max attenuation gain = source.coneOuterGain;else { // Between inner and outer cones // inner -&gt; outer, x goes from 0 -&gt; 1 double x = (absAngle - absInnerAngle) / (absOuterAngle - absInnerAngle); gain = (1 - x) + source.coneOuterGain * x;}return gain;Doppler ShiftIntroduces a pitch shift which can realistically simulate moving sources.Depends on: source / listener velocity vectors, speed of sound, doppler factor.The following algorithm must be used to calculate the doppler shift value which is used as an additional playback rate scalar for all AudioBufferSourceNodes connecting directly or indirectly to the AudioPannerNode:"/>

			<outline text="double dopplerShift = 1; // Initialize to default valuedouble dopplerFactor = listener.dopplerFactor;if (dopplerFactor &gt; 0) { double speedOfSound = listener.speedOfSound; // Don't bother if both source and listener have no velocity. if (!source.velocity.isZero() || !listener.velocity.isZero()) { // Calculate the source to listener vector. vec3 sourceToListener = source.position - listener.position; double sourceListenerMagnitude = sourceToListener.length(); double listenerProjection = sourceToListener.dot(listener.velocity) / sourceListenerMagnitude; double sourceProjection = sourceToListener.dot(source.velocity) / sourceListenerMagnitude; listenerProjection = -listenerProjection; sourceProjection = -sourceProjection; double scaledSpeedOfSound = speedOfSound / dopplerFactor; listenerProjection = min(listenerProjection, scaledSpeedOfSound); sourceProjection = min(sourceProjection, scaledSpeedOfSound); dopplerShift = ((speedOfSound - dopplerFactor * listenerProjection) / (speedOfSound - dopplerFactor * sourceProjection)); fixNANs(dopplerShift); // Avoid illegal values // Limit the pitch shifting to 4 octaves up and 3 octaves down. dopplerShift = min(dopplerShift, 16); dopplerShift = max(dopplerShift, 0.125); }}12. Linear Effects using ConvolutionBackgroundConvolution is a mathematical process which can be applied to an audio signal to achieve many interesting high-quality linear effects. Very often, the effect is used to simulate an acoustic space such as a concert hall, cathedral, or outdoor amphitheater. It can also be used for complex filter effects, like a muffled sound coming from inside a closet, sound underwater, sound coming through a telephone, or playing through a vintage speaker cabinet. This technique is very commonly used in major motion picture and music production and is considered to be extremely versatile and of high quality."/>

			<outline text="Each unique effect is defined by an impulse response. An impulse response can be represented as an audio file and can be recorded from a real acoustic space such as a cave, or can be synthetically generated through a great variety of techniques."/>

			<outline text="Motivation for use as a StandardA key feature of many game audio engines (OpenAL, FMOD, Creative's EAX, Microsoft's XACT Audio, etc.) is a reverberation effect for simulating the sound of being in an acoustic space. But the code used to generate the effect has generally been custom and algorithmic (generally using a hand-tweaked set of delay lines and allpass filters which feedback into each other). In nearly all cases, not only is the implementation custom, but the code is proprietary and closed-source, each company adding its own &quot;black magic&quot; to achieve its unique quality. Each implementation being custom with a different set of parameters makes it impossible to achieve a uniform desired effect. And the code being proprietary makes it impossible to adopt a single one of the implementations as a standard. Additionally, algorithmic reverberation effects are limited to a relatively narrow range of different effects, regardless of how the parameters are tweaked."/>

			<outline text="A convolution effect solves these problems by using a very precisely defined mathematical algorithm as the basis of its processing. An impulse response represents an exact sound effect to be applied to an audio stream and is easily represented by an audio file which can be referenced by URL. The range of possible effects is enormous."/>

			<outline text="Implementation GuideLinear convolution can be implemented efficiently. Here are some notes describing how it can be practically implemented."/>

			<outline text="Reverb Effect (with matrixing)This section is normative."/>

			<outline text="In the general case the source has N input channels, the impulse response has K channels, and the playback system has M output channels. Thus it's a matter of how to matrix these channels to achieve the final result."/>

			<outline text="The subset of N, M, K below must be implemented (note that the first image in the diagram is just illustrating the general case and is not normative, while the following images are normative). Without loss of generality, developers desiring more complex and arbitrary matrixing can use multiple ConvolverNode objects in conjunction with a ChannelMergerNode."/>

			<outline text="Single channel convolution operates on a mono audio input, using a mono impulse response, and generating a mono output. But to achieve a more spacious sound, 2 channel audio inputs and 1, 2, or 4 channel impulse responses will be considered. The following diagram, illustrates the common cases for stereo playback where N and M are 1 or 2 and K is 1, 2, or 4."/>

			<outline text="Recording Impulse ResponsesThis section is informative."/>

			<outline text="The most modern and accurate way to record the impulse response of a real acoustic space is to use a long exponential sine sweep. The test-tone can be as long as 20 or 30 seconds, or longer.Several recordings of the test tone played through a speaker can be made with microphones placed and oriented at various positions in the room. It's important to document speaker placement/orientation, the types of microphones, their settings, placement, and orientations for each recording taken."/>

			<outline text="Post-processing is required for each of these recordings by performing an inverse-convolution with the test tone, yielding the impulse response of the room with the corresponding microphone placement. These impulse responses are then ready to be loaded into the convolution reverb engine to re-create the sound of being in the room."/>

			<outline text="ToolsTwo command-line tools have been written:generate_testtones generates an exponential sine-sweep test-tone and its inverse. Another tool convolve was written for post-processing. With these tools, anybody with recording equipment can record their own impulse responses. To test the tools in practice, several recordings were made in a warehouse space with interesting acoustics. These were later post-processed with the command-line tools."/>

			<outline text="% generate_testtones -hUsage: generate_testtone [-o /Path/To/File/To/Create] Two files will be created: .tone and .inverse [-rate ] sample rate of the generated test tones [-duration ] The duration, in seconds, of the generated files [-min_freq ] The minimum frequency, in hertz, for the sine sweep% convolve -hUsage: convolve input_file impulse_response_file output_fileRecording SetupAudio Interface: Metric Halo Mobile I/O 2882"/>

			<outline text="Microphones: AKG 414s, Speaker: Mackie HR824"/>

			<outline text="The Warehouse Space13. JavaScript Synthesis and ProcessingThis section is informative."/>

			<outline text="The Mozilla project has conducted Experiments to synthesize and process audio directly in JavaScript. This approach is interesting for a certain class of audio processing and they have produced a number of impressive demos. This specification includes a means of synthesizing and processing directly using JavaScript by using a special subtype of AudioNode called ScriptProcessorNode."/>

			<outline text="Here are some interesting examples where direct JavaScript processing can be useful:"/>

			<outline text="Custom DSP EffectsUnusual and interesting custom audio processing can be done directly in JS. It's also a good test-bed for prototyping new algorithms. This is an extremely rich area."/>

			<outline text="Educational ApplicationsJS processing is ideal for illustrating concepts in computer music synthesis and processing, such as showing the de-composition of a square wave into its harmonic components, FM synthesis techniques, etc."/>

			<outline text="JavaScript PerformanceJavaScript has a variety of performance issues so it is not suitable for all types of audio processing. The approach proposed in this document includes the ability to perform computationally intensive aspects of the audio processing (too expensive for JavaScript to compute in real-time) such as multi-source 3D spatialization and convolution in optimized C++ code. Both direct JavaScript processing and C++ optimized code can be combined due to the API's modular approach."/>

			<outline text="15. Performance Considerations15.1. Latency: What it is and Why it's Important"/>

			<outline text="For web applications, the time delay between mouse and keyboard events (keydown, mousedown, etc.) and a sound being heard is important."/>

			<outline text="This time delay is called latency and is caused by several factors (input device latency, internal buffering latency, DSP processing latency, output device latency, distance of user's ears from speakers, etc.), and is cumulative. The larger this latency is, the less satisfying the user's experience is going to be. In the extreme, it can make musical production or game-play impossible. At moderate levels it can affect timing and give the impression of sounds lagging behind or the game being non-responsive. For musical applications the timing problems affect rhythm. For gaming, the timing problems affect precision of gameplay. For interactive applications, it generally cheapens the user's experience much in the same way that very low animation frame-rates do. Depending on the application, a reasonable latency can be from as low as 3-6 milliseconds to 25-50 milliseconds."/>

			<outline text="15.2. Audio Glitching"/>

			<outline text="Audio glitches are caused by an interruption of the normal continuous audio stream, resulting in loud clicks and pops. It is considered to be a catastrophic failure of a multi-media system and must be avoided. It can be caused by problems with the threads responsible for delivering the audio stream to the hardware, such as scheduling latencies caused by threads not having the proper priority and time-constraints. It can also be caused by the audio DSP trying to do more work than is possible in real-time given the CPU's speed."/>

			<outline text="15.3. Hardware ScalabilityThe system should gracefully degrade to allow audio processing under resource constrained conditions without dropping audio frames."/>

			<outline text="First of all, it should be clear that regardless of the platform, the audio processing load should never be enough to completely lock up the machine. Second, the audio rendering needs to produce a clean, un-interrupted audio stream without audible glitches."/>

			<outline text="The system should be able to run on a range of hardware, from mobile phones and tablet devices to laptop and desktop computers. But the more limited compute resources on a phone device make it necessary to consider techniques to scale back and reduce the complexity of the audio rendering. For example, voice-dropping algorithms can be implemented to reduce the total number of notes playing at any given time."/>

			<outline text="Here's a list of some techniques which can be used to limit CPU usage:"/>

			<outline text="15.3.1. CPU monitoringIn order to avoid audio breakup, CPU usage must remain below 100%."/>

			<outline text="The relative CPU usage can be dynamically measured for each AudioNode (and chains of connected nodes) as a percentage of the rendering time quantum. In a single-threaded implementation, overall CPU usage must remain below 100%. The measured usage may be used internally in the implementation for dynamic adjustments to the rendering. It may also be exposed through a cpuUsage attribute of AudioNode for use by JavaScript."/>

			<outline text="In cases where the measured CPU usage is near 100% (or whatever threshold is considered too high), then an attempt to add additional AudioNodes into the rendering graph can trigger voice-dropping."/>

			<outline text="15.3.2. Voice DroppingVoice-dropping is a technique which limits the number of voices (notes) playing at the same time to keep CPU usage within a reasonable range. There can either be an upper threshold on the total number of voices allowed at any given time, or CPU usage can be dynamically monitored and voices dropped when CPU usage exceeds a threshold. Or a combination of these two techniques can be applied. When CPU usage is monitored for each voice, it can be measured all the way from a source node through any effect processing nodes which apply uniquely to that voice."/>

			<outline text="When a voice is &quot;dropped&quot;, it needs to happen in such a way that it doesn't introduce audible clicks or pops into the rendered audio stream. One way to achieve this is to quickly fade-out the rendered audio for that voice before completely removing it from the rendering graph."/>

			<outline text="When it is determined that one or more voices must be dropped, there are various strategies for picking which voice(s) to drop out of the total ensemble of voices currently playing. Here are some of the factors which can be used in combination to help with this decision:"/>

			<outline text="Older voices, which have been playing the longest can be dropped instead of more recent voices.Quieter voices, which are contributing less to the overall mix may be dropped instead of louder ones.Voices which are consuming relatively more CPU resources may be dropped instead of less &quot;expensive&quot; voices.An AudioNode can have a priority attribute to help determine the relative importance of the voices.15.3.3. Simplification of Effects ProcessingMost of the effects described in this document are relatively inexpensive and will likely be able to run even on the slower mobile devices. However, the convolution effect can be configured with a variety of impulse responses, some of which will likely be too heavy for mobile devices. Generally speaking, CPU usage scales with the length of the impulse response and the number of channels it has. Thus, it is reasonable to consider that impulse responses which exceed a certain length will not be allowed to run. The exact limit can be determined based on the speed of the device. Instead of outright rejecting convolution with these long responses, it may be interesting to consider truncating the impulse responses to the maximum allowed length and/or reducing the number of channels of the impulse response."/>

			<outline text="In addition to the convolution effect, the PannerNode may also be expensive if using the HRTF panning model. For slower devices, a cheaper algorithm such as EQUALPOWER can be used to conserve compute resources."/>

			<outline text="15.3.4. Sample RateFor very slow devices, it may be worth considering running the rendering at a lower sample-rate than normal. For example, the sample-rate can be reduced from 44.1KHz to 22.05KHz. This decision must be made when the AudioContext is created, because changing the sample-rate on-the-fly can be difficult to implement and will result in audible glitching when the transition is made."/>

			<outline text="15.3.5. Pre-flightingIt should be possible to invoke some kind of &quot;pre-flighting&quot; code (through JavaScript) to roughly determine the power of the machine. The JavaScript code can then use this information to scale back any more intensive processing it may normally run on a more powerful machine. Also, the underlying implementation may be able to factor in this information in the voice-dropping algorithm."/>

			<outline text="TODO: add specification and more detail here"/>

			<outline text="15.3.6. Authoring for different user agentsJavaScript code can use information about user-agent to scale back any more intensive processing it may normally run on a more powerful machine.15.3.7. Scalability of Direct JavaScript Synthesis / ProcessingAny audio DSP / processing code done directly in JavaScript should also be concerned about scalability. To the extent possible, the JavaScript code itself needs to monitor CPU usage and scale back any more ambitious processing when run on less powerful devices. If it's an &quot;all or nothing&quot; type of processing, then user-agent check or pre-flighting should be done to avoid generating an audio stream with audio breakup."/>

			<outline text="15.4. JavaScript Issues with real-time Processing and Synthesis:"/>

			<outline text="While processing audio in JavaScript, it is extremely challenging to get reliable, glitch-free audio while achieving a reasonably low-latency, especially under heavy processor load.JavaScript is very much slower than heavily optimized C++ code and is not able to take advantage of SSE optimizations and multi-threading which is critical for getting good performance on today's processors. Optimized native code can be on the order of twenty times faster for processing FFTs as compared with JavaScript. It is not efficient enough for heavy-duty processing of audio such as convolution and 3D spatialization of large numbers of audio sources.setInterval() and XHR handling will steal time from the audio processing. In a reasonably complex game, some JavaScript resources will be needed for game physics and graphics. This creates challenges because audio rendering is deadline driven (to avoid glitches and get low enough latency).JavaScript does not run in a real-time processing thread and thus can be pre-empted by many other threads running on the system.Garbage Collection (and autorelease pools on Mac OS X) can cause unpredictable delay on a JavaScript thread.Multiple JavaScript contexts can be running on the main thread, stealing time from the context doing the processing.Other code (other than JavaScript) such as page rendering runs on the main thread.Locks can be taken and memory is allocated on the JavaScript thread. This can cause additional thread preemption.The problems are even more difficult with today's generation of mobile devices which have processors with relatively poor performance and power consumption / battery-life issues.16. Example ApplicationsThis section is informative."/>

			<outline text="Please see the demo page for working examples."/>

			<outline text="Here are some of the types of applications a web audio system should be able to support:"/>

			<outline text="Basic Sound PlaybackSimple and low-latency playback of sound effects in response to simple user actions such as mouse click, roll-over, key press."/>

			<outline text="3D Environments and GamesElectronic Arts has produced an impressive immersive game called Strike Fortress, taking advantage of 3D spatialization and convolution for room simulation."/>

			<outline text="3D environments with audio are common in games made for desktop applications and game consoles. Imagine a 3D island environment with spatialized audio, seagulls flying overhead, the waves crashing against the shore, the crackling of the fire, the creaking of the bridge, and the rustling of the trees in the wind. The sounds can be positioned naturally as one moves through the scene. Even going underwater, low-pass filters can be tweaked for just the right underwater sound."/>

			<outline text="Box2D is an interesting open-source library for 2D game physics. It has various implementations, including one based on Canvas 2D. A demo has been created with dynamic sound effects for each of the object collisions, taking into account the velocities vectors and positions to spatialize the sound events, and modulate audio effect parameters such as filter cutoff."/>

			<outline text="A virtual pool game with multi-sampled sound effects has also been created."/>

			<outline text="Musical ApplicationsMany music composition and production applications are possible. Applications requiring tight scheduling of audio events can be implemented and can be both educational and entertaining. Drum machines, digital DJ applications, and even timeline-based digital music production software with some of the features of GarageBand can be written."/>

			<outline text="Music VisualizersWhen combined with WebGL GLSL shaders, realtime analysis data can be presented in entertaining ways. These can be as advanced as any found in iTunes."/>

			<outline text="Educational ApplicationsA variety of educational applications can be written, illustrating concepts in music theory and computer music synthesis and processing."/>

			<outline text="Artistic Audio ExplorationThere are many creative possibilities for artistic sonic environments for installation pieces."/>

			<outline text="17. Security ConsiderationsThis section is informative."/>

			<outline text="18. Privacy ConsiderationsThis section is informative. When giving various information on available AudioNodes, the Web Audio API potentially exposes information on characteristic features of the client (such as audio hardware sample-rate) to any page that makes use of the AudioNode interface. Additionally, timing information can be collected through the RealtimeAnalyzerNode or ScriptProcessorNode interface. The information could subsequently be used to create a fingerprint of the client."/>

			<outline text="Currently audio input is not specified in this document, but it will involve gaining access to the client machine's audio input or microphone. This will require asking the user for permission in an appropriate way, probably via the getUserMedia() API."/>

			<outline text="20. Required Support for Alternate NamesThis section is normative."/>

			<outline text="Some method and attribute names have been improved during API review. The new names are described in the main body of this specification in the description for each node type, etc. An implementation must also support the older names:"/>

			<outline text="partial interface AudioBufferSourceNode { // Same as start() void noteOn(double when); void noteGrainOn(double when, double grainOffset, double grainDuration); // Same as stop() void noteOff(double when);};partial interface AudioContext { // Same as createGain() GainNode createGainNode(); // Same as createDelay() DelayNode createDelayNode(optional double maxDelayTime = 1.0); // Same as createScriptProcessor() ScriptProcessorNode createJavaScriptNode(unsigned long bufferSize, optional unsigned long numberOfInputChannels = 2, optional unsigned long numberOfOutputChannels = 2);};partial interface OscillatorNode { // Same as start() void noteOn(double when); // Same as stop() void noteOff(double when);};partial interface AudioParam { // Same as setTargetAtTime() void setTargetValueAtTime(float target, double startTime, double timeConstant);};Some attributes taking constant values have changed during API review. The old way uses integer values, while the new way uses Web IDL string values. An implementation must support both integer and string values for setting these attributes:"/>

			<outline text="// PannerNode constants for the .panningModel attribute// Old wayconst unsigned short EQUALPOWER = 0;const unsigned short HRTF = 1;const unsigned short SOUNDFIELD = 2;// New wayenum PanningModelType { &quot;equalpower&quot;, &quot;HRTF&quot;, &quot;soundfield&quot;};// PannerNode constants for the .distanceModel attribute// Old wayconst unsigned short LINEAR_DISTANCE = 0;const unsigned short INVERSE_DISTANCE = 1;const unsigned short EXPONENTIAL_DISTANCE = 2;// New wayenum DistanceModelType { &quot;linear&quot;, &quot;inverse&quot;, &quot;exponential&quot;};// BiquadFilterNode constants for the .type attribute// Old wayconst unsigned short LOWPASS = 0;const unsigned short HIGHPASS = 1;const unsigned short BANDPASS = 2;const unsigned short LOWSHELF = 3;const unsigned short HIGHSHELF = 4;const unsigned short PEAKING = 5;const unsigned short NOTCH = 6;const unsigned short ALLPASS = 7;// New wayenum BiquadFilterType { &quot;lowpass&quot;, &quot;highpass&quot;, &quot;bandpass&quot;, &quot;lowshelf&quot;, &quot;highshelf&quot;, &quot;peaking&quot;, &quot;notch&quot;, &quot;allpass&quot;};// OscillatorNode constants for the .type attribute// Old wayconst unsigned short SINE = 0;const unsigned short SQUARE = 1;const unsigned short SAWTOOTH = 2;const unsigned short TRIANGLE = 3;const unsigned short CUSTOM = 4;// New wayenum OscillatorType { &quot;sine&quot;, &quot;square&quot;, &quot;sawtooth&quot;, &quot;triangle&quot;, &quot;custom&quot;};A.ReferencesA.2 Informative referencesNo informative references."/>

			<outline text="B.AcknowledgementsSpecial thanks to the W3C Audio Working Group. Members of the Working Group are (at the time of writing, and by alphabetical order):Berkovitz, Joe (public Invited expert);Cardoso, Gabriel (INRIA);Carlson, Eric (Apple, Inc.);Gregan, Matthew (Mozilla Foundation);J&#228;genstedt, Philip (Opera Software);Kalliokoski, Jussi (public Invited expert);Lowis, Chris (British Broadcasting Corporation);MacDonald, Alistair (W3C Invited Experts);Michel, Thierry (W3C/ERCIM);Noble, Jer (Apple, Inc.);O'Callahan, Robert(Mozilla Foundation);Paradis, Matthew (British Broadcasting Corporation);Raman, T.V. (Google, Inc.);Rogers, Chris (Google, Inc.);Schepers, Doug (W3C/MIT);Shires, Glen (Google, Inc.);Smith, Michael (W3C/Keio);Thereaux, Olivier (British Broadcasting Corporation);Wei, James (Intel Corporation);Wilson, Chris (Google, Inc.);"/>

			<outline text="C. Web Audio API Change Loguser: crogersdate: Sun Dec 09 17:13:56 2012 -0800summary: Basic description of OfflineAudioContextuser: crogersdate: Tue Dec 04 15:59:30 2012 -0800summary: minor correction to wording for minValue and maxValueuser: crogersdate: Tue Dec 04 15:49:29 2012 -0800summary: Bug 20161: Make decodeAudioData neuter its array buffer argument when it begins decoding a buffer, and bring it back to normal when the decoding is finisheduser: crogersdate: Tue Dec 04 15:35:17 2012 -0800summary: Bug 20039: Refine description of audio decodinguser: crogersdate: Tue Dec 04 15:23:07 2012 -0800summary: elaborate on decoding steps for AudioContext createBuffer() and decodeAudioData()user: crogersdate: Tue Dec 04 14:56:19 2012 -0800summary: Bug 19770: Note that if the last event for an AudioParam is a setCurveValue event, the computed value after that event will be equal to the latest curve valueuser: crogersdate: Tue Dec 04 14:48:04 2012 -0800summary: Bug 19769: Note that before the first automation event, the computed AudioParam value will be AudioParam.valueuser: crogersdate: Tue Dec 04 14:40:51 2012 -0800summary: Bug 19768: Explicitly mention that the initial value of AudioParam.value will be defaultValueuser: crogersdate: Tue Dec 04 14:35:59 2012 -0800summary: Bug 19767: Explicitly mention that the 2nd component of AudioParam.computedValue will be 0 if there are no AudioNodes connected to ituser: crogersdate: Tue Dec 04 14:30:08 2012 -0800summary: Bug 19764: Note in the spec that AudioParam.minValue/maxValue are merely informationaluser: crogersdate: Mon Dec 03 18:03:13 2012 -0800summary: Convert integer constants to Web IDL enum string constantsuser: crogersdate: Mon Dec 03 15:19:22 2012 -0800summary: Bug 17411: (AudioPannerNodeUnits): AudioPannerNode units are underspecifieduser: Ehsan Akhgari (Mozilla)date: Thu Nov 29 15:59:38 2012 -0500summary: Change the Web IDL description of decodeAudioData argumentsuser: crogersdate: Wed Nov 14 
13:24:01 2012 -0800summary: Bug 17393: (UseDoubles): float/double inconsistencyuser: crogersdate: Wed Nov 14 13:16:57 2012 -0800summary: Bug 17356: (AudioListenerOrientation): AudioListener.setOrientation vectorsuser: crogersdate: Wed Nov 14 12:56:06 2012 -0800summary: Bug 19957: PannerNode.coneGain is unuseduser: crogersdate: Wed Nov 14 12:40:46 2012 -0800summary: Bug 17412: AudioPannerNodeVectorNormalization): AudioPannerNode orientation normalization unspecifieduser: crogersdate: Wed Nov 14 12:16:41 2012 -0800summary: Bug 17411: (AudioPannerNodeUnits): AudioPannerNode units are underspecifieduser: crogersdate: Tue Nov 13 16:14:22 2012 -0800summary: be more explicit about maxDelayTime unitsuser: crogersdate: Tue Nov 13 16:02:50 2012 -0800summary: Bug 19766: Clarify that reading AudioParam.computedValue will return the latest computed value for the latest audio quantumuser: crogersdate: Tue Nov 13 15:47:25 2012 -0800summary: Bug 19872: Should specify the defaults for PannerNode's position, ...user: crogersdate: Tue Nov 13 15:27:53 2012 -0800summary: Bug 17390: (Joe Berkovitz): Loop start/stop pointsuser: crogerdate: Tue Nov 13 14:49:20 2012 -0800summary: Bug 19765: Note that setting AudioParam.value will be ignored when any automation events have been set on the objectuser: crogersdate: Tue Nov 13 14:39:07 2012 -0800summary: Bug 19873: Clarify PannerNode.listeneruser: crogersdate: Tue Nov 13 13:35:21 2012 -0800summary: Bug 19900: Clarify the default values for the AudioParam attributes of BiquadFilterNodeuser: crogersdate: Tue Nov 13 13:06:38 2012 -0800summary: Bug 19884: Specify the default value and ranges for the DynamicsCompressorNode AudioParam membersuser: crogersdate: Tue Nov 13 12:57:02 2012 -0800summary: Bug 19910: Disallow AudioContext.createDelay(max) where max"/>

			</outline>

		<outline text="CWSkimmer Project Audio Data API - MozillaWiki">

			<outline text="Link to Article" type="link" url="https://wiki.mozilla.org/Audio_Data_API"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365734433_5r3fxUHD.html"/>

			<outline text="Thu, 11 Apr 2013 21:40"/>

			<outline text=""/>

			<outline text="Defining an Enhanced API for Audio (Draft Recommendation)AbstractThe HTML5 specification introduces the and media elements, and with them the opportunity to dramatically change the way we integrate media on the web. The current HTML5 media API provides ways to play and get limited information about audio and video, but gives no way to programmatically access or create such media. We present a new Mozilla extension to this API, which allows web developers to read and write raw audio data."/>

			<outline text="AuthorsOther ContributorsThomas SaundersTed MielczarekStandardization NotePlease note that this document describes a non-standard experimental API. This API is considered deprecated and may not be supported in future releases. The World Wide Web Consortium (W3C) has chartered the Audio Working Group to develop standardized audio API specifications, including Web Audio API. Please refer to the Audio Working Group website for further details."/>

			<outline text="API TutorialThis API extends the HTMLMediaElement and HTMLAudioElement (e.g., affecting and ), and implements the following basic API for reading and writing raw audio data:"/>

			<outline text="Reading AudioAudio data is made available via an event-based API. As the audio is played, and therefore decoded, sample data is passed to content scripts in a framebuffer for processing after becoming available to the audio layer--hence the name, MozAudioAvailable. These samples may or may not have been played yet at the time of the event. The audio samples returned in the event are raw, and have not been adjusted for mute/volume settings on the media element. Playing, pausing, and seeking the audio also affect the streaming of this raw audio data."/>

			<outline text="Users of this API can register two callbacks on the or element in order to consume this data:"/>

			<outline text="var audio = document.getElementById(&quot;audio&quot;); audio.addEventListener('MozAudioAvailable', audioAvailableFunction, false); audio.addEventListener('loadedmetadata', loadedMetadataFunction, false);The loadedmetadata event is a standard part of HTML5. It now indicates that a media element (audio or video) has useful metadata loaded, which can be accessed using three new attributes:"/>

			<outline text="mozChannelsmozSampleRatemozFrameBufferLengthPrior to the loadedmetadata event, accessing these attributes will cause an exception to be thrown, indicating that they are not known, or there is no audio. These attributes indicate the number of channels, audio sample rate per second, and the default size of the framebuffer that will be used in MozAudioAvailable events. This event is fired once as the media resource is first loaded, and is useful for interpreting or writing the audio data."/>

			<outline text="The MozAudioAvailable event provides two pieces of data. The first is a framebuffer (i.e., an array) containing decoded audio sample data (i.e., floats). The second is the time for these samples measured from the start in seconds. Web developers consume this event by registering an event listener in script like so:"/>

			<outline text="var audio = document.getElementById(&quot;audio&quot;); audio.addEventListener('MozAudioAvailable', someFunction, false);An audio or video element can also be created with script outside the DOM:"/>

			<outline text="var audio = new Audio();audio.src = &quot;song.ogg&quot;;audio.addEventListener('MozAudioAvailable', someFunction, false);audio.play();The following is an example of how both events might be used:"/>

			<outline text="var channels, rate, frameBufferLength, samples;function audioInfo() { var audio = document.getElementById('audio'); // After loadedmetadata event, following media element attributes are known: channels = audio.mozChannels; rate = audio.mozSampleRate; frameBufferLength = audio.mozFrameBufferLength;}function audioAvailable(event) { var samples = event.frameBuffer; var time = event.time; for (var i = 0; i &lt; &gt;This example calculates and displays FFT spectrum data for the playing audio:"/>

			<outline text="JavaScript Spectrum Example var canvas = document.getElementById('fft'), ctx = canvas.getContext('2d'), channels, rate, frameBufferLength, fft; function loadedMetadata() { channels = audio.mozChannels; rate = audio.mozSampleRate; frameBufferLength = audio.mozFrameBufferLength; fft = new FFT(frameBufferLength / channels, rate); } function audioAvailable(event) { var fb = event.frameBuffer, t = event.time, /* unused, but it's there */ signal = new Float32Array(fb.length / channels), magnitude; for (var i = 0, fbl = frameBufferLength / 2; i &lt; &gt;&gt; 1; while ( limit &lt; &gt; 1; } for ( var i = 0; i &lt; &gt;Audio data written using the mozWriteAudio() method needs to be written at a regular interval in equal portions, in order to keep a little ahead of the current sample offset (the sample offset that is currently being played by the hardware can be obtained with mozCurrentSampleOffset()), where a little means something on the order of 500ms of samples. For example, if working with 2 channels at 44100 samples per second, a writing interval of 100ms, and a pre-buffer equal to 500ms, one would write an array of (2 * 44100 / 10) = 8820 samples, and a total of (currentSampleOffset + 2 * 44100 / 2)."/>

			<outline text="It's also possible to auto detect the minimal duration of the pre-buffer, such that the sound is played without interruptions, and lag between writing and playback is minimal. To do this start writing the data in small portions and wait for the value returned by mozCurrentSampleOffset() to be more than 0."/>

			<outline text="var prebufferSize = sampleRate * 0.020; // Initial buffer is 20 msvar autoLatency = true, started = new Date().valueOf();...// Auto latency detectionif (autoLatency) { prebufferSize = Math.floor(sampleRate * (new Date().valueOf() - started) / 1000); if (audio.mozCurrentSampleOffset()) { // Play position moved? autoLatency = false; }}Complete Example: Creating a Web Based Tone GeneratorThis example creates a simple tone generator, and plays the resulting tone."/>

			<outline text="JavaScript Audio Write Example Hz play stop function AudioDataDestination(sampleRate, readFn) { // Initialize the audio output. var audio = new Audio(); audio.mozSetup(1, sampleRate); var currentWritePosition = 0; var prebufferSize = sampleRate / 2; // buffer 500ms var tail = null, tailPosition; // The function called with regular interval to populate // the audio output buffer. setInterval(function() { var written; // Check if some data was not written in previous attempts. if(tail) { written = audio.mozWriteAudio(tail.subarray(tailPosition)); currentWritePosition += written; tailPosition += written; if(tailPosition &lt; &gt; 0) { // Request some sound data from the callback function. var soundData = new Float32Array(available); readFn(soundData); // Writting the data. written = audio.mozWriteAudio(soundData); if(written &lt; soundData.length) { // Not all the data was written, saving the tail. tail = soundData; tailPosition = written; } currentWritePosition += written; } }, 100); } // Control and generate the sound. var frequency = 0, currentSoundSample; var sampleRate = 44100; function requestSoundData(soundData) { if (!frequency) { return; // no sound selected } var k = 2* Math.PI * frequency / sampleRate; for (var i=0, size=soundData.length; i"/>

			</outline>

		<outline text="Virginia's Fears of a 'Visa-for-Sale Scheme' - National Review Online">

			<outline text="Link to Article" type="link" url="http://www.nationalreview.com/blogs/print/345255"/>

			<outline text="Archived Version" type="link" url="http://adam.curry.com/art/1365731706_XyPa7aAg.html"/>

			<outline text="Thu, 11 Apr 2013 20:55"/>

			<outline text=""/>

			<outline text="What is the purpose of GreenTech Automotive?"/>

			<outline text="The company, founded by Terry McAuliffe, is now a top issue in this year's Virginia race for governor. Until recently, the controversy over the company centered on the firm's October 2009 decision to build a plant in Mississippi instead of Virginia. McAuliffe contended that he wanted to build a plant in Virginia, but the Virginia Economic Development Partnership (VEDP) '-- the state's business-recruitment agency '-- wouldn't play ball."/>

			<outline text="''We had sites, we had meetings, and they chose that they weren't going to bid on it,'' McAuliffe declared. PolitiFact looked at the paperwork and rated that assertion false, concluding that ''VEDP asked GreenTech to address its concerns and waited in vain for replies.''"/>

			<outline text="But internal communications from VEDP now reveal that the state agency didn't merely think that McAuliffe's company had a risky business model. At least two high-ranking officials actually suspected that the company's real aim was to make money by selling U.S. residency visas to wealthy foreigners."/>

			<outline text="In an e-mail dated November 17, 2009, Liz Povar, then the director of business development at VEDP, wrote to her colleagues:"/>

			<outline text="Sandi et al. Even if the company has investors ''lined up'', I maintain serious concerns about the establishment of an EB-5 center in general, and most specifically based on this company. Not only based on (lack of) management expertise, (lack of) market preparation, etc. but also still can't get my head around this being anything other than a visa-for-sale scheme with potential national security implications that we have no way to confirm or discount. . . . "/>

			<outline text="This ''feels'' like a national political play instead of a Virginia economic development opportunity. I am not willing to stake Virginia's reputation on this at this juncture. "/>

			<outline text="The e-mails were revealed pursuant to a Freedom of Information Act request filed by PolitiFact; 79 pages of documents were posted online in January."/>

			<outline text="Before the Mississippi Development Authority, a state agency, loaned GreenTech $5 million to help get started and buy the land for its production facility in that state, the company sought assistance and incentives from Virginia. High on its list of priorities was the establishment of a ''Regional Center'' to help attract foreign investors who would also be interested in obtaining an EB-5 U.S. residency visa."/>

			<outline text="Congress created the federal EB-5 program in 1990 to stimulate the U.S. economy through job creation and capital investment by foreign investors. To qualify, a foreigner must invest at least $1 million, or $500,000 in either a rural area or an area with high unemployment. The investment must ''create or preserve at least 10 full-time jobs for qualifying U.S. workers within two years.'' The government makes 10,000 EB-5 visas available each year, with 3,000 administered through Regional Centers, government-approved organizations that aim to help economic growth in a particular area. According to one advocate for the program quoted in the Memphis Commercial Appeal, three out of every four visa recipients come from China."/>

			<outline text="While the Regional Centers are not allowed to sell the U.S. visas, they are allowed to point out that investment in their projects may qualify a foreign citizen for a residence visa, and they may appear to suggest that one directly leads to the other. For example, at the top of the website for Gulf Coast Funds Management LLC, the Statue of Liberty's torch is next to the slogan, ''Invest in your future with EB-5.''"/>

			<outline text="One of Gulf Coast Funds Management's current clients is . . . GreenTech Automotive."/>

			<outline text="GreenTech Automotive asked Democratic Virginia governor Tim Kaine to write to secretary of homeland security Janet Napolitano to ask her to expand a Regional Center into the company's preferred corner of rural Virginia. (The site location that GreenTech sought has not been specified, although one VEDP memo refers to its being ''outside Richmond,'' and another refers to ''the Sussex site.'' Sussex County is in southeast Virginia.)"/>

			<outline text="Povar had been in her position at VEDP since January 1995, serving under both Democratic and Republican governors. At the time of GreenTech's request, her agency was tasked with making recommendations to the administration of outgoing governor Tim Kaine '-- who, at that point, was chairman of the Democratic National Committee. Lest one suspect that Povar had some kind of partisan axe to grind against McAuliffe, it should be noted that her sole campaign donation in recent years was $100 in 2009 to Democratic lieutenant governor candidate Jody Wagner."/>

			<outline text="About an hour after Povar sent her e-mail, Roy Dahlquist, the international-investment manager at VEDP, replied, ''Liz, please note that I agree with your observations and opinions on this subject.''"/>

			<outline text="Dahlquist elaborated on his concerns in another e-mail the following day:"/>

			<outline text="The executives involved with this project are not selecting Virginia based on its business value proposition. They are not evaluating Virginia based on the long term success of the company, its employees and the community partnership in general. They are choosing Virginia because we are the #1 best state for business and they are using our recognition as a selling tool to easily raise funding. They are also using our current political ''opportunities'' to rush this process prior to a new administration taking office."/>

			<outline text="At the time of that message, Republican Bob McDonnell had just won the state's gubernatorial election and would be sworn in in January 2010."/>

			<outline text="Jeffrey Anderson, the VEDP executive director, wrote to Governor Kaine's secretary of commerce and trade, Patrick Gottschalt, on November 19, 2009, about the political risk presented by the GreenTech proposal:"/>

			<outline text="We are concerned that the financing plan does not fit the rules of the EB-5 Program. If the rules of the EB-5 Program are not followed, the investors will not receive the visas that they thought they would receive. If all, or any significant portion, of the investors were to not ultimately receive the visas, that would give the Commonwealth [of Virginia] a black eye, in the view of other companies or investors looking for possible business connections with the Commonwealth. Knowing that the Governor of Virginia strongly supported the creation of the Regional Center would cause some to conclude that the Commonwealth knew, or should have known, that there were problems, but proceeded nonetheless. "/>

			<outline text="Anderson also warned:"/>

			<outline text="  GreenTech was unlikely to create ten jobs in two years at the plant site."/>

			<outline text="   Even if GreenTech's claims about the number of investors who were willing to offer $500,000 were true, they would still be at least $1 billion short of the capital that they needed."/>

			<outline text="  Creating a Regional Center designed to assist one specific project and one group of investors created a potential conflict of interest."/>

			<outline text="He did not mention any fears that GreenTech represented a ''visa-for-sale scheme.'' But that concern, expressed by Povar, is not based on far-fetched paranoia; sadly, the EB-5 program provides an increasingly enticing lure for unscrupulous con artists. NYU professor Ann Lee wrote in the New York Times last year that ''the program is so rife with fraud and corruption that it could actually . . . deter investment.''"/>

			<outline text="Just last month, the Securities and Exchange Commission announced charges and an asset freeze against two companies and an alleged scam artist living in Illinois; allegedly, these parties were behind a scheme to defraud 261 Chinese investors seeking profitable returns and a legal path to U.S. residency through a federal visa program. The SEC charges describe a ''green energy'' angle to that alleged scheme as well, as investors were told that they would be financing construction of the '''World's First Zero Carbon Emission Platinum LEED certified' hotel and conference center near Chicago's O'Hare Airport.''"/>

			<outline text="McAuliffe stepped down from GreenTech's board in December 2012, but didn't mention publicly that he had done so. In fact, at least seven days after GreenTech's CEO accepted McAuliffe's resignation as chairman, he continued to talk about the company with the pronoun ''we'' '-- ''we wanted to,'' ''our headquarters are here,'' ''we had meetings,'' ''I have to go where the incentives are.''"/>

			<outline text="Aside from the fear that GreenTech's EB-5 efforts were designed to help the company effectively ''sell'' U.S. residency visas, Virginia officials had good reason to be wary of the company's prospects. On October 22, 2009, Mike Lehmkuhler, the vice president of business attraction at VEDP, assessed GreenTech's hurdles in withering fashion:"/>

			<outline text="The sales forecasts suggest a completely successful start-up, despite"/>

			<outline text="  no brand recognition"/>

			<outline text="  no demonstrated vehicle performance"/>

			<outline text="  no safety and fuel economy certification from the National Highway Traffic Safety Administration"/>

			<outline text="  no emissions approval from the EPA"/>

			<outline text="  no established distribution network"/>

			<outline text="  no demonstrated automotive industry experience within the executive management team"/>

			<outline text="All of this was to manufacture a product with a quite limited market. Almost no media coverage of GreenTech mentioned the limits of the GreenTech Automotive product: ''MyCar is a Neighborhood Electric Vehicle in the U.S., but can be adapted to 45mph for use in Europe. NEVs are low-speed vehicles; and depending upon the state you live in are limited, by law, to 25-35 mph. No highway driving, please.''"/>

			<outline text="'-- Jim Geraghty writes the Campaign Spot on NRO."/>

			</outline>

		</body>

	</opml>

