diff --git a/.gitattributes b/.gitattributes index 9106b3f7cb879ab3a5ca0423652acbdbeab9d92f..aa93f99a97cbc7b738a07963993d6f2730ffdf0e 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1339,3 +1339,13 @@ keypoints/allocentric_SCPpM9i7GPU.json filter=lfs diff=lfs merge=lfs -text keypoints/allocentric_Z550DeGoTgU.json filter=lfs diff=lfs merge=lfs -text keypoints/allocentric_2lfVFusH-lA.json filter=lfs diff=lfs merge=lfs -text keypoints/allocentric_qYYTOnevfrk.json filter=lfs diff=lfs merge=lfs -text +video/TED_E6NTM793zvo.f140.m4a filter=lfs diff=lfs merge=lfs -text +video/TED_P_6vDLq64gE.f140.m4a filter=lfs diff=lfs merge=lfs -text +video/TED_1zpf8H_Dd40.f140.m4a filter=lfs diff=lfs merge=lfs -text +video/TED_4TQETLZZmcM.f140.m4a filter=lfs diff=lfs merge=lfs -text +video/TED_I5x1wQ6kHX0.f140.m4a filter=lfs diff=lfs merge=lfs -text +video/TED_cef35Fk7YD8.f140.m4a filter=lfs diff=lfs merge=lfs -text +video/TED_nvaPzA50eQA.f140.m4a filter=lfs diff=lfs merge=lfs -text +video/TED_rSQNi5sAwuc.f140.m4a filter=lfs diff=lfs merge=lfs -text +video/TED_-FOCpMAww28.f140.m4a filter=lfs diff=lfs merge=lfs -text +video/podcast_d8w9gn5yQQg.f303.webm.part filter=lfs diff=lfs merge=lfs -text diff --git a/keypoints_video/allocentric_2lfVFusH-lA.mp4 b/keypoints_video/allocentric_2lfVFusH-lA.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..e445132a7e247ad0390f82992e62f9ce5b647cb7 --- /dev/null +++ b/keypoints_video/allocentric_2lfVFusH-lA.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fa77a43e9159772dcbf44a34b6b6a2da7edd6ca7b1aaa3fea2c7ffbc3820b75 +size 2521629822 diff --git a/keypoints_video/allocentric_2vwQyeV-LQ4.mp4 b/keypoints_video/allocentric_2vwQyeV-LQ4.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..64ab9d3b149f620e01c06017532669f876d55a13 --- /dev/null +++ b/keypoints_video/allocentric_2vwQyeV-LQ4.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18aee641d7f5d45452fdd5de91984a6f9436c3320edae68b4b971c0a107cf1c2 +size 3906898500 diff --git a/keypoints_video/allocentric_MuRVOQY8KoY.mp4 b/keypoints_video/allocentric_MuRVOQY8KoY.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..8868ee0d45869dab967005f1551a9e841327d3cf --- /dev/null +++ b/keypoints_video/allocentric_MuRVOQY8KoY.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d825cd057780932575852fb93b508edb87ed50ee7f00b6cbc14d5a559c3849b +size 3265493982 diff --git a/keypoints_video/allocentric_SCPpM9i7GPU.mp4 b/keypoints_video/allocentric_SCPpM9i7GPU.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..f8ffdf5ac22ceb6e8cfa83720eb6c08f2c85ed3a --- /dev/null +++ b/keypoints_video/allocentric_SCPpM9i7GPU.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:da69e8271292ec5bc30610f83086f8e56c60a8980c56fdcce1a8644128b9b2a7 +size 1734370226 diff --git a/keypoints_video/allocentric_p0A_IRKfG-w.mp4 b/keypoints_video/allocentric_p0A_IRKfG-w.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..2c416c54e18e4a1c3cc21e68071551a9a6551bc3 --- /dev/null +++ b/keypoints_video/allocentric_p0A_IRKfG-w.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae41e76b27f02dcee1d75bcf924121737137a3247290718b393f9571030be13b +size 42256144 diff --git a/keypoints_video/allocentric_ppxK4R8XWfU.mp4 b/keypoints_video/allocentric_ppxK4R8XWfU.mp4 new file mode 100644 index 
0000000000000000000000000000000000000000..2524f0c951a7fb14b30221a1acfa8a2181cc4975 --- /dev/null +++ b/keypoints_video/allocentric_ppxK4R8XWfU.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d948ebb0e47c00b1919cc3b848999d15daccd119d05508a9a5c566a18bfcac3 +size 2752547484 diff --git a/keypoints_video/allocentric_vm9vMjOPr2k.mp4 b/keypoints_video/allocentric_vm9vMjOPr2k.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..de2adf4ca51e7e72ce4935d7efffaa9c1e560c4f --- /dev/null +++ b/keypoints_video/allocentric_vm9vMjOPr2k.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e52d574991e783622f6d21e102e3433534e2ba25dbf5de26365b3e3971839b76 +size 4957708817 diff --git a/keypoints_video/allocentric_wW7Z52plM0s.mp4 b/keypoints_video/allocentric_wW7Z52plM0s.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..2e632af2863f1f38b0cee5162b084ee2b51f5b69 --- /dev/null +++ b/keypoints_video/allocentric_wW7Z52plM0s.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d64e06b2aacd697cec99cc6c5ef1438e6805a8c599af4b9944afc58bed2bbfdc +size 195634511 diff --git a/keypoints_video/allocentric_xF4GkHLiHJQ.mp4 b/keypoints_video/allocentric_xF4GkHLiHJQ.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..7e78dcb1f78bb6f50bb6b1fc81826255008a1212 --- /dev/null +++ b/keypoints_video/allocentric_xF4GkHLiHJQ.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21f38ecc3014caceeece05c17d3c4dc119fe98e9ee3769836c0cfa48ea2cea4e +size 2707666751 diff --git a/keypoints_video/makeup_OFxVtlKAu7Y.mp4 b/keypoints_video/makeup_OFxVtlKAu7Y.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..1473ddf57db425af9832e83b37b43fe496e1a902 --- /dev/null +++ b/keypoints_video/makeup_OFxVtlKAu7Y.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2669d3557b8caddeb1511c354849e213fa25583d2faf1ce0c822efc6ada22d2d +size 17291810449 diff --git a/transcript/allocentric_SCPpM9i7GPU.txt b/transcript/allocentric_SCPpM9i7GPU.txt new file mode 100644 index 0000000000000000000000000000000000000000..d02fe33184260983ae08e8896194a95b51115796 --- /dev/null +++ b/transcript/allocentric_SCPpM9i7GPU.txt @@ -0,0 +1,1579 @@ +[0.000 --> 3.960] All right, hello there, everybody. +[3.960 --> 5.680] This is my favorite part. +[5.680 --> 7.480] I don't know if you guys get to see this. +[7.480 --> 9.800] I get to see the participant numbers, +[9.800 --> 11.920] just sort of climb and roll in. +[11.920 --> 13.880] This is where I get to imagine you +[13.880 --> 17.720] streaming into the venue and taking your seats +[17.720 --> 19.600] and speaking to your neighbors +[19.600 --> 21.600] and getting all excited about this. +[21.600 --> 25.720] But alas, I will drag myself away from my favorite part +[25.720 --> 29.560] just to say hello, everyone, and welcome. +[29.560 --> 32.880] Historically, today is National High Five Day. +[32.880 --> 35.560] But I think the pandemic has officially +[35.560 --> 38.920] replaced that with National Elbow Bump Day. +[38.920 --> 42.320] In case you didn't know, today is also McDonald's Day. +[42.320 --> 47.080] Today in 1955, Ray Kroc opened the first McDonald's +[47.080 --> 49.120] in Des Plaines, Illinois. +[49.120 --> 53.280] And while they are hands down the best french fries, +[53.280 --> 55.720] our neocortex has been struggling +[55.720 --> 59.080] with whether or not we really should be eating them.
+[59.120 --> 63.040] And so welcome, everyone, to Skeptical Inquirer Presents. +[63.040 --> 66.280] This is a series of live online presentations from experts +[66.280 --> 69.640] who are devoted to advancing science over pseudoscience, +[69.640 --> 71.880] media literacy over conspiracy theories, +[71.880 --> 74.600] and critical thinking over magical thinking. +[74.600 --> 78.440] My name is Leighann Lord and I'm delighted to be your host. +[78.440 --> 80.760] I am a stand-up comedian and author, +[80.760 --> 85.440] and I'm also a co-host for the Point of Inquiry podcast. +[85.440 --> 86.880] If you are so inclined, +[86.880 --> 89.400] you can also find out more about me and my work +[89.400 --> 92.200] at veryfunnylady.com. +[92.200 --> 95.640] Now, before we get going, I have a few reminders. +[95.640 --> 100.440] The Center for Inquiry's Coronavirus Resource Center +[100.440 --> 104.760] continues doing the work of fact checking misinformation +[104.760 --> 107.120] and sharing reliable news at +[107.120 --> 110.080] CenterforInquiry.org slash coronavirus. +[110.080 --> 112.480] And some of the articles that they've curated this week +[112.480 --> 116.200] include a piece from Wired about a new type of Bill Gates +[116.200 --> 119.760] conspiracy theory that is going viral on Facebook. +[119.760 --> 121.720] I don't know where folks find the time. +[121.720 --> 122.840] I really don't. +[122.840 --> 125.480] And an article from the Medical Care Blog +[125.480 --> 130.320] about how COVID-19 scams are spreading just as fast as COVID-19, +[130.320 --> 135.560] so far to the tune of $397 million. +[135.560 --> 138.240] That's a lot of french fries. +[138.240 --> 141.200] And as always, I invite you to subscribe +[141.200 --> 143.240] to Skeptical Inquirer magazine. +[143.240 --> 145.120] There are two ways to do that. +[145.120 --> 147.000] Both digital and print. +[147.000 --> 149.840] And the bonus: the print subscription also gives you access +[149.840 --> 151.520] to the digital version. +[151.520 --> 155.960] So you can get either or both at skepticalinquirer.org. +[155.960 --> 159.120] And by all means, please, please, please mark your calendar +[159.120 --> 161.920] for our next Skeptical Inquirer Presents, +[161.920 --> 165.120] which is on Thursday, April 29th. +[165.120 --> 169.000] Look, this is the one you don't want to miss, okay? +[169.000 --> 171.360] Given the presentations that we've had here in the past +[171.360 --> 174.120] and the questions that have proliferated on this topic, +[174.120 --> 177.600] you will want to be here to hear Mick West talking +[177.600 --> 180.440] about Escaping the Rabbit Hole, +[180.440 --> 184.160] how to help your conspiracy theorist friend. +[184.160 --> 186.120] Yes, this is the one, everybody. +[186.120 --> 187.440] There's hope. +[187.440 --> 190.280] Now, if you're new here, here's the deal. +[190.280 --> 192.480] The flow of the evening is easy breezy. +[192.480 --> 194.560] You keep doing whatever you're doing. +[194.560 --> 197.920] I will introduce our guest, they'll razzle and dazzle, +[197.920 --> 201.080] after which we will open it up for your questions. +[201.080 --> 203.600] And at the bottom of your screen, you know the deal. +[203.600 --> 207.080] You'll see this little Q&A button, that icon there. +[207.080 --> 209.560] That's the place for you to type your questions +[209.560 --> 213.080] in the form of a question, everybody. +[213.080 --> 215.320] Don't need the CV for that.
+[215.320 --> 217.920] And if you miss any of the talk tonight, +[217.920 --> 220.680] it is being recorded and will be available +[220.680 --> 223.120] on skepticalinquirer.org. +[223.120 --> 224.120] All right, everybody. +[224.120 --> 226.440] So now off we go. +[226.440 --> 231.200] I had the pleasure of meeting tonight's guest in 2019 at CSICon. +[231.200 --> 234.120] And I can honestly say that my neocortex +[234.120 --> 235.920] has not been the same since. +[235.920 --> 239.880] Our guest was elected to the National Academy of Engineering +[239.880 --> 240.920] in the early aughts. +[240.920 --> 244.760] He directed the Redwood Neuroscience Institute, +[244.760 --> 246.440] now located at UC Berkeley. +[246.440 --> 249.560] He is a scientist and co-founder at Numenta, +[249.560 --> 253.040] a research company focused on neocortical theory. +[253.040 --> 257.120] Simply put, it's where neuroscience meets machine intelligence. +[257.120 --> 260.080] He wrote the book On Intelligence. +[260.080 --> 263.040] And A Thousand Brains: A New Theory of Intelligence, +[263.040 --> 265.480] which is the subject of tonight's talk. +[265.480 --> 270.440] He is widely known for founding Palm Computing and Handspring +[270.440 --> 272.400] and is basically credited with starting +[272.400 --> 275.080] the entire handheld computing industry. +[275.080 --> 278.160] And I'd like to think I helped with that +[278.160 --> 283.160] since I owned and loved the Palm III, Palm V, and the Palm m515, +[284.880 --> 288.760] doing my part for retail, consumerism, and science. +[288.760 --> 290.280] You're welcome. +[290.280 --> 294.040] So with us tonight to talk about how the brain learns +[294.040 --> 296.200] and why it sometimes gets it wrong, +[296.200 --> 298.600] please welcome Jeff Hawkins. +[298.600 --> 300.640] Jeff, you have the conn. +[301.760 --> 302.600] Thank you, Leighann. +[302.600 --> 304.680] And that was a very, very kind introduction. +[304.680 --> 307.840] Yeah, we met, as you said, about a year and a half ago +[307.840 --> 311.440] at the CSICon conference in Las Vegas. +[311.440 --> 316.120] And the talk I'm gonna give today is similar +[316.120 --> 317.360] to the one I gave then. +[317.360 --> 318.880] It's been slightly modified. +[318.880 --> 320.760] But I want to tell a story about that conference +[320.760 --> 325.760] because I spoke early in the morning on one of the days. +[325.960 --> 329.600] And Richard Dawkins spoke at the end of the day. +[329.600 --> 334.360] And he gave a beautiful and elegant talk as he always does. +[334.360 --> 335.880] And so I approached him afterwards. +[335.880 --> 339.240] I had met Richard a few times, but I didn't really know him too well. +[339.240 --> 340.520] And I asked him, I said, +[340.520 --> 342.040] well, Richard, did you hear my talk in the morning? +[342.040 --> 342.880] He goes, yeah, I did. +[342.880 --> 344.760] I said, oh, I said, well, what do you think? +[344.760 --> 345.760] I'm kind of nervous, you know, he says, +[345.760 --> 347.200] oh, that was pretty good. +[347.200 --> 350.440] And then I went bolder and I said, +[350.440 --> 352.280] well, you know, I'm writing this book. +[352.280 --> 354.680] And it's not done yet, but would you be +[354.680 --> 356.040] so kind as to read an early draft? +[356.040 --> 357.040] Would you be interested in that? +[357.040 --> 358.920] And he said, yeah, I'll do that. +[358.920 --> 362.480] So a month or so later I sent him a draft of this book +[362.480 --> 363.720] I was writing.
+[363.720 --> 366.280] And a few weeks later, I wrote him to ask, +[366.280 --> 367.480] Richard, did you start reading it? +[367.480 --> 368.320] And what do you think? +[368.320 --> 369.160] He said, I think it's pretty good. +[369.160 --> 370.640] I'm about halfway through. +[370.640 --> 373.160] And so I said, okay, well, I said, +[373.160 --> 375.960] would you consider writing a foreword for the book? +[375.960 --> 378.160] And this is all in an email exchange. +[378.160 --> 380.080] And he says, yeah, I'll consider it. +[380.080 --> 381.400] So a few weeks later, I wrote him again, +[381.400 --> 382.840] I said, Richard, well, what do you think? +[382.840 --> 383.680] He said, oh, I've written it. +[383.680 --> 385.080] Here it is. +[385.080 --> 388.680] And that book just came out last month, +[388.680 --> 391.000] called A Thousand Brains: A New Theory of Intelligence. +[391.000 --> 392.720] And the talk I'm going to give today +[392.720 --> 395.960] is about part of that book. +[395.960 --> 399.000] And Richard Dawkins wrote a very, very generous foreword +[399.000 --> 399.920] to the book. +[399.920 --> 402.720] So that's a little introduction, tying this back to +[402.720 --> 404.520] the last time that Leighann and I met. +[404.520 --> 409.000] And also to when we were at the last real in-person conference. +[409.000 --> 412.280] Okay, so I'm going to talk about brains today. +[412.280 --> 415.840] And it's a pretty deep topic, +[415.840 --> 418.600] but I'm going to make it as accessible as I can. +[419.560 --> 421.360] It's going to require showing some images. +[421.360 --> 423.160] So we're going to do a presentation on this. +[423.160 --> 426.520] And hopefully everyone can follow along. +[426.520 --> 430.080] And when we get to the end, we'll look forward to doing Q&A. +[430.080 --> 432.240] So I'm going to now share my screen. +[432.360 --> 434.080] We're going to get started with this. +[434.080 --> 436.960] And hopefully this is all going to work as it's supposed to. +[438.840 --> 441.600] Like that, and like that, and like that. +[441.600 --> 444.600] And I assume everyone can see that. +[444.600 --> 448.320] So as we said, that's the title of my talk. +[448.320 --> 451.160] And as you said, I work for this company called Numenta, +[451.160 --> 454.320] which does sort of neuroscience research. +[454.320 --> 456.600] I run a research lab and we also do machine learning +[456.600 --> 458.600] and AI work related to that. +[458.600 --> 460.840] And I mentioned the new book, which is here, +[460.840 --> 463.240] A Thousand Brains: A New Theory of Intelligence. +[463.240 --> 465.240] And I shamelessly plug it right now +[465.240 --> 467.760] and once again at the very end of the talk. +[467.760 --> 469.520] So let's just jump right into it. +[469.520 --> 470.640] Hopefully you recognize this. +[470.640 --> 472.440] Everyone has one. +[472.440 --> 474.720] At least everyone here has one. +[474.720 --> 477.680] It's a picture or drawing of a human brain. +[477.680 --> 482.040] And we can roughly divide that into two parts. +[482.040 --> 484.600] The one part, called the neocortex, +[484.600 --> 486.400] is a big sheet of cells. +[486.400 --> 490.160] It's about the size of a large dinner napkin. +[490.160 --> 492.560] And it's about two and a half millimeters thick, +[492.560 --> 494.560] maybe twice as thick as a dinner napkin. +[494.560 --> 497.960] And it wraps around the rest of the brain and fills our skull.
+[497.960 --> 500.600] It's about 70% of the volume of our brain. +[500.600 --> 502.160] And those little ridges and valleys +[502.160 --> 505.720] are just from stuffing this sheet of cells into your skull. +[505.720 --> 507.560] There's a lot of other parts of the brain. +[507.560 --> 509.280] Dozens of other regions. +[509.280 --> 511.760] And we can just roughly call them older brain areas +[511.760 --> 515.160] because they're mostly evolutionarily older. +[515.160 --> 516.600] And they do lots of special things. +[516.600 --> 518.320] And they occupy about 30% of the brain. +[518.320 --> 519.520] And you can't see most of them. +[519.520 --> 521.820] They're stuffed up inside, and the neocortex +[521.820 --> 523.720] wraps around them. +[523.720 --> 525.720] If we want to ask, what do these older brain areas do? +[525.720 --> 527.720] Well, they take care of a lot of bodily functions +[527.720 --> 530.400] such as breathing, digestion, reflex behaviors. +[530.400 --> 533.840] Even things you might think you learn to do, +[533.840 --> 536.000] you don't really: walking and running and chewing. +[536.000 --> 538.880] These are things that your genes +[538.880 --> 539.960] know how to do. +[539.960 --> 541.840] We're just born prematurely. +[541.840 --> 543.520] And so we don't walk right away. +[543.520 --> 545.880] Also in the older brain areas are things like our emotions. +[545.880 --> 549.560] If you get angry or sad or someone becomes violent, +[549.560 --> 551.880] that's part of these older brain areas. +[551.880 --> 553.440] So we can roughly divide it like this. +[553.440 --> 554.840] The neocortex, on the other hand, +[554.840 --> 557.040] is everything we think of as intelligence. +[557.040 --> 559.920] So anything you're conscious of, your conscious perceptions. +[559.920 --> 562.200] When you look at something, or you're looking at the +[562.200 --> 565.200] computer right now or looking around the room, +[565.200 --> 567.560] you're hearing things, you're touching things, +[567.560 --> 569.560] that's your neocortex doing that. +[569.560 --> 571.400] It's also responsible for all language. +[571.400 --> 573.760] And not just spoken language, like I'm doing right now, +[573.760 --> 575.000] but written language. +[575.000 --> 578.400] Also the language of mathematics, sign language, +[578.400 --> 581.520] the language of music, the neocortex creates it +[581.520 --> 582.920] and understands it. +[582.920 --> 585.520] So right now there are cells in my head that are spiking on +[585.520 --> 587.720] and off, creating the movements of my lips +[587.720 --> 590.080] and my voice box, which are creating my language. +[590.080 --> 592.280] All things we might think of as cognition or thought +[592.280 --> 594.240] or planning happen in the neocortex. +[594.240 --> 596.480] So all the accomplishments that humans have made over the +[596.480 --> 598.520] years, from engineering, math, science, literature, +[598.520 --> 600.640] agriculture, you name it. +[600.640 --> 602.480] That's a product of your neocortex. +[602.480 --> 605.000] So the neocortex is a pretty amazing organ +[605.000 --> 608.640] and all mammals have one, but in humans it's particularly large +[608.640 --> 611.680] relative to our body size and we are particularly smart. +[611.680 --> 614.800] So there's no question about why; it's due to the +[614.800 --> 616.240] neocortex.
+[616.240 --> 619.080] Now the neocortex, interestingly, +[619.080 --> 621.520] although it generates a lot of our behaviors, +[621.520 --> 624.040] like my speech right now and all the things we do, +[624.040 --> 625.920] day to day, none of the cells in the +[625.920 --> 628.080] neocortex directly control any muscles. +[628.080 --> 630.440] So the neocortex can't make any muscles move +[630.440 --> 631.280] directly. +[631.360 --> 633.160] None of these cells project to muscles. +[633.160 --> 634.280] The cells in the neocortex +[634.280 --> 636.000] project to other areas of the brain, +[636.000 --> 638.200] which then can make movements. +[638.200 --> 641.320] And so it's not really in control all the time. +[641.320 --> 642.360] So if you think about it, like, +[642.360 --> 643.840] take something as simple as breathing. +[643.840 --> 645.400] Well, you don't need to think about breathing. +[645.400 --> 646.920] We breathe whether we're awake or asleep, +[646.920 --> 648.080] calm or not, you breathe. +[648.080 --> 649.560] I'm not thinking about breathing right now. +[649.560 --> 650.720] I'm not talking about it. +[652.440 --> 653.840] But we could, if I said, okay, +[653.840 --> 656.760] we're gonna take two deep breaths and we're gonna hold our breath. +[656.760 --> 658.120] Well, we could all do that, and that's the +[658.120 --> 660.000] neocortex controlling that. +[660.000 --> 661.920] But after a while, the old brain says, +[661.920 --> 663.840] you know what, I'm gonna need some oxygen +[663.840 --> 665.800] and we're just gonna go for it. +[665.800 --> 667.760] And we're gonna breathe and you can't stop that. +[667.760 --> 669.680] The same thing, you know, if you, +[669.680 --> 671.120] you might leave your house in the morning saying, +[671.120 --> 673.160] I'm gonna eat only healthy food today. +[673.160 --> 675.800] And so you get to the break room and there's some old doughnuts +[675.800 --> 677.120] and you're saying, ah, I shouldn't eat that. +[677.120 --> 679.200] But then your old brain smells it and looks at it and you do. +[679.200 --> 681.520] Anyway, that's because of these +[681.520 --> 683.800] emotions and drives that the neocortex +[683.800 --> 686.280] is not in control of all the time. +[686.280 --> 689.360] So that's, that's a big part of who we are. +[689.360 --> 692.600] And why we don't always do good things. +[692.600 --> 694.520] Now, if we ask ourselves, +[694.520 --> 696.160] what does the neocortex do? +[697.240 --> 698.680] You know, I should stop for a second here. +[698.680 --> 700.520] I'm hoping you're finding this interesting. +[700.520 --> 701.960] I think everyone should want to know +[701.960 --> 702.960] what their brain does. +[702.960 --> 704.600] I mean, we are our brain, right? +[704.600 --> 707.040] So, and this is what you are. +[707.040 --> 708.880] So I think, you know, always, +[708.880 --> 710.640] I hope everyone's interested in this +[710.640 --> 712.360] because it's important to know who we are. +[712.360 --> 713.960] I think it's even critical. +[713.960 --> 714.880] Critical, actually. +[714.880 --> 716.960] Okay, so what does the neocortex do? +[716.960 --> 719.040] You might think it's like a computer and say, +[719.040 --> 721.200] oh, it gets some inputs and processes them and acts +[721.200 --> 722.040] and does something. +[722.040 --> 722.880] That's not right.
+[722.880 --> 724.040] That's not the way to think about it. +[724.040 --> 725.040] The way to think about the neocortex +[725.040 --> 727.160] is it learns a model of the world. +[727.160 --> 730.640] It literally creates a model of the world in your head. +[730.640 --> 732.880] And let's just talk about that. +[732.880 --> 735.560] Yeah, first of all, everything you know about the world +[735.560 --> 737.000] is stored in this model. +[737.000 --> 739.200] So you've learned what things feel like +[739.200 --> 740.640] and what they look like, what they sound like. +[740.640 --> 741.880] Even simple things. +[741.880 --> 743.520] You know, if you pick something up, +[743.520 --> 746.080] I often use a coffee cup, I'll use that example today. +[746.080 --> 747.400] You have to learn what its +[747.440 --> 749.240] surfaces feel like and +[749.240 --> 750.960] the sound it makes +[750.960 --> 752.960] when you put it on a table and so on. +[752.960 --> 754.120] You do this for everything, you know, +[754.120 --> 756.760] you have this complex model of the world +[756.760 --> 758.880] about what everything looks and feels and sounds like. +[758.880 --> 760.400] We also have to learn where things are located. +[760.400 --> 762.560] Our brain is not just a list of things. +[762.560 --> 764.000] We know where we keep everything. +[764.000 --> 767.040] You know, where do I keep the knives in my kitchen? +[767.040 --> 769.480] Where are the chairs in my living room? +[769.480 --> 773.160] Where are the buildings in my town? +[773.160 --> 774.000] And so on. +[774.000 --> 776.960] Everything has a location and the cortex knows this. +[777.000 --> 779.040] We also have to learn how things change +[779.040 --> 780.520] when we interact with them. +[780.520 --> 782.840] Take a simple thing like a smartphone. +[782.840 --> 785.240] Well, it's an object, you can feel it and look at it, +[785.240 --> 787.920] but when you touch it, the icons change or make sounds. +[787.920 --> 789.880] And you touch another icon and something else happens. +[789.880 --> 791.680] And you push the buttons and so on. +[791.680 --> 793.040] Something as simple as a stapler. +[793.040 --> 794.520] You know, what happens when you push it down? +[794.520 --> 796.200] How do you open it up and change the staples? +[796.200 --> 798.120] These seem like very simple things, +[798.120 --> 800.440] but these have to be stored in your head someplace. +[800.440 --> 802.160] And of course, we've learned +[802.160 --> 806.360] countless numbers of conceptual things. +[806.360 --> 810.240] So we know tens of thousands of things about the world, +[810.240 --> 811.600] but we also know things like words. +[811.600 --> 813.240] Every one of us knows 40,000 words. +[813.240 --> 818.320] And we learn concepts like democracy and fear and humility. +[818.320 --> 820.240] And these are things that are somehow stored +[820.240 --> 822.000] in our head in this model. +[822.000 --> 824.720] What's the advantage of having a model? +[824.720 --> 827.600] A model allows you to do several things. +[827.600 --> 829.760] It allows you to know your current situation. +[829.760 --> 831.480] I can open my eyes, look around and say, +[831.480 --> 832.720] oh, I know where I am. +[832.720 --> 833.480] I can recognize it. +[833.480 --> 834.480] I can see it. +[834.480 --> 835.880] Or I might be in a new place, +[835.880 --> 836.720] but I'll know what it is.
+[836.720 --> 837.560] Oh, this is a restaurant. +[837.560 --> 840.520] And I can see where the kitchen is and things like that. +[840.520 --> 843.640] But most importantly, a model lets us predict the future. +[843.640 --> 845.000] And so we can predict the consequences +[845.000 --> 846.000] that come with different actions. +[846.000 --> 848.520] Like what would happen if I were to go down this hallway? +[848.520 --> 849.640] What would happen if I turned left? +[849.640 --> 851.120] What would I see if I turned right? +[851.120 --> 853.400] And how to achieve goals? +[853.400 --> 857.280] And this way it really becomes an important part of our survival. +[857.280 --> 859.560] It allows us to say, given a model of the world, +[859.560 --> 862.520] how is it that I might achieve a particular goal, +[862.520 --> 864.880] whether it's something simple like getting a bite to eat +[864.880 --> 867.200] or something complex, like getting a promotion at work, +[867.200 --> 868.520] something like that. +[868.520 --> 871.080] Now, I found that a lot of people have trouble understanding +[871.080 --> 872.280] what it means to have a model. +[872.280 --> 873.920] Like, what do you mean, a model of the world? +[873.920 --> 875.200] We're going to talk about it a bit. +[875.200 --> 877.280] But I've added a few slides here. +[877.280 --> 878.120] I want to talk about this, +[878.120 --> 880.280] just to give you a sense of what we mean by a model. +[880.280 --> 882.480] Here's a picture of a model. +[882.480 --> 885.520] This is a model of a house that an architect might have made, +[885.520 --> 887.000] a physical model. +[887.000 --> 888.640] And the reason people +[888.640 --> 891.560] build models like this is that you can imagine +[891.560 --> 894.320] what this building would look like from different angles. +[894.320 --> 896.600] You could say, well, how far is it +[896.600 --> 897.920] from the driveway to the pool? +[897.920 --> 900.440] Or what will my views be from different directions? +[900.440 --> 903.160] And how would I plan to do things in this house? +[903.160 --> 904.640] And that's why we build a model. +[904.640 --> 906.560] But now this is a physical model. +[906.560 --> 909.720] And we clearly don't have physical models in our head. +[909.720 --> 911.880] But nowadays, a lot of models are built on a computer. +[911.880 --> 914.240] So here's another model of a house. +[914.240 --> 916.080] This is a computer model of a house. +[916.080 --> 918.800] And the same thing, you can say, what does the house look like +[918.800 --> 920.920] from different positions, how many steps +[920.920 --> 921.760] are between things. +[921.760 --> 923.440] So I could say, OK, this is one location, +[923.440 --> 925.160] where I was looking down upon the house, +[925.160 --> 927.000] or what it looks like from a different angle. +[927.000 --> 929.520] Now, how are models like this created? +[929.520 --> 931.040] And this, of course, is created in a computer. +[931.040 --> 934.720] The basic way that models like this are created in a computer +[934.720 --> 937.400] is they create reference frames for these things. +[937.400 --> 939.600] So you can think of it like the X, Y, and Z reference frames. +[939.600 --> 942.160] Hopefully you'll remember that from high school. +[942.160 --> 946.240] And you can locate things like where is the door relative +[946.240 --> 948.520] to where is the window and things like that.
+[948.520 --> 951.400] And objects in the model, like this house, +[951.400 --> 954.920] imagine there's a garage door on this model. +[954.920 --> 957.040] And it has its own model. +[957.040 --> 959.560] And when engineers create models like this, +[959.560 --> 962.960] they say, oh, there's a reference frame in green for the door, +[962.960 --> 964.760] the garage door, and it's at a location +[964.760 --> 967.880] in the reference frame for the house, which is the blue arrows. +[967.880 --> 970.280] Now, I mention this because, surprisingly, +[970.280 --> 973.360] this is very close to what's going on in your head. +[973.360 --> 976.120] And when I talk about you building models of the world +[976.120 --> 979.840] in your head, you're going to see it's something very much like this. +[979.840 --> 982.000] But you're doing it for everything, +[982.000 --> 982.720] not just your house. +[982.720 --> 985.160] You're doing it for your town, and every possession you have, +[985.160 --> 988.080] and everything you ever interact with. +[988.080 --> 990.200] So we're going to go back to the presentation here. +[990.200 --> 991.200] And we're going to say, OK, so we've +[991.200 --> 995.560] talked about what models are good for. +[995.560 --> 998.000] Now there's a surprising thing about the model in our head. +[998.000 --> 1002.320] And that is, the model in your head is actually your reality. +[1002.320 --> 1005.320] What you perceive about the world is the model. +[1005.320 --> 1007.080] You're actually not perceiving the real world. +[1007.080 --> 1008.240] You're perceiving the model. +[1008.240 --> 1011.040] I know this sounds weird, but it's true. +[1011.040 --> 1014.160] And I'll just walk you through it a little bit. +[1014.160 --> 1017.600] You can think of your brain as being in this box, your skull. +[1017.600 --> 1019.520] And the inputs are coming from your senses, +[1019.520 --> 1021.680] like your eyes and your ears and your skin. +[1021.680 --> 1024.640] But how they enter the skull is these little fibers, +[1024.640 --> 1029.280] these nerve fibers, called axons, they're the nerves. +[1029.280 --> 1031.480] And they're just like a little wire. +[1031.480 --> 1033.240] And they have these spikes coming down the wires +[1033.240 --> 1034.440] that are called action potentials. +[1034.440 --> 1037.080] You might have heard of spikes or action potentials. +[1037.080 --> 1038.440] And now the interesting thing about it +[1038.440 --> 1039.960] is there's a bunch of these coming from the eye. +[1039.960 --> 1041.480] There are about a million coming +[1041.480 --> 1043.280] from the eye, about a million from your skin, +[1043.280 --> 1045.280] and tens of thousands from your ears. +[1045.280 --> 1046.640] But they're all identical. +[1046.640 --> 1048.280] The fibers coming into your head, there +[1048.280 --> 1049.120] is no difference between them. +[1049.120 --> 1051.800] You can't look at a fiber and tell this one represents light, +[1051.800 --> 1053.120] this one represents touch or something. +[1053.120 --> 1054.800] It's not like that. +[1054.800 --> 1056.160] It's just these spikes. +[1056.160 --> 1057.880] So there +[1057.880 --> 1059.560] is no light entering your head and there is no sound. +[1059.560 --> 1061.680] And your perception of what's going on in the world +[1061.680 --> 1063.640] is built up from your model.
+[1063.640 --> 1065.800] And it turns out things like color don't really exist +[1065.800 --> 1067.040] in the world. +[1067.040 --> 1068.800] I know you might think they do, but they don't. +[1068.800 --> 1070.240] There are frequencies of light. +[1070.240 --> 1070.760] And so on. +[1070.760 --> 1072.240] So we live in this model. +[1072.240 --> 1074.000] The model is tied to the world. +[1074.000 --> 1076.240] It's learned from the world. +[1076.240 --> 1078.240] But we actually perceive the model +[1078.240 --> 1081.800] and it forms the basis of all of our beliefs. +[1081.800 --> 1084.400] And that's what this talk is going to be getting to, +[1084.400 --> 1087.600] is about our belief systems, how the brain creates beliefs. +[1087.600 --> 1091.280] So clearly our model of the world +[1091.280 --> 1093.040] relates to the real world. +[1093.040 --> 1094.520] But it doesn't always get it right. +[1094.520 --> 1095.880] And we'll come back to that in a moment. +[1095.880 --> 1098.920] But everything you believe, all the things you think +[1098.920 --> 1101.160] you know for certain are really part of this model. +[1101.160 --> 1103.960] And if that model's accurate, then they're accurate. +[1103.960 --> 1105.640] If it's not accurate, they're not accurate. +[1105.640 --> 1107.840] So this leads us to the question, how +[1107.840 --> 1110.720] is it that the cortex learns the model of the world? +[1110.720 --> 1116.200] This gets to the crux of what my team does in our research. +[1116.200 --> 1119.440] So I'm going to delve into some more neuroscience here. +[1119.440 --> 1120.680] I hope that's OK. +[1120.680 --> 1122.080] And I don't want to lose anyone here. +[1122.080 --> 1124.200] But I think it's really fascinating. +[1124.200 --> 1126.360] So let's go on. +[1126.360 --> 1127.360] Whoops. +[1127.360 --> 1128.360] I need to click here. +[1128.360 --> 1128.860] All right. +[1128.860 --> 1131.360] So now the next thing we can say is the neocortex, +[1131.360 --> 1136.200] although it looks very uniform from the outside, +[1136.200 --> 1139.120] it just looks uniform. I'm going to try to get this off my screen. +[1139.120 --> 1139.960] Excuse me for a second. +[1139.960 --> 1141.440] I got something on my screen. +[1144.800 --> 1146.600] It looks very uniform. +[1146.600 --> 1150.080] But it's actually divided into different functional regions. +[1150.080 --> 1151.880] And that's shown here. +[1151.880 --> 1154.180] So if you look at a human brain, there's +[1154.180 --> 1156.320] some areas in the back of your head that are visual. +[1156.320 --> 1158.520] There's areas on the side of your head that are auditory regions. +[1158.520 --> 1160.920] And the somatic area is the touch region. +[1160.920 --> 1164.160] So this is where the inputs from those different sensory +[1164.160 --> 1165.560] organs come into the brain. +[1165.560 --> 1168.080] And then there's other regions such as these +[1168.080 --> 1170.680] on the side here, which are responsible for creating language. +[1170.680 --> 1172.880] These are responsible for +[1172.880 --> 1175.240] creating my speech right now and your listening. +[1175.240 --> 1176.800] And then of course, there's a lot of other regions +[1176.800 --> 1178.280] that do more high-level things. +[1178.280 --> 1180.600] They're very difficult to characterize. +[1180.600 --> 1184.720] Now, you might think that, OK, well, the visual regions +[1184.720 --> 1185.440] are doing vision.
+[1185.440 --> 1188.080] The auditory regions are doing sound and hearing. +[1188.080 --> 1190.280] And the language regions are doing language and so on. +[1190.280 --> 1191.920] You'd think, well, they must be operating +[1191.920 --> 1193.000] on some different principle. +[1193.000 --> 1195.080] They must be different in some ways, +[1195.080 --> 1198.800] because sight and sound and language don't seem the same to us. +[1198.800 --> 1200.120] But that's not the truth. +[1200.120 --> 1202.280] And the truth is very surprising. +[1202.280 --> 1205.320] When people started looking at the neocortex, +[1205.320 --> 1207.480] about 120 years ago. +[1207.480 --> 1210.840] And these are images from a famous scientist, Ramón y Cajal. +[1210.840 --> 1213.480] He was the first person to look at the cells +[1213.480 --> 1215.600] in the neocortex under a microscope. +[1215.600 --> 1217.960] And he drew these drawings by hand. +[1217.960 --> 1218.880] It's not a photograph. +[1218.880 --> 1220.360] Those are hand drawings. +[1220.360 --> 1221.200] He did 1000 of them. +[1221.200 --> 1223.280] He won a Nobel Prize for his work. +[1223.280 --> 1225.080] And so what he's looking at here, +[1225.080 --> 1227.360] he started looking at the neocortex. +[1227.360 --> 1232.040] And these are pictures of what he saw through the microscope, +[1232.040 --> 1234.800] a slice through the two-and-a-half-millimeter thickness. +[1234.800 --> 1236.600] So this is like a slice of the dinner napkin. +[1236.600 --> 1238.640] A very small little slice. +[1238.640 --> 1240.320] And what you see on the left picture, +[1240.320 --> 1242.080] those little dots are the actual neurons. +[1242.080 --> 1244.040] Now, there are far more than you can see here. +[1244.040 --> 1246.160] This is a very small subset of them. +[1246.160 --> 1248.600] But you can see that the neurons are, +[1249.560 --> 1251.560] they have different sizes, different shapes, +[1251.560 --> 1253.640] and different packing densities. +[1253.640 --> 1256.600] And so you can imagine there's these layers going horizontally. +[1256.600 --> 1258.320] And people talk about the neocortex +[1258.320 --> 1259.960] being composed of layers. +[1259.960 --> 1261.880] The second drawing shows some of the connections +[1261.880 --> 1263.040] between the neurons. +[1263.040 --> 1265.000] And these are the axons and dendrites. +[1265.000 --> 1266.600] And you can see these are mostly vertical, +[1266.600 --> 1268.560] cutting across the two and a half +[1268.560 --> 1271.520] millimeters, and information flows up and down. +[1271.520 --> 1274.800] Now, this is 120 some years ago. +[1274.800 --> 1277.080] There have been thousands of neuroscience papers, +[1277.080 --> 1279.840] scientific papers published on the neocortex, +[1279.840 --> 1281.720] and its architecture. +[1281.720 --> 1283.440] It's an incredible amount of data +[1283.440 --> 1285.520] that has been collected over the decades. +[1285.520 --> 1287.880] And it's hard to even comprehend +[1287.880 --> 1289.480] all the information we know about it. +[1289.480 --> 1293.200] But people like myself, we try to organize this information +[1293.200 --> 1295.680] and try to figure out, okay, what are the different types of cells, +[1295.680 --> 1298.800] how are they organized, what are the connections between them. +[1298.800 --> 1300.280] Now, here's the amazing thing. +[1300.280 --> 1301.920] We make these kinds of diagrams.
+[1301.920 --> 1306.920] But for the most part, these details are the same everywhere. +[1306.920 --> 1309.960] If you look in a language area, a vision area, a touch area, +[1309.960 --> 1312.120] if you look at a cat's brain, a rat's brain, +[1312.120 --> 1314.360] a mouse's brain, they all have a neocortex, +[1314.360 --> 1317.240] you're going to see amazingly similar circuitry. +[1317.240 --> 1319.240] There are some differences. +[1319.240 --> 1320.840] Sometimes you'll see more of one cell type +[1320.840 --> 1323.160] than others, some areas a little bit thicker or less thick, and so on. +[1323.160 --> 1326.880] But it's this incredibly preserved, detailed architecture. +[1326.880 --> 1328.840] And this doesn't at first make any sense. +[1328.840 --> 1331.240] How could all the different things that we do +[1331.240 --> 1334.320] be built on the same detailed architecture? +[1334.320 --> 1336.400] Well, the first person to figure that out, +[1336.400 --> 1337.800] so that's the question we're going to answer. +[1337.800 --> 1338.960] The first person to figure it out, +[1338.960 --> 1340.880] or at least suggest a way of thinking about it, +[1340.880 --> 1343.240] was this man, Vernon Mountcastle. +[1343.240 --> 1346.160] And he was a neurophysiologist at Johns Hopkins University. +[1346.160 --> 1350.360] He wrote a famous monograph, which is a sort of a small technical book, +[1350.360 --> 1352.560] in which he made the following argument. +[1352.560 --> 1355.880] He said, all areas in the neocortex look the same +[1355.880 --> 1358.280] because they perform the same intrinsic function, +[1358.280 --> 1360.800] meaning internally they're doing the same thing. +[1360.800 --> 1363.760] He says, why one region does vision and another region does audition +[1363.760 --> 1364.880] is due to what you connect it to. +[1364.880 --> 1367.000] So he says, if you just take a bunch of neocortex +[1367.000 --> 1369.040] and connect it to the eye, you're going to get vision. +[1369.040 --> 1371.480] If you connect it to the ears, you're going to get hearing. +[1371.480 --> 1374.040] If you connect it to the skin, you get touch sensation. +[1374.040 --> 1376.840] If you connect the outputs of one section, +[1376.840 --> 1378.080] of these different regions of the cortex, +[1378.080 --> 1379.400] into other regions, those other regions +[1379.400 --> 1382.560] might do things like high-level thinking or language. +[1382.560 --> 1385.640] Which is an incredible idea. +[1385.640 --> 1387.640] And then he said, one thing further, he says, +[1387.640 --> 1390.960] well, we can actually think of the cortex +[1390.960 --> 1394.520] as divided into functional units that are the same. +[1394.520 --> 1396.120] And he proposed that the functional unit +[1396.120 --> 1399.040] is something called a cortical column, +[1399.040 --> 1402.040] which you can think of as about a millimeter across +[1402.040 --> 1403.440] and two and a half millimeters deep. +[1403.440 --> 1407.760] And so in this picture, this is a cartoon drawing. +[1407.760 --> 1410.240] You might think about what the neocortex might look like. +[1410.240 --> 1411.880] It's this sheet of neural tissue, +[1411.880 --> 1413.360] composed of all these little columns, +[1413.360 --> 1414.920] stacked side by side. +[1414.920 --> 1418.440] In a human, we would have about 150,000 of these columns +[1418.440 --> 1419.720] in our neocortex. +[1419.720 --> 1421.400] Now, these columns are very complex.
+[1421.400 --> 1423.800] Each one has 100,000 neurons, +[1423.800 --> 1426.720] about 500 million connections between neurons, +[1426.720 --> 1431.360] in each of these columns, and they're very complex entities. +[1431.360 --> 1433.760] But we have 150,000 in our brain. +[1433.760 --> 1435.960] Other animals have a different sized neocortex. +[1435.960 --> 1437.880] And they also have columns, just +[1437.880 --> 1438.960] different numbers of columns. +[1438.960 --> 1442.520] So we've got more than most, in some ways, not always, +[1442.520 --> 1443.040] but in some ways. +[1443.040 --> 1445.880] That's why we're smarter than other animals. +[1445.880 --> 1448.760] So now our question is, well, okay, +[1448.760 --> 1450.120] what does a cortical column do? +[1450.120 --> 1453.200] If we can figure out what one cortical column does, +[1453.200 --> 1456.560] then we can figure out what all of them do. +[1456.560 --> 1458.760] And so this is like a great scientific puzzle. +[1458.760 --> 1461.320] Like what kind of function could a cortical column perform +[1461.600 --> 1463.720] that can explain vision and hearing and touch +[1463.720 --> 1464.920] and all these other things? +[1465.880 --> 1468.480] And how could you just make a brain out of lots of them +[1468.480 --> 1470.080] and how do they work together? +[1470.080 --> 1470.920] That kind of thing. +[1470.920 --> 1472.000] So that's what we study. +[1472.000 --> 1474.600] And we've made a lot of progress understanding this. +[1474.600 --> 1475.800] A great deal, actually. +[1475.800 --> 1477.640] We think we have a pretty good idea of what's going on. +[1477.640 --> 1480.040] I'm gonna share it with you right now. +[1480.040 --> 1483.120] I'm gonna start with a little thought experiment. +[1483.120 --> 1487.200] And so to do that, I'm gonna stop sharing, +[1487.200 --> 1489.800] and you hopefully can see me again. +[1489.800 --> 1490.880] And this is the thought experiment +[1491.000 --> 1491.720] that actually happened. +[1491.720 --> 1493.960] This is how we had a real breakthrough here. +[1493.960 --> 1495.280] I was holding this cup. +[1495.280 --> 1497.800] This is a Numenta coffee cup. +[1497.800 --> 1499.160] I hope you can all see that. +[1499.160 --> 1501.880] And I was just idly playing with it. +[1501.880 --> 1503.640] And I had my finger on the side of the cup, +[1503.640 --> 1506.920] and I said, well, I'm touching this cup with my index finger. +[1506.920 --> 1508.920] And I said, if I move my finger up to the top of the cup, +[1508.920 --> 1510.280] I can predict what I'll feel. +[1510.280 --> 1512.520] I said, oh yeah, I'm gonna feel this rounded edge up here. +[1512.520 --> 1514.000] If I move my finger to the side of the cup, +[1514.000 --> 1517.000] I know my brain is gonna predict that I'm gonna feel this handle. +[1517.000 --> 1518.680] And if I move my finger to the bottom of the cup, +[1518.680 --> 1520.840] I know I'm gonna feel this unglazed rough area +[1520.840 --> 1522.440] at the bottom down here. +[1522.440 --> 1525.520] Now, I know I make this prediction because I can imagine it, +[1525.520 --> 1527.840] but I also know that the brain's always making predictions. +[1527.840 --> 1529.560] And if any of these predictions weren't true, +[1529.560 --> 1530.880] I would notice it. +[1530.880 --> 1532.000] But the question I asked is, +[1532.000 --> 1534.840] what does the brain need to know to make that prediction? +[1534.840 --> 1537.400] And the answer was at least partially pretty simple.
+[1537.400 --> 1540.160] Well, first of all, it needs to know I'm touching this cup, +[1540.160 --> 1541.000] right? +[1541.000 --> 1542.000] Because if I'm touching +[1542.000 --> 1543.800] something else, it will feel different. +[1543.800 --> 1545.480] But my brain also needs to know +[1545.480 --> 1548.600] where my finger is relative to the cup. +[1548.600 --> 1551.000] It needs to know the location of my finger on the cup. +[1551.000 --> 1552.680] And as I'm about to move my finger, +[1552.680 --> 1555.920] it needs to know where my finger will be after it stops moving. +[1555.920 --> 1558.160] Because otherwise, it wouldn't be able to make that prediction. +[1558.160 --> 1559.880] So it needs to know the location where it is +[1559.880 --> 1562.480] and where it will be when it's done moving as I move. +[1562.480 --> 1563.680] If I move one way, I feel one thing, +[1563.680 --> 1566.080] if I move another way, I feel something else. +[1566.080 --> 1569.280] Now, this is actually something really hard for neurons to do. +[1569.280 --> 1571.080] How could they do this? +[1571.080 --> 1572.880] Knowing where my finger is +[1572.880 --> 1574.480] has nothing to do with where the cup is relative +[1574.480 --> 1576.120] to me. It doesn't matter how the cup is oriented +[1576.120 --> 1577.400] or whether it's on its side, +[1577.400 --> 1578.880] I make the same predictions. +[1578.880 --> 1580.920] So it's really, I need to know my finger's location +[1580.920 --> 1584.000] relative to this cup, wherever the cup is in the world. +[1584.000 --> 1587.240] And that was a key insight that sort of +[1587.240 --> 1588.720] exploded the whole thing open. +[1588.720 --> 1594.080] So we'll go back to the presentation here, hopefully. +[1594.080 --> 1597.200] And oh, yeah, just like this. +[1597.200 --> 1598.400] I should be able to do this and that. +[1598.400 --> 1601.800] OK, so this ultimately led, over several years, +[1601.800 --> 1603.720] to what we call the Thousand Brains Theory of Intelligence. +[1603.720 --> 1606.880] And that's what the name of the book is, A Thousand Brains. +[1606.880 --> 1609.120] So I'm going to explain what that means. +[1609.120 --> 1611.120] So here is a picture of a cortical column. +[1611.120 --> 1612.320] I know this is getting deep. +[1612.320 --> 1613.920] I hope you can all follow this. +[1613.920 --> 1615.920] But I think it's pretty cool. +[1615.920 --> 1618.680] We'll come back up again a little bit. +[1618.680 --> 1620.000] So here's a picture of a cortical column. +[1620.000 --> 1621.480] And imagine this one is getting input +[1621.480 --> 1623.480] from the tip of my index finger touching that cup. +[1623.480 --> 1625.760] Like I just talked about. +[1625.760 --> 1627.280] And it's just one column. +[1627.280 --> 1628.840] And it's just getting input from a little part of the tip. +[1628.840 --> 1631.000] Now, there's actually two things that come into this column. +[1631.000 --> 1631.760] These are facts. +[1631.760 --> 1633.840] This is not speculation. +[1633.840 --> 1636.720] There is the actual sensation from the tip of your finger, +[1636.720 --> 1638.040] what you're feeling. +[1638.040 --> 1640.160] And then there's a movement command, which basically +[1640.160 --> 1643.360] represents how the finger or the hand is moving. +[1643.360 --> 1645.640] So the column gets to know what +[1645.640 --> 1648.640] the finger's feeling, and which way the finger is moving.
+[1648.640 --> 1653.080] Now, the internals of the column, +[1653.080 --> 1654.760] I'm going to explain this in a very simple way. +[1654.760 --> 1656.200] It's more complicated than I'm explaining it. +[1656.200 --> 1659.200] But basically, this is the right idea. +[1659.200 --> 1660.320] There are two things that are known. +[1660.320 --> 1663.360] One is the column keeps track of the location of the finger +[1663.360 --> 1664.960] in a reference frame relative to the cup, +[1664.960 --> 1666.600] just like the reference frame I talked about +[1666.600 --> 1668.640] with the house. +[1668.640 --> 1672.320] And when I move my finger, the column updates the location +[1672.320 --> 1674.560] of the finger in the reference frame of the cup. +[1674.560 --> 1676.440] And then, of course, there's a sensation coming in. +[1676.440 --> 1678.440] And that goes into another layer of cells. +[1678.440 --> 1680.320] And the blue line here represents how +[1680.320 --> 1683.120] you learn the shape or the feeling of the cup. +[1683.120 --> 1686.960] It basically pairs what you sense with the location +[1686.960 --> 1689.280] of what you sense. +[1689.280 --> 1690.360] Now, think about this. +[1690.360 --> 1693.320] I can learn an entire cup just by putting my finger on it. +[1693.320 --> 1695.600] I can put my hand in a black box and touch something. +[1695.600 --> 1697.920] And if I move my hand around it and touch it with just one finger, +[1697.920 --> 1701.680] I can learn the shape of this cup or the shape of anything. +[1701.680 --> 1704.080] And what you're doing is you're literally moving the finger +[1704.080 --> 1707.400] and you're learning what you're sensing at each location. +[1707.400 --> 1711.160] There is another layer of cells which represents the object, +[1711.160 --> 1712.440] the cup itself. +[1712.440 --> 1715.560] So the object, in this case, the coffee cup, +[1715.560 --> 1718.800] is essentially a collection of locations and sensations. +[1718.800 --> 1720.440] It's like, what are all the things I'm +[1720.440 --> 1722.800] feeling at these locations? +[1723.760 --> 1725.640] And so you can learn a model of a cup. +[1725.640 --> 1726.560] So this is pretty impressive. +[1726.560 --> 1727.960] A single column now, +[1727.960 --> 1729.840] this one little piece of the neocortex, +[1729.840 --> 1732.200] touching one little finger, can learn the entire model +[1732.200 --> 1734.400] of a coffee cup, what it feels like. +[1734.400 --> 1737.200] So the basic idea is, columns create reference +[1737.200 --> 1739.200] frames for every object they know. +[1739.200 --> 1740.760] And the reference frame is used in the same way +[1740.760 --> 1741.600] I mentioned before. +[1741.600 --> 1743.640] It specifies locations of features. +[1743.640 --> 1745.480] It can be used to predict the outcomes of movements. +[1745.480 --> 1747.720] Like, what will I feel if I move my finger? +[1747.720 --> 1750.320] It allows us to plan and achieve goals, like how do I reach? +[1750.320 --> 1752.840] If I want to reach and grab the handle, which direction do I move? +[1752.840 --> 1755.040] If I want to stick my finger into the liquid in the cup, +[1755.040 --> 1756.200] which way do I go? +[1756.200 --> 1759.920] And our modeling and simulations show that a single column +[1759.920 --> 1765.080] can learn hundreds of objects in a very sophisticated way.
+[1765.080 --> 1767.400] So the second part of the Thousand Brains Theory +[1767.400 --> 1769.400] is that there are thousands of complementary models +[1769.400 --> 1770.320] for every object. +[1770.320 --> 1772.160] If I asked, well, where is the knowledge of a coffee +[1772.160 --> 1772.920] cup in my brain? +[1772.920 --> 1774.360] It's not in one place. +[1774.360 --> 1775.960] It's in many places, in many columns. +[1775.960 --> 1778.520] It's not all the columns, but thousands of columns +[1778.520 --> 1780.800] in the cortex know what the coffee cup is like. +[1780.800 --> 1782.760] Here's a simple way of thinking about it. +[1782.760 --> 1786.400] Here is a picture of a hand touching the coffee cup. +[1786.400 --> 1789.480] And let's say we're using three fingers at a time. +[1789.480 --> 1791.480] So there are three fingers touching the cup. +[1791.480 --> 1793.560] Each finger is at a different location. +[1793.560 --> 1795.880] Each finger is feeling something different. +[1795.880 --> 1797.560] And so each column, +[1797.560 --> 1799.640] each of the three columns associated with the fingertips, +[1799.640 --> 1802.080] is going to try to learn a model of that object. +[1802.080 --> 1804.360] And they can do that. +[1804.360 --> 1808.760] But now there's something else that can happen. +[1808.760 --> 1811.160] Because they're all touching the same object, +[1811.160 --> 1813.440] in this case, they're all touching the coffee cup, +[1813.440 --> 1815.560] they should agree on what the object is. +[1815.560 --> 1818.080] They should say, yes, we all know this is a coffee cup. +[1818.080 --> 1818.760] And they do. +[1818.760 --> 1821.240] There are these long-range connections +[1821.240 --> 1824.200] in the cortex between certain layers, which we believe +[1824.200 --> 1824.920] are for voting. +[1824.920 --> 1826.520] It's the way the columns are saying, +[1826.520 --> 1827.640] well, I'm touching an edge. +[1827.640 --> 1828.640] It might be this or that. +[1828.640 --> 1830.440] And another one says, I'm touching this surface. +[1830.440 --> 1831.440] It might be this or that. +[1831.440 --> 1833.080] And another is saying, I'm touching something else. +[1833.080 --> 1833.880] I don't know what it is. +[1833.880 --> 1834.880] But they can all get together. +[1834.880 --> 1836.840] And they can say, the only thing that makes sense here +[1836.840 --> 1837.680] is a cup. +[1837.680 --> 1839.760] And they very quickly resolve this and say, +[1839.760 --> 1841.040] we're all touching a cup. +[1841.040 --> 1842.200] Now, you can think about this. +[1842.200 --> 1844.840] If I were to ask you to recognize an object by sticking your hand +[1844.840 --> 1847.120] in a black box and touching it with one finger, +[1847.120 --> 1848.560] well, then you'd have to move your finger +[1848.560 --> 1850.440] around it a bit to figure out what it is. +[1850.440 --> 1852.280] But if you grab it with your hand all at once, +[1852.280 --> 1854.800] often you can recognize it with a single grasp. +[1854.800 --> 1856.360] That's because the columns are voting, +[1856.360 --> 1857.880] and they don't need to move, or at least, +[1857.880 --> 1860.000] they need to move less. +[1860.000 --> 1862.520] The same thing is happening with vision. +[1862.520 --> 1865.280] You may think vision feels like a different type of sensation, +[1865.280 --> 1867.280] but it's really not.
+[1867.280 --> 1869.800] In this case, you could
+[1869.800 --> 1872.600] think about the back of the retina as an array of sensors,
+[1872.600 --> 1874.520] just like the skin is an array of sensors.
+[1874.520 --> 1876.320] They happen to move together, but that's
+[1876.320 --> 1878.120] not that important at this time.
+[1878.120 --> 1880.760] And what happens is that each of those little patches
+[1880.760 --> 1883.080] of your retina projects to a cortical column
+[1883.080 --> 1885.400] in your cortex.
+[1885.400 --> 1887.040] So when you look at something, you're
+[1887.040 --> 1888.880] not really looking at a picture of it.
+[1888.880 --> 1890.800] You're actually looking at lots of little pieces,
+[1890.800 --> 1893.080] and each piece in your brain is keeping track of where it is
+[1893.080 --> 1895.240] on the object you're looking at.
+[1895.240 --> 1896.600] And when you look at something, you say,
+[1896.600 --> 1898.280] oh, that's a cat or that's a dog.
+[1898.280 --> 1900.120] All those columns vote together.
+[1900.120 --> 1902.160] If you looked at the world through a straw,
+[1902.160 --> 1903.640] so imagine you had a little skinny straw,
+[1903.640 --> 1905.400] and you could only look at the world through a straw,
+[1905.400 --> 1907.120] well, then you wouldn't be able to see much at once.
+[1907.120 --> 1908.800] You'd only be activating a few columns,
+[1908.800 --> 1910.200] and you'd have to move the straw around,
+[1910.200 --> 1911.960] just like you have to move your finger around.
+[1911.960 --> 1915.480] So it's very analogous to what's going on with touch.
+[1915.480 --> 1919.720] OK, so the interesting thing about this is,
+[1919.720 --> 1921.520] what do we perceive?
+[1921.520 --> 1923.880] It turns out that the only part of this system
+[1923.880 --> 1926.560] that we can perceive is the sort of voting,
+[1926.560 --> 1928.720] the agreement on the object, in some sense.
+[1928.720 --> 1931.160] So when you look out at the world, it seems stable.
+[1931.160 --> 1933.480] If I'm looking at a coffee cup, or I'm looking at a person,
+[1933.480 --> 1935.840] or I'm looking at a refrigerator, or whatever,
+[1935.840 --> 1937.760] my eyes are constantly moving.
+[1937.760 --> 1939.000] They're moving about three times a second.
+[1939.000 --> 1940.760] Everyone's eyes are moving about three times a second.
+[1940.760 --> 1942.640] They're jumping this way and that; they're called saccades.
+[1942.640 --> 1944.360] You're not aware of it at all.
+[1944.360 --> 1945.760] The world seems stable.
+[1945.760 --> 1947.840] You don't think that the inputs are changing.
+[1947.840 --> 1949.560] What's going on is the inputs are changing
+[1949.560 --> 1950.400] in your brain.
+[1950.400 --> 1951.760] These columns are all jumping around
+[1951.760 --> 1953.680] and looking at different locations, different sensed features.
+[1953.680 --> 1955.480] And yet, they're voting on the same thing.
+[1955.480 --> 1956.400] It's still the refrigerator.
+[1956.400 --> 1957.640] It's still your friend.
+[1957.640 --> 1960.120] And that is the only part of the brain we can perceive.
+[1960.120 --> 1962.040] We can only perceive the operation of the voting layer.
+[1962.040 --> 1964.640] And so we're not aware of most of what is going on in our brain.
+[1964.640 --> 1967.720] We're not able to perceive all these crazy things
+[1967.720 --> 1968.640] that are happening underneath.
+[1968.640 --> 1971.040] We're just perceiving the consensus vote
+[1971.040 --> 1972.600] of what's going on out there.
+[1972.600 --> 1974.320] And this is important because this is why we
+[1974.320 --> 1976.400] have a singular perception of the world.
+[1976.400 --> 1979.200] This is why we don't feel like we have thousands of models
+[1979.200 --> 1981.440] operating independently, voting, trying to figure out
+[1981.440 --> 1986.400] what's going on; it's because we only perceive what they agree on.
+[1986.400 --> 1988.920] And this is something that some people may have heard
+[1988.920 --> 1990.160] of, called the binding problem.
+[1990.160 --> 1992.800] Like, how does the brain bind all our sensations together?
+[1992.800 --> 1994.240] It doesn't seem possible.
+[1994.240 --> 1997.160] And the answer to it is that the columns vote
+[1997.160 --> 2000.440] and we're only able to perceive the voting.
+[2000.440 --> 2002.280] So that's a pretty nice thing about that.
+[2002.280 --> 2006.000] OK, so let's talk about knowledge in general.
+[2006.000 --> 2008.080] And I'm going to claim here that all knowledge is stored
+[2008.080 --> 2008.960] in reference frames.
+[2008.960 --> 2012.160] So again, this is the idea that if the entire neocortex
+[2012.160 --> 2013.600] is working on the same principles,
+[2013.600 --> 2016.160] then everything we know must be stored this way.
+[2016.160 --> 2019.640] And so it's like everything is stored in reference frames.
+[2019.640 --> 2023.600] And now that's a sort of deduction, but we believe it's true.
+[2023.600 --> 2025.880] So now we can think about a column as a generic column.
+[2025.880 --> 2027.800] Let's say it's someplace else in the cortex, doing math
+[2027.800 --> 2030.120] or thinking about history or something like that.
+[2030.120 --> 2031.920] And so you can think of it as, OK, well,
+[2031.920 --> 2032.960] there's inputs to this column.
+[2032.960 --> 2035.040] These inputs may not be coming from your senses.
+[2035.040 --> 2036.440] They're coming from other parts of the brain.
+[2036.440 --> 2038.600] They may be objects you recognize or other things
+[2038.600 --> 2040.680] you've already perceived.
+[2040.680 --> 2042.120] But those are the inputs to this column
+[2042.120 --> 2043.440] in this other part of the brain.
+[2043.440 --> 2046.560] And the movements, in this case, are not physical movements
+[2046.560 --> 2047.920] of the body necessarily.
+[2047.920 --> 2050.480] It's sort of the same idea.
+[2050.480 --> 2052.120] It's like mentally moving through space.
+[2052.120 --> 2054.480] It's like you can imagine mentally moving through your house
+[2054.480 --> 2054.960] right now.
+[2054.960 --> 2057.200] I can say, well, go in the front door and look right.
+[2057.200 --> 2058.560] And on the other side, what do you see?
+[2058.560 --> 2061.160] So what we're doing is, when we're accessing knowledge
+[2061.160 --> 2063.320] in our life, we're mentally moving through reference
+[2063.320 --> 2065.560] frames, recalling the facts that are stored
+[2065.560 --> 2066.720] at different locations.
+[2066.720 --> 2067.880] And you're not aware of this.
+[2067.880 --> 2070.280] You're not thinking, oh, my knowledge is stored in reference frames.
+[2070.280 --> 2071.640] No, you're not aware of that at all.
+[2071.640 --> 2072.480] It's just some ideas.
+[2072.480 --> 2074.080] These concepts pop into your head.
+[2074.080 --> 2076.480] But what's going on underneath is that you've
+[2076.480 --> 2078.720] stored all the knowledge of the world
+[2078.720 --> 2082.720] in these reference frames, and you're moving around and retrieving it.
+[2082.720 --> 2086.240] This is important because it makes knowledge actionable.
+[2086.240 --> 2089.560] Our knowledge of the world, again, it's not like a list of facts.
+[2089.560 --> 2091.680] It's stuff we can think and reason about.
+[2091.680 --> 2096.960] We can say, oh, let's think about evolution.
+[2096.960 --> 2097.960] What do I know about it?
+[2097.960 --> 2099.000] How does it behave?
+[2099.000 --> 2101.600] What would happen if I changed the way
+[2101.600 --> 2104.600] this gene works, and so on. Knowledge becomes actionable
+[2104.600 --> 2106.440] because we've arranged it in reference frames.
+[2106.440 --> 2108.560] Just like the knowledge about a coffee cup
+[2108.560 --> 2112.320] is actionable because the brain put it in a reference frame.
+[2112.320 --> 2115.360] And now, thinking is what occurs when your brain moves from
+[2115.360 --> 2117.760] location to location in reference frames.
+[2117.760 --> 2120.560] As we think during the day, these ideas sort of pop into our heads
+[2120.560 --> 2123.240] all day long, just constantly, while we're awake.
+[2123.240 --> 2126.280] And what's going on there is literally the cells in your
+[2126.280 --> 2128.760] cortex are accessing different locations in these reference
+[2128.760 --> 2130.360] frames, moving from one location to the other.
+[2130.360 --> 2133.000] And when it moves to a location in the reference frame, it recalls some fact,
+[2133.000 --> 2136.400] and a fact pops into your head.
+[2136.400 --> 2139.400] And that's what we do when we think.
+[2139.400 --> 2141.400] Now, here's an interesting thing.
+[2141.400 --> 2146.000] You can take the same facts and arrange them in different types
+[2146.000 --> 2147.120] of reference frames.
+[2147.120 --> 2150.400] And this can lead to different beliefs about those facts.
+[2150.400 --> 2153.280] So I give a simple example in the book.
+[2153.400 --> 2156.320] You can imagine I take a bunch of historical facts,
+[2156.320 --> 2157.760] things that happened.
+[2157.760 --> 2160.240] And I can say, OK, let's arrange them on a reference
+[2160.240 --> 2162.680] frame that looks like a timeline.
+[2162.680 --> 2163.800] That's a type of reference frame.
+[2163.800 --> 2165.760] It's a one-dimensional reference frame.
+[2165.760 --> 2168.360] And if I do that, I can see that two facts next to each other
+[2168.360 --> 2170.160] might be causally related in time.
+[2170.160 --> 2171.880] Like, oh, this one's a consequence of that one.
+[2171.880 --> 2173.680] They may be related.
+[2173.680 --> 2176.280] I can take the same set of facts and arrange them
+[2176.280 --> 2179.120] in a different reference frame, think of something like a map.
+[2179.120 --> 2181.240] And I put these events on a map.
+[2181.240 --> 2183.640] And now I can say, oh, these two facts
+[2183.640 --> 2185.920] happened right next to each other in space.
+[2185.920 --> 2187.240] Maybe they're causally related because they're
+[2187.240 --> 2187.840] next to each other.
+[2187.840 --> 2189.640] Or these events occurred next to mountains.
+[2189.640 --> 2192.080] And maybe they occurred because they're next to mountains.
+[2192.080 --> 2195.200] The point is, you can take the same set of facts
+[2195.200 --> 2197.480] and arrange them in different reference frames.
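A toy illustration, not part of the talk: arranging the same invented facts along a timeline versus on a map changes which facts end up next to each other, and therefore which relationships suggest themselves. The facts, years, and coordinates below are made up.

    # The same facts "arranged" in two reference frames -- a timeline and a map --
    # give different neighbors, and hence hint at different relationships.
    facts = [
        {"name": "flood",    "year": 1820, "place": (2, 1)},
        {"name": "famine",   "year": 1821, "place": (9, 7)},
        {"name": "uprising", "year": 1845, "place": (2, 2)},
    ]

    by_time  = sorted(facts, key=lambda f: f["year"])    # timeline reference frame
    by_place = sorted(facts, key=lambda f: f["place"])   # map-like reference frame

    print([f["name"] for f in by_time])   # ['flood', 'famine', 'uprising']  (famine follows flood)
    print([f["name"] for f in by_place])  # ['flood', 'uprising', 'famine']  (uprising is near the flood)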
+[2197.480 --> 2199.160] We can all agree on the facts.
+[2199.160 --> 2201.800] But two people might view those facts differently.
+[2201.800 --> 2203.800] Now, it turns out in the brain, these reference
+[2203.800 --> 2205.440] frames are not set in stone.
+[2205.440 --> 2207.120] You don't choose them; they're discovered.
+[2207.120 --> 2208.400] They're part of the learning process.
+[2208.400 --> 2209.640] You're not aware you're doing this.
+[2209.640 --> 2210.880] But they're part of the learning process.
+[2210.880 --> 2213.280] So two people can take the same set of data,
+[2213.280 --> 2215.480] the same facts and information,
+[2215.480 --> 2218.360] and arrange them in different ways, in different reference
+[2218.360 --> 2219.360] frames.
+[2219.360 --> 2220.360] And they will make different predictions.
+[2220.360 --> 2223.640] And they'll have different beliefs about those things.
+[2223.640 --> 2226.240] That can be very good at times, because it can mean two people
+[2226.240 --> 2228.040] can look at the same things in different ways.
+[2228.040 --> 2229.280] And they can help each other.
+[2229.280 --> 2231.360] But it can also be a problem.
+[2231.360 --> 2232.760] Sometimes you can arrange things
+[2232.760 --> 2236.120] in a way that's not useful.
+[2236.120 --> 2239.360] Now, as I said a moment ago, some of your columns
+[2239.360 --> 2241.920] get direct input from the senses.
+[2241.920 --> 2243.720] And these columns, I've highlighted here,
+[2243.720 --> 2246.600] get input from the skin and the eyes and the ears.
+[2246.600 --> 2250.800] When they build models, it's hard for those models to be wrong.
+[2250.800 --> 2254.000] So for example, if I touch a coffee cup and you touch a coffee
+[2254.000 --> 2257.720] cup, if one of us built an incorrect model of the coffee cup,
+[2257.720 --> 2260.360] we very quickly find out, because our predictions don't
+[2260.360 --> 2261.440] work out right.
+[2261.440 --> 2264.120] If I think the coffee cup is round and you think it's square,
+[2264.120 --> 2265.640] well, one of us is going to be wrong.
+[2265.640 --> 2267.520] And as soon as you start touching the cup,
+[2267.520 --> 2269.880] one of us will find out that, hey, it's not round, or it isn't
+[2269.880 --> 2271.240] square.
+[2271.240 --> 2274.560] So these columns can verify their predictions all the time.
+[2274.560 --> 2276.540] So two people, no matter where they live in the world,
+[2276.540 --> 2278.880] will all have similar ideas of what a coffee cup is like.
+[2278.880 --> 2279.880] If we give them a coffee cup, they'll
+[2279.880 --> 2281.960] form a similar model of it, or if they're given something like
+[2281.960 --> 2284.840] a cell phone, they'll form a similar model of it.
+[2284.840 --> 2287.480] But then there are a lot of parts of the brain,
+[2287.480 --> 2289.360] of the cortex, where they're getting
+[2289.360 --> 2290.680] input from other columns.
+[2290.680 --> 2292.800] They're not directly sensing anything.
+[2292.800 --> 2294.960] In fact, much of what we learn about the world
+[2294.960 --> 2297.360] is coming through language.
+[2297.880 --> 2301.400] If I learn something through language, I can't directly verify that it's
+[2301.400 --> 2302.400] correct.
+[2302.400 --> 2305.720] So we build a model of the world that may be consistent,
+[2305.720 --> 2308.320] but it may not accurately reflect the real world.
+[2308.320 --> 2310.000] I'll give you two examples.
+[2310.000 --> 2312.520] Or, here's two. For example, I've never
+[2312.520 --> 2314.400] been to the city of Havana.
+[2314.400 --> 2315.880] But I believe it exists.
+[2315.880 --> 2316.880] Why do I believe it exists?
+[2316.880 --> 2318.480] Because I've read about it.
+[2318.480 --> 2320.080] And people have told me about it.
+[2320.080 --> 2320.840] I've never been there.
+[2320.840 --> 2323.720] I've never verified that it exists.
+[2323.720 --> 2325.640] I've never even seen Cuba.
+[2325.680 --> 2328.840] But I believe it's there because I have read things about it.
+[2328.840 --> 2330.840] I don't believe heaven exists.
+[2330.840 --> 2332.520] But some people do.
+[2332.520 --> 2334.560] And they've read about it, too.
+[2334.560 --> 2336.080] So who's right?
+[2336.080 --> 2336.960] Maybe we're both right.
+[2336.960 --> 2338.120] Maybe we're both wrong.
+[2338.120 --> 2340.040] The problem, this is an inherent problem
+[2340.040 --> 2341.920] of the way the brain works, of learning things
+[2341.920 --> 2343.800] through other people, through language.
+[2343.800 --> 2348.040] We can form very believable sets of knowledge
+[2348.040 --> 2350.600] that are consistent with each other.
+[2350.600 --> 2353.040] And yet they may be completely different and wrong.
+[2353.040 --> 2354.640] Or someone could be wrong, someone could be right.
+[2354.640 --> 2356.560] And this is an inherent problem
+[2356.560 --> 2357.800] with the way the brain is designed.
+[2357.800 --> 2359.520] Of course, the scientific method is the way
+[2359.520 --> 2363.480] we have to suss out these false beliefs.
+[2363.480 --> 2365.600] We just keep looking for more evidence
+[2365.600 --> 2367.080] that contradicts our beliefs.
+[2367.080 --> 2369.360] But if we don't do that, and if we're not exposed to it,
+[2369.360 --> 2371.680] well, that's what we're going to believe.
+[2371.680 --> 2375.960] And so those are two ways that false beliefs can arise.
+[2375.960 --> 2377.240] One is we can take information and
+[2377.240 --> 2378.600] arrange it in different reference frames,
+[2378.600 --> 2380.640] and we have different beliefs about the data.
+[2380.640 --> 2382.280] And the other is we can form beliefs
+[2382.280 --> 2385.200] based not on direct observation, but through language.
+[2385.200 --> 2387.480] And that's very useful in general,
+[2387.480 --> 2390.600] but it also leads to false beliefs.
+[2390.600 --> 2397.200] So in summary, this is the basic
+[2397.200 --> 2397.960] story here.
+[2397.960 --> 2400.040] The neocortex learns a model of the world.
+[2400.040 --> 2402.760] That's the first thing you need to know.
+[2402.760 --> 2406.120] We're beginning to understand exactly how this model works,
+[2406.120 --> 2408.440] in detail, how the neurons do this.
+[2408.440 --> 2409.600] It's a distributed model.
+[2409.600 --> 2411.080] So it's not a single model of the world.
+[2411.080 --> 2415.000] We have thousands and thousands of submodels for everything.
+[2415.000 --> 2417.000] And those models are built on reference frames
+[2417.000 --> 2418.080] in every cortical column.
+[2418.080 --> 2421.640] And that's how knowledge is structured, in reference frames.
+[2421.640 --> 2423.280] The brain's model is our reality.
+[2423.280 --> 2426.320] It's what we perceive and what we believe.
+[2426.320 --> 2430.000] And if it's wrong, what we believe is wrong.
+[2430.000 --> 2431.920] If it's right, what we believe is right.
+[2431.920 --> 2435.360] It turns out, we can't really sense the entire world. +[2435.360 --> 2437.200] We can't know what the world really is like. +[2437.200 --> 2438.960] The world is much larger than we can sense. +[2438.960 --> 2440.240] We can only sense small parts of it. +[2440.240 --> 2442.960] So in some sense, our model of the world +[2442.960 --> 2446.560] is always an approximation of the real world, +[2446.560 --> 2447.280] which is OK. +[2447.280 --> 2448.320] It's usually pretty useful. +[2448.320 --> 2450.080] But it can be wrong. +[2450.080 --> 2452.080] And so our model, and beliefs, can be wrong. +[2452.080 --> 2453.800] And then I've talked about two ways that can happen. +[2453.800 --> 2456.840] You can arrange the same facts in different reference frames, +[2456.840 --> 2459.640] which could lead to different beliefs about those facts. +[2459.640 --> 2461.840] And you can rely on facts we get through language, +[2461.840 --> 2463.800] not direct observation. +[2463.800 --> 2466.400] And that, basically, then we can build a model of the world +[2466.400 --> 2468.600] that doesn't actually reflect the real world. +[2468.600 --> 2470.720] But we can believe it anyway. +[2470.720 --> 2471.640] So that's it. +[2471.640 --> 2475.600] And here again, plug for my, I think here. +[2475.600 --> 2478.560] Do I have my ideas? +[2478.560 --> 2479.560] Oops, here we go. +[2479.560 --> 2480.800] Plug for my book. +[2480.800 --> 2483.560] I would like to say that I'm not really caring about selling books, +[2483.560 --> 2485.680] but I really care about selling these ideas. +[2485.680 --> 2487.640] I want everybody to know these stuff. +[2487.640 --> 2489.880] I think everyone should know these things. +[2489.880 --> 2492.880] In the book, I argue that we should be teaching these kind +[2492.880 --> 2495.280] of brain theory to kids in high school +[2495.280 --> 2498.440] in the same way we teach them about DNA and evolution. +[2498.440 --> 2500.400] Because I think it's important we all understand +[2500.400 --> 2503.280] how it is we form beliefs about the world. +[2503.280 --> 2505.240] The book covers a lot more than I talked about today +[2505.240 --> 2507.560] as a whole section on AI and has a whole section +[2507.560 --> 2510.760] about the future of humanity, how to think about humanity. +[2510.760 --> 2513.080] When you think about it's building a model of the world +[2513.080 --> 2516.160] and the how our knowledge is stored, how would you +[2516.160 --> 2519.920] might think about it a future that might be a little different +[2519.920 --> 2522.440] than you might have thought about in the past. +[2522.440 --> 2523.520] So I'll leave it at that. +[2523.520 --> 2524.800] And that's the end of my talk. +[2524.800 --> 2528.120] And I think we're going to do now Q&A for those +[2528.120 --> 2529.400] who'd like to do Q&A. +[2529.400 --> 2532.800] I'm going to stop sharing my talk here. +[2532.800 --> 2535.440] And we're back to looking at beautiful in the end. +[2535.440 --> 2536.680] Oh, thank you. +[2536.680 --> 2537.200] Thank you. +[2537.200 --> 2541.720] Yes, we most certainly are going to do Q&A right now. +[2541.720 --> 2543.200] But you said a couple things. +[2543.200 --> 2545.680] Thank you for sharing that, that opening story +[2545.680 --> 2546.720] about Richard Dawkins. 
+[2546.720 --> 2550.080] If we don't, one of the lessons that people should take away +[2550.080 --> 2553.360] today is that if you don't ask, you don't get, +[2553.360 --> 2556.360] you were brave enough to go and ask and get that. +[2556.360 --> 2557.120] So yeah. +[2557.120 --> 2557.960] I was nervous. +[2557.960 --> 2559.000] I was nervous. +[2559.000 --> 2560.000] Yeah. +[2560.000 --> 2561.160] You were nervous. +[2561.160 --> 2562.760] Oh my gosh, you have all people. +[2562.760 --> 2566.320] That should inspire you guys. +[2566.320 --> 2567.760] Shoot your shot. +[2567.760 --> 2571.160] If you are a Hamilton fan, I love the example +[2571.160 --> 2574.200] that you gave of Havana exists, but you've never been there +[2574.200 --> 2577.560] and how some people feel the same way about heaven. +[2577.560 --> 2578.200] I'm in the middle. +[2578.200 --> 2580.120] I feel the same way about Hogwarts, +[2580.120 --> 2583.680] though I have not been invited. +[2583.680 --> 2585.280] I want to believe Hogwarts is real. +[2585.280 --> 2585.760] I do. +[2585.760 --> 2586.800] I want you to. +[2586.800 --> 2590.000] I've been waiting for my owl now for a few years. +[2590.000 --> 2593.480] And I hope the sorting hat puts me in Ravenclaw for sure. +[2593.480 --> 2597.840] And I will say the scariest thing I heard you say today, +[2597.840 --> 2599.160] other animals have a cortex. +[2599.160 --> 2600.680] My cat has a neocortex. +[2600.680 --> 2602.440] And my cat's been telling me that this whole time +[2602.440 --> 2604.200] and I have not been believing him. +[2604.200 --> 2606.960] So you have proved my cat correct. +[2606.960 --> 2608.600] But so, Mia, thank you so much. +[2608.600 --> 2610.000] And we do have several questions. +[2610.000 --> 2612.600] But I have a few questions for you too. +[2612.600 --> 2616.840] You used to, I want to, if all the areas of the brain +[2616.840 --> 2620.440] are the same, I started to picture it like Legos +[2620.440 --> 2622.520] because Legos are the same and more or less. +[2622.520 --> 2625.160] Maybe I'm wrong there, I don't know. +[2625.160 --> 2627.760] But are they interchangeable? +[2627.760 --> 2631.040] Can they be used to repair a damaged section? +[2631.040 --> 2632.800] Yes, they can. +[2632.800 --> 2634.640] And they do. +[2634.640 --> 2636.480] So first of all, just take someone +[2636.480 --> 2640.000] who's a people who are born blind, can generally blind. +[2640.000 --> 2643.200] The part of their cortex that does vision +[2643.200 --> 2645.200] ends up doing something else. +[2645.200 --> 2647.680] It says, OK, we're going to do touch. +[2647.680 --> 2648.960] We're going to do a higher sense of touch. +[2648.960 --> 2650.560] So we're going to do some other things. +[2650.560 --> 2652.120] So that's one example. +[2652.120 --> 2655.840] Another example, if you have damaged trauma in your life, +[2655.840 --> 2659.080] let's say someone, you have some famous examples. +[2659.080 --> 2661.680] But let's say you have a stroke and part of your cortex, +[2661.680 --> 2662.920] the small part of your cortex choice. +[2662.920 --> 2667.080] Not a big part, but a smallest part. +[2667.080 --> 2670.000] Well, immediately you'll have some loss of function. +[2670.000 --> 2673.040] And it's very known that over the next four months or so, +[2673.040 --> 2675.920] you'll gain a lot of that function back. +[2675.920 --> 2678.840] And what's going on literally is that the dead section is +[2678.840 --> 2682.360] not becoming rejuvenated. 
+[2682.360 --> 2685.440] But what's happening is that the columns around it +[2685.440 --> 2688.680] are taking over and they're reassigning themselves +[2688.680 --> 2690.320] to the part that was lost. +[2690.320 --> 2692.600] And so they literally can say, well, I +[2692.600 --> 2693.880] can take over that part. +[2693.880 --> 2696.720] I can take over this and this sort of a competition going on. +[2696.720 --> 2699.000] And then finally, there was an interesting experiment +[2699.000 --> 2703.360] that someone did, a guy named Baki Vida. +[2703.360 --> 2706.960] And they took ferrets when they were in the utero +[2706.960 --> 2708.240] being before they were born. +[2708.240 --> 2714.160] And they rewired the optic nerve from the eye +[2714.160 --> 2715.480] into the part of the brain that I think +[2715.480 --> 2718.520] didn't hear or touch or something like that. +[2718.520 --> 2720.880] And the animal grew up and the literally +[2720.880 --> 2722.960] those parts of the brain took over different functions. +[2722.960 --> 2724.920] So they're proving the Mount Kessles proposal that. +[2724.920 --> 2727.840] You can do anything. +[2727.840 --> 2729.240] It's plug and play in some sense. +[2729.240 --> 2730.920] It's not exactly like that. +[2730.920 --> 2732.040] There are differences. +[2732.040 --> 2734.600] There are reasons you wouldn't want to do this. +[2734.600 --> 2737.360] It's not like, oh, you're just a bunch of legos. +[2737.360 --> 2739.320] But the basic idea is right. +[2739.320 --> 2740.040] Thank you. +[2740.040 --> 2740.520] Yeah. +[2740.520 --> 2742.760] Actually, and me and Mary Thrupp sort of +[2742.760 --> 2745.280] had the same question that you answered +[2745.280 --> 2747.320] about what happens to the regions of the Neocortex +[2747.320 --> 2749.640] that are supposed to be set aside for vision and hearing +[2749.640 --> 2751.320] if the person is born blind and deaf. +[2751.320 --> 2753.160] So you're ahead of the curve. +[2753.160 --> 2756.080] Because I was also wondering, you know, +[2756.080 --> 2758.280] is when you did the cup demonstration, +[2758.280 --> 2761.800] is that how Helen Keller was able to learn? +[2761.800 --> 2763.840] Well, she, what's interesting about Helen Keller +[2763.840 --> 2764.680] and fascinating, right? +[2764.680 --> 2766.360] So she was deaf and blind, right? +[2766.360 --> 2768.840] There was a really profound deficit. +[2768.840 --> 2771.920] But she could only learn through the world to touch. +[2771.920 --> 2773.840] And I mean, touch, smell and taste, +[2773.840 --> 2775.680] but we don't really rely on smell and taste very much +[2775.680 --> 2776.880] for humans. +[2776.880 --> 2779.880] So mostly just just just for dating. +[2779.880 --> 2783.120] And we may probably be guys don't know this. +[2783.960 --> 2788.640] But she learned a model of the world just like, +[2788.640 --> 2790.240] you're an eye model, my model of the world. +[2790.240 --> 2792.200] Yes, she wouldn't know what color was. +[2792.200 --> 2793.680] But she could walk around the world, +[2793.680 --> 2795.920] she could speak, she gave lectures all around the world. +[2795.920 --> 2799.800] She knew how things in the world work just like you and I did. +[2799.800 --> 2801.640] She had what coffee cups were. +[2801.640 --> 2804.760] And so it shows you that we can learn a model of the world +[2804.760 --> 2806.440] through different senses. +[2806.440 --> 2808.520] But we end up with the thing kind of model. 
+[2808.520 --> 2811.720] You know, because you're at the model is what the world is. +[2811.720 --> 2813.400] And so yes, you can learn a model through touch. +[2813.400 --> 2814.920] You can learn a model of this to site. +[2814.920 --> 2817.840] And yet with running through site, I won't be able to test. +[2817.840 --> 2821.200] I won't be able to detect, let's say, temperature or texture. +[2821.200 --> 2824.120] And learning through touch, I can't more colors. +[2824.120 --> 2826.320] But other than that, we can learn the same models. +[2826.320 --> 2828.680] It's amazing flexible system. +[2828.680 --> 2829.480] Yeah. +[2829.480 --> 2831.840] And I really hadn't thought of it that way. +[2831.840 --> 2834.800] Which is why I'm here at the best job. +[2834.800 --> 2837.800] Carolyn wants to know, how do the columns communicate +[2837.800 --> 2838.640] with each other? +[2838.640 --> 2839.440] Did you touch on that? +[2839.440 --> 2840.200] Did I miss that? +[2840.200 --> 2843.560] Yeah. Well, I talked about the voting member. +[2843.560 --> 2846.360] Well, it's more complex than that too. +[2846.360 --> 2848.240] Column send information to each other. +[2848.240 --> 2851.200] So one column, there are cells in each column that +[2851.200 --> 2852.280] are get input. +[2852.280 --> 2854.440] And there are cells in each column that have great output. +[2854.440 --> 2857.200] And so columns will send their output to other columns +[2857.200 --> 2858.400] and they'll get input from other columns. +[2858.400 --> 2860.880] It's a very complicated system. +[2860.880 --> 2863.040] I didn't really talk about that much. +[2863.040 --> 2865.240] But you can just imagine being wired together +[2865.240 --> 2867.200] in a sort of a chain. +[2867.200 --> 2868.400] So there's two types of connections. +[2868.400 --> 2871.680] Like it goes to region one, region two, region three. +[2871.680 --> 2872.640] That's one topic connection. +[2872.640 --> 2876.080] And then there's, so that's the feed forward connections +[2876.080 --> 2877.680] you might call in the feedback connections. +[2877.680 --> 2879.000] And then the columns are also connected +[2879.000 --> 2880.120] through these voting layers. +[2880.120 --> 2881.600] And that's what I talked about. +[2881.600 --> 2883.840] So there's a lot of connections going back and forth. +[2883.840 --> 2887.480] But still, almost all the vast majority of connections +[2887.480 --> 2891.120] in the brain and the cortex are within each column. +[2891.120 --> 2894.480] And the number of ones that go long distance +[2894.480 --> 2896.160] in other places is still high, but it's +[2896.160 --> 2897.600] a much smaller number. +[2897.600 --> 2899.400] OK. +[2899.400 --> 2903.640] Fred wanted to know, are frames stored in one column +[2903.640 --> 2905.280] or multiple or elsewhere? +[2905.280 --> 2908.240] Because I think that the knowledge of the cop is everywhere. +[2908.240 --> 2909.240] Yeah. +[2909.240 --> 2913.200] But every column is creating its own frames. +[2913.200 --> 2913.880] Oh, OK. +[2913.880 --> 2915.680] Really, every column. +[2915.680 --> 2918.440] And it's a very interesting way to do it. +[2918.440 --> 2920.360] We actually know a lot about how they do this. +[2920.360 --> 2923.200] But every column of a column has its own set of reference +[2923.200 --> 2924.040] frames. +[2924.040 --> 2927.240] And the next column over has a different set of reference +[2927.240 --> 2927.880] frames. +[2927.880 --> 2930.200] They can coordinate. 
+[2930.200 --> 2932.200] But basically, they create them independently. +[2932.200 --> 2935.080] For those who are like neuroscience geeks, +[2935.080 --> 2936.920] there's a type of cell in the old brain +[2936.920 --> 2939.880] called the grid cells, which some people who got grid cells +[2939.880 --> 2940.400] in place cells. +[2940.400 --> 2942.640] These are people who got no more prizes for discovering these. +[2942.640 --> 2944.400] They're not in the near cortex. +[2944.400 --> 2947.240] But we have speculated that these cells learn reference +[2947.240 --> 2947.520] frames. +[2947.520 --> 2948.880] We know this. +[2948.880 --> 2951.000] And we've speculated that the equivalent cells +[2951.000 --> 2952.520] exist in the cortex. +[2952.800 --> 2955.280] So we know the actual, a lot about the neural mechanisms. +[2955.280 --> 2957.800] And now we speculated this five years ago. +[2957.800 --> 2959.000] But now there's a lot of evidence. +[2959.000 --> 2961.440] This is true that people are finding these grid cells, which +[2961.440 --> 2964.840] are like reference-same cells throughout the near cortex. +[2964.840 --> 2968.960] So that prediction has been being verified right now. +[2968.960 --> 2969.360] OK. +[2969.360 --> 2970.280] Thank you. +[2970.280 --> 2973.080] Larry, a woman once said, does our brain +[2973.080 --> 2975.720] try to fill in the gaps in knowledge +[2975.720 --> 2978.600] to form a more coherent theory or a frame? +[2978.600 --> 2979.640] Oh, absolutely. +[2979.800 --> 2983.720] It's well known, for example. +[2983.720 --> 2987.840] You everyone knows about the blind spot. +[2987.840 --> 2988.760] You've done that trick. +[2988.760 --> 2990.280] Like, there's a blind spot where you're +[2990.280 --> 2991.320] optic nerve leaves the eye. +[2991.320 --> 2994.040] And so if you look at these two dots in front of you, +[2994.040 --> 2996.080] you look at one dot, this other dot disappears. +[2996.080 --> 2998.560] Anyway, there's holes in your retina. +[2998.560 --> 3000.720] Your retina is not like a camera. +[3000.720 --> 3003.720] Your retina has these blood vessels going through it +[3003.720 --> 3005.680] and all these holes in it. +[3005.680 --> 3006.640] It's a real mess. +[3006.640 --> 3008.400] And you're not aware of any of this, right? +[3008.400 --> 3012.120] And the reason is, is because the filling in, if you will, +[3012.120 --> 3013.280] is that voting layer, right? +[3013.280 --> 3015.880] The vote, they all say, yeah, we're looking out a cat. +[3015.880 --> 3020.960] And so you're not aware that you're not actually, +[3020.960 --> 3022.480] you're not actually, because it's back to sense. +[3022.480 --> 3023.360] Remember, I said, you're not really +[3023.360 --> 3025.440] perceiving the world, you're perceiving your level, right? +[3025.440 --> 3025.640] Yes. +[3025.640 --> 3027.560] Because the way you're getting from your senses +[3027.560 --> 3029.440] is this mess that's got holes and beans +[3029.440 --> 3030.680] missing all over the place. +[3030.680 --> 3033.280] It's like when I touched the cup, +[3033.280 --> 3034.680] I'm not touching all the cup, right? +[3034.680 --> 3035.720] I'm just touching a few places. +[3035.720 --> 3038.200] But I perceive the entire cup is there. +[3038.200 --> 3040.000] And the same thing's going on with your vision. +[3040.000 --> 3043.920] So yes, the reason it's filling in, +[3043.920 --> 3045.480] it's less than it's filling in. +[3045.480 --> 3049.800] It's more that their model says the cup is solid. 
+[3049.800 --> 3053.000] And therefore, that's what you perceive. +[3053.000 --> 3054.800] When reality, the input coming into your brain +[3054.800 --> 3057.240] is full of holes and some noise in is messy. +[3057.240 --> 3058.880] But you're perceiving the model. +[3058.880 --> 3061.280] You're not actually perceiving the real thing. +[3061.280 --> 3063.680] I know that sounds weird, but it's true. +[3063.680 --> 3066.240] No, listen, I'm here for the weird. +[3066.240 --> 3067.600] I am. +[3067.600 --> 3070.160] John Miniger, I hope I'm getting that right, John. +[3070.160 --> 3073.200] How does consciousness fit into your models? +[3073.200 --> 3076.080] Yeah, I wrote a chapter about this in the book. +[3076.080 --> 3079.600] And I know consciousness is such a controversial topic +[3079.600 --> 3083.600] that I snuck it in the machine intelligence section. +[3083.600 --> 3085.920] And I stuck it in with SkyNet. +[3085.920 --> 3088.400] That's all I said to myself, can a machine be conscious? +[3088.400 --> 3091.760] And that sort of diffuses the somewhat diffuses +[3091.760 --> 3093.800] all these emotional arguments about consciousness. +[3093.800 --> 3095.200] Because let's talk about a machine. +[3095.200 --> 3097.040] Could it be conscious? +[3097.040 --> 3099.680] And the bottom of the conscious is really +[3099.680 --> 3101.200] enough good definition for it. +[3101.200 --> 3103.960] And lots of people think it's all over the map. +[3103.960 --> 3107.960] But I do address two aspects of it. +[3107.960 --> 3110.080] And it's too much to answer here in the Q&A. +[3110.080 --> 3113.480] But I talk about why, our theory does explain why +[3113.480 --> 3114.840] you have a sense of self. +[3114.840 --> 3117.840] Why do you feel like you're present in the world? +[3117.840 --> 3121.600] Why am I, how come I don't, am I just a machine? +[3121.600 --> 3124.680] Why do I feel like I'm not just a machine? +[3124.680 --> 3127.320] And then the other thing is, I talked about this briefly, +[3127.320 --> 3130.600] is like, why do we perceive things sort of called quality? +[3130.600 --> 3132.640] Why do I perceive something to be green? +[3132.640 --> 3134.800] Green is not really a thing in the world. +[3134.800 --> 3136.840] There's light frequency, but that's not green. +[3136.840 --> 3139.120] Why do I feel green to me? +[3139.120 --> 3141.000] And you can explain these two things. +[3141.000 --> 3145.080] At least I showed how one might explain them. +[3145.080 --> 3147.680] And so yes, the theory does tell you about that. +[3147.680 --> 3151.240] In terms of it's part of the model again. +[3151.240 --> 3153.880] Green is part of what we, is part of the model of the world. +[3153.880 --> 3155.480] And why it looks like the way it is. +[3155.480 --> 3156.920] So it's too much for today. +[3156.920 --> 3159.080] But yes, I get it. +[3159.080 --> 3161.680] And in the words of the immortal, Kermit the Frog, +[3161.680 --> 3164.080] it is not easy being green. +[3164.080 --> 3167.000] For sure. +[3167.000 --> 3170.920] I don't know if this gets to sort of aerie fairy or woo. +[3170.920 --> 3175.320] But are you making a distinction between the mind and the brain? +[3175.320 --> 3177.120] No, it's all one. +[3177.120 --> 3178.120] OK. +[3178.120 --> 3184.600] So I think the CFI crowd should hopefully be unanimous in this. +[3184.600 --> 3187.760] There is, it's in our everyday language. +[3187.760 --> 3191.560] All of us can't avoid saying like, oh, I thought this today. 
+[3191.560 --> 3193.320] I'm not something else. +[3193.320 --> 3195.360] But in reality, of course, it's not like that. +[3195.360 --> 3198.040] The brain, everything you've ever thought you have +[3198.040 --> 3198.800] is a brain state. +[3198.800 --> 3200.040] It's neuroindactive. +[3200.040 --> 3202.000] Neuroindactive leave the brain states. +[3202.000 --> 3203.920] I mean, you need to, you're acting as a neuroindicator +[3203.920 --> 3204.440] you're thinking. +[3204.440 --> 3205.440] There is no separation. +[3205.440 --> 3207.480] There's no dualism. +[3207.480 --> 3208.480] It's all one thing. +[3208.480 --> 3210.240] And there's something wrong with that. +[3210.240 --> 3211.920] That's pretty cool. +[3211.920 --> 3213.640] Some people are disappointed in that. +[3213.640 --> 3214.640] Yeah, no. +[3217.080 --> 3218.120] Gary wants to know. +[3218.120 --> 3220.680] And this is, again, in that same category, +[3220.680 --> 3224.720] any idea about dreams, about how they, how are they created? +[3224.720 --> 3225.240] Yeah, it's a great. +[3225.240 --> 3227.720] Aside from drinking vodka way too late, +[3227.720 --> 3229.920] or that might just be a very personal experience. +[3233.760 --> 3234.280] OK. +[3234.280 --> 3236.840] So look, there's a lot of research on dreams. +[3236.840 --> 3238.960] Our theory doesn't really say much about it. +[3238.960 --> 3242.320] And so I should just leave it at that. +[3242.320 --> 3244.120] Clearly, humans need a dream. +[3244.120 --> 3244.880] It's important. +[3244.880 --> 3247.680] It performs a biological function. +[3247.680 --> 3248.840] There's some theories about that. +[3248.840 --> 3250.600] The best theory I've heard is that we need a dream +[3250.600 --> 3254.240] because it's actually a cleaning, a junk cleaning process. +[3254.240 --> 3255.960] It removes these chemicals in your brain. +[3255.960 --> 3257.600] I think that's a pretty cool idea. +[3257.600 --> 3260.240] But our theory itself doesn't say anything about that. +[3260.240 --> 3262.240] It doesn't need to, it's more like, hey, +[3262.240 --> 3265.400] how does a normal healthy, a live brain work? +[3265.680 --> 3267.960] We don't really talk too much about disease. +[3267.960 --> 3270.800] We don't talk about things like dreaming, +[3270.800 --> 3274.160] and why do you have to eat certain foods and things like that. +[3274.160 --> 3275.640] It's like, oh, how does the information process? +[3275.640 --> 3278.760] So our theory doesn't really say much about dreams. +[3278.760 --> 3279.400] OK. +[3279.400 --> 3280.760] Here it is. +[3280.760 --> 3284.440] How do you, in Huat Rose, once know how do you match, +[3284.440 --> 3287.520] and let me full disclosure, this question is way above my pay +[3287.520 --> 3287.840] grade. +[3287.840 --> 3289.400] I don't even know what I'm saying here. +[3289.400 --> 3292.280] So how do you match a Bayesian model of knowledge +[3292.280 --> 3294.840] with your highly distributed model of knowledge? +[3295.520 --> 3297.680] And those are English words. +[3297.680 --> 3299.040] Well, I don't know what Bayesian model is. +[3299.040 --> 3301.960] Bayesian, I think, is supposed to be capitalized. +[3301.960 --> 3302.720] Yes, it is. +[3302.720 --> 3306.440] So you can be excused for not knowing it. +[3306.440 --> 3308.720] All right, that is a very technical question. +[3308.720 --> 3313.360] Bayesian is a named after the famous person, Bayes, +[3313.360 --> 3316.560] is a type of probabilistic framework. 
+[3316.560 --> 3322.440] And some people think about the brain in probabilistic terms. +[3322.440 --> 3327.520] Our theory is sort of, it's not adverse to Bayesian, +[3327.520 --> 3330.360] but Bayes, it's not really a Bayesian theory. +[3330.360 --> 3334.600] And the way the brain represents uncertainty +[3334.600 --> 3336.800] is not through probabilities. +[3336.800 --> 3339.000] And as a technical topic, and I'm not +[3339.000 --> 3341.960] going to talk about it further, but we don't believe +[3341.960 --> 3345.240] that that's the right framework, although you +[3345.240 --> 3348.640] can think there's lots of things in cognition +[3348.640 --> 3350.840] you can think in terms of probabilities. +[3350.840 --> 3353.840] But that's not really the right framework for our model. +[3353.840 --> 3356.040] OK. +[3356.040 --> 3360.040] Aina wants to know, first of all, she said, +[3360.040 --> 3361.840] thank you, this is fascinating. +[3361.840 --> 3366.040] Can you please talk a little bit about AI and the human brain +[3366.040 --> 3369.160] and how AI acquires models from humans? +[3369.160 --> 3370.240] Yeah, yeah. +[3370.240 --> 3372.680] OK, well, first of all, there's a whole second section +[3372.680 --> 3374.120] on the book, a book is in three sections. +[3374.120 --> 3375.120] And there's a whole second section. +[3375.120 --> 3376.160] It's all about AI. +[3376.160 --> 3377.160] OK. +[3377.160 --> 3379.160] And I take some very unusual, I don't +[3379.160 --> 3381.040] have to take controversial, but I would say I +[3381.040 --> 3386.120] would take some non-common views about AI. +[3386.120 --> 3389.640] And so the basic principle I have is today's AI +[3389.640 --> 3391.000] works on very, very different principles. +[3391.000 --> 3392.840] There's nothing like what I just talked about. +[3392.840 --> 3395.480] And I, and I, most AI researchers, +[3395.480 --> 3397.600] don't think that today's AI is smart. +[3397.600 --> 3398.880] It's not really intelligent. +[3398.880 --> 3400.000] They're clever. +[3400.000 --> 3401.480] They're really good pattern, cat, +[3401.480 --> 3403.400] massifier, classifiers, and so on, but they're not really +[3403.400 --> 3405.880] smart, like humans are animals. +[3405.880 --> 3410.320] And, but the brain represents a road map +[3410.320 --> 3413.200] for how to build truly intelligent machines. +[3413.200 --> 3415.440] Now, we can talk about the board if you do that or not. +[3415.440 --> 3418.440] But the main thing of that, one of the key messages +[3418.440 --> 3422.040] I want to get across is that when we want to build +[3422.040 --> 3424.360] truly intelligent machines and just, you know, +[3424.360 --> 3426.160] disclaimer, I think we're going to be doing this +[3426.160 --> 3428.920] in big time, the latter part of this century. +[3428.920 --> 3431.960] So we're going to have these, it's going to be like crazy. +[3431.960 --> 3435.280] But when we build these intelligent machines, +[3435.280 --> 3436.760] we don't want to build the entire brain. +[3436.760 --> 3438.360] We don't want to build the old parts of the human brain. +[3438.360 --> 3439.840] We don't want them to be sitting around going, +[3439.840 --> 3442.440] oh, don't that sound, you know, I'm lazy. +[3442.440 --> 3443.440] I don't even know. +[3443.440 --> 3447.800] So I think that was a scene out of an outtake of I robot. +[3447.800 --> 3448.320] Yes. +[3448.320 --> 3450.200] Yeah, maybe I don't know. 
+[3450.200 --> 3452.520] So you can build in a tells machine +[3452.520 --> 3454.360] by replicating the New York cortex. +[3454.360 --> 3457.160] And on its own, the New York cortex does not, +[3457.160 --> 3459.120] it can be smart, but it has no motivations. +[3459.120 --> 3460.960] It's not like I'm going to say, oh, human, +[3460.960 --> 3461.640] you've created me. +[3461.640 --> 3463.520] Now I'm going to take over because I'm tired of being, +[3463.520 --> 3464.720] you know, you're slave. +[3464.720 --> 3466.640] It's not going to do that. +[3466.640 --> 3470.520] You have to give these intelligent machines some motivations +[3470.520 --> 3473.640] or some drives or some things that they have to do. +[3473.640 --> 3476.480] But they aren't going to be like us at all. +[3476.480 --> 3478.320] Unless you went out of our way to do that. +[3478.320 --> 3479.880] And so the whole people, a lot of people +[3479.880 --> 3482.640] think that AI is an existential threat to humanity. +[3482.640 --> 3484.680] Like we're going to create these super intelligent machines +[3484.680 --> 3486.760] and they're going to kill us or sling slave us and things +[3486.760 --> 3487.520] like that. +[3487.520 --> 3488.640] I don't believe that's true at all. +[3488.640 --> 3490.440] And I walk through arguments about that. +[3490.440 --> 3492.440] So the whole section goes to all these issues +[3492.440 --> 3494.200] about intelligent machines, what they'll +[3494.200 --> 3496.960] be like, what they won't be like. +[3496.960 --> 3499.720] And why today's AI is not, I have a chapter called why +[3499.720 --> 3501.360] there's no I and AI. +[3501.360 --> 3504.720] Because today's AI is not intelligent. +[3504.720 --> 3505.240] Got it. +[3505.240 --> 3505.480] Got it. +[3505.480 --> 3506.680] No, that is comforting. +[3506.680 --> 3508.920] And you know where that's coming from. +[3508.920 --> 3511.720] There's a bit of a bend diagram and a crossover +[3511.720 --> 3513.720] of folks who are here and folks who have watched way +[3513.720 --> 3516.240] too many sci-fi movies. +[3516.240 --> 3519.640] And we've been primed for how our phone is going +[3519.640 --> 3520.240] to take us over. +[3520.240 --> 3522.280] Yeah, but in a lot of smart people out there +[3522.280 --> 3525.000] claiming that the world is going to be overrun +[3525.000 --> 3526.800] by intelligent rope, intelligent machines. +[3526.800 --> 3528.800] I mean, a lot of people saying this now. +[3528.800 --> 3530.040] I don't know if you know what I mean. +[3530.040 --> 3531.000] I think they're all wrong. +[3531.000 --> 3533.920] And I make a very reasoned argument why. +[3533.920 --> 3537.560] And yet another reason, shameless plug for why you guys +[3537.560 --> 3540.080] should get his book. +[3540.080 --> 3542.800] Thank you so much for spending time with us +[3542.800 --> 3545.080] and in sharing your time and expertise. +[3545.080 --> 3547.720] And I thank everyone in the audience for watching. +[3547.720 --> 3550.560] And I'm so sorry I couldn't get to all of your questions. +[3550.560 --> 3553.920] There is never enough time to do that. +[3553.920 --> 3556.320] But I got to as many as I could. +[3556.320 --> 3557.960] And I do want to assure everyone +[3557.960 --> 3561.640] if you've missed this anything, the recording +[3561.640 --> 3565.760] of this event will be available tomorrow at skepticalonquirer.org. 
+[3565.760 --> 3568.520] And our reminder, our next guests in this series +[3568.520 --> 3570.680] will be here on Thursday, April 29th. +[3570.680 --> 3574.080] Mick West talking about escaping the rabbit hole, +[3574.080 --> 3576.720] how to help your conspiracy theorist friend. +[3576.720 --> 3580.520] I will be here with pen and paper taking notes in hand. +[3581.040 --> 3584.880] So my thanks, of course, to skepticalonquirer, CFI, +[3584.880 --> 3587.480] our producer tonight, Mark Krijdler, +[3587.480 --> 3589.520] and to you, the audience. +[3589.520 --> 3592.640] And again, to you, Jeff, thank you for making the time. +[3592.640 --> 3593.880] And sharing your expertise. +[3593.880 --> 3594.400] That was great. +[3594.400 --> 3594.880] That was great. +[3594.880 --> 3595.640] Thank you. +[3595.640 --> 3597.080] And everybody, you know me. +[3597.080 --> 3598.960] My name is Leanne Lorde. +[3598.960 --> 3599.840] Thank you. +[3599.840 --> 3601.800] And good night. +[3601.800 --> 3603.120] Good night, Jeff. +[3603.120 --> 3604.360] Good night. diff --git a/transcript/allocentric_jgxNs1WBONk.txt b/transcript/allocentric_jgxNs1WBONk.txt new file mode 100644 index 0000000000000000000000000000000000000000..98fa710244cbc90443e45c1b0890dae9d81e8ee4 --- /dev/null +++ b/transcript/allocentric_jgxNs1WBONk.txt @@ -0,0 +1,66 @@ +[0.000 --> 2.000] Bella, do's we built a party? +[2.000 --> 4.000] No, the mirror is math science. +[4.000 --> 6.000] History and rambling, the mystery +[6.000 --> 9.000] that all started with a big bang. +[9.000 --> 10.000] Hey. +[10.000 --> 11.000] Hey. +[11.000 --> 12.000] Hey. +[12.000 --> 13.000] Everything's smoothed out with Amy. +[13.000 --> 15.000] Uh, no, she's still pretty mad. +[15.000 --> 18.000] Did you make the apology as sincere as I would have? +[18.000 --> 21.000] I said, children, says he's sorry. +[21.000 --> 23.000] Well, I have to hang it on a little thick. +[26.000 --> 29.000] Hey, it's time you apologize to her yourself. +[29.000 --> 30.000] I suppose so. +[30.000 --> 32.000] But if I get out of line, I'll lose my spot. +[32.000 --> 35.000] I'm happy to hold your place till you get back. +[45.000 --> 46.000] Good morning, sir. +[46.000 --> 48.000] What can I get started for you today? +[48.000 --> 50.000] It's a pleasure, sir. +[50.000 --> 52.000] Uh-oh. What's nice? +[52.000 --> 53.000] All right. +[53.000 --> 55.000] Three empty glasses. +[55.000 --> 57.000] Would you care for a pastry? +[57.000 --> 58.000] Nope. +[60.000 --> 61.000] Uh, mocha. +[61.000 --> 63.000] Three mocha lattes. +[66.000 --> 68.000] Uh, double chocolate chip muffin. +[68.000 --> 69.000] Yup. +[73.000 --> 74.000] Ah, ah, ah, ah, ah. +[74.000 --> 76.000] You had three palm and cartonies in these. +[76.000 --> 77.000] Puzzle. +[77.000 --> 81.000] And I love the Beatles' state for my life. +[81.000 --> 82.000] Something. +[82.000 --> 84.000] Uh. +[84.000 --> 86.000] What's that, son? +[86.000 --> 88.000] Hey, bring him Lincoln. +[88.000 --> 89.000] Uh-huh. +[89.000 --> 92.000] Do you know you, me, gone? +[92.000 --> 93.000] Shoot. +[93.000 --> 94.000] Oh, yeah. +[94.000 --> 96.000] Three shots. +[96.000 --> 98.000] Uh. +[126.000 --> 128.000] Uh. +[157.000 --> 164.000] Is that everyone I do want to war? +[179.000 --> 181.000] Avengers! +[187.000 --> 188.000] No! +[203.000 --> 205.000] It's not much. +[205.000 --> 206.000] But it's home. +[206.000 --> 208.000] I think it's brilliant. +[210.000 --> 213.000] Where have you been? 
+[214.000 --> 217.000] Harry, how wonderful to see you, dear. +[217.000 --> 218.000] Bed's empty? +[218.000 --> 219.000] No note? +[219.000 --> 220.000] Car gone? +[220.000 --> 222.000] You got a dime. +[222.000 --> 224.000] You could have been seen. +[224.000 --> 227.000] Of course, I don't blame you, Harry, dear. +[227.000 --> 229.000] They were starving, in, mum. +[229.000 --> 231.000] They were bars in his window. +[231.000 --> 235.000] You best hope I don't put bars on your window, Donald Weasley. +[235.000 --> 236.000] Come on, Harry. +[236.000 --> 238.000] Time for spotted breakfast. +[244.000 --> 245.000] Oh. +[251.000 --> 252.000] Lincoln! +[274.000 --> 275.000] What? +[293.000 --> 294.000] Ah! diff --git a/transcript/allocentric_mhVsMmcOxQM.txt b/transcript/allocentric_mhVsMmcOxQM.txt new file mode 100644 index 0000000000000000000000000000000000000000..65d9de8ca9e56abe49fd62ad4933ad612ad45b80 --- /dev/null +++ b/transcript/allocentric_mhVsMmcOxQM.txt @@ -0,0 +1,79 @@ +[0.000 --> 6.720] Welcome to the presentation of Where Should I Look? +[6.720 --> 9.640] Comparing reference frames for spatial tactile cues. +[9.640 --> 14.560] My name is Eric Pescara and my co-authors are Anton Stubenwad to be a Svartiger, Ikunt +[14.560 --> 18.000] Fang and Michal Beigel. +[18.000 --> 21.920] When designing tactile displays on the wrist for special cues, it is important to keep +[21.920 --> 24.240] the natural movement of the body and mind. +[24.240 --> 28.560] Depending on the movement of the wrist, different reference frames can influence the output +[28.560 --> 30.040] of the tactile display. +[30.040 --> 34.280] In this paper, we compare it in allocentric reference frame with the wrist centered reference +[34.280 --> 38.280] frame in terms of accuracy, reaction time and cognitive load. +[38.280 --> 41.640] We conducted a repeated measures user study with 20 participants. +[41.640 --> 46.520] We used a tactile wristband with 10 evenly spaced actuators as our tactile display. +[46.520 --> 51.680] The data we retrieved from the experiment consisted of 120 spatial localization tasks per +[51.680 --> 53.800] participant per reference frame. +[53.800 --> 57.800] As a measure of cognitive load, we asked the participants to fill out a raw TLX survey +[57.800 --> 59.400] after every condition. +[59.400 --> 62.600] A spatial localization task was conducted as follows. +[62.600 --> 68.560] First, a combination of wrist rotation and actuator was drawn from a pre-determined list. +[68.560 --> 73.400] The participant was then instructed to turn the wrist to match the given wrist rotation. +[73.400 --> 78.040] Depending on the reference frame, the corresponding spatial direction was calculated. +[78.040 --> 82.320] The selected actuator was then activated for one second. +[82.320 --> 85.120] The participant then was asked to input the direction. +[85.120 --> 89.040] The input was stored together with the reaction time in the true direction. +[89.040 --> 94.960] This process was then repeated until the list of spatial localization tasks was exhausted. +[94.960 --> 100.000] In the evaluation, we first looked if localization accuracy was influenced by the reference frame. +[100.000 --> 103.920] Both reference frames had a high localization accuracy for the wrist and showed no statistical +[103.920 --> 105.920] difference. 
+[105.920 --> 111.040] While the wrist centered reference frame had an accuracy of 84% and an average error of 28.5
+[111.040 --> 115.760] degrees, the allocentric reference frame was only marginally better, with an accuracy
+[115.760 --> 121.040] of 85% and an average error of 26.8 degrees.
+[121.040 --> 125.960] When comparing reaction time, we can see a difference between the reference frames.
+[125.960 --> 131.280] There is a statistically significant difference in reaction time with a small effect size.
+[131.280 --> 136.200] The allocentric reference frame has a slightly faster reaction time, with an average difference
+[136.200 --> 140.040] of 240 milliseconds.
+[140.040 --> 144.880] For measuring the effects of the wrist rotation on the participants' reaction time, we performed
+[144.880 --> 151.200] a linear regression analysis with the wrist rotation as predictor of the reaction time.
+[151.200 --> 155.840] While there is a clear relation between increasing reaction time and higher wrist rotation for
+[155.840 --> 161.560] the wrist centered reference frame, the allocentric reference frame is mostly unaffected by the wrist
+[161.560 --> 164.240] rotation.
+[164.240 --> 168.760] For measuring the effects of the wrist rotation on the participants' accuracy, we also performed
+[168.760 --> 174.120] a linear regression analysis with the wrist rotation as predictor of the accuracy.
+[174.120 --> 178.960] While there is a clear relation between decreasing accuracy and higher wrist rotation for the
+[178.960 --> 183.940] wrist centered reference frame, the allocentric reference frame is mostly unaffected by the
+[183.940 --> 186.680] wrist rotation.
+[186.680 --> 191.600] We looked at the data collected by the raw TLX questionnaires to analyze how the participants'
+[191.600 --> 194.800] mental demand was affected under both conditions.
+[194.800 --> 199.160] The raw TLX data was separately evaluated for each dimension.
+[199.160 --> 203.600] The allocentric reference frame yielded better results in every dimension.
+[203.600 --> 208.960] There was a statistically significant difference between the reference frames in all dimensions,
+[208.960 --> 212.440] with most dimensions only having a low effect size.
+[212.440 --> 217.840] However, we found that the mental demand dimension had a large effect size and the performance
+[217.840 --> 222.080] and effort dimensions had a moderate effect size.
+[222.080 --> 226.800] The participants also described the wrist centered reference frame as far less intuitive
+[226.800 --> 228.440] and more demanding.
+[228.440 --> 233.480] This leads us to conclude that the mental demand of the allocentric reference frame is lower
+[233.480 --> 237.080] than that of the wrist centered reference frame.
+[237.080 --> 241.160] Linear regression was used to measure the learning effect during the experiment for both
+[241.160 --> 245.560] conditions, using trial number as predictor of the reaction time.
+[245.560 --> 250.080] No statistically significant evidence was found that the trial number influenced the
+[250.080 --> 253.960] reaction time for the wrist centered reference frame.
+[253.960 --> 258.520] We found statistically significant evidence that the trial number influenced the reaction
+[258.520 --> 260.920] time for the allocentric condition.
+[260.920 --> 265.000] Our participants improved their reaction time in the allocentric reference frame during
+[265.000 --> 267.320] the experiment.
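A minimal sketch of how such a system might map a cued direction to one of the 10 evenly spaced actuators under each reference frame; this is an assumed geometry for illustration, not the authors' implementation, and the function and variable names are hypothetical.

    N_ACTUATORS = 10
    SPACING = 360 / N_ACTUATORS   # 36 degrees between neighboring actuators

    def actuator_for(direction_deg, wrist_rotation_deg, frame):
        """Pick the actuator closest to the cued direction.

        direction_deg      -- target direction in world (allocentric) coordinates
        wrist_rotation_deg -- current rotation of the wrist/band
        frame              -- "allocentric" or "wrist"
        """
        if frame == "allocentric":
            # Compensate for wrist rotation so the cue stays fixed in the world.
            relative = direction_deg - wrist_rotation_deg
        else:
            # Wrist-centered: the cue rotates together with the band.
            relative = direction_deg
        return round((relative % 360) / SPACING) % N_ACTUATORS

    # A "straight ahead" cue with the wrist turned 72 degrees:
    print(actuator_for(0, 72, "allocentric"))  # 8  (band compensates for the rotation)
    print(actuator_for(0, 72, "wrist"))        # 0  (same actuator regardless of rotation)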
+[267.320 --> 272.680] In conclusion, in the experiment we conducted we found evidence that an allocentric reference
+[272.680 --> 277.200] frame outperforms a wrist centered reference frame for spatial localization tasks with a
+[277.200 --> 279.800] wrist-worn tactile wearable.
+[279.800 --> 284.000] In our experiment the participants had faster reaction times with the allocentric reference
+[284.000 --> 287.480] frame and improved during the experiment.
+[287.480 --> 292.000] We found evidence that the allocentric reference frame was mentally less demanding and more
+[292.000 --> 294.000] intuitive.
+[294.000 --> 299.240] And the allocentric reference frame was more robust against postural changes compared to
+[299.240 --> 302.360] the wrist-centered reference frame.
+[302.360 --> 307.160] In the future we would like to investigate the influence of different reference frames
+[307.240 --> 313.000] for spatial localization tasks in the wild with a broad range of activities.
+[313.000 --> 317.680] We would also like to include more than just wrist rotations in the postural changes.
+[317.680 --> 322.240] And as there is conflicting evidence in the literature about which reference frames perform
+[322.240 --> 329.280] better in which circumstances, we would like to investigate this further.
+[329.280 --> 331.160] Thank you for your attention and have a great day. diff --git a/transcript/allocentric_p0A_IRKfG-w.txt b/transcript/allocentric_p0A_IRKfG-w.txt new file mode 100644 index 0000000000000000000000000000000000000000..490d4ba62a97602e4b94ef7c6ef50d8eb8106b38 --- /dev/null +++ b/transcript/allocentric_p0A_IRKfG-w.txt @@ -0,0 +1,20 @@
+[0.000 --> 2.400] Many people believe you should never say,
+[2.400 --> 4.580] I don't know, to a question.
+[4.580 --> 6.620] Let's say at the end of a presentation.
+[6.620 --> 9.740] Big picture, it's 100% acceptable.
+[9.740 --> 13.180] If you don't know the answer, don't try to BS them.
+[13.180 --> 15.860] They will smell blood in the water.
+[15.860 --> 17.980] What matters is how you say it.
+[17.980 --> 21.320] I've seen ultra-confident speakers handle this
+[21.320 --> 22.940] in one of three ways.
+[22.940 --> 24.860] First, you can say, I don't know,
+[24.860 --> 27.260] but I'll look into it and get back to you,
+[27.260 --> 28.860] and make sure you follow up.
+[28.920 --> 31.660] Second, I don't know, but I recommend you ask
+[31.660 --> 33.180] John, that's his area.
+[33.180 --> 34.720] And make sure you refer to somebody
+[34.720 --> 37.580] who really is the best person to ask.
+[37.580 --> 40.620] Third, tell them what you do know instead.
+[40.620 --> 44.660] Say, I don't know about X, but here's what I can tell you.
+[44.660 --> 46.180] Make sure whatever you say next
+[46.180 --> 48.580] adds genuine value to the conversation.
diff --git a/transcript/allocentric_rbItjWcSHbs.txt b/transcript/allocentric_rbItjWcSHbs.txt new file mode 100644 index 0000000000000000000000000000000000000000..ca94e1c2bc7879a6a020428aa13b09db52ae20f4 --- /dev/null +++ b/transcript/allocentric_rbItjWcSHbs.txt @@ -0,0 +1,16 @@ +[0.000 --> 2.000] Egocentric +[2.000 --> 4.000] Adjective +[4.000 --> 7.000] Selfish, self-centered +[7.000 --> 11.000] Egocentric +[11.000 --> 13.000] Adjective +[13.000 --> 15.000] Egotistical +[15.000 --> 19.000] Egocentric +[19.000 --> 22.000] Adjective +[22.000 --> 28.240] Relating to spatial representations, linked to a reference frame based on one's own location +[28.240 --> 32.720] within the environment, as when giving the direction as right, rather than +[32.720 --> 38.240] north, opposed to alo-centric. +[38.240 --> 40.240] Egocentric +[40.240 --> 42.240] Noun +[42.240 --> 45.240] A person who is Egocentric +[45.240 --> 56.240] == References == +[56.240 --> 58.400] Please support us with your subscription diff --git a/transcript/allocentric_tBidCJnzE4g.txt b/transcript/allocentric_tBidCJnzE4g.txt new file mode 100644 index 0000000000000000000000000000000000000000..14eb04e9e11a6fa377ce35ed7323497b18a5f029 --- /dev/null +++ b/transcript/allocentric_tBidCJnzE4g.txt @@ -0,0 +1,240 @@ +[0.000 --> 7.060] Hello friends, my name is Jessica and I am an ASHA certified speech and language +[7.060 --> 12.540] pathologist and I am obsessed with learning about all things social +[12.540 --> 17.140] communication. I am also obsessed with teaching others about them which is why +[17.140 --> 23.660] we're here on YouTube. Yay! So today I want to talk to you about non-verbal +[23.660 --> 30.860] communication. Nonverbal communication can be really tricky to teach. Most of us +[30.860 --> 36.900] have learned these skills naturally so identifying what they are and how to +[36.900 --> 40.540] break them down and teach them in a strategic way can actually be quite +[40.540 --> 45.680] challenging. So today we are going to talk about some ways that we can teach +[45.680 --> 53.620] our students about non-verbal communication and non-verbal language. +[54.660 --> 59.460] Now first I'm going to talk to you a little bit about non-verbal +[59.460 --> 65.740] communication. There are seven or eight-ish types of non-verbal communication. +[65.740 --> 78.800] They are facial expressions, body language, gestures, tone of voice, touch, eye +[78.800 --> 84.800] contact and personal space. Okay, you like those gestures? I just made them up on +[84.800 --> 90.120] the fly. Alright, so those are the seven areas of nonverbal communication. I +[90.120 --> 95.320] said eight because personal hygiene or personal appearance, sorry, can also be +[95.320 --> 100.560] considered a type of nonverbal communication. How we are choosing to appear +[100.560 --> 105.720] physically does communicate a lot about us. Okay, so let's break this down a +[105.720 --> 110.200] minute and now you know the different kinds of nonverbal communication. Let's +[110.200 --> 116.100] talk about what nonverbal communication is. It is any kind of communication that +[116.100 --> 122.680] occurs without words. It is not verbal, right? See how that works? So like I said, +[122.680 --> 127.840] it includes the following areas, facial expressions, body language, gestures, tone +[127.840 --> 133.000] of voice, touch, eye contact, personal space and physical appearance if you +[133.000 --> 139.800] want to click that. 
It is very common for individuals to struggle with nonverbal +[139.800 --> 145.000] communication. If your child has been diagnosed with autism, that means or your +[145.000 --> 149.600] student, that means that they have some trouble understanding and using nonverbal +[149.600 --> 155.080] communication. So the skill is extremely important to teach and learn because +[155.080 --> 160.800] learning how to understand nonverbal communication helps us to interact +[160.800 --> 166.840] socially with others and it helps us to communicate better with others. Okay, so +[166.840 --> 173.760] now we're going to break down each of the seven or eight sections of nonverbal +[173.760 --> 176.720] communication. We're going to break them down. We're going to talk about them. +[176.720 --> 180.680] We're going to define them and I'm going to give you some ideas on how you can +[180.680 --> 185.440] teach your student to understand each of these different kinds of nonverbal +[185.440 --> 189.920] communication. So the first one we're going to talk about is facial expressions. I +[189.920 --> 193.720] am looking off my computer if you keep seeing my eyes start away. I just full +[193.720 --> 199.000] disclosure. I need my notes because I want this to be good and helpful for you and +[199.000 --> 206.360] I can't do that if I'm doing this all my memory. So I hope you understand. Also, +[206.360 --> 211.080] before we dive in and I go any further, thank you for being here. Thank you for +[211.080 --> 215.400] taking the time to learn something new that will hopefully help you teach your +[215.400 --> 220.640] students a skill that they're struggling with. That is just absolutely awesome +[220.640 --> 224.520] and amazing and I thank you for choosing to spend your time with me. So before I +[224.520 --> 227.880] go any further, if you've not already gotten something to take notes with, I +[227.880 --> 233.240] would really recommend getting some pen and some paper and jotting down some +[233.240 --> 236.480] things to help you remember what we're talking about. Okay, so let's jump in. +[236.480 --> 240.920] The first type of nonverbal communication we're going to talk about is facial +[240.920 --> 252.000] expressions. Right? There are six main facial expressions that people make. Now, +[252.000 --> 260.360] each facial expression is related to an emotion. Each type of facial expression +[260.360 --> 266.280] has a very specific characteristics that will help you know what emotion the +[266.280 --> 271.680] person is feeling. Okay, so let's think about this. We're going to break it down a +[271.680 --> 278.600] little bit more. There are six types of facial expressions. Happy, sad, angry, +[278.600 --> 285.440] fear, disgust, and surprised. Scientists tell us that these are the basic +[285.440 --> 294.000] emotions that we all experience. Every other emotion is a root or a different +[294.000 --> 300.960] form of these basic emotions. So our facial expressions, we can say we're happy. +[300.960 --> 315.360] Sad, angry, scared, disgusted. We're surprised. Okay, each of these six basic +[315.360 --> 321.960] emotions have very distinct characteristics of the face. Okay, so going back +[321.960 --> 330.040] through them. When you feel happy, you have a wide smile and open now. You can +[330.040 --> 335.880] see that some teeth. You can see wrinkles around the eyes and the cheeks are +[335.880 --> 343.840] raised and the eyes are slightly squinted. Did you see all this in my face? 
+[343.840 --> 349.680] Can you see them? Can you see my wrinkles? My high-raised cheeks? My teeth? My smile? +[349.680 --> 357.480] I'm happy. I'm happy to be here. So that is happy. Second facial expression that +[357.480 --> 364.400] we can see is sad. Okay, the corners of the lips pointed down. +[364.400 --> 375.680] Inner eyebrows are raised up. My eyebrows apparently don't move like that. But you +[375.680 --> 383.400] know it's a face when you see one. Okay, next. Angry. There's tension in the face. +[383.400 --> 390.480] There's these closed, V-shaped eyebrows. The mouth, if the mouth is open, it's +[390.480 --> 396.120] square shaped. Square shaped, if you can understand that. If the mouth is closed, +[396.120 --> 403.200] there's tightness in the chin and jaws. Do you see all those characteristics? +[403.440 --> 415.440] Alright, fear. Slanted and raised eyebrows. Eyes are open very wide. Just saw a bug +[415.440 --> 420.660] crawl across my table. I don't know. Right? You know what fear looks like. +[420.660 --> 428.360] Disgust. A wrinkled nose. Lowered eyebrows. The mouth is tight and curved downward in the upper +[428.360 --> 436.520] lips go up. Big one across my table. I bug really didn't go across my table just +[436.520 --> 442.760] now. I'm just using that as an example. And last is surprised. Raised and curved +[442.760 --> 450.120] eyebrows. Horizontal wrinkles on the forehead. Open lips. Dropped jaw. Eyes open wide. +[450.840 --> 458.440] You see my wrinkles? Horizontal? Eyes. Mouth. I have a surprised face. So like I said, +[459.400 --> 466.280] I start by teaching my students these physical characteristics to look for when trying to +[466.280 --> 475.080] interpret a facial expression. Now an interesting tip is students with autism. We know that they struggle +[475.160 --> 481.080] with eye contact. So part of the reason that they struggle with understanding emotions is because +[481.080 --> 488.040] they are focusing on this lower third of the face. So a lot of these cues, like we talked about, +[488.040 --> 494.760] the horizontal wrinkles. This wrinkles around my eyes. Those are occurring in the top third of my face. +[494.760 --> 501.400] So a child or individual who does not focus on this top third is missing a lot of cues that's +[501.400 --> 508.520] going to help them learn to read and understand facial expressions. So to teach facial expressions, +[508.520 --> 515.400] to teach my students how to understand them, we again, I go over each of those definitions, +[515.400 --> 522.520] model them like I did for you. And they try and draw them so that they're having, you know, +[522.520 --> 527.640] an interactive process. And then we'll probably look at maybe some video clips or some pictures +[527.640 --> 535.160] or things to talk about those basic facial expressions. Again, really focus on this top third of +[535.160 --> 540.840] the face because we're getting a lot of cues there that if a child is not looking at somebody's +[540.840 --> 545.960] eyes or their top third of the face, they're going to miss those cues. Now we know there are more +[545.960 --> 554.040] emotions beyond happy, sad, mad, disgust, surprised, and angry. But we talk about these different +[554.040 --> 560.680] more complex emotions and how the same facial expressions are generally going to be used to convey +[560.680 --> 567.800] those complex emotions. So what we will do is we will, I'll give them a list of say, of some words. 
+[567.800 --> 579.640] For example, nervous, satisfied, amused, annoyed, love, revulsion. We're going to target, first of +[579.640 --> 585.080] all, some really good vocabulary words. But we're going to talk about what basic emotion, +[585.080 --> 591.560] these complex emotions are the most like. And then talk about kind of how the face is going to +[592.280 --> 600.280] model those similar facial expressions for the complex emotions as they do the basic emotions. +[601.160 --> 608.120] All right, gestures. Gestures are movements that we make with our bodies that are used to +[608.120 --> 614.840] communicate a message. We most frequently use our hands to gesture, but other parts of the bodies +[614.840 --> 622.920] can be used as well. Now, there are three types of gestures. Gestures that indicate nervousness, +[622.920 --> 627.640] such as fidgeting with objects, or my personal is playing with my hair. +[629.640 --> 634.520] Gestures with a specific meaning, such as a thumbs up, we know that means good job, it has a +[634.520 --> 640.200] meaning, and gestures that go with a verbal message, such as me, using my hands as I'm talking +[640.200 --> 647.320] and telling you a story. So when I'm teaching these, I focus mostly on teaching gestures with a +[647.320 --> 654.920] specific meaning. Think of these like gestures as a vocabulary word. We will talk about different +[654.920 --> 660.600] kinds of gestures, and then we will define it. For example, we will talk about the beckoning gesture, +[661.320 --> 666.680] and we will talk about what it means. It means come here. We will talk about the talking gesture. +[669.000 --> 674.200] What does that mean? She is talking and she needs to be quiet and tired of it, or we will talk about +[674.200 --> 681.160] a thump, or we will talk about the hitchhiking thumb. How that is different than a thumbs up. +[682.600 --> 687.880] Gestures like vocabulary words, we teach gestures, and I teach their meanings so that my students +[687.880 --> 692.680] are able to see someone using a gesture and define it like they would a vocabulary word. +[694.840 --> 699.800] In my non-verbal communication teaching guide, I have a whole list of different gestures +[701.480 --> 707.000] that you can use to know some gestures to teach. You can also find lists on the internet +[707.000 --> 713.640] if you're wanting to kind of DIY it yourself. All right, move and write along to touch. I believe that was +[713.640 --> 721.400] my gesture I used in the beginning. Touching someone is when you physically touch another person. +[722.360 --> 729.160] There are four different kinds of touching. There's actually five, but one of them is inappropriate, +[729.160 --> 737.720] so we're not going to talk about it here. There are four different kinds of touch. Functional, +[737.720 --> 746.680] professional, social polite, friendship, warmth, and love intimacy. Okay, let's talk about what +[746.680 --> 753.720] each of these are. A functional professional touch is the touching that occurs when a professional +[753.720 --> 759.400] must touch you to do his or her job. For example, the dentist has to touch your mouth. The hair lady +[759.400 --> 766.360] has to touch my hair. It's professional. I'm expecting her to touch me and she's doing it to do her job. +[767.320 --> 774.520] Second one is social polite, and this is touching that occurs in social routines. They're usually very +[774.520 --> 783.880] brief and they, let's see, sorry, I lost my spot. 
And they include things like a handshake, a pat on +[783.880 --> 788.680] the back, or a quick side hug. They're not going to last very long. We're just being polite. I'm +[788.680 --> 794.120] going to shake your hand and then we're done touching. Number three is friendship or warmth, +[794.120 --> 797.960] and this is touching that occurs between two people who are familiar with one another. +[799.800 --> 805.880] Now, when you teach this, or you know, you need to be very careful because this type of touch can +[805.880 --> 811.880] easily be confused with the next type, which is love intimacy. So you need to make sure that your +[811.880 --> 817.880] level of touch in this stage matches your partner so that you don't make that other person uncomfortable, +[817.880 --> 823.160] or you need to teach your student to make sure their level of touch matches their partners so they +[823.160 --> 830.120] don't make somebody uncomfortable. So friendship, warmth touching includes things like longer hugs, +[830.120 --> 836.280] putting your arms around the shoulders of somebody, or you know, holding hands. Well, holding +[836.280 --> 842.040] hands can also be in love intimacy. So if you're a good friend, you might give them a longer hug, +[842.040 --> 848.120] but if I hug you it's too long. Now I'm thinking, well, are we friends? Or is this like you being +[848.120 --> 856.600] intimate with me? So it's kind of that in between a social polite and intimacy. So the fourth one +[856.600 --> 864.280] is love intimacy, and this occurs between two people who are very close. This includes family, +[864.280 --> 872.280] very close friends, and significant others. You need to teach your students to be very careful to +[872.280 --> 877.880] use these touches with the appropriate people. Holding hands and touching someone's hair and +[877.880 --> 884.040] cuddling are all examples of love intimacy touching. So to teach this kind of nonverbal communication +[884.040 --> 891.480] touch, we just make a graph, and we talk about different, you know, I label, I make four different +[891.480 --> 896.840] squares. One is functional professional, one's social polite, friendship warmth and love intimacy, +[896.840 --> 902.200] and we make a list of the people who I would expect a functional professional touch with, +[902.200 --> 908.360] who I could expect a love intimacy touch with, who would be a good person to use friendship warmth +[908.360 --> 913.640] touch with, who should I use a social polite touch with. So we just sort people that we know into +[913.640 --> 921.800] the different categories of appropriate ways to touch them. Okay, next nonverbal communication +[921.800 --> 932.440] is proximics, aka personal space. So if somebody is too close to you, they're in your personal space, +[932.440 --> 939.400] and that's a type of nonverbal communication. Now, there are different kinds. There is a +[942.440 --> 950.600] scientific formula for what is appropriate as far as proximate goes. So proximics commonly called +[950.600 --> 957.640] personal space is the distance between you and another person. There are four levels intimate space, +[959.160 --> 965.240] personal space, social space, and public space. So we'll start from the middle and we'll work our +[965.240 --> 974.040] way out. Intimate space is anything that goes from touching your body to about 18 inches from +[974.040 --> 980.040] yourself. This is the space where you allow people with whom you are very close. 
So this could be +[980.040 --> 985.880] very close family members, very close friends, and significant others are probably the only people +[985.880 --> 993.880] you will allow in this space. Personal space is about 18 inches to four feet from your body. We will +[993.880 --> 1001.160] often allow friends and people we like in this space. Moving out again, we have social space. This +[1001.160 --> 1007.160] is four to ten feet from your body. This space is for people we don't know well, or for people with +[1007.160 --> 1014.440] whom we have no close relationship. Then last, the biggest ring is public space, which is what it +[1014.440 --> 1020.360] sounds like. Anything beyond ten feet from your body, where the rest of the public is, it contains +[1020.360 --> 1026.200] strangers and people who are not comfortable with. So this is important because it lets us know how +[1026.200 --> 1032.200] close it's appropriate to be to other people. And like I said, if somebody gets too close to me, +[1032.200 --> 1037.080] that makes me really uncomfortable if you're not one of my intimate people. At the same time, +[1037.640 --> 1043.000] if you're way out here in public space, but I think we're buds, that feels a little off to me too. +[1043.000 --> 1049.480] So to teach this, while I teach my students about these definitions, and then I like to get like +[1049.480 --> 1056.840] masking tape, and we measure this out on the ground to give an idea of what these spaces look like +[1056.840 --> 1063.720] visually. And then we'll do kind of that same activity that we did before, where we'll get the +[1063.720 --> 1069.960] four squares. And we will say, who are some people that I would allow in my intimate space? +[1070.600 --> 1075.320] Who are some people I would allow in my personal space? Who are people that might be in my social +[1075.320 --> 1080.120] space? And who are some people who would be in my public space? And we just think about +[1081.720 --> 1087.080] our space and our personal space and how we're sharing it and where people should be within that space. +[1087.240 --> 1096.280] Okay, the next type of nonverbal communication is whole body language. Our body language is the +[1096.280 --> 1102.360] way we position our head and our body to send a message to the people around us. When we tilt our +[1102.360 --> 1107.400] head to the side, it means I'm interested in what you're saying to me. If we lower our head, it +[1107.400 --> 1112.360] means we're probably unhappy. If our head stays upright, it means we're happy and we're in a good +[1112.360 --> 1119.160] mood or we're in a neutral mood that is neither happy nor unhappy. If we lean our body towards someone, +[1119.160 --> 1123.720] it means we're interested in what they have to say. And if we pull our body away from them, +[1123.720 --> 1127.000] it means we're not comfortable speaking to that person or that maybe we don't like them. +[1127.880 --> 1134.440] If you sit with an open posture like I am now, then it comes across as very welcoming and friendly. +[1135.080 --> 1141.400] If you close yourself up and you sit in the closed posture, then that is closed off. It's not as +[1141.400 --> 1148.680] welcoming and it doesn't look as friendly. So body language is also usually used with gestures +[1148.680 --> 1154.280] and facial expressions and tone of voice, all kind of combined together to give you a clue +[1154.280 --> 1161.320] as to what the other person is thinking. 
So to teach this skill, I will use video clips or maybe +[1161.320 --> 1169.960] look at pictures from a book and I will not do the audio if I can. Like if it's some of the +[1169.960 --> 1174.360] Disney shorts are really good for not having audio and you can just look at the body language. +[1176.680 --> 1180.440] But we'll look at the picture or the video clip and we'll describe the body language of the +[1180.440 --> 1185.400] person that's in it. And then we'll talk about what do we think that body language is communicating. +[1186.600 --> 1188.040] And we'll do that three or four times. +[1190.760 --> 1197.720] Okay, the next type of nonverbal communication is vocalix, which we commonly refer to as tone of voice. +[1199.960 --> 1208.280] This is how we use our voice, not our words, but our voice to convey a message. So think of the tone +[1208.280 --> 1214.280] of voice as the background to your words. Your tone of voice is going to change a lot based on +[1214.280 --> 1219.080] different situations. For example, you would use a different tone of voice at a football game +[1219.080 --> 1224.520] than you would in a nice restaurant. Your voice might also sound different in different context +[1224.520 --> 1229.560] when your emotions are changing. For example, your voice sounds different when you're nervous +[1229.560 --> 1235.720] versus in a situation where you're comfortable. And it's important to consider the context of each +[1235.720 --> 1244.440] situation when trying to understand the meaning of someone's voice. Vocal expression is also usually +[1244.440 --> 1250.760] tied to facial expressions. They go hand in hand. So this means if somebody's face looks sad, +[1250.760 --> 1257.320] their voice probably sounds sad too. So what I tell my students is if they have a hard time +[1257.320 --> 1262.040] understanding the tone of voice to also pay attention to the facial expressions and the body +[1262.040 --> 1270.600] language, to give them clues as to how the other person is feeling. Okay, so to teach vocalix or tone +[1270.600 --> 1277.880] of voice, what I will do is I will give my students a context and a facial expression and words. +[1277.880 --> 1285.000] And then they will practice using different tones of voice to say that word. So for example, +[1285.880 --> 1290.600] the context could be your brother or sister borrowed your shirt and gave it back with a stain on it. +[1290.600 --> 1295.000] The facial expression would be angry and the words would be thank you. Thank you. +[1297.000 --> 1301.480] Same words, thank you. This time your mom gave you broccoli for dinner. You hate broccoli. +[1302.920 --> 1309.560] Thank you. Same words again. Thank you. Your dad surprised you with a new phone. Thank you. +[1309.800 --> 1316.120] Thank you again. Someone hands you a tissue after you've been crying. Thank you. +[1316.920 --> 1323.240] So this talks about how different situations and different scenarios are going to sound different +[1323.240 --> 1327.960] with different tone of voice even though the words might be exactly this. Okay, now I would be +[1327.960 --> 1333.320] remiss to not talk about eye contact when we're talking about types of nonverbal communication. +[1333.560 --> 1339.640] Okay, understanding eye contact will help our students become better nonverbal communicators. +[1340.520 --> 1345.400] Remember how I talked about most of our students with autism focus on the lower two thirds of the +[1345.400 --> 1353.080] face. 
A lot is going on in these upper and this upper third. So teaching them why eye contact +[1353.080 --> 1358.520] is important or at least why looking at this upper area is important is going to help them become +[1358.600 --> 1364.440] a better nonverbal communicator. It also helps with connection and helps us to connect with others +[1364.440 --> 1369.640] and feel closer and it helps others feel closer to us. So I explain all of those things when I'm talking +[1369.640 --> 1376.200] about eye contact. The last one that we talked about is physical appearance. I again just kind of +[1376.200 --> 1381.560] briefly touch on this. I explain what physical appearance is and how you know sometimes some +[1381.560 --> 1386.840] things in your physical appearance you can change and some things you can't. So we talk about how you +[1387.080 --> 1394.760] know when you change your hair color or well okay some things like your height and your weight +[1394.760 --> 1399.640] and your natural hair color are things you cannot change. But you can change things like how you +[1399.640 --> 1404.440] dress and the accessories, how you groom yourself if you wash your hair if you cut your nails +[1405.000 --> 1409.720] that affects what people think about you. So if I come in and my hair is clean and my nails are +[1409.720 --> 1415.640] done people are going to think I'm a clean person. If I come in and I haven't washed my hair in a +[1415.720 --> 1421.720] week and my nails are long and dirty that's going to affect how people think of me. Also how you +[1421.720 --> 1427.640] know we pick our clothes based on the type of image we want to portray. I you know I'm trying to +[1427.640 --> 1433.000] choose something professional looking as I'm talking to you and I'm not wearing my workout clothes +[1433.000 --> 1437.640] that I usually wear all day long because I want you to think of me as a professional and somebody +[1437.640 --> 1443.000] who knows what I'm talking about. So physical appearance is a type of nonverbal communication. +[1443.000 --> 1449.320] So I hope you learned some new things about nonverbal communication. I hope you have a better +[1449.320 --> 1454.840] understanding about what it is, what makes up nonverbal communication. I hope you got some ideas +[1454.840 --> 1459.160] on things you can use to teach your kids how to be better nonverbal communicators. +[1461.160 --> 1468.840] Now I know that this was a lot of information and I have created a resource, a teaching guide +[1469.320 --> 1476.040] that I would love for you to have that walks you through teaching these different types of nonverbal +[1476.040 --> 1481.880] communication. I literally was reading off of it today as I was going over it with you so you +[1481.880 --> 1488.920] know what is in it and it's going to give you some words to help you teach. It's going to give you +[1488.920 --> 1495.160] some visuals. It's going to give you a strategy and a place to start and it's going to help you +[1495.160 --> 1501.160] teach these skills in a really strategic way. So if you're interested in purchasing this for me, +[1501.160 --> 1507.960] there is a link in the description below. Additionally, I have a whole bundle of teaching guides +[1507.960 --> 1514.040] that teach social communication skills. This is included in it and all of my teaching guides are +[1514.040 --> 1519.000] included in it. 
So it helps you, it's full of guides that help you teach things like taking +[1519.000 --> 1524.360] someone's perspective, code switching, power relationships, conversation skills, friendship +[1524.360 --> 1531.320] making skills. I have teaching guides to help you teach these skills to your students. +[1531.320 --> 1538.520] So there's a link for that in the description below as well. Thank you again, thank you for taking +[1538.520 --> 1542.760] your time to spend with me. Thank you for taking the time to learn something new. I hope you found +[1542.760 --> 1547.880] it helpful. If you'd like to keep getting videos like this or knowing when some new ones come out, +[1547.880 --> 1552.920] click subscribe and be a part of our community. Thanks! diff --git a/transcript/allocentric_uxBeSEughAc.txt b/transcript/allocentric_uxBeSEughAc.txt new file mode 100644 index 0000000000000000000000000000000000000000..f250bc90fc76bd56bee279bef701e642f469d674 --- /dev/null +++ b/transcript/allocentric_uxBeSEughAc.txt @@ -0,0 +1,54 @@ +[0.000 --> 1.700] I don't want you to have to. +[1.700 --> 4.700] You don't want to have to. +[4.700 --> 5.700] You don't want to have to. +[5.700 --> 8.380] I find the Ariana much more fragrance than the Raffialla. +[8.380 --> 10.180] Ariana, oh! +[10.180 --> 12.020] I'll keep that in mind. +[12.020 --> 12.780] How you doing? +[12.780 --> 14.300] Fine. +[14.300 --> 17.420] I never got a chance to thank you for holding the camp bus. +[17.420 --> 18.320] Oh, please, please. +[18.320 --> 20.700] Anytime you need a bus, I am your guy. +[20.700 --> 22.460] And I noticed Aaron's teeth are looking good. +[22.460 --> 23.700] Thanks to you. +[23.700 --> 26.500] I just hope she's remembering to wear her night retainer. +[26.500 --> 27.600] Well, you know how kids are. +[27.600 --> 28.420] I can't. +[28.420 --> 30.620] Oh, please, the minute my Kevin gets off that bus, +[30.620 --> 32.260] it's goodbye by play. +[32.260 --> 33.340] Is that for you? +[33.340 --> 34.900] Oh, yes. +[34.900 --> 36.780] When Deadrun and I got divorced, I decided +[36.780 --> 38.340] that I had to learn how to cook. +[38.340 --> 38.860] Huh. +[38.860 --> 40.860] The walkie world of Thai cooking. +[40.860 --> 42.500] I'm branching out. +[42.500 --> 44.780] No matter the fact, I'm taking a Thai cooking course +[44.780 --> 45.580] this summer. +[45.580 --> 46.660] Really? +[46.660 --> 48.820] I've always been interested in Asian cooking. +[48.820 --> 49.660] Really? +[49.660 --> 50.420] Mm-hmm. +[50.420 --> 51.260] Well, why don't you join me? +[54.260 --> 57.220] When I think about it, over the years, +[57.220 --> 59.900] there were less and less moments in the course of the day +[59.900 --> 63.620] when Ben and I actually made real eye contact. +[63.620 --> 69.980] If you are not going to share what he's almost asleep. +[69.980 --> 72.340] Maybe it was the stuff of life. +[72.340 --> 74.660] Who's going to take Aaron to school? +[74.660 --> 78.180] Who's turn is it to pick up Josh from his clarinet lessons? +[78.180 --> 80.660] But after a while, there was a disturbing comfort +[80.660 --> 82.620] and not really having to deal with each other. +[82.620 --> 85.660] Because somehow, you just get used to the disconnection. +[87.340 --> 89.660] And even at night, when we could finally come together, +[89.660 --> 91.420] we wound up facing forward. +[91.420 --> 94.540] Yeah, we were tired, but I think we were afraid +[94.540 --> 98.140] that if we faced each other, there'd be nothing there. 
+[98.140 --> 100.420] We're learning mee krob next week.
+[100.420 --> 103.140] Mee krob?
+[103.140 --> 104.460] I'll let you know.
+[104.460 --> 105.980] OK.
+[105.980 --> 108.460] Oh, no.
+[108.460 --> 109.380] I'll call you.
+[109.380 --> 110.260] Oh, you call me.
+[110.260 --> 111.260] OK. diff --git a/transcript/allocentric_wW7Z52plM0s.txt b/transcript/allocentric_wW7Z52plM0s.txt new file mode 100644 index 0000000000000000000000000000000000000000..ce6f2882699f26a03cd158fa07150c8830c165e7 --- /dev/null +++ b/transcript/allocentric_wW7Z52plM0s.txt @@ -0,0 +1,47 @@
+[0.000 --> 12.000] Nonverbal communication differences occur between cultures because of how different people around the world interpret actions in social interaction.
+[12.000 --> 23.000] Understanding the cultural differences in nonverbal communication is important for those with a goal to work in international business.
+[23.000 --> 34.000] Types of nonverbal communication vary based on culture and country, but the areas of differences tend to fall within the following eight areas.
+[34.000 --> 47.000] Each provides an area where people doing business in other parts of the world should understand the nonverbal communication differences between cultures and how to prepare for them.
+[48.000 --> 60.000] Eye contact. Eye contact signals confidence in the West, but can be seen as rude or challenging in parts of Asia and the Middle East.
+[60.000 --> 76.000] Also, there are gender rules in eye contact, with many Eastern cultures discouraging women from making eye contact with men, as it conveys authority or sexual interest.
+[78.000 --> 93.000] Touch. Touch is often used frequently in communication, even in a business setting, with customs such as a handshake, but other cultures consider touching other people inappropriate.
+[93.000 --> 105.000] Those who live in Asia tend to take a more conservative approach when it comes to touching, with a bow typically replacing a handshake.
+[105.000 --> 111.000] Another example of differences with touching is patting someone on the head.
+[111.000 --> 124.000] In the US, it is seen as endearing and shows affection with children, but in some Asian cultures touching children or adults on the head is disrespectful.
+[124.000 --> 135.000] The US is more conservative in other areas, such as not kissing on the cheek as they do in many other parts of Europe.
+[135.000 --> 145.000] Gestures. Many business people who work internationally discipline themselves to keep hand gestures to a minimum.
+[145.000 --> 156.000] For example, pointing at someone else is an insult in most parts of the world, but in some places it is often simply used as a reference.
+[156.000 --> 165.000] Some cultures stick out their tongue to greet people, but that doesn't fly in other parts of the world.
+[165.000 --> 174.000] The most common gesture in the world is a nod, but even that can mean different things in other cultures.
+[174.000 --> 177.000] Physical distance.
+[178.000 --> 188.000] The rule of thumb here, no matter where you are in the world, is to give people more space than you think they might need.
+[188.000 --> 191.000] Only get close if invited.
+[191.000 --> 196.000] People in different cultures use physical distance differently.
+[197.000 --> 203.000] However, it's best never to enter anyone's personal space, about two feet.
+[203.000 --> 212.000] If it's not personal intimacy, many people find such proximity uncomfortable.
+[212.000 --> 215.000] Facial expressions.
+[215.000 --> 222.000] The good news is that facial expressions for happiness, sadness, anger and fear are universal.
+[222.000 --> 229.000] The bad news is that not every culture is okay with using them in a business setting.
+[229.000 --> 239.000] The Japanese, for example, try to maintain a neutral facial expression, believing that showing your emotions burdens the other person.
+[239.000 --> 242.000] Appearance.
+[242.000 --> 246.000] Conservative attire is the safest bet.
+[246.000 --> 259.000] Some places in the United States are fine with very relaxed appearances, while others even see an exposed shoulder or leg as a cause for offense.
+[259.000 --> 264.000] The best choice is to simply dress conservatively.
+[264.000 --> 271.000] You can also loosen up your wardrobe if it becomes apparent that it is acceptable.
+[272.000 --> 274.000] Posture.
+[274.000 --> 278.000] Again, the traditional route is the best route.
+[278.000 --> 283.000] Don't slouch when sitting or sit with legs crossed.
+[283.000 --> 291.000] Face people as they speak to you and nod enough to show that you are paying attention to what they say.
+[291.000 --> 296.000] Stay mindful of where you sit in meetings.
+[296.000 --> 304.000] In some cultures there is a strict hierarchy for who gets to sit where.
+[304.000 --> 307.000] Paralanguage.
+[307.000 --> 314.000] Paralanguage refers to communication that is vocalized but not words.
+[314.000 --> 321.000] This includes the tone of voice, loudness, speed of speech and inflection.
+[321.000 --> 328.000] Paralanguage is the key to understanding the context or meaning of the words used.
+[328.000 --> 341.000] It's important to be mindful of these issues and to understand they are not discernible in emails and texts, so great care must be used in the words you choose.
+[341.000 --> 345.000] High context versus low context.
+[345.000 --> 356.000] Another way to help with understanding the cultural differences in nonverbal communication is understanding the difference between high context and low context cultures.
+[356.000 --> 363.000] High context cultures rely more on nonverbal communication than low context cultures.
+[363.000 --> 371.000] They use personal relationships, social hierarchies and cultural knowledge to convey meaning.
+[371.000 --> 384.000] In low context cultures words are more important. Communication is direct. Relationships begin and end quickly and hierarchies are relaxed.
+[384.000 --> 395.000] For those who aspire to work in an international business, understanding these nonverbal communication differences between cultures is the key to success. diff --git a/transcript/allocentric_xPiRQ1G241k.txt b/transcript/allocentric_xPiRQ1G241k.txt new file mode 100644 index 0000000000000000000000000000000000000000..9f027b5bf97b965b803a016b6ba8f8e6f361b722 --- /dev/null +++ b/transcript/allocentric_xPiRQ1G241k.txt @@ -0,0 +1,127 @@
+[0.000 --> 6.240] My name is Ryan Peters. I'm a post-doctoral researcher working with Chen Yu at the University of Texas at Austin,
+[6.240 --> 12.240] and today I'm presenting our paper titled, Are You With Me? Modeling Joint Attention from Child Egocentric Vision.
+[12.240 --> 18.440] Human cognition is, in many ways, a cooperative social process.
+[18.440 --> 26.840] And one of the key mechanisms that enables such social cognition is when we coordinate and share our attention to an object or task.
+[27.240 --> 33.080] A huge body of work shows that such shared or joint attention is crucial for early development.
+[33.080 --> 36.720] It's important for early language learning and other kinds of cultural transmission.
+[36.720 --> 41.560] It predicts developmental outcomes, including language, cognitive and self-regulation abilities.
+[41.560 --> 48.760] And because of these things, it's become a widely used diagnostic marker and target for clinical interventions.
+[48.760 --> 54.760] From the beginning, gaze following has been seen as a kind of holy grail of joint attention.
+[54.760 --> 62.520] Indeed, in Scaife and Bruner's pioneering work, joint attention was equivalent to and operationalized as gaze following.
+[62.520 --> 70.520] Because of that, much of the subsequent work has been designed to elicit and measure how well infants follow a social partner's gaze.
+[70.520 --> 79.160] In nearly all this work, looking at the social partner's face has been interpreted as the primary behavioral pathway to check a partner's attentional state.
+[79.160 --> 87.560] And therefore, face looks are deemed as a kind of indicator of awareness of being in joint attention with a social partner.
+[87.560 --> 97.800] However, several recent studies have shown that infants rarely look at their parents' faces during naturalistic parent-child activities such as toy play or meal preparation.
+[97.800 --> 107.400] Since infants aren't looking at their parents' faces in these studies, they instead define joint attention simply as looking at the same object at the same time.
+[107.480 --> 119.800] Crucially, these studies still find predictive relations between patterns of joint attention and vocabulary development, suggesting that awareness may not be a necessary component of joint attention.
+[119.800 --> 128.200] However, this implication assumes that face looks are the only pathway to achieve awareness of joint attention.
+[128.200 --> 135.800] Here, we challenge that assumption and ask whether children can achieve awareness of joint attention without looking at their parents' face.
+[135.880 --> 142.760] Building on recent work showing that attending to held objects plays a critical role in establishing and maintaining joint attention,
+[142.760 --> 147.240] we hypothesize that hand-eye coordination may provide an alternate pathway
+[147.240 --> 149.640] to gaining awareness of joint attention.
+[149.640 --> 153.640] To explore this hypothesis, we used a classification approach that combines
+[153.640 --> 157.000] head-mounted eye tracking and computational modeling.
+[157.000 --> 162.840] First, we brought parent-child dyads to play with a set of 24 toys in a naturalistic environment.
+[162.920 --> 168.600] Then, using the eye trackers, we collected their egocentric views and gaze data, as you see here.
+[168.600 --> 176.840] Next, using that gaze data, we categorized the child egocentric views as belonging to a moment of joint attention or not.
+[176.840 --> 179.800] Here are some of the child egocentric views from the dataset.
+[179.800 --> 183.320] Half of these are from moments of joint attention and half are not.
+[183.320 --> 189.960] Determining which is which based solely on features in a single egocentric view appears to be a non-trivial task.
+[189.960 --> 193.160] But this is precisely what we set out to train our models to do.
+[194.520 --> 203.720] To do so, we fed the child egocentric videos into CNN models and provided the ground truth classifications to train them to classify the images.
+[203.720 --> 206.440] We then tested the models using held out data.
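The pipeline described in this part of the talk, fine-tuning a CNN to score single egocentric frames as joint attention or not and then evaluating the confidence scores on held-out data with a 0.5 threshold and ROC AUC, could look roughly like the following Python sketch. The dataset objects, the ResNet-18 backbone, and the hyperparameters are illustrative assumptions, not the authors' actual implementation.

import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from torchvision import models
from sklearn.metrics import roc_auc_score

def build_model():
    # Fine-tune an ImageNet-pretrained CNN to emit one joint-attention logit per frame.
    model = models.resnet18(weights="IMAGENET1K_V1")
    model.fc = nn.Linear(model.fc.in_features, 1)
    return model

def train(model, train_set, epochs=5, lr=1e-4):
    # train_set is a hypothetical Dataset of (egocentric_frame, label) pairs,
    # where label is 1.0 for ground-truth joint attention and 0.0 otherwise.
    loader = DataLoader(train_set, batch_size=64, shuffle=True)
    opt = torch.optim.Adam(model.parameters(), lr=lr)
    loss_fn = nn.BCEWithLogitsLoss()
    model.train()
    for _ in range(epochs):
        for frames, labels in loader:
            opt.zero_grad()
            loss = loss_fn(model(frames).squeeze(1), labels.float())
            loss.backward()
            opt.step()

def evaluate(model, test_set, threshold=0.5):
    # Held-out evaluation: confidence scores in [0, 1], binarized at 0.5 as in the talk.
    loader = DataLoader(test_set, batch_size=64)
    scores, labels = [], []
    model.eval()
    with torch.no_grad():
        for frames, batch_labels in loader:
            scores.extend(torch.sigmoid(model(frames).squeeze(1)).tolist())
            labels.extend(batch_labels.tolist())
    preds = [score >= threshold for score in scores]
    return {"roc_auc": roc_auc_score(labels, scores), "predictions": preds}

Per-category sensitivity and specificity, as reported later in the talk, would then come from grouping these held-out predictions by the manually coded holding labels.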
+[207.960 --> 211.640] Moving on to the first set of results, we first addressed the question,
+[211.640 --> 217.640] do children's egocentric views contain in-the-moment signals allowing for awareness of joint attention?
+[218.200 --> 223.400] To test this, we explore whether models can classify images as joint attention or not,
+[223.400 --> 224.680] better than chance.
+[224.680 --> 229.240] And one-sample, two-tailed t-tests comparing model-level, subject-level,
+[229.240 --> 233.640] and item-level mean balanced accuracy against chance confirm our hypothesis.
+[235.720 --> 239.400] To better understand our model's performance, we also look at the ROC curve,
+[239.400 --> 244.280] which characterizes overall model performance by plotting inverse specificity,
+[244.280 --> 249.640] or how well the models can detect moments that are not joint attention, along the x-axis,
+[249.640 --> 255.880] against sensitivity, or how well the models can detect moments that are joint attention, along the y-axis.
+[255.880 --> 262.280] And this is done for the full range of threshold values used to binarize the confidence scores output by the models.
+[262.280 --> 267.400] Confidence scores range from 0 to 1, and this black point in the center here
+[267.400 --> 272.200] marks the threshold of 0.5, which we used to generate our results.
+[272.200 --> 276.680] So at this point, frames with confidence scores above 0.5 are classified as
+[276.680 --> 281.880] joint attention and those with confidence scores below 0.5 are classified as not joint attention.
+[281.880 --> 287.160] The bottom left-hand corner marks the threshold of 1 for which every frame would be categorized as not
+[287.160 --> 291.640] joint attention, while the top right-hand corner marks the threshold of 0 for which every frame would
+[291.640 --> 293.400] be categorized as joint attention.
+[293.400 --> 299.480] Finally, this dashed line along the diagonal shows performance for a random classifier.
+[299.480 --> 304.840] So the fact that our curve lies above the diagonal confirms our models perform better than
+[304.840 --> 310.840] chance across the full range of threshold values. The area under the curve, or the ROC AUC,
+[310.840 --> 316.680] puts a number to this, indicating there's a 67% probability that our models will produce a higher
+[316.680 --> 322.200] confidence score for a randomly chosen joint attention frame than a randomly chosen frame from a moment
+[322.200 --> 327.880] that is not joint attention. To explore the role of object holding, we also classified each frame as
+[327.880 --> 333.560] belonging to different visible holding categories based on a combination of manually coded child and
+[333.560 --> 339.160] parent object holding using the third person videos and automated object detections to determine
+[339.160 --> 345.720] the visibility of objects in the child's egocentric views. Using these, we can compare how the models classify
+[345.720 --> 350.680] frames in which neither the child nor parent were holding a visible object versus frames in which
+[350.680 --> 356.440] only the child, only the parent or both the child and parent were jointly holding the same visible object.
+[357.160 --> 362.920] These last three holding categories all require that there is only a single held visible object,
+[362.920 --> 367.560] allowing for a clean line of reasoning as to why such views might support detection of joint
+[367.560 --> 372.120] attention. However, that is not always the case.
There are frames in which the child or parent are
+[372.120 --> 375.640] holding two visible objects, or frames in which the child and parent are each holding different
+[375.640 --> 381.000] visible objects. So we created a separate category for frames with such conflicting holding cues.
+[381.880 --> 386.120] Moving on to the results here, we address the question, does object holding provide in the moment
+[386.120 --> 392.680] signals useful for detecting joint attention? And our hypotheses are that models will leverage signals
+[392.680 --> 399.800] tied to visible holding to detect moments of joint attention and signals tied to the lack of visible
+[399.800 --> 405.640] object holding to detect moments that are not joint attention. Focusing on the first hypothesis,
+[405.640 --> 410.920] we again look at sensitivity, or how well the models detect joint attention moments, for each
+[411.000 --> 416.200] of the visible holding categories, neither, only child, only parent, both, and conflicting, along the
+[416.200 --> 423.240] x-axis here. And pairwise comparisons across the five categories reveal that models show greater
+[423.240 --> 428.360] sensitivity for frames with visible held objects, and in particular, those in which both child and
+[428.360 --> 436.040] parent are jointly holding an object, confirming our hypothesis. Next, focusing on the second hypothesis,
+[436.040 --> 440.520] we again look at specificity, or how well the models do at detecting moments that are not
+[440.520 --> 445.160] joint attention, for each of the holding categories, and here pairwise comparisons across the categories
+[445.160 --> 450.360] reveal that models show greatest specificity for moments in which neither child nor parent
+[450.360 --> 456.520] are holding a visible object, again confirming our hypothesis. Finally, we can look at the ROC curves
+[456.520 --> 460.760] for each of the holding categories, providing overall estimates of how well the models do for
+[460.760 --> 465.640] each of the categories. And again, the points show the values associated with the threshold of 0.5
+[465.720 --> 471.160] used to generate our results. And as you can see, the models do better than chance for all the
+[471.160 --> 478.360] holding categories, but they're most accurate for frames in which both child and parent are jointly
+[478.360 --> 487.640] holding a visible object and struggle with frames containing conflicting holding cues. So we see
+[487.640 --> 493.080] that models are indeed able to classify joint attention better than chance, and they leverage
+[493.160 --> 499.080] visible object holding to do this. Taken together, we think this confirms our overarching hypothesis that
+[499.080 --> 504.600] children might be able to achieve awareness of their parent's attentional state by leveraging
+[504.600 --> 510.520] in the moment visual signals tied to object holding. However, one major difference between our models
+[510.520 --> 515.560] and children is that our models had a training signal. We told them what frames were and were not
+[515.560 --> 521.960] joint attention, but what could be the training signal for children? In study 2, we address this question.
+[522.760 --> 528.920] Based on study 1, we know that visual signals tied to object holding can be used to detect moments
+[528.920 --> 535.560] of joint attention. Next, we wondered what if children simply assume they are in joint attention
+[535.560 --> 541.240] when they look at an object held by themselves or their parents?
In other words, what if children can
+[541.240 --> 547.000] leverage their own attentional state in conjunction with object holding as a training signal to learn
+[547.240 --> 553.160] to detect moments of joint attention? To explore this hypothesis, we trained three different models with
+[553.160 --> 558.200] joint attention defined using a combination of object holding and child attention. One model was
+[558.200 --> 562.840] trained using a dataset consisting of frames in which only the child was holding a visible object,
+[562.840 --> 567.400] and for which frames in which the child was attending to the held object were defined as joint
+[567.400 --> 572.760] attention and frames in which the child was not attending to the held object were defined as not
+[573.000 --> 577.800] joint attention. A second model was similarly trained using only parent frames and a third model
+[577.800 --> 584.120] was trained using either only child or only parent frames. After training, we then tested the models
+[584.120 --> 589.240] on ground truth joint attention, exactly as was done for the models in study 1.
+[590.920 --> 594.840] Moving on to the results, since we're asking whether models can learn to detect joint attention,
+[594.840 --> 600.280] our primary hypothesis is that models will show greater than chance sensitivity for the trained
+[600.360 --> 606.600] categories. However, we also wondered whether the models might be able to generalize what they
+[606.600 --> 612.280] learned to other object holding categories and maybe even show similar patterns of sensitivity
+[612.280 --> 618.200] across holding categories to models trained on ground truth joint attention. So here are the
+[618.200 --> 623.160] results for each of the models and we see that all three show greater than chance sensitivity
+[623.240 --> 630.280] for the trained categories, marked in blue, red, and green respectively, confirming our first hypothesis.
+[631.240 --> 636.520] Next, looking at how the models generalize across holding categories and how those distributions
+[636.520 --> 641.800] compare with what we saw in study 1, in the upper left-hand corner here, we see that all three models
+[641.800 --> 647.240] show striking similarities with the models from study 1. In particular, all models show greatest
+[647.240 --> 652.680] sensitivity for frames in which both parent and child are jointly holding a visible object.
+[653.160 --> 657.800] And lowest or nearly lowest sensitivity for frames in which neither parent nor child are holding
+[657.800 --> 663.640] an object, confirming our second hypothesis. It's worth noting that the model trained on frames with
+[663.640 --> 668.600] either child or parent holding shows the greatest similarity, highlighting the importance of having
+[668.600 --> 675.800] access to both types of cues. To conclude, our results broadly show that children might be able to
+[675.800 --> 681.880] achieve awareness of joint attention by leveraging in the moment visual signals tied to object holding,
+[681.880 --> 687.080] and children can theoretically generate their own training signal and learn to detect moments of
+[687.080 --> 691.800] joint attention simply by assuming they're in joint attention when they look at an object held by
+[691.800 --> 696.360] themselves or their parents. In other words, face looks are not the only way to gain awareness of
There are complementary social signals encoded in +[703.080 --> 708.840] bodily behaviors such as attending to objects held by oneself or a social partner. All together, +[708.840 --> 713.480] I think this work is a good case study demonstrating how things that we study at the social level +[713.480 --> 719.640] can be grounded and embedded in the sensory motor level. In other words, social and sensory motor levels +[719.640 --> 726.040] provide complementary rather than competing explanations. Here are my references. And to conclude, +[726.040 --> 736.040] I want to thank everyone at UT Austin and Indiana University who made this work. diff --git a/video/TED_-FOCpMAww28.f140.m4a b/video/TED_-FOCpMAww28.f140.m4a new file mode 100644 index 0000000000000000000000000000000000000000..d71867a7b4dd899495e697a9c632133b589dfd8e --- /dev/null +++ b/video/TED_-FOCpMAww28.f140.m4a @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ee4e6d7ce68266ecbed30cc83308d2f66757896dbe3722e69862721dce18178 +size 7714516 diff --git a/video/TED_-FOCpMAww28.f247.webm b/video/TED_-FOCpMAww28.f247.webm new file mode 100644 index 0000000000000000000000000000000000000000..b933a1ea093c89f1290ae3d9d18a37a3d39ea21d --- /dev/null +++ b/video/TED_-FOCpMAww28.f247.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23def1a6d9065ed6bd2c47f04b20565dd79db74a2677c2d039cdddc328ff2e6b +size 31918601 diff --git a/video/TED_-FOCpMAww28.mp4 b/video/TED_-FOCpMAww28.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..70ccb875e3c095a6387c243dcfe399580f933afa --- /dev/null +++ b/video/TED_-FOCpMAww28.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ac681b558a40d6a6d40f2079b61a0733dc9f0c707cb00f33a06cc43cf85cc61 +size 39800301 diff --git a/video/TED_1zpf8H_Dd40.f140.m4a b/video/TED_1zpf8H_Dd40.f140.m4a new file mode 100644 index 0000000000000000000000000000000000000000..ea89b5c9735e9cb51ab09deb2774fc9bd057a14a --- /dev/null +++ b/video/TED_1zpf8H_Dd40.f140.m4a @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ecfdb144906245e02f8843c08e5eb21541ee0a131140cf45de8dc1db4e00797c +size 20215212 diff --git a/video/TED_1zpf8H_Dd40.f616.mp4 b/video/TED_1zpf8H_Dd40.f616.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..a6d98068e2e71d091dab8a8d812fe9220ce4b5ca --- /dev/null +++ b/video/TED_1zpf8H_Dd40.f616.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6d21dd11cae246d6d5dbce324732b5e9bbf395f80c5bdbed1b61d7f21c35834a +size 342801378 diff --git a/video/TED_1zpf8H_Dd40.mp4 b/video/TED_1zpf8H_Dd40.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..b3358ca8ac38c9ad7442ba6ab08d20ac73dffd11 --- /dev/null +++ b/video/TED_1zpf8H_Dd40.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fd41c1ce27e2513a5b837be900b25351f1d1c7c7ae4a5592e085eb7294a4b1e +size 363265505 diff --git a/video/TED_4TQETLZZmcM.f140.m4a b/video/TED_4TQETLZZmcM.f140.m4a new file mode 100644 index 0000000000000000000000000000000000000000..6751e3899c07c8c735a394d84e09eb94da99de55 --- /dev/null +++ b/video/TED_4TQETLZZmcM.f140.m4a @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7bb64ed1ff97f2f41e05d49ebff9b17320e3478929c3adfaefec10a95748515 +size 17911383 diff --git a/video/TED_4TQETLZZmcM.f248.webm b/video/TED_4TQETLZZmcM.f248.webm new file mode 100644 index 0000000000000000000000000000000000000000..6d2865db476061d9b6c2f0630313d6eb6eb56a93 --- /dev/null 
+++ b/video/TED_4TQETLZZmcM.f248.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3593e23ed112ca23feea2301c9ccb735b92a20cdc1278992ebe224990936af48 +size 132496271 diff --git a/video/TED_4TQETLZZmcM.mp4 b/video/TED_4TQETLZZmcM.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..2907ae76b61c06d50a47290948cc3fa76f478725 --- /dev/null +++ b/video/TED_4TQETLZZmcM.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:632c5772f6b008fd84f8d1da5ebde7bb14168d00eec8aa766036748a3b287280 +size 150775202 diff --git a/video/TED_4jwUXV4QaTw.f251.webm b/video/TED_4jwUXV4QaTw.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..701a45539d3218112e9447070bb5ba02852f291b --- /dev/null +++ b/video/TED_4jwUXV4QaTw.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e07cd39eb9d7bff0b2a05e8595bcc79c8f7715ee1be7d76a1150a6aac2d5f65 +size 12394062 diff --git a/video/TED_4jwUXV4QaTw.f616.mp4 b/video/TED_4jwUXV4QaTw.f616.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..142b681dfbd676e021b5a6175d11eb2d0710aaf5 --- /dev/null +++ b/video/TED_4jwUXV4QaTw.f616.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ef6a93bd45e3d7acfdcac5f996a29d60424559d48b1fa56c521f03ba6abf168 +size 150776044 diff --git a/video/TED_4jwUXV4QaTw.mp4 b/video/TED_4jwUXV4QaTw.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..ca4594ae7692331838330615db797fff55676306 --- /dev/null +++ b/video/TED_4jwUXV4QaTw.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5df6941601d36658eaeccc2d83e59255262c9f496c41855f60a1ea2b75844a7 +size 163162334 diff --git a/video/TED_79HMPQj55yc.f251.webm b/video/TED_79HMPQj55yc.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..710da6b3fc18d3c0d76c73238eda835e780bcb23 --- /dev/null +++ b/video/TED_79HMPQj55yc.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9af11db8d0b3a106df9a071dd54901e45fe3d69361191272bcaa9d771f44698b +size 7154659 diff --git a/video/TED_79HMPQj55yc.mp4 b/video/TED_79HMPQj55yc.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..0b5a596d12536dd07f4bc5815cc09dee55ab2830 --- /dev/null +++ b/video/TED_79HMPQj55yc.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18acb649c593a80331f04108e47212a369b79a297d32715c716f9ccb26cbadd9 +size 80257133 diff --git a/video/TED_8S0FDjFBj8o.f251.webm b/video/TED_8S0FDjFBj8o.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..4a789c869ef009a98728e3d2d1e335a6c52bd05d --- /dev/null +++ b/video/TED_8S0FDjFBj8o.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:503281e01abd1a6e4d9e839db76e58d600017bd2d1794babe9d69fda2ce55c5a +size 4564850 diff --git a/video/TED_8S0FDjFBj8o.mp4 b/video/TED_8S0FDjFBj8o.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..032d1b002c161b8ef1ca0991225eea74d9ccc6c9 --- /dev/null +++ b/video/TED_8S0FDjFBj8o.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ec9fcd5ffb9dcaf40c61eb48d78cc878972cd59c74107ed8986f4569ad4821d +size 57425536 diff --git a/video/TED_E6NTM793zvo.f140.m4a b/video/TED_E6NTM793zvo.f140.m4a new file mode 100644 index 0000000000000000000000000000000000000000..9aa9eb63768590f055cd7fd59a650a995e205ed2 --- /dev/null +++ b/video/TED_E6NTM793zvo.f140.m4a @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:6c89f57b7dfe48f71aeeea42885ac10860a3a67b2cfe972639a16d1f8ab2807b +size 13373611 diff --git a/video/TED_E6NTM793zvo.f248.webm b/video/TED_E6NTM793zvo.f248.webm new file mode 100644 index 0000000000000000000000000000000000000000..ec7252c5649f16d044ff1c08b1137a48348c9dd1 --- /dev/null +++ b/video/TED_E6NTM793zvo.f248.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb84785e323934ed93d4f0d647a38426268341b3767c35015301b9b123a9afef +size 81738550 diff --git a/video/TED_E6NTM793zvo.mp4 b/video/TED_E6NTM793zvo.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..5dbefe95da267eebd7e71b207dabd59811ee31bb --- /dev/null +++ b/video/TED_E6NTM793zvo.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b3c29509ac8e3d51331adfa79f719e835915183da857622e545e2f269d36434 +size 95629085 diff --git a/video/TED_I5x1wQ6kHX0.f140.m4a b/video/TED_I5x1wQ6kHX0.f140.m4a new file mode 100644 index 0000000000000000000000000000000000000000..2fa54567d4f9763a88fcd39c6c9c182f2b59c7dd --- /dev/null +++ b/video/TED_I5x1wQ6kHX0.f140.m4a @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:abc2c3f5b2b2f503ebff1ad1f2446f32cde50a2b95e34d2b5b7e834b36f8c69b +size 26124429 diff --git a/video/TED_I5x1wQ6kHX0.f616.mp4 b/video/TED_I5x1wQ6kHX0.f616.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..ea3644bc2b9c03e10b8cfe15ca6573238b4dfb97 --- /dev/null +++ b/video/TED_I5x1wQ6kHX0.f616.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46b3334dc939f0e59242faa06102516134e8b33a966a3dc87e766db0ab294c1d +size 290766826 diff --git a/video/TED_I5x1wQ6kHX0.mp4 b/video/TED_I5x1wQ6kHX0.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..943757ce959b2f16e33399e2692b1ebc61f13471 --- /dev/null +++ b/video/TED_I5x1wQ6kHX0.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb1623e8a0e061e418c840d5af14c0674e973afa3a1539af7318abe5993462c1 +size 317215930 diff --git a/video/TED_K0pxo-dS9Hc.f251.webm b/video/TED_K0pxo-dS9Hc.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..f9b0681fee1cf111d12acfccb293ab8b1f9b00d9 --- /dev/null +++ b/video/TED_K0pxo-dS9Hc.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7db90fbd2caf98a9a4f9eceb2544ff453443b3608b69eb903a3cac6ecfba5ff2 +size 14449778 diff --git a/video/TED_K0pxo-dS9Hc.mp4 b/video/TED_K0pxo-dS9Hc.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..5636127ed4253fbad501809981ba0ee2c920ac72 --- /dev/null +++ b/video/TED_K0pxo-dS9Hc.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6aa8297a1c294b2e59a74395258e403d3c7967d4db4d04a32eb9fe9bf58b32cd +size 542249785 diff --git a/video/TED_Ks-_Mh1QhMc.f247.webm b/video/TED_Ks-_Mh1QhMc.f247.webm new file mode 100644 index 0000000000000000000000000000000000000000..85f099495c12281d188377f764b4a293e35a63bc --- /dev/null +++ b/video/TED_Ks-_Mh1QhMc.f247.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f12314ad5c4baba5b3e0669edae32f58266a70ca21382a6d155793678eefb76 +size 85647656 diff --git a/video/TED_Ks-_Mh1QhMc.f251.webm b/video/TED_Ks-_Mh1QhMc.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..fb176d87035aa0edebef87d36e7ce63d10d223b9 --- /dev/null +++ b/video/TED_Ks-_Mh1QhMc.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:ac2f76a46d8a9f08c8120259ea9802306ee21ff93cc42e7120177b5ebf88bfd3 +size 19257916 diff --git a/video/TED_Ks-_Mh1QhMc.mp4 b/video/TED_Ks-_Mh1QhMc.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..a0ab41ab48564b72e33dcdf18e4909d7160be611 --- /dev/null +++ b/video/TED_Ks-_Mh1QhMc.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f7d436de282c13abc42be6dc28ad58d315977097ab953745ea1629d86d1b425c +size 104872593 diff --git a/video/TED_L9UIF852Boo.f251.webm b/video/TED_L9UIF852Boo.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..0bc2b13b805d5ba40d711f546a6cb27212eeae64 --- /dev/null +++ b/video/TED_L9UIF852Boo.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:87ec89627ffbcf2fa5c82663f1b527f46122b091d76317028efdf4cf49e091aa +size 19795714 diff --git a/video/TED_L9UIF852Boo.f616.mp4 b/video/TED_L9UIF852Boo.f616.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..db6c67b96ebd5640a4778bf2116bbde7175ebc00 --- /dev/null +++ b/video/TED_L9UIF852Boo.f616.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:047033aca88a284b5f084006c25e0477929da6c73a10aea2cbe88b706c8d986f +size 619883404 diff --git a/video/TED_OyK0oE5rwFY.f248.webm b/video/TED_OyK0oE5rwFY.f248.webm new file mode 100644 index 0000000000000000000000000000000000000000..fced6728c4c3cf2b45af19610ef62baca17414bc --- /dev/null +++ b/video/TED_OyK0oE5rwFY.f248.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cde3c27c18420c011231f7a758917b63d64d422082be940be5ea467396486caf +size 4451648 diff --git a/video/TED_OyK0oE5rwFY.f251.webm b/video/TED_OyK0oE5rwFY.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..9ba3e8ff0601bc88428e1ef44727113c20fcb193 --- /dev/null +++ b/video/TED_OyK0oE5rwFY.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:84967edf584d6445501f587f28f44159d655a20714b8f3a80a96a5016f6320db +size 4302373 diff --git a/video/TED_OyK0oE5rwFY.mp4 b/video/TED_OyK0oE5rwFY.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..5f13bd83f2ffddb4d4b05ce2a6b88888a3e48b01 --- /dev/null +++ b/video/TED_OyK0oE5rwFY.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6f32051ed0b518b715c5d26f8b726d2a5bd6b918b111a17d9289624147257ae +size 8792900 diff --git a/video/TED_P_6vDLq64gE.f140.m4a b/video/TED_P_6vDLq64gE.f140.m4a new file mode 100644 index 0000000000000000000000000000000000000000..2d4dc6d9cae3294cd020eacf23128846fb7de901 --- /dev/null +++ b/video/TED_P_6vDLq64gE.f140.m4a @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6fbe816f6731add264fc83c2d4a954096c363ef3244ed3e898e00ccbd1177674 +size 18296524 diff --git a/video/TED_P_6vDLq64gE.f244.webm b/video/TED_P_6vDLq64gE.f244.webm new file mode 100644 index 0000000000000000000000000000000000000000..d1df690de53adb90b6a5157d49c5745aeaa16130 --- /dev/null +++ b/video/TED_P_6vDLq64gE.f244.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82f9e624c5a740ce30b8951bf1415f0e5a7af537c93b6ea55960036dd5ea7774 +size 23072004 diff --git a/video/TED_P_6vDLq64gE.mp4 b/video/TED_P_6vDLq64gE.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..05e93eacc63de2107e212c66133c6c404e2dd2a0 --- /dev/null +++ b/video/TED_P_6vDLq64gE.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e85c33ed75ebbf901a4e56f6e9d34e439e0bfdfc71093d11024d81d02aafa160 +size 
41691497 diff --git a/video/TED_QGeHS4jO0X0.mp4 b/video/TED_QGeHS4jO0X0.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..2595e3e2bceb849d50c66f496b650ce068d15330 --- /dev/null +++ b/video/TED_QGeHS4jO0X0.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:917b0608a15f648a0803194b74266f5e4a8ec47e8e8c861ef56c6131fb7d0db4 +size 52765638 diff --git a/video/TED_VRJzvJ5XPQI.f251.webm b/video/TED_VRJzvJ5XPQI.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..278f9fe9f146fbef8bac82a98377e6b06a05f43e --- /dev/null +++ b/video/TED_VRJzvJ5XPQI.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f01d5c3183e98c3533009b0f77844d00c77d1818539a8c8365edbac840e57cda +size 11826006 diff --git a/video/TED_VRJzvJ5XPQI.f616.mp4 b/video/TED_VRJzvJ5XPQI.f616.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..a842f38efa131afb90d6ebcec8d7029461628783 --- /dev/null +++ b/video/TED_VRJzvJ5XPQI.f616.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:183c2b1365111737409327bdc14535e6fcfa7cdd2b99bf3f88553eceb7fea285 +size 264396545 diff --git a/video/TED_VRJzvJ5XPQI.mp4 b/video/TED_VRJzvJ5XPQI.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..904f74d77f9073be136b63e25868a936796a2c3c --- /dev/null +++ b/video/TED_VRJzvJ5XPQI.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d1f68d5dd99298fdbadeba8cc8ec592ee4fd57b8854b9e1f9a870bab2656026 +size 276215421 diff --git a/video/TED_W3P3rT0j2gQ.f251.webm b/video/TED_W3P3rT0j2gQ.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..644e4079c8d83b4293d7b3355f505291c94466d6 --- /dev/null +++ b/video/TED_W3P3rT0j2gQ.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6e9bc8b521f47fc826dba19313f150585309cbf1da1b2e16f5763f45e25144d +size 9432712 diff --git a/video/TED_W3P3rT0j2gQ.f616.mp4 b/video/TED_W3P3rT0j2gQ.f616.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..b0f504a5fe1153d6bd93b69008e68d1b6821d402 --- /dev/null +++ b/video/TED_W3P3rT0j2gQ.f616.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5bc82f65b3135005092aa8640ffd0776c53a82c6ba324487a952b506d1d0c91a +size 201850666 diff --git a/video/TED_W3P3rT0j2gQ.mp4 b/video/TED_W3P3rT0j2gQ.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..5df4a0d8d5d4b716e6b5d9824f12df31e4b81f74 --- /dev/null +++ b/video/TED_W3P3rT0j2gQ.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19b6d30f9f56101a8c87a67c07d145f1d7ee427bdc096dc9dae033b6752af9bc +size 211459705 diff --git a/video/TED_YdTKcdyIYkA.f251.webm b/video/TED_YdTKcdyIYkA.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..5353b22a70698ae943073290c2274e6e98bcc703 --- /dev/null +++ b/video/TED_YdTKcdyIYkA.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6c5762168b407a0cce9fcceb7391fb817a4967027e30b9e8ddfb03d218ae46e +size 11267404 diff --git a/video/TED_YdTKcdyIYkA.f616.mp4 b/video/TED_YdTKcdyIYkA.f616.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..b65cb1788b00487f212d1d876937e33f5a659f97 --- /dev/null +++ b/video/TED_YdTKcdyIYkA.f616.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93a3dc7741bf183492c1bc70f5e279c9104cdc1f02c3b24f96caba31b7fa2ab1 +size 108895220 diff --git a/video/TED_YdTKcdyIYkA.mp4 b/video/TED_YdTKcdyIYkA.mp4 
new file mode 100644 index 0000000000000000000000000000000000000000..ba9aecd627e636278426336962da1cab1f3a5501 --- /dev/null +++ b/video/TED_YdTKcdyIYkA.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:928a46a3256c0b23ee44ee7d3f2132aeae2fcfbcbe7731859644ba5c5f17bf81 +size 120156272 diff --git a/video/TED_YrZTho_o_is.f251.webm b/video/TED_YrZTho_o_is.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..061c21209ce064f397066ff663a5078cbc54b8e2 --- /dev/null +++ b/video/TED_YrZTho_o_is.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:319d5ab50a1223d2b6bc4f438d2a396a0a31b8c9687559f1ad41d2da5c05fec9 +size 7606727 diff --git a/video/TED_YrZTho_o_is.mp4 b/video/TED_YrZTho_o_is.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..260e19dce973dbedb0fdc19178777c175e0aeb93 --- /dev/null +++ b/video/TED_YrZTho_o_is.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01326ec607439677c9d717503aaf6cf0dcbd9fc58553deb39c7c277baa051b21 +size 46334670 diff --git a/video/TED_ZZZ7k8cMA-4.f247.webm b/video/TED_ZZZ7k8cMA-4.f247.webm new file mode 100644 index 0000000000000000000000000000000000000000..d8dc9a9752b8840e881d2e37e2391320cd88f964 --- /dev/null +++ b/video/TED_ZZZ7k8cMA-4.f247.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:134ae0f5469374b0f75b81402b26ffc1f45c51ec34dd4ba3dddafe4a57457429 +size 47600224 diff --git a/video/TED_ZZZ7k8cMA-4.f251.webm b/video/TED_ZZZ7k8cMA-4.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..8baf8abe929b71f82aa783fe8a7334aaf8a7462e --- /dev/null +++ b/video/TED_ZZZ7k8cMA-4.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97b49cbad7918a5bca979fd3eb2b90fa0922aed9b3561b4e7aac2d96ea95ff86 +size 10858103 diff --git a/video/TED_ZZZ7k8cMA-4.mp4 b/video/TED_ZZZ7k8cMA-4.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..935ec3322fe7560840f0ecc720b67f71fb82e6c0 --- /dev/null +++ b/video/TED_ZZZ7k8cMA-4.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59cc7b25066c4b64520c24845835564c3ff111a2777bb6d0cd7e8fcfe9de318b +size 58431034 diff --git a/video/TED__v36Vt9GmH8.f251.webm b/video/TED__v36Vt9GmH8.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..90a506ed39a2e50c96d58ea88d0d76b4c92d4198 --- /dev/null +++ b/video/TED__v36Vt9GmH8.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b591c621058fb3f6b0649318794adc966f42cb0da7e70545d4ea0b4751decd43 +size 13636194 diff --git a/video/TED__v36Vt9GmH8.mp4 b/video/TED__v36Vt9GmH8.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..3cc3175d4a060c9786f8d0ca6bbfa28cfb02cb66 --- /dev/null +++ b/video/TED__v36Vt9GmH8.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:135968b46f9448cdf9d8209e00ebcec0fd951f1bec2faffb1b280b4f88663c2e +size 245637360 diff --git a/video/TED_cef35Fk7YD8.f140.m4a b/video/TED_cef35Fk7YD8.f140.m4a new file mode 100644 index 0000000000000000000000000000000000000000..fd0766ce3615a2a6166ab5917a61cc2cd53415d2 --- /dev/null +++ b/video/TED_cef35Fk7YD8.f140.m4a @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a9141070beab236c9710dc5fe625c39366ae5e58d855be43aea8b9d06aeec47 +size 17751452 diff --git a/video/TED_cef35Fk7YD8.f616.mp4 b/video/TED_cef35Fk7YD8.f616.mp4 new file mode 100644 index 
0000000000000000000000000000000000000000..0df31f4fed10c948bc57f4183b2cd28f8703a22e --- /dev/null +++ b/video/TED_cef35Fk7YD8.f616.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38d58ad49f6c79863f442fcc5bea610cafa2d8cdfa3365f301b6357ade5ab439 +size 232820658 diff --git a/video/TED_cef35Fk7YD8.mp4 b/video/TED_cef35Fk7YD8.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..ab1d5d4493d210a32c3c95359ecab95081636861 --- /dev/null +++ b/video/TED_cef35Fk7YD8.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa26d7eeb75b717e80945139724e4d11315b5f70dfcda1fc55637de4b7c447bf +size 250830149 diff --git a/video/TED_dqxNwOe2CIE.f137.mp4 b/video/TED_dqxNwOe2CIE.f137.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..a83120354baebd4a417ca4d0f8345f674c957f40 --- /dev/null +++ b/video/TED_dqxNwOe2CIE.f137.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d6cc55e84f3a287cab0f42f06803fc1efdd881cb9deed483e28cf2d61e60de54 +size 59809552 diff --git a/video/TED_dqxNwOe2CIE.f251.webm b/video/TED_dqxNwOe2CIE.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..9cab7b312dc95d512525cd2b0c4fad3592003576 --- /dev/null +++ b/video/TED_dqxNwOe2CIE.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4d7092c2c88400b3de28ba03878453b8871ca3b8d78d5c0ada646a47bd73fbfe +size 6216275 diff --git a/video/TED_dqxNwOe2CIE.mp4 b/video/TED_dqxNwOe2CIE.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..483ea0ad126cb245da956cd9641236b1f8c03a9e --- /dev/null +++ b/video/TED_dqxNwOe2CIE.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82022ebad46856242660859e9bd5dcfdbeec80e23466461b477fb3799fcac709 +size 66129360 diff --git a/video/TED_eIho2S0ZahI.f247.webm b/video/TED_eIho2S0ZahI.f247.webm new file mode 100644 index 0000000000000000000000000000000000000000..db95f789ee22fad7b60daf7f67cdc5e6c86ed78d --- /dev/null +++ b/video/TED_eIho2S0ZahI.f247.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:670de950f7577c5249aee395e419fafb220ba5193db46f581be6340fd161b64d +size 21676517 diff --git a/video/TED_eIho2S0ZahI.f251.webm b/video/TED_eIho2S0ZahI.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..ddfa9e156f27d7182c5903421947cb4f2d1038dd --- /dev/null +++ b/video/TED_eIho2S0ZahI.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:16e067470dafa2c42ad641fb2d688cac2206b8176bb5bf2145caea838ea0930b +size 7463732 diff --git a/video/TED_eIho2S0ZahI.mp4 b/video/TED_eIho2S0ZahI.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..bbf0f8e78c40fbcb7f3564a8088fec9a060d00ff --- /dev/null +++ b/video/TED_eIho2S0ZahI.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3e4abf23c169df2effa54f08bb105cc55b2bde707301c930f5336a3f5ce7712 +size 29125161 diff --git a/video/TED_fLaslONQAKM.f248.webm b/video/TED_fLaslONQAKM.f248.webm new file mode 100644 index 0000000000000000000000000000000000000000..016cfa51ed041acb3b9b7e8d03f1deb9d8fb00fc --- /dev/null +++ b/video/TED_fLaslONQAKM.f248.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:23c4ccff29e9d57a7a03a25b01fd4ec5ec2cf7797918f2fa98632da297609b10 +size 61081321 diff --git a/video/TED_fLaslONQAKM.f251.webm b/video/TED_fLaslONQAKM.f251.webm new file mode 100644 index 
0000000000000000000000000000000000000000..64493222562d69b7025c397e4f5f2afecc261dff --- /dev/null +++ b/video/TED_fLaslONQAKM.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e49ef12e5b382a3ef52f853bf37a5814323b50a26f63befdf2d5e443fbc29c1 +size 9036221 diff --git a/video/TED_fLaslONQAKM.mp4 b/video/TED_fLaslONQAKM.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..93de4b86c1bbb8ea61a16ac97f91b3776af8e42d --- /dev/null +++ b/video/TED_fLaslONQAKM.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a65a92b6578715bffd01511150d6276b9b5dd08d1fb793c915b97b178212d9e +size 70215841 diff --git a/video/TED_kKHSJHkPeLY.f616.mp4 b/video/TED_kKHSJHkPeLY.f616.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..0e7a1816e5ef0845a59eaf707eb8e40842dcf04e --- /dev/null +++ b/video/TED_kKHSJHkPeLY.f616.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:026dfd67ffb62a78e49269d7380e1b386362493276ee058d6dda59e3dce19ed8 +size 143671515 diff --git a/video/TED_lvxJoUuG018.f251.webm b/video/TED_lvxJoUuG018.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..e963afbd378356a3f636c9d0cb69656e52d5c289 --- /dev/null +++ b/video/TED_lvxJoUuG018.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:893fa6a93c08b234ae225f2680fbd33cf727587f6773eb5eb0deed46c4f9790a +size 23945453 diff --git a/video/TED_lvxJoUuG018.f616.mp4 b/video/TED_lvxJoUuG018.f616.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..9e4cae762243cebb95460e0c3ca9eba2b74908af --- /dev/null +++ b/video/TED_lvxJoUuG018.f616.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74fd2ace9b97979b55909555d0620e0d1bba810cfb527848f6a7318b7ceb7f01 +size 192763198 diff --git a/video/TED_lvxJoUuG018.mp4 b/video/TED_lvxJoUuG018.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..5ecc4413ed4c441d2960458167f671b23be45ebf --- /dev/null +++ b/video/TED_lvxJoUuG018.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7dcbfa59da1d9ab1a3f7b3bd551f4e057ac1d9b7f2685d01840d4e969c5a9159 +size 216692764 diff --git a/video/TED_nvaPzA50eQA.f137.mp4 b/video/TED_nvaPzA50eQA.f137.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..a3fb48e71dfaf441ad82ed2877cacb5e5cada200 --- /dev/null +++ b/video/TED_nvaPzA50eQA.f137.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:abab0b6c0bc0c0e8399dd5d35aa596cce928cdb4a51a69a191992cd548399046 +size 73819133 diff --git a/video/TED_nvaPzA50eQA.f140.m4a b/video/TED_nvaPzA50eQA.f140.m4a new file mode 100644 index 0000000000000000000000000000000000000000..117ff987adb8444a925975d90261852da580eac9 --- /dev/null +++ b/video/TED_nvaPzA50eQA.f140.m4a @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:65cb61cb86816649a9b69437b5e46096d846251f71dd5ca46e2ad9ffb182733d +size 7163281 diff --git a/video/TED_nvaPzA50eQA.mp4 b/video/TED_nvaPzA50eQA.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..ff097b461ea31fb5bab71cc1503c957ff68c9bd2 --- /dev/null +++ b/video/TED_nvaPzA50eQA.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49bd5491610b35c43f51f06b950942a009c1dba4376a7d7630812d35903971a9 +size 81174100 diff --git a/video/TED_rSQNi5sAwuc.f140.m4a b/video/TED_rSQNi5sAwuc.f140.m4a new file mode 100644 index 0000000000000000000000000000000000000000..81c983f16aead8de7d72c5dc07bf2adffa261d33 
--- /dev/null +++ b/video/TED_rSQNi5sAwuc.f140.m4a @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8c17c0c3fd076093423b88bb5dd943635fc3de3e071e0ebeb15d854294a7bca2 +size 5598247 diff --git a/video/TED_rSQNi5sAwuc.f247.webm b/video/TED_rSQNi5sAwuc.f247.webm new file mode 100644 index 0000000000000000000000000000000000000000..cbd6fceb67bbb9ab78760a813749cc6b540d3573 --- /dev/null +++ b/video/TED_rSQNi5sAwuc.f247.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc97d1fc5e9e3fb990740f0ba3e5aa830ae1bdacc0873b9e10eb04a006fdc838 +size 34121642 diff --git a/video/TED_rSQNi5sAwuc.mp4 b/video/TED_rSQNi5sAwuc.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..3f35a89789f7515ce16de61bb68b00b0ecd86cda --- /dev/null +++ b/video/TED_rSQNi5sAwuc.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1543675cdb4eebc3a780981a6e6f3a715f858772fc0dc0e68424842fb1b15349 +size 39844129 diff --git a/video/TED_rk_SMBIW1mg.f247.webm b/video/TED_rk_SMBIW1mg.f247.webm new file mode 100644 index 0000000000000000000000000000000000000000..4a6abd94bb3f0464c4658ca343eebdd80d626b76 --- /dev/null +++ b/video/TED_rk_SMBIW1mg.f247.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb53a172b5927d4872311e2b0c4b203c9c9a9813bd9543a9f03d10ba74eb9fea +size 109248625 diff --git a/video/TED_rk_SMBIW1mg.f251.webm b/video/TED_rk_SMBIW1mg.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..d346cf51339594356bd59c554907491d6039aabe --- /dev/null +++ b/video/TED_rk_SMBIW1mg.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e85e9a82ae51c4fbb305ab18e838d28269e8c853a920ab477aef0e1d96473f3 +size 16687224 diff --git a/video/TED_rk_SMBIW1mg.mp4 b/video/TED_rk_SMBIW1mg.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..29465a26bf13b94c75ed8b635c337567917ecd67 --- /dev/null +++ b/video/TED_rk_SMBIW1mg.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9bdefabd9717b739477b884973a5cece5a298285f8dedb2c9e11391f6b987f20 +size 126090690 diff --git a/video/TED_tBhUbHQxeQ0.f616.mp4 b/video/TED_tBhUbHQxeQ0.f616.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..e1c2ac403f913d4ff6be599f25c8e51816ace59b --- /dev/null +++ b/video/TED_tBhUbHQxeQ0.f616.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eaf2d4663973014719395b78eafc5a7137a04a45f411a987477a5b74189790c0 +size 145748684 diff --git a/video/TED_tBhUbHQxeQ0.mp4 b/video/TED_tBhUbHQxeQ0.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..14c4efb8efee0dcf38850cc177862ec9b569bed4 --- /dev/null +++ b/video/TED_tBhUbHQxeQ0.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc94079823af734699e2db82a7291ebdaafbce1373718f54acab178bf636f031 +size 158790549 diff --git a/video/TED_t_CEXuoSEZU.f248.webm b/video/TED_t_CEXuoSEZU.f248.webm new file mode 100644 index 0000000000000000000000000000000000000000..037d02cf1a97e3034bc485e75efc9073113d0be0 --- /dev/null +++ b/video/TED_t_CEXuoSEZU.f248.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fc5a4fe2c504257cdde21cde92e7559fee423ff87158ed841eb1ab7be4a7d0a +size 77870244 diff --git a/video/TED_t_CEXuoSEZU.f251.webm b/video/TED_t_CEXuoSEZU.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..360f4b6dc12ef882692213245d862cd515399392 --- /dev/null +++ b/video/TED_t_CEXuoSEZU.f251.webm @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1 +oid sha256:bc949e1f5ade15ba992ba207a1624554da6ab4be9c8b8ec76fc50bff8cba0558 +size 15266093 diff --git a/video/TED_t_CEXuoSEZU.mp4 b/video/TED_t_CEXuoSEZU.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..a189f534d1282acb695c2f53bedba3bdee64ec31 --- /dev/null +++ b/video/TED_t_CEXuoSEZU.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ae9c1bf88f7f86111630c0ccef3d94d833f5e03687241bf75269e8471ada83a +size 93615380 diff --git a/video/TED_zn2iRG7bI2I.f247.webm b/video/TED_zn2iRG7bI2I.f247.webm new file mode 100644 index 0000000000000000000000000000000000000000..2061478f3c5698e233dc4e1265413cda2aaa2ad6 --- /dev/null +++ b/video/TED_zn2iRG7bI2I.f247.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55a85bddde6b102b6bcb5aca901f33f53a691f34d8d02fdd1ecb980bd1ad1349 +size 36952617 diff --git a/video/TED_zn2iRG7bI2I.f251.webm b/video/TED_zn2iRG7bI2I.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..283df29dcba375840bd59c12209dc2beec0f3ea4 --- /dev/null +++ b/video/TED_zn2iRG7bI2I.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e249016baf212c2d520f323e03d62a79aade9f8aceae1a7b8d7a811037518b6 +size 5971779 diff --git a/video/TED_zn2iRG7bI2I.mp4 b/video/TED_zn2iRG7bI2I.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..d234718664ff1b1658472ba9d6ae86db47a01e66 --- /dev/null +++ b/video/TED_zn2iRG7bI2I.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74aa95cba801521942066e1a100498c904b092639d71d3ef63bea6ba38d3fdc0 +size 43095351 diff --git a/video/podcast_-murMCjnJx0.f247.webm b/video/podcast_-murMCjnJx0.f247.webm new file mode 100644 index 0000000000000000000000000000000000000000..e87d363387d48f8a512bb3cb04f58cb6bb5d3201 --- /dev/null +++ b/video/podcast_-murMCjnJx0.f247.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d466ccbaa8f0730490b641a6956d686b2c705eaad7039af3d0d917341267566c +size 6774694 diff --git a/video/podcast_-murMCjnJx0.f251.webm b/video/podcast_-murMCjnJx0.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..fc24c7df1737c817dd6d949556bfb0ed93468005 --- /dev/null +++ b/video/podcast_-murMCjnJx0.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ff1745cbd603555ab3dfa98583f1c1802a203c606289626de10cc145dec5424 +size 554556 diff --git a/video/podcast_-murMCjnJx0.mp4 b/video/podcast_-murMCjnJx0.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..497faf1280788696aba4d9675f4d770ec2fabcbd --- /dev/null +++ b/video/podcast_-murMCjnJx0.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d51bdad8d9cb63b902ad6a6942ff479bd1bd8e91baf22e3ae1c10da98d3dbbc9 +size 7344392 diff --git a/video/podcast_15-Hifp15SU.f137.mp4 b/video/podcast_15-Hifp15SU.f137.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..953c206a686a12ca856f748090027338f6de8bd2 --- /dev/null +++ b/video/podcast_15-Hifp15SU.f137.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f11b8cbd52615689a959c43469b073d81635b7ecb3991b28b0b38cb90c17707 +size 47990103 diff --git a/video/podcast_73SHFFpm4J4.f251.webm b/video/podcast_73SHFFpm4J4.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..8fc80ebc151fd6afa2524e9fb7441085032d5562 --- /dev/null +++ b/video/podcast_73SHFFpm4J4.f251.webm @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:cf382185c07b08c21811b75bb5c3897247631659d218f51efa1730dbf88e2bb4 +size 69984795 diff --git a/video/podcast_7z7wkVTqMqw.f248.webm b/video/podcast_7z7wkVTqMqw.f248.webm new file mode 100644 index 0000000000000000000000000000000000000000..b368ad49a5a54dcda3eae8914d3bfb6d5b942730 --- /dev/null +++ b/video/podcast_7z7wkVTqMqw.f248.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b0a82ea69577c1792332cb5f69385fcf7763e3b437990b2c7556d65fcf9daf0 +size 2187128 diff --git a/video/podcast_7z7wkVTqMqw.f251.webm b/video/podcast_7z7wkVTqMqw.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..bbd3e9236a7677a263c61800064a78ace4faf8f2 --- /dev/null +++ b/video/podcast_7z7wkVTqMqw.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cf6c1658afdcf35b13b6966ca38e2ff15333072c63fffc3900c1113ac7fd2f4e +size 404035 diff --git a/video/podcast_7z7wkVTqMqw.mp4 b/video/podcast_7z7wkVTqMqw.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..ec9407578ff511c418f519326e5376376da1e6d6 --- /dev/null +++ b/video/podcast_7z7wkVTqMqw.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18b83220fef710e94cc31765aafef3dc4812239b8e22f2836ef4490af14a9caa +size 2595922 diff --git a/video/podcast_AMy4hmUYdGk.f140.m4a b/video/podcast_AMy4hmUYdGk.f140.m4a new file mode 100644 index 0000000000000000000000000000000000000000..806fc83f3c4bff62e0732a4f88c2fdf5cc1fb5b2 Binary files /dev/null and b/video/podcast_AMy4hmUYdGk.f140.m4a differ diff --git a/video/podcast_AMy4hmUYdGk.f616.mp4 b/video/podcast_AMy4hmUYdGk.f616.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..8e1d21b04cea42a917b45279c3f9cb3f6e590184 --- /dev/null +++ b/video/podcast_AMy4hmUYdGk.f616.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60e23af8c07412d557d46780fff4a96982d21dc6a5daf763e3a3a886cf005325 +size 13590190 diff --git a/video/podcast_AMy4hmUYdGk.mp4 b/video/podcast_AMy4hmUYdGk.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..63c5f0ddb07965e5408349bce52edfc062f20cea --- /dev/null +++ b/video/podcast_AMy4hmUYdGk.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa715387fa2ad3277f6a9a052d9132b2d1a9c7219d0bd995bb5bf32342f59814 +size 14234416 diff --git a/video/podcast_AlVruShkb3s.f136.mp4 b/video/podcast_AlVruShkb3s.f136.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..569fb55239441e193ecca968b61a897958590599 --- /dev/null +++ b/video/podcast_AlVruShkb3s.f136.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e65be5c39e4f381a82059e1c33f7d65263b96ed655a323bb26912e3ba5bc1ab +size 18485217 diff --git a/video/podcast_AlVruShkb3s.f251.webm b/video/podcast_AlVruShkb3s.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..ace44715260e29a50f1137446dccbb7fcd4c4a04 --- /dev/null +++ b/video/podcast_AlVruShkb3s.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a8a5add18df80d8d8d4cd7ddf1413e6a8e7cb1e9c1a325510549f63f967fd04 +size 17973653 diff --git a/video/podcast_Ama8N0jkprw.f137.mp4 b/video/podcast_Ama8N0jkprw.f137.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..9ff48b7b596597e54e4905345e049037b755bd8f --- /dev/null +++ b/video/podcast_Ama8N0jkprw.f137.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:69d103933e21ba837fd7279d6115704e80fad504f8a8028b21fd94feeccce123 +size 50063220 diff --git a/video/podcast_Ama8N0jkprw.f251.webm b/video/podcast_Ama8N0jkprw.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..186cae5d919ef3959f385e7990e2052bcb438d52 --- /dev/null +++ b/video/podcast_Ama8N0jkprw.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:877c29e009edff9908259820e541a347c47a01b217e749bf251dcc573096d9f0 +size 12260891 diff --git a/video/podcast_Ama8N0jkprw.mp4 b/video/podcast_Ama8N0jkprw.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..3feec636de3b796d157a396d3f03bbf556e8fbf0 --- /dev/null +++ b/video/podcast_Ama8N0jkprw.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ceafc9045eba3e99f82c4dfbd774be7010abb74520bd9c596ffdf4875af70a7 +size 62246644 diff --git a/video/podcast_GDvthpzV6rI.mp4 b/video/podcast_GDvthpzV6rI.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..c350ff06eb30335e63e119e1c2eb42285a05e224 --- /dev/null +++ b/video/podcast_GDvthpzV6rI.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5596b278f5398f2515f34e776bb981a898434d0c99d62c8176dd35495e85d036 +size 482416608 diff --git a/video/podcast_J2d6quMpGDc.f140.m4a b/video/podcast_J2d6quMpGDc.f140.m4a new file mode 100644 index 0000000000000000000000000000000000000000..a93ab99b4a5a3638f00527eaa3e25b7b82b583f9 Binary files /dev/null and b/video/podcast_J2d6quMpGDc.f140.m4a differ diff --git a/video/podcast_J2d6quMpGDc.f248.webm b/video/podcast_J2d6quMpGDc.f248.webm new file mode 100644 index 0000000000000000000000000000000000000000..189dba9ef949f3369d3115475958a244bdd548d1 --- /dev/null +++ b/video/podcast_J2d6quMpGDc.f248.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:265574b89b55fc7169f60a589860dfb77b638cb7e25f4c408d50897c0c466ef9 +size 2257411 diff --git a/video/podcast_J2d6quMpGDc.mp4 b/video/podcast_J2d6quMpGDc.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..db015d17cb2fafda97185113984c4b5193ffbe9e --- /dev/null +++ b/video/podcast_J2d6quMpGDc.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc84bad2e5aeb696f6f1c038efac55ff2100d1d815a89e75b46901da5235e55b +size 2942449 diff --git a/video/podcast_Ks-_Mh1QhMc.f251.webm b/video/podcast_Ks-_Mh1QhMc.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..fb176d87035aa0edebef87d36e7ce63d10d223b9 --- /dev/null +++ b/video/podcast_Ks-_Mh1QhMc.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ac2f76a46d8a9f08c8120259ea9802306ee21ff93cc42e7120177b5ebf88bfd3 +size 19257916 diff --git a/video/podcast_LHRXb9L4hh8.f251.webm b/video/podcast_LHRXb9L4hh8.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..ec41147fce4f5bdfc1f41e8302c1531c1705637d --- /dev/null +++ b/video/podcast_LHRXb9L4hh8.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acaa5e59dd30a00dfabdc32e4ea030eaf74ec55e8fd47c53d54eab39215d987f +size 80179358 diff --git a/video/podcast_UohdOOQfheY.f248.webm b/video/podcast_UohdOOQfheY.f248.webm new file mode 100644 index 0000000000000000000000000000000000000000..5903c9074a8e9f27f7b381ae102cd25cb86b8371 --- /dev/null +++ b/video/podcast_UohdOOQfheY.f248.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8cd43ea9e4e4ba0cd14690bffb96fffc7b2008ff37218010ceb2018bdaa247a +size 7734330 diff --git 
a/video/podcast_UohdOOQfheY.f251.webm b/video/podcast_UohdOOQfheY.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..2ed65a5f05743a4916a40e41fc03167bc1b722bd --- /dev/null +++ b/video/podcast_UohdOOQfheY.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd4b02191cd63baeda2e4c1bc9204c15d181fb8f8d6fd3b9a509ae78016aae40 +size 859479 diff --git a/video/podcast_UohdOOQfheY.mp4 b/video/podcast_UohdOOQfheY.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..67a69283d2e67ecd15942bea48dbd63f292f7333 --- /dev/null +++ b/video/podcast_UohdOOQfheY.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d404764d9d8da813f39fce0383788ac4e84e3af11d0cb1a00cc67652a8a4421f +size 8618182 diff --git a/video/podcast_VRJzvJ5XPQI.f616.mp4 b/video/podcast_VRJzvJ5XPQI.f616.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..bbcec5e832e7f498b172f862fa2d035ed4b317e6 --- /dev/null +++ b/video/podcast_VRJzvJ5XPQI.f616.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a17e50b675fff2907654a790406e7f85768e8f2be3216d80f2aaa9246995c96c +size 264396545 diff --git a/video/podcast_YEM3nWkB-EE.f251.webm b/video/podcast_YEM3nWkB-EE.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..03e970744a3543b5eaedb48239fe600a0f64b41a --- /dev/null +++ b/video/podcast_YEM3nWkB-EE.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0243ecdba67d6c6c2a161e2d85cd5cf69922a5dc906f0fbc4213dc225ab50a95 +size 75956900 diff --git a/video/podcast_ZZZ7k8cMA-4.f251.webm b/video/podcast_ZZZ7k8cMA-4.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..8baf8abe929b71f82aa783fe8a7334aaf8a7462e --- /dev/null +++ b/video/podcast_ZZZ7k8cMA-4.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97b49cbad7918a5bca979fd3eb2b90fa0922aed9b3561b4e7aac2d96ea95ff86 +size 10858103 diff --git a/video/podcast_ZZZ7k8cMA-4.mp4 b/video/podcast_ZZZ7k8cMA-4.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..935ec3322fe7560840f0ecc720b67f71fb82e6c0 --- /dev/null +++ b/video/podcast_ZZZ7k8cMA-4.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59cc7b25066c4b64520c24845835564c3ff111a2777bb6d0cd7e8fcfe9de318b +size 58431034 diff --git a/video/podcast__AyB3HPgf04.f251.webm b/video/podcast__AyB3HPgf04.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..5375aa491a0b837bed5f1e72d84891c492c5b9a6 --- /dev/null +++ b/video/podcast__AyB3HPgf04.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0c191536ffae6c53c15799c1a1741c499ea656a1117a2f5aa1199e72e6b064f +size 892637 diff --git a/video/podcast__AyB3HPgf04.f780.webm b/video/podcast__AyB3HPgf04.f780.webm new file mode 100644 index 0000000000000000000000000000000000000000..91caf36874e28c42c5298025caa825937eb4432c --- /dev/null +++ b/video/podcast__AyB3HPgf04.f780.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9af6f57a2b942ece25c5331c7e7088e4e4829bd31af462b4e48fedd1afe344c +size 1859159 diff --git a/video/podcast__AyB3HPgf04.mp4 b/video/podcast__AyB3HPgf04.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..66d8819fc13bd4b29f9ce1f3b22367c160f68966 --- /dev/null +++ b/video/podcast__AyB3HPgf04.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:1b57d87e28b75da536a7e6f85b6ce400c60be448843605ce0aa3b2df0d77f089 +size 2776359 diff --git a/video/podcast_a9yFKPmPZ90.f251.webm b/video/podcast_a9yFKPmPZ90.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..ec32502d8788c347e0e4fb083978ff4af6667959 --- /dev/null +++ b/video/podcast_a9yFKPmPZ90.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f89f0949b09d13091b0fee0ac53cac8f8e38116273046809478e77c4748876c7 +size 130360990 diff --git a/video/podcast_d8w9gn5yQQg.f303.webm.part b/video/podcast_d8w9gn5yQQg.f303.webm.part new file mode 100644 index 0000000000000000000000000000000000000000..3d401e0747764bd6f64f6c00448feb89c254eaaf --- /dev/null +++ b/video/podcast_d8w9gn5yQQg.f303.webm.part @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd44c9a3e81dcd10ea0eca5bc93d3a7c45da4a6af0d4971905cb939895a7d802 +size 628679736 diff --git a/video/podcast_fLaslONQAKM.f251.webm b/video/podcast_fLaslONQAKM.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..64493222562d69b7025c397e4f5f2afecc261dff --- /dev/null +++ b/video/podcast_fLaslONQAKM.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4e49ef12e5b382a3ef52f853bf37a5814323b50a26f63befdf2d5e443fbc29c1 +size 9036221 diff --git a/video/podcast_fLaslONQAKM.mp4 b/video/podcast_fLaslONQAKM.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..93de4b86c1bbb8ea61a16ac97f91b3776af8e42d --- /dev/null +++ b/video/podcast_fLaslONQAKM.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6a65a92b6578715bffd01511150d6276b9b5dd08d1fb793c915b97b178212d9e +size 70215841 diff --git a/video/podcast_lLCbuWOuL54.f616.mp4.part-Frag936 b/video/podcast_lLCbuWOuL54.f616.mp4.part-Frag936 new file mode 100644 index 0000000000000000000000000000000000000000..45d06767d439401aafebdb319ccca1ea5c91931c Binary files /dev/null and b/video/podcast_lLCbuWOuL54.f616.mp4.part-Frag936 differ diff --git a/video/podcast_m3XoZnr6OK4.f248.webm b/video/podcast_m3XoZnr6OK4.f248.webm new file mode 100644 index 0000000000000000000000000000000000000000..3b3763b1788829a299eb2c683631cb1774eec595 --- /dev/null +++ b/video/podcast_m3XoZnr6OK4.f248.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b560b736f984202247d97a8f3002f4d5c1705779b49030fda6d705202fc9a96a +size 7399553 diff --git a/video/podcast_m3XoZnr6OK4.f251.webm b/video/podcast_m3XoZnr6OK4.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..f7e92d4c7926487f53dabc435270c6a48c0f1f92 --- /dev/null +++ b/video/podcast_m3XoZnr6OK4.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d061526d2d2866305b46829fa411554dd8e630a7f1f3858f87c63bee08ad1071 +size 769497 diff --git a/video/podcast_m3XoZnr6OK4.mp4 b/video/podcast_m3XoZnr6OK4.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..272ff332ad3d2ab50c50b9d8fc9c3bd126cf73a1 --- /dev/null +++ b/video/podcast_m3XoZnr6OK4.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff6b5c2bddf164141b590864a1e854d85b4ff16e2d2d16cd878025a536b77987 +size 8175794 diff --git a/video/podcast_oCUkBbDFLhU.f247.webm b/video/podcast_oCUkBbDFLhU.f247.webm new file mode 100644 index 0000000000000000000000000000000000000000..5256e43a8a19e2c3597704b33ef0ff031c4e29f9 --- /dev/null +++ b/video/podcast_oCUkBbDFLhU.f247.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:b18d2ed6f8d4fda4166bd1c1e62ee1534f01cc507f1e21adab456595acbc472b +size 34105900 diff --git a/video/podcast_oCUkBbDFLhU.f251.webm b/video/podcast_oCUkBbDFLhU.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..2efaaf0c403eafcd06bbf39214747fed86b42675 --- /dev/null +++ b/video/podcast_oCUkBbDFLhU.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0147d20263e9da18ad988018545458bfcf60cca3de74753d48f336c5d7a10e06 +size 48679348 diff --git a/video/podcast_oCUkBbDFLhU.mp4 b/video/podcast_oCUkBbDFLhU.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..e88b4f8ce95ba9256bdd05b819693f9f1198d8d7 --- /dev/null +++ b/video/podcast_oCUkBbDFLhU.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:020823b723e4969dfcbc0ddd01e266694a4b8845634fd149cdd65dff9291e483 +size 83961934 diff --git a/video/podcast_xppME-ham_Q.f248.webm b/video/podcast_xppME-ham_Q.f248.webm new file mode 100644 index 0000000000000000000000000000000000000000..59669e8e4dac3eaa6d1f1d3dbd615b2baa76abee --- /dev/null +++ b/video/podcast_xppME-ham_Q.f248.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86a508948a9dccd7cf5e88d62c200bdba0dbf0969c3090d59b8923aa6832ab3a +size 7623507 diff --git a/video/podcast_xppME-ham_Q.f251.webm b/video/podcast_xppME-ham_Q.f251.webm new file mode 100644 index 0000000000000000000000000000000000000000..2a5eeb4a54c6ac9cb921eddab80b16c3c24bdb65 --- /dev/null +++ b/video/podcast_xppME-ham_Q.f251.webm @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d548bf4ac9856f5c369379c4a67105017e5b44786a219639880edbfdf62f1184 +size 774633 diff --git a/video/podcast_xppME-ham_Q.mp4 b/video/podcast_xppME-ham_Q.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..caedc91ac2ce00acc536e8da09cc859c2f9c90ed --- /dev/null +++ b/video/podcast_xppME-ham_Q.mp4 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:57391d9fb7125b6a11cdd9ec914c1aa99ac96e391541957e37edcf289235358d +size 8406965