BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//CMSA - ECPv6.15.18//NONSGML v1.0//EN
CALSCALE:GREGORIAN
METHOD:PUBLISH
X-WR-CALNAME:CMSA
X-ORIGINAL-URL:https://cmsa.fas.harvard.edu
X-WR-CALDESC:Events for CMSA
REFRESH-INTERVAL;VALUE=DURATION:PT1H
X-Robots-Tag:noindex
X-PUBLISHED-TTL:PT1H
BEGIN:VTIMEZONE
TZID:America/New_York
BEGIN:DAYLIGHT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
TZNAME:EDT
DTSTART:20150308T070000
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
TZNAME:EST
DTSTART:20151101T060000
END:STANDARD
BEGIN:DAYLIGHT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
TZNAME:EDT
DTSTART:20160313T070000
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
TZNAME:EST
DTSTART:20161106T060000
END:STANDARD
BEGIN:DAYLIGHT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
TZNAME:EDT
DTSTART:20170312T070000
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
TZNAME:EST
DTSTART:20171105T060000
END:STANDARD
BEGIN:DAYLIGHT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
TZNAME:EDT
DTSTART:20180311T070000
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
TZNAME:EST
DTSTART:20181104T060000
END:STANDARD
BEGIN:DAYLIGHT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
TZNAME:EDT
DTSTART:20190310T070000
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
TZNAME:EST
DTSTART:20191103T060000
END:STANDARD
BEGIN:DAYLIGHT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
TZNAME:EDT
DTSTART:20200308T070000
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
TZNAME:EST
DTSTART:20201101T060000
END:STANDARD
BEGIN:DAYLIGHT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
TZNAME:EDT
DTSTART:20210314T070000
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
TZNAME:EST
DTSTART:20211107T060000
END:STANDARD
BEGIN:DAYLIGHT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
TZNAME:EDT
DTSTART:20220313T070000
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
TZNAME:EST
DTSTART:20221106T060000
END:STANDARD
BEGIN:DAYLIGHT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
TZNAME:EDT
DTSTART:20230312T070000
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
TZNAME:EST
DTSTART:20231105T060000
END:STANDARD
BEGIN:DAYLIGHT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
TZNAME:EDT
DTSTART:20240310T070000
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
TZNAME:EST
DTSTART:20241103T060000
END:STANDARD
BEGIN:DAYLIGHT
TZOFFSETFROM:-0500
TZOFFSETTO:-0400
TZNAME:EDT
DTSTART:20250309T070000
END:DAYLIGHT
BEGIN:STANDARD
TZOFFSETFROM:-0400
TZOFFSETTO:-0500
TZNAME:EST
DTSTART:20251102T060000
END:STANDARD
END:VTIMEZONE
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20240502T110000
DTEND;TZID=America/New_York:20240502T120000
DTSTAMP:20260426T045537Z
CREATED:20240415T174619Z
LAST-MODIFIED:20240502T144153Z
UID:10003364-1714647600-1714651200@cmsa.fas.harvard.edu
SUMMARY:Mathematical Aspects of Scattering Amplitudes Lecture
DESCRIPTION:Mathematical Aspects of Scattering Amplitudes Lecture \nSpeaker: Daniil Rudenko\, U Chicago \nTitle: Introduction to Cluster Polylogarithms \nLocation: Harvard Science Center 310
URL:https://cmsa.fas.harvard.edu/event/scatteringamplitudes_5224dr/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Scattering Amplitudes Lectures
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20240405T140000
DTEND;TZID=America/New_York:20240406T170000
DTSTAMP:20260426T045537Z
CREATED:20240105T070812Z
LAST-MODIFIED:20250305T204914Z
UID:10001118-1712325600-1712422800@cmsa.fas.harvard.edu
SUMMARY:Current Developments in Mathematics Conference 2024
DESCRIPTION:CURRENT DEVELOPMENTS IN MATHEMATICS 2024\nAPRIL 5-6\, 2024\nHARVARD UNIVERSITY SCIENCE CENTER\nLECTURE HALL C\nhttps://www.math.harvard.edu/event/current-developments-in-mathematics-2024/\n  \n\nSpeakers:\nDaniel Cristofaro-Gardiner – University of Maryland\nSamit Dasgupta – Duke University\nJiaoyang Huang – University of Pennsylvania\nDaniel Litt – University of Toronto\nLisa Piccirillo – MIT/University of Texas\n\n\n\n\nDownload PDF for a detailed schedule of lectures and events. \n  \n\n\n\n\n\n\n\n\nFriday\, April 5 \n\n\n\n\n\n\n\n\n\n\nSaturday\, April 6 \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n1:30 p.m. – 2:20 p.m. Part 1\n2:20 p.m. – 2:30 p.m. Break\n2:30 p.m. – 3:20 p.m. Part 2\n\nJiaoyang Huang \nRandom Matrix Statistics and Airy Line Ensembles \n\n\n\n\n\n\n\n\n\n\n\n9:05 a.m. – 9:55 a.m. Part 1\n9:55 a.m. – 10:05 a.m. Break\n10:05 a.m. – 10:55 a.m. Part 2\n\nDaniel Litt \nMotives\, mapping class groups\, and monodromy \n\n\n\n\n\n\n\n\n\n\n\n\n3:20 p.m. – 3:35 p.m. \nBreak \n\n\n\n\n\n\n\n\n\n\n10:55 a.m. – 11:10 a.m. \nBreak \n\n\n\n\n\n\n\n\n\n\n\n\n\n3:35 p.m. – 4:25 p.m. Part 1\n4:25 p.m. – 4:35 p.m. Break\n4:35 p.m. – 5:25 p.m. Part 2\n\nLisa Piccirillo \nExotic phenomena in dimension 4 \n\n\n\n\n\n\n\n\n\n\n\n11:10 a.m. – 12 p.m. Part 1\n12 p.m. – 1:30 p.m. Lunch\n1:30 p.m. – 2:20 p.m. Part 2\n\nSamit Dasgupta \nStark’s conjectures and explicit class field theory \n\n\n\n\n\n\n\n\n\n\n\n\n\n2:20 p.m. – 2:35 p.m. \nBreak \n\n\n\n\n\n\n\n\n\n\n\n\n\n\n2:35 p.m. – 3:25 p.m. Part 1\n3:25 p.m. – 3:35 p.m. Break\n3:35 p.m. – 4:25 p.m. Part 2\n\nDan Cristofaro-Gardiner \nLow-dimensional topology and dynamics \n\n\n\n\n\n\n\n\n  \n  \nOrganizers: David Jerison\, Paul Seidel\, Nike Sun (MIT); Denis Auroux\, Mark Kisin\, Lauren Williams\, Horng-Tzer Yau\, Shing-Tung Yau (Harvard). \nSponsored by the National Science Foundation\, Harvard University Mathematics\, and the Massachusetts Institute of Technology. 
 \nHarvard University is committed to maintaining a safe and healthy educational and work environment in which no member of the University community is\, on the basis of sex\, sexual orientation\, or gender identity\, excluded from participation in\, denied the benefits of\, or subjected to discrimination in any University program or activity. More information can be found here.
URL:https://cmsa.fas.harvard.edu/event/cdm-2024/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Conference
ATTACH;FMTTYPE=image/jpeg:https://cmsa.fas.harvard.edu/media/Updated-2024-CDM-Poster-scaled.jpg
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20240328T163000
DTEND;TZID=America/New_York:20240328T173000
DTSTAMP:20260426T045537Z
CREATED:20240103T175709Z
LAST-MODIFIED:20250409T192237Z
UID:10001105-1711643400-1711647000@cmsa.fas.harvard.edu
SUMMARY:2024 Ding Shum Lecture: Yann LeCun: Objective-Driven AI: Towards AI systems that can learn\, remember\, reason\, and plan
DESCRIPTION:LECTURE SLIDES (pdf) \nOn March 28\, 2024\, the CMSA will host the fifth annual Ding Shum Lecture\, given by Yann LeCun. \nTime: 4:30–5:30 pm ET \nSpeaker: Yann Lecun\, New York University & META \nLocation: Harvard Science Center  Hall A & via Zoom Webinar \nTitle: Objective-Driven AI: Towards AI systems that can learn\, remember\, reason\, and plan \n\n\nAbstract:  \nHow could machines learn as efficiently as humans and animals? \nHow could machines learn how the world works and acquire common sense? \nHow could machines learn to reason and plan? \nCurrent AI architectures\, such as Auto-Regressive Large Language Models fall short. I will propose a modular cognitive architecture that may constitute a path towards answering these questions. The centerpiece of the architecture is a predictive world model that allows the system to predict the consequences of its actions and to plan a sequence of actions that optimize a set of objectives. The objectives include guardrails that guarantee the system’s controllability and safety. The world model employs a Hierarchical Joint Embedding Predictive Architecture (H-JEPA) trained with self-supervised learning. The JEPA learns abstract representations of the percepts that are simultaneously maximally informative and maximally predictable. The corresponding working paper is available here: https://openreview.net/forum?id=BZ5a1r-kVsf \n\n\n\n\n\n\n\n\n\n\nThis event is made possible by the generous funding of Ding Lei and Harry Shum. \n 
URL:https://cmsa.fas.harvard.edu/event/2024_dingshum/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Ding Shum Lecture,Event,Public Lecture,Special Lectures
ATTACH;FMTTYPE=image/jpeg:https://cmsa.fas.harvard.edu/media/Ding-Shum-2024_8.5x11.jpg
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20240229T160000
DTEND;TZID=America/New_York:20240229T170000
DTSTAMP:20260426T045537Z
CREATED:20240103T185919Z
LAST-MODIFIED:20250409T192246Z
UID:10001107-1709222400-1709226000@cmsa.fas.harvard.edu
SUMMARY:Fourth Annual Yip Lecture | Josh Tenenbaum | How to grow a mind from a brain: From guessing and betting to thinking and talking
DESCRIPTION:Josh Tenenbaum gave the Fourth Annual Yip Lecture on February 29\, 2024. \nTitle: How to grow a mind from a brain: From guessing and betting to thinking and talking\nTime: 4:00-5:00 pm ET \nLocation: Harvard Science Center \nThe Yip Lecture takes place thanks to the support of Dr. Shing-Yiu Yip. \n \n\nThe previous Yip Lecture featured Andrew Strominger (Harvard)\, who spoke on Black Holes.
URL:https://cmsa.fas.harvard.edu/event/yip-2024/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Event,Public Lecture,Special Lectures,Yip Lecture Series
ATTACH;FMTTYPE=image/jpeg:https://cmsa.fas.harvard.edu/media/Tenenbaum-1_MIT-768x513-1.jpeg
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20240220T160000
DTEND;TZID=America/New_York:20240220T173000
DTSTAMP:20260426T045537Z
CREATED:20240301T093539Z
LAST-MODIFIED:20250328T150527Z
UID:10002892-1708444800-1708450200@cmsa.fas.harvard.edu
SUMMARY:Math Science Lectures in Honor of Raoul Bott: Maggie Miller: Fibered ribbon knots vs. major 4D conjectures
DESCRIPTION:Fibered ribbon knots vs. major 4D conjectures \nLocation: Harvard University Science Center Hall A & via Zoom webinar \nDates: Feb 20 & 22\, 2024 \nTime: 4:00-5:30 pm \nMaggie Miller is an assistant professor in the mathematics department at the University of Texas at Austin and a Clay Research Fellow. \nThis is the fourth annual Math Science Lecture Series held in Honor of Raoul Bott. \nTalk topic:  Fibered ribbon knots vs. major 4D conjectures\n  \n \nFeb. 20\, 2024 \nTitle: Fibered ribbon knots and the Poincaré conjecture \nAbstract: A knot is “fibered” if its complement in S^3 is the total space of a bundle over the circle\, and ribbon if it bounds a smooth disk into B^4 with no local maxima with respect to radial height. A theorem of Casson-Gordon from 1983 implies that if a fibered ribbon knot does not bound any fibered disk in B^4\, then the smooth 4D Poincaré conjecture is false. I’ll show that unfortunately (?) many ribbon disks bounded by fibered knots are fibered\, giving some criteria for extending fibrations and discuss how one might search for non-fibered examples. \n  \n \nFeb. 22\, 2024 \nTitle: Fibered knots and the slice-ribbon conjecture \nAbstract: The slice-ribbon conjecture (Fox\, 1962) posits that if a knot bounds any smooth disk into B^4\, it also bounds a ribbon disk. The previously discussed work of Casson-Gordon yields an obstruction to many fibered knots being ribbon\, yielding many interesting potential counterexamples to this conjecture — if any happy to bound a non-ribbon disk. In 2022\, Dai-Kong-Mallick-Park-Stoffregen showed that unfortunately( ?) many of these knots don’t bound a smooth disk into B^4 and thus can’t disprove the conjecture. I’ll show a simple alternate proof that a certain interesting knot (the (2\,1)-cable of the figure eight) isn’t slice and discuss remaining open questions. This talk is joint with Paolo Aceto\, Nickolas Castro\, JungHwan Park\, and Andras Stipsicz. 
 \n  \nTalk Chair: Cliff Taubes (Harvard Mathematics) \nModerator: Freid Tong (Harvard CMSA) \n\nRaoul Bott (9/24/1923 – 12/20/2005) is known for the Bott periodicity theorem\, the Morse–Bott functions\, and the Borel–Bott–Weil theorem.
URL:https://cmsa.fas.harvard.edu/event/mathscibott_2024/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Math Science Lectures in Honor of Raoul Bott,Public Lecture,Special Lectures
ATTACH;FMTTYPE=image/png:https://cmsa.fas.harvard.edu/media/Bott-Lecture_Maggie-Miller_letter_web.png
END:VEVENT
BEGIN:VEVENT
DTSTART;VALUE=DATE:20231027
DTEND;VALUE=DATE:20231029
DTSTAMP:20260426T045537Z
CREATED:20230904T060021Z
LAST-MODIFIED:20240624T182341Z
UID:10000002-1698364800-1698537599@cmsa.fas.harvard.edu
SUMMARY:Mathematics in Science: Perspectives and Prospects
DESCRIPTION:Mathematics in Science: Perspectives and Prospects\nA showcase of mathematics in interaction with physics\, computer science\, biology\, and beyond. \nOctober 27–28\, 2023 \nLocation: Harvard University Science Center Hall D & via Zoom. \nDirections and Recommended Lodging \nMathematics in Science: Perspectives and Prospects Youtube Playlist \n  \n\nSpeakers \n\nNima Arkani-Hamed (IAS)\nConstantinos Daskalakis (MIT)\nAlison Etheridge (Oxford)\nMike Freedman (Harvard CMSA)\nGreg Moore (Rutgers)\nBernd Sturmfels (MPI Leipzig)\n\n\nOrganizers \n\nMichael R. Douglas (Harvard CMSA)\nDan Freed (Harvard Math & CMSA)\nMike Hopkins (Harvard Math)\nCumrun Vafa (Harvard Physics)\nHorng-Tzer Yau (Harvard Math)\n\nSchedule\nFriday\, October 27\, 2023 \n\n\n\n2:00–3:15 pm\n\nGreg Moore (Rutgers) \nTitle: Remarks on Physical Mathematics \nAbstract: I will describe some examples of the vigorous modern dialogue between mathematics and theoretical physics (especially high energy and condensed matter physics). I will begin by recalling Stokes’ phenomenon and explain how it is related to some notable developments in quantum field theory from the past 30 years. Time permitting\, I might also say something about the dialogue between mathematicians working on the differential topology of four-manifolds and physicists working on supersymmetric quantum field theories. But I haven’t finished writing the talk yet\, so I don’t know how it will end any more than you do. \nSlides (PDF) \n \n\n\n\n3:15–3:45 pm\nBreak\n\n\n3:45–5:00 pm\n\nBernd Sturmfels (MPI Leipzig) \nTitle: Algebraic Varieties in Quantum Chemistry \nAbstract: We discuss the algebraic geometry behind coupled cluster (CC) theory of quantum many-body systems. The high-dimensional eigenvalue problems that encode the electronic Schroedinger equation are approximated by a hierarchy of polynomial systems at various levels of truncation. The exponential parametrization of the eigenstates gives rise to truncation varieties. 
 These generalize Grassmannians in their Pluecker embedding. We explain how to derive Hamiltonians\, we offer a detailed study of truncation varieties and their CC degrees\, and we present the state of the art in solving the CC equations. This is joint work with Fabian Faulstich and Svala Sverrisdóttir. \nSlides (PDF) \n \n\n\n\n\n  \nSaturday\, October 28\, 2023 \n\n\n\n9:00 am\nBreakfast\n\n\n9:30–10:45 am\n\nMike Freedman (Harvard CMSA) \nTitle: ML\, QML\, and Dynamics: What mathematics can help us understand and advance machine learning? \nAbstract: Vannila deep neural nets DNN repeatedly stretch and fold. They are reminiscent of the logistic map and the Smale horseshoe.  What kind of dynamics is responsible for their expressivity and trainability. Is chaos playing a role? Is the Kolmogorov Arnold representation theorem relevant? Large language models are full of linear maps. Might we look for emergent tensor structures in these highly trained maps in analogy with emergent tensor structures at local minima of certain loss functions in high-energy physics. \nSlides (PDF) \n \n\n\n\n10:45–11:15 am\nBreak\n\n\n11:15 am–12:30 pmvia Zoom\n\nNima Arkani-Hamed (IAS) \nTitle: All-Loop Scattering as A Counting Problem \nAbstract: I will describe a new understanding of scattering amplitudes based on fundamentally combinatorial ideas in the kinematic space of the scattering data. I first discuss a toy model\, the simplest theory of colored scalar particles with cubic interactions\, at all loop orders and to all orders in the topological ‘t Hooft expansion. I will present a novel formula for loop-integrated amplitudes\, with no trace of the conventional sum over Feynman diagrams\, but instead determined by a beautifully simple counting problem attached to any order of the topological expansion. A surprisingly simple shift of kinematic variables converts this apparent toy model into the realistic physics of pions and Yang-Mills theory. 
 These results represent a significant step forward in the decade-long quest to formulate the fundamental physics of the real world in a new language\, where the rules of spacetime and quantum mechanics\, as reflected in the principles of locality and unitarity\, are seen to emerge from deeper mathematical structures. \n \n\n\n\n12:30–2:00 pm\nLunch break\n\n\n2:00–3:15 pm\n\nConstantinos Daskalakis (MIT) \nTitle: How to train deep neural nets to think strategically \nAbstract: Many outstanding challenges in Deep Learning lie at its interface with Game Theory: from playing difficult games like Go to robustifying classifiers against adversarial attacks\, training deep generative models\, and training DNN-based models to interact with each other and with humans. In these applications\, the utilities that the agents aim to optimize are non-concave in the parameters of the underlying DNNs; as a result\, Nash equilibria fail to exist\, and standard equilibrium analysis is inapplicable. So how can one train DNNs to be strategic? What is even the goal of the training? We shed light on these challenges through a combination of learning-theoretic\, complexity-theoretic\, game-theoretic and topological techniques\, presenting obstacles and opportunities for Deep Learning and Game Theory going forward. \nSlides (PDF) \n \n\n\n\n3:15–3:45 pm\nBreak\n\n\n3:45–5:00 pm\n\nAlison Etheridge (Oxford) \nTitle: Modelling hybrid zones \nAbstract: Mathematical models play a fundamental role in theoretical population genetics and\, in turn\, population genetics provides a wealth of mathematical challenges. In this lecture we investigate the interplay between a particular (ubiquitous) form of natural selection\, spatial structure\, and\, if time permits\, so-called genetic drift. A simple mathematical caricature will uncover the importance of the shape of the domain inhabited by a species for the effectiveness of natural selection. 
 \nSlides (PDF) \n \n\n\n\n\nLimited funding to help defray travel expenses is available for graduate students and recent PhDs. If you are a graduate student or postdoc and would like to apply for support\, please register above and send an email to mathsci2023@cmsa.fas.harvard.edu no later than October 9\, 2023. \nPlease include your name\, address\, current status\, university affiliation\, citizenship\, and area of study. F1 visa holders are eligible to apply for support. If you are a graduate student\, please send a brief letter of recommendation from a faculty member to explain the relevance of the conference to your studies or research. If you are a postdoc\, please include a copy of your CV. \n\n 
URL:https://cmsa.fas.harvard.edu/event/mathematics-in-science/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Conference,Event
ATTACH;FMTTYPE=image/png:https://cmsa.fas.harvard.edu/media/MathScience2023Poster_8.5x11.png
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20230831T090000
DTEND;TZID=America/New_York:20230901T170000
DTSTAMP:20260426T045537Z
CREATED:20230904T063654Z
LAST-MODIFIED:20251026T043812Z
UID:10000820-1693472400-1693587600@cmsa.fas.harvard.edu
SUMMARY:Big Data Conference 2023
DESCRIPTION:On August 31-Sep 1\, 2023 the CMSA hosted the ninth annual Conference on Big Data. The Big Data Conference features speakers from the Harvard community as well as scholars from across the globe\, with talks focusing on computer science\, statistics\, math and physics\, and economics. \nSpeakers: \n\nJacob Andreas\, MIT\nMorgane Austern\, Harvard\nAlbert-László Barabási\, Northeastern\nRachel Cummings\, Columbia\nMelissa Dell\, Harvard\nJianqing Fan\, Princeton\nTommi Jaakkola\, MIT\nAnkur Moitra\, MIT\nMark Sellke\, Harvard\nMarinka Zitnik\, Harvard Medical School\n\nOrganizers: \n\nMichael Douglas\, CMSA\, Harvard University\nYannai Gonczarowski\, Economics and Computer Science\, Harvard University\nLucas Janson\, Statistics and Computer Science\, Harvard University\nTracy Ke\, Statistics\, Harvard University\nHorng-Tzer Yau\, Mathematics and CMSA\, Harvard University\nYue Lu\, Electrical Engineering and Applied Mathematics\, Harvard University\n\nSchedule\n(PDF download) \nThursday\, August 31\, 2023 \n\n\n\n9:00 AM\nBreakfast\n\n\n9:30 AM\nIntroductions\n\n\n9:45–10:45 AM\nAlbert-László Barabási (Northeastern\, Harvard) \nTitle: From Network Medicine to the Foodome: The Dark Matter of Nutrition \nAbstract: A disease is rarely a consequence of an abnormality in a single gene but reflects perturbations to the complex intracellular network. Network medicine offer a platform to explore systematically not only the molecular complexity of a particular disease\, leading to the identification of disease modules and pathways\, but also the molecular relationships between apparently distinct (patho) phenotypes. As an application\, I will explore how we use network medicine to uncover the role individual food molecules in our health. Indeed\, our current understanding of how diet affects our health is limited to the role of 150 key nutritional components systematically tracked by the USDA and other national databases in all foods. 
Yet\, these nutritional components represent only a tiny fraction of the over 135\,000 distinct\, definable biochemicals present in our food. While many of these biochemicals have documented effects on health\, they remain unquantified in any systematic fashion across different individual foods. Their invisibility to experimental\, clinical\, and epidemiological studies defines them as the ‘Dark Matter of Nutrition.’ I will speak about our efforts to develop a high-resolution library of this nutritional dark matter\, and efforts to understand the role of these molecules on health\, opening novel avenues by which to understand\, avoid\, and control disease. \nhttps://youtu.be/UmgzUwi6K3E\n\n\n10:45–11:00 AM\nBreak\n\n\n11:00 AM–12:00 PM\nRachel Cummings (Columbia) \nTitle: Differentially Private Algorithms for Statistical Estimation Problems \nAbstract: Differential privacy (DP) is widely regarded as a gold standard for privacy-preserving computation over users’ data.  It is a parameterized notion of database privacy that gives a rigorous worst-case bound on the information that can be learned about any one individual from the result of a data analysis task. Algorithmically it is achieved by injecting carefully calibrated randomness into the analysis to balance privacy protections with accuracy of the results.\nIn this talk\, we will survey recent developments in the development of DP algorithms for three important statistical problems\, namely online learning with bandit feedback\, causal interference\, and learning from imbalanced data. For the first problem\, we will show that Thompson sampling — a standard bandit algorithm developed in the 1930s — already satisfies DP due to the inherent randomness of the algorithm. For the second problem of causal inference and counterfactual estimation\, we develop the first DP algorithms for synthetic control\, which has been used non-privately for this task for decades. 
Finally\, for the problem of imbalanced learning\, where one class is severely underrepresented in the training data\, we show that combining existing techniques such as minority oversampling perform very poorly when applied as pre-processing before a DP learning algorithm; instead we propose novel approaches for privately generating synthetic minority points. \nBased on joint works with Marco Avella Medina\, Vishal Misra\, Yuliia Lut\, Tingting Ou\, Saeyoung Rho\, and Ethan Turok. \nhttps://youtu.be/0cPE6rb1Roo\n\n\n12:00–1:30 PM\nLunch\n\n\n1:30–2:30 PM\nMorgane Austern (Harvard) \nTitle: To split or not to split that is the question: From cross validation to debiased machine learning \nAbstract: Data splitting is a ubiquitous method in statistics with examples ranging from cross-validation to cross-fitting. However\, despite its prevalence\, theoretical guidance regarding its use is still lacking. In this talk\, we will explore two examples and establish an asymptotic theory for it. In the first part of this talk\, we study the cross-validation method\, a ubiquitous method for risk estimation\, and establish its asymptotic properties for a large class of models and with an arbitrary number of folds. Under stability conditions\, we establish a central limit theorem and Berry-Esseen bounds for the cross-validated risk\, which enable us to compute asymptotically accurate confidence intervals. Using our results\, we study the statistical speed-up offered by cross-validation compared to a train-test split procedure. We reveal some surprising behavior of the cross-validated risk and establish the statistically optimal choice for the number of folds. In the second part of this talk\, we study the role of cross-fitting in the generalized method of moments with moments that also depend on some auxiliary functions. 
Recent lines of work show how one can use generic machine learning estimators for these auxiliary problems\, while maintaining asymptotic normality and root-n consistency of the target parameter of interest. The literature typically requires that these auxiliary problems are fitted on a separate sample or in a cross-fitting manner. We show that when these auxiliary estimation algorithms satisfy natural leave-one-out stability properties\, then sample splitting is not required. This allows for sample reuse\, which can be beneficial in moderately sized sample regimes. \nhttps://youtu.be/L_pHxgoQSgU\n\n\n2:30–2:45 PM\nBreak\n\n\n2:45–3:45 PM\nAnkur Moitra (MIT) \nTitle: Learning from Dynamics \nAbstract: Linear dynamical systems are the canonical model for time series data. They have wide-ranging applications and there is a vast literature on learning their parameters from input-output sequences. Moreover they have received renewed interest because of their connections to recurrent neural networks.\nBut there are wide gaps in our understanding. Existing works have only asymptotic guarantees or else make restrictive assumptions\, e.g. that preclude having any long-range correlations. In this work\, we give a new algorithm based on the method of moments that is computationally efficient and works under essentially minimal assumptions. Our work points to several missed connections\, whereby tools from theoretical machine learning including tensor methods\, can be used in non-stationary settings. \nhttps://youtu.be/UmgzUwi6K3E\n\n\n3:45–4:00 PM\nBreak\n\n\n4:00–5:00 PM\nMark Sellke (Harvard) \nTitle: Algorithmic Thresholds for Spherical Spin Glasses \nAbstract: High-dimensional optimization plays a crucial role in modern statistics and machine learning. I will present recent progress on non-convex optimization problems with random objectives\, focusing on the spherical p-spin glass. 
This model is related to spiked tensor estimation and has been studied in probability and physics for decades. We will see that a natural class of “stable” optimization algorithms gets stuck at an algorithmic threshold related to geometric properties of the landscape. The algorithmic threshold value is efficiently attained via Langevin dynamics or by a second-order ascent method of Subag. Much of this picture extends to other models\, such as random constraint satisfaction problems at high clause density. \nhttps://youtu.be/JoghiwiIbT8\n\n\n6:00 – 8:00 PM\nBanquet for organizers and speakers\n\n\n\n  \nFriday\, September 1\, 2023 \n\n\n\n9:00 AM\nBreakfast\n\n\n9:30 AM\nIntroductions\n\n\n9:45–10:45 AM\nJacob Andreas (MIT) \nTitle: What Learning Algorithm is In-Context Learning? \nAbstract: Neural sequence models\, especially transformers\, exhibit a remarkable capacity for “in-context” learning. They can construct new predictors from sequences of labeled examples (x\,f(x)) presented in the input without further parameter updates. I’ll present recent findings suggesting that transformer-based in-context learners implement standard learning algorithms implicitly\, by encoding smaller models in their activations\, and updating these implicit models as new examples appear in the context\, using in-context linear regression as a model problem. First\, I’ll show by construction that transformers can implement learning algorithms for linear models based on gradient descent and closed-form ridge regression. Second\, I’ll show that trained in-context learners closely match the predictors computed by gradient descent\, ridge regression\, and exact least-squares regression\, transitioning between different predictors as transformer depth and dataset noise vary\, and converging to Bayesian estimators for large widths and depths. 
Finally\, we present preliminary evidence that in-context learners share algorithmic features with these predictors: learners’ late layers non-linearly encode weight vectors and moment matrices. These results suggest that in-context learning is understandable in algorithmic terms\, and that (at least in the linear case) learners may rediscover standard estimation algorithms. This work is joint with Ekin Akyürek at MIT\, and Dale Schuurmans\, Tengyu Ma and Denny Zhou at Stanford. \nhttps://youtu.be/UNVl64G3BzA\n\n\n10:45–11:00 AM\nBreak\n\n\n11:00 AM–12:00 PM\nTommi Jaakkola (MIT) \nTitle: Generative modeling and physical processes \nAbstract: Rapidly advancing deep distributional modeling techniques offer a number of opportunities for complex generative tasks\, from natural sciences such as molecules and materials to engineering. I will discuss generative approaches inspired from physical processes including diffusion models and more recent electrostatic models (Poisson flow)\, and how they relate to each other in terms of embedding dimension. From the point of view of applications\, I will highlight our recent work on SE(3) invariant distributional modeling over backbone 3D structures with ability to generate designable monomers without relying on pre-trained protein structure prediction methods as well as state of the art image generation capabilities (Poisson flow). Time permitting\, I will also discuss recent analysis of efficiency of sample generation in such models. \nhttps://youtu.be/GLEwQAWQ85E\n\n\n12:00–1:30 PM\nLunch\n\n\n1:30–2:30 PM\nMarinka Zitnik (Harvard Medical School) \nTitle: Multimodal Learning on Graphs \nAbstract: Understanding biological and natural systems requires modeling data with underlying geometric relationships across scales and modalities such as biological sequences\, chemical constraints\, and graphs of 3D spatial or biological interactions. 
I will discuss unique challenges for learning from multimodal datasets that are due to varying inductive biases across modalities and the potential absence of explicit graphs in the input. I will describe a framework for structure-inducing pretraining that allows for a comprehensive study of how relational structure can be induced in pretrained language models. We use the framework to explore new graph pretraining objectives that impose relational structure in the induced latent spaces—i.e.\, pretraining objectives that explicitly impose structural constraints on the distance or geometry of pretrained models. Applications in genomic medicine and therapeutic science will be discussed. These include TxGNN\, an AI model enabling zero-shot prediction of therapeutic use across over 17\,000 diseases\, and PINNACLE\, a contextual graph AI model dynamically adjusting its outputs to contexts in which it operates. PINNACLE enhances 3D protein structure representations and predicts the effects of drugs at single-cell resolution. \nhttps://youtu.be/hjt4nsN_8iM\n\n\n2:30–2:45 PM\nBreak\n\n\n2:45–3:45 PM\nJianqing Fan (Princeton) \nTitle: UTOPIA: Universally Trainable Optimal Prediction Intervals Aggregation \nAbstract: Uncertainty quantification for prediction is an intriguing problem with significant applications in various fields\, such as biomedical science\, economic studies\, and weather forecasts. Numerous methods are available for constructing prediction intervals\, such as quantile regression and conformal predictions\, among others. Nevertheless\, model misspecification (especially in high-dimension) or sub-optimal constructions can frequently result in biased or unnecessarily-wide prediction intervals. 
In this work\, we propose a novel and widely applicable technique for aggregating multiple prediction intervals to minimize the average width of the prediction band along with coverage guarantee\, called Universally Trainable Optimal Predictive Intervals Aggregation (UTOPIA). The method also allows us to directly construct predictive bands based on elementary basis functions.  Our approach is based on linear or convex programming which is easy to implement. All of our proposed methodologies are supported by theoretical guarantees on the coverage probability and optimal average length\, which are detailed in this paper. The effectiveness of our approach is convincingly demonstrated by applying it to synthetic data and two real datasets on finance and macroeconomics. (Joint work with Jiawei Ge and Debarghya Mukherjee). \nhttps://youtu.be/WY6dr1oEOrk\n\n\n3:45–4:00 PM\nBreak\n\n\n4:00–5:00 PM\nMelissa Dell (Harvard) \nTitle: Efficient OCR for Building a Diverse Digital History \nAbstract: Many users consult digital archives daily\, but the information they can access is unrepresentative of the diversity of documentary history. The sequence-to-sequence architecture typically used for optical character recognition (OCR) – which jointly learns a vision and language model – is poorly extensible to low-resource document collections\, as learning a language-vision model requires extensive labeled sequences and compute. This study models OCR as a character-level image retrieval problem\, using a contrastively trained vision encoder. Because the model only learns characters’ visual features\, it is more sample-efficient and extensible than existing architectures\, enabling accurate OCR in settings where existing solutions fail. Crucially\, it opens new avenues for community engagement in making digital history more representative of documentary history. \nhttps://youtu.be/u0JY9vURUAs\n\n\n\n  \n\nInformation about the 2022 Big Data Conference can be found here.
URL:https://cmsa.fas.harvard.edu/event/bigdata_2023/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Big Data Conference,Conference,Event
ATTACH;FMTTYPE=image/png:https://cmsa.fas.harvard.edu/media/Big-Data-2023_letter-1.png
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20230511T133000
DTEND;TZID=America/New_York:20230511T143000
DTSTAMP:20260426T045537
CREATED:20230808T180145Z
LAST-MODIFIED:20240111T084858Z
UID:10001199-1683811800-1683815400@cmsa.fas.harvard.edu
SUMMARY:How do the eigenvalues of a large non-Hermitian random matrix behave?
DESCRIPTION:Probability Seminar \nSpeaker: Giorgio Cipolloni (Princeton) \nTitle: How do the eigenvalues of a large non-Hermitian random matrix behave? \nAbstract: We prove that the fluctuations of the eigenvalues converge to the Gaussian Free Field (GFF) on the unit disk. These fluctuations appear on a non-natural scale\, due to strong correlations between the eigenvalues. Then\, motivated by the long time behaviour of the ODE \\dot{u}=Xu\, we give a precise estimate on the eigenvalue with the largest real part and on the spectral radius of X. \nLocation: Science Center Room 232
URL:https://cmsa.fas.harvard.edu/event/probability-51123/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Probability Seminar
ATTACH;FMTTYPE=image/png:https://cmsa.fas.harvard.edu/media/CMSA-Probability-Seminar-05.11.23.png
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20230410T150000
DTEND;TZID=America/New_York:20230410T160000
DTSTAMP:20260426T045537
CREATED:20230808T174720Z
LAST-MODIFIED:20240111T070433Z
UID:10001194-1681138800-1681142400@cmsa.fas.harvard.edu
SUMMARY:Localization for random band matrices
DESCRIPTION:Probability Seminar \n*Please note room change: Science Center 232* \n\nSpeaker: Ron Peled (Tel Aviv University) \nTitle: Localization for random band matrices \nAbstract: I will explain an approach via “an adaptive Mermin-Wagner style shift” which proves localization of N x N Gaussian random band matrices with band width W satisfying W << N^{1/4}. \nJoint work with Giorgio Cipolloni\, Jeffrey Schenker and Jacob Shapiro.
URL:https://cmsa.fas.harvard.edu/event/probability-41023/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Probability Seminar
ATTACH;FMTTYPE=image/png:https://cmsa.fas.harvard.edu/media/CMSA-Probability-Seminar-04.10.23.png
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20230407T140000
DTEND;TZID=America/New_York:20230408T170000
DTSTAMP:20260426T045537
CREATED:20230705T055126Z
LAST-MODIFIED:20240229T095034Z
UID:10000067-1680876000-1680973200@cmsa.fas.harvard.edu
SUMMARY:Current Developments in Mathematics Conference 2023
DESCRIPTION:Current Developments in Mathematics 2023\nHarvard University Science Center\, Lecture Hall C\nApril 7-8\, 2023\nSpeakers: \nAmol Aggarwal – Columbia University\nBhargav Bhatt – Institute for Advanced Study\, Princeton University\, & University of Michigan\nPaul Bourgade – New York University\, Courant Institute\nVesselin Dimitrov – Institute for Advanced Study & Georgia Institute of Technology\nGreta Panova – University of Southern California\n\n\n\n\nFor more information\, and to register\, please visit:\nCurrent Developments in Mathematics 2023 \n \n  \nOrganizers: David Jerison\, Paul Seidel\, Nike Sun (MIT); Denis Auroux\, Mark Kisin\, Lauren Williams\, Horng-Tzer Yau \nSponsored by the National Science Foundation\, Harvard University Mathematics\, Harvard University Center of Mathematical Sciences and Applications\, and the Massachusetts Institute of Technology. \nHarvard University is committed to maintaining a safe and healthy educational and work environment in which no member of the University community is\, on the basis of sex\, sexual orientation\, or gender identity\, excluded from participation in\, denied the benefits of\, or subjected to discrimination in any University program or activity. More information can be found here.
URL:https://cmsa.fas.harvard.edu/event/cdm-2023/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Conference,Event
ATTACH;FMTTYPE=image/png:https://cmsa.fas.harvard.edu/media/CDM-2023-Poster.png
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20230321T170000
DTEND;TZID=America/New_York:20230321T180000
DTSTAMP:20260426T045537
CREATED:20230705T053409Z
LAST-MODIFIED:20250409T192224Z
UID:10000065-1679418000-1679421600@cmsa.fas.harvard.edu
SUMMARY:2023 Ding Shum Lecture
DESCRIPTION:On March 21\, 2023\, the CMSA hosted the fourth annual Ding Shum Lecture\, given by Cynthia Dwork (Harvard SEAS and Microsoft Research). \n\n\nTime: 5:00-6:00 pm ET \nLocation: Harvard University Science Center Hall D \nThis event was held in person and via Zoom webinar. \n\n  \n\nTitle: Measuring Our Chances: Risk Prediction in This World and its Betters \nAbstract: Prediction algorithms score individuals\, assigning a number between zero and one that is often interpreted as an individual probability: a 0.7 “chance” that this child is in danger in the home; an 80% “probability” that this woman will succeed if hired; a 1/3 “likelihood” that they will graduate within 4 years of admission. But what do words like “chance\,” “probability\,” and “likelihood” actually mean for a non-repeatable activity like going to college? This is a deep and unresolved problem in the philosophy of probability. Without a compelling mathematical definition we cannot specify what an (imagined) perfect risk prediction algorithm should produce\, nor even how an existing algorithm should be evaluated. Undaunted\, AI and machine learned algorithms churn these numbers out in droves\, sometimes with life-altering consequences. \nAn explosion of recent research deploys insights from the theory of pseudo-random numbers – sequences of 0’s and 1’s that “look random” but in fact have structure – to yield a tantalizing answer to the evaluation problem\, together with a supporting algorithmic framework with roots in the theory of algorithmic fairness. \nWe can aim even higher. Both (1) our qualifications\, health\, and skills\, which form the inputs to a prediction algorithm\, and (2) our chances of future success\, which are the desired outputs from the ideal risk prediction algorithm\, are products of our interactions with the real world. But the real world is systematically inequitable. 
How\, and when\, can we hope to approximate probabilities not in this world\, but in a better world\, one for which\, unfortunately\, we have no data at all? Surprisingly\, this novel question is inextricably bound with the very existence of nondeterminism. \n\n\nProfessor Cynthia Dwork is Gordon McKay Professor of Computer Science at the Harvard University John A. Paulson School of Engineering and Applied Sciences\, Affiliated Faculty at Harvard Law School\, and Distinguished Scientist at Microsoft. She uses theoretical computer science to place societal problems on a firm mathematical foundation. \nHer recent awards and honors include the 2020 ACM SIGACT and IEEE TCMF Knuth Prize\, the 2020 IEEE Hamming Medal\, and the 2017 Gödel Prize. \n\n\n\n\nTalk Chair: Horng-Tzer Yau (Harvard Mathematics & CMSA)\n\nModerator: Faidra Monachou (Harvard CMSA)\n\n\n\n\n\n\n\n\n\nThe 2020-2022 Ding Shum lectures were postponed due to Covid-19. \n\n\n\nThe 2019 Ding Shum Lecture featured Ronald Rivest on “Election Security.”\n\n\nThis event is made possible by the generous funding of Ding Lei and Harry Shum. \n\n\nWatch the Lecture on Youtube:
URL:https://cmsa.fas.harvard.edu/event/2023-ding-shum-lecture/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Ding Shum Lecture,Event,Special Lectures
ATTACH;FMTTYPE=image/jpeg:https://cmsa.fas.harvard.edu/media/Cynthia-Dwork.jpg
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20230202T190000
DTEND;TZID=America/New_York:20230202T200000
DTSTAMP:20260426T045537
CREATED:20230705T050204Z
LAST-MODIFIED:20250328T200143Z
UID:10000062-1675364400-1675368000@cmsa.fas.harvard.edu
SUMMARY:Third Annual Yip Lecture
DESCRIPTION:Andrew Strominger will give the Third Annual Yip Lecture on February 2\, 2023. \nTime: 7:00-8:00 pm ET \nLocation: Harvard Science Center Hall A \n  \nTitle: Black Holes: The Most Mysterious Objects in the Universe \nAbstract: In the last decade black holes have come to center stage in both theoretical and observational science. Theoretically\, they were shown a half-century ago by Stephen Hawking and others to obey a precise but still-mysterious set of laws which imply they are paradoxically both the simplest and most complex objects in the universe. Compelling progress on this paradox has occurred recently. Observationally\, they have finally and dramatically been seen in the sky\, including at LIGO and the Event Horizon Telescope. Future prospects for progress on both fronts hinge on emergent symmetries occurring near the black holes. An elementary presentation of aspects of these topics and their interplay will be given. \nAndrew Strominger is the Gwill E. York Professor of Physics and a senior faculty member at the Black Hole Initiative at Harvard University. \nIntroduction: Peter Galison (Harvard Physics & Black Hole Initiative) \nModerator: Daniel Kapec (Harvard CMSA) \nThe Yip Lecture takes place thanks to the support of Dr. Shing-Yiu Yip. \n  \n \n\nThe previous Yip Lecture featured Avi Loeb (Harvard)\, who spoke on Extraterrestrial Life.
URL:https://cmsa.fas.harvard.edu/event/yip-2023/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Event,Public Lecture,Special Lectures,Yip Lecture Series
ATTACH;FMTTYPE=image/png:https://cmsa.fas.harvard.edu/media/Yip-2023.png
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20221207T153000
DTEND;TZID=America/New_York:20221207T163000
DTSTAMP:20260426T045537
CREATED:20230807T165823Z
LAST-MODIFIED:20240110T091938Z
UID:10001187-1670427000-1670430600@cmsa.fas.harvard.edu
SUMMARY:Fourier quasicrystals and stable polynomials
DESCRIPTION:Probability Seminar \nNote location change: Science Center Room 300H \nSpeaker: Lior Alon (MIT) \nTitle: Fourier quasicrystals and stable polynomials \nAbstract: The Poisson summation formula says that the countable sum of exp(int)\, over all integers n\, vanishes as long as t is not an integer multiple of 2 pi. Can we find a non-periodic discrete set A\, such that the sum of exp(iat)\, over a in A\, vanishes for all t outside of a discrete set? The surprising answer is yes. Yves Meyer called the atomic measure supported on such a set a crystalline measure. Crystalline measures provide another surprising connection between physics (quasicrystals) and number theory (the zeros of the Zeta and L functions under GRH). A recent work of Pavel Kurasov and Peter Sarnak provided a construction of crystalline measures with ‘good’ convergence (Fourier quasicrystals) using stable polynomials\, a family of multivariate polynomials that were previously used in proving the Lee-Yang circle theorem and the Kadison-Singer conjecture. After providing the needed background\, I will discuss a recent work in progress with Cynthia Vinzant on the classification of these Kurasov-Sarnak measures and their supporting sets. We prove that these sets have well-defined gap distributions. We show that each Kurasov-Sarnak measure decomposes according to the irreducible decomposition of its associated polynomial\, and the measures associated with each irreducible factor is either supported on an arithmetic progression\, or its support has a bounded intersection with any arithmetic progression. Finally\, we construct random Kurasov-Sarnak measures with gap distribution as close as we want to the eigenvalues spacing of a random unitary matrix. \nBased on joint work with Pravesh Kothari.
URL:https://cmsa.fas.harvard.edu/event/probability-12722/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Probability Seminar
ATTACH;FMTTYPE=image/png:https://cmsa.fas.harvard.edu/media/CMSA-Probability-Seminar-12.07.22.png
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20221130T150000
DTEND;TZID=America/New_York:20221130T160000
DTSTAMP:20260426T045537
CREATED:20230807T165526Z
LAST-MODIFIED:20240110T091213Z
UID:10001186-1669820400-1669824000@cmsa.fas.harvard.edu
SUMMARY:Lipschitz properties of transport maps under a log-Lipschitz condition
DESCRIPTION:Probability Seminar \n\nLocation: Room 109\, Harvard Science Center\, 1 Oxford Street\, Cambridge MA 02138\nSpeaker: Dan Mikulincer (MIT) \n\n\nTitle: Lipschitz properties of transport maps under a log-Lipschitz condition \nAbstract: Consider the problem of realizing a target probability measure as a push forward\, by a transport map\, of a given source measure. Typically one thinks about the target measure as being ‘complicated’ while the source is simpler and often more structured. In such a setting\, for applications\, it is desirable to find Lipschitz transport maps which afford the transfer of analytic properties from the source to the target. The talk will focus on Lipschitz regularity when the target measure satisfies a log-Lipschitz condition. \nI will present a construction of a transport map\, constructed infinitesimally along the Langevin flow\, and explain how to analyze its Lipschitz constant. The analysis of this map leads to several new results which apply both to Euclidean spaces and manifolds\, and which\, at the moment\, seem to be out of reach of the classically studied optimal transport theory. \nJoint work with Max Fathi and Yair Shenfeld.
URL:https://cmsa.fas.harvard.edu/event/probability-113022/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Probability Seminar
ATTACH;FMTTYPE=image/png:https://cmsa.fas.harvard.edu/media/CMSA-Probability-Seminar-11.30.22.png
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20220923T043000
DTEND;TZID=America/New_York:20220923T180000
DTSTAMP:20260426T045537
CREATED:20230705T045048Z
LAST-MODIFIED:20231226T164613Z
UID:10000059-1663907400-1663956000@cmsa.fas.harvard.edu
SUMMARY:CMSA/MATH Fall Gathering
DESCRIPTION:CMSA/MATH Fall Gathering \nFriday\, Sep 23\, 2022\n4:30–6:00 pm\n\nAll CMSA and Math affiliates are invited.
URL:https://cmsa.fas.harvard.edu/event/fall_2022/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Event
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20220730T090000
DTEND;TZID=America/New_York:20220801T134500
DTSTAMP:20260426T045537
CREATED:20230705T041718Z
LAST-MODIFIED:20250305T170940Z
UID:10000056-1659171600-1659361500@cmsa.fas.harvard.edu
SUMMARY:Advances in Mathematical Physics
DESCRIPTION:A Conference in Honor of Elliott H. Lieb on his 90th Birthday\nOn July 30 – Aug 1\, 2022 the Harvard Mathematics Department and the CMSA co-hosted a birthday conference in honor of Elliott Lieb. \nThis meeting highlights Elliott’s vast contribution to math and physics. Additionally\, this meeting features Prof. Lieb’s more recent impact in strong subadditivity of entropy and integrable systems (ice model\, Temperley-Lieb algebra etc.). \nVenue:\nJuly 30–31\, 2022: Hall B\, Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138\nAugust 1\, 2022: Hall C\, Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138 \nSchedule (pdf) \nOrganizers:\nMichael Aizenman\, Princeton University\nJoel Lebowitz\, Rutgers University\nRuedi Seiler\, Technische Universität Berlin\nHerbert Spohn\, Technical University of Munich\nHorng-Tzer Yau\, Harvard University\nShing-Tung Yau\, Harvard University\nJakob Yngvason\, University of Vienna \nSPEAKERS:\nRafael Benguria\, Pontificia Universidad Catolica de Chile\nEric Carlen\, Rutgers University\nPhilippe Di Francesco\, University of Illinois\nHugo Duminil-Copin\, IHES\nLászló Erdös\, Institute of Science and Technology Austria\nRupert Frank\, Ludwig Maximilian University of Munich\nJürg Fröhlich\, ETH Zurich\nAlessandro Giuliani\, Università degli Studi Roma Tre\nBertrand Halperin\, Harvard University\nKlaus Hepp\, Institute for Theoretical Physics\, ETH Zurich\nSabine Jansen\, Ludwig Maximilian University of Munich\nMathieu Lewin\, Université Paris-Dauphine\nBruno Nachtergaele\, The University of California\, Davis\nYoshiko Ogata\, University of Tokyo\nRon Peled\, Tel Aviv University\nBenjamin Schlein\, University of Zurich\nRobert Seiringer\, Institute of Science and Technology Austria\nJan Philip Solovej\, University of Copenhagen\nHal Tasaki\, Gakushuin University\nSimone Warzel\, Technical University of Munich\nJun Yin\, The University of California\, Los Angeles \n 
URL:https://cmsa.fas.harvard.edu/event/advances-in-mathematical-physics/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Conference,Event
ATTACH;FMTTYPE=image/png:https://cmsa.fas.harvard.edu/media/Elliott-Lieb-conference-2022_banner-2-1536x734-1.png
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20220404T190000
DTEND;TZID=America/New_York:20220404T200000
DTSTAMP:20260426T045537
CREATED:20230705T082949Z
LAST-MODIFIED:20250328T200243Z
UID:10000085-1649098800-1649102400@cmsa.fas.harvard.edu
SUMMARY:Second Annual Yip Lecture: Extraterrestrial Life
DESCRIPTION:Harvard CMSA hosted the second annual Yip Lecture on April 4\, 2022. \nThe Yip Lecture takes place thanks to the support of Dr. Shing-Yiu Yip.\nThis year’s speaker was Avi Loeb (Harvard). \n  \n \nExtraterrestrial Life\nAbstract: Are we alone? It would be arrogant to think that we are\, given that a quarter of all stars host a habitable Earth-size planet. Upcoming searches will aim to detect markers of life in the atmospheres of planets outside the Solar System. We also have unprecedented technologies to detect signs of intelligent civilizations through industrial pollution of planetary atmospheres\, space archaeology of debris from dead civilizations or artifacts such as photovoltaic cells that are used to re-distribute light and heat on the surface of a planet or giant megastructures. Our own civilization is starting to explore interstellar travel. Essential information may also arrive as a “message in a bottle”\, implying that we should examine carefully any unusual object that arrives to our vicinity from outside the Solar System\, such as `Oumuamua. \n\nAbraham (Avi) Loeb is the Frank B. Baird\, Jr.\, Professor of Science at Harvard University and a bestselling author (in lists of the New York Times\, Wall Street Journal\, Publishers Weekly\, Die Zeit\, Der Spiegel\, L’Express and more). He received a PhD in Physics from the Hebrew University of Jerusalem in Israel at age 24 (1980–1986)\, led the first international project supported by the Strategic Defense Initiative (1983–1988)\, and was subsequently a long-term member of the Institute for Advanced Study at Princeton (1988–1993). Loeb has written 8 books\, including most recently\, Extraterrestrial (Houghton Mifflin Harcourt\, 2021)\, and nearly a thousand papers (with an h-index of 118) on a wide range of topics\, including black holes\, the first stars\, the search for extraterrestrial life\, and the future of the Universe. 
Loeb is the head of the Galileo Project in search for extraterrestrial intelligence\, the Director of the Institute for Theory and Computation (2007–present) within the Harvard-Smithsonian Center for Astrophysics\, and also serves as the Head of the Galileo Project (2021–present). He had been the longest serving Chair of Harvard’s Department of Astronomy (2011–2020) and the Founding Director of Harvard’s Black Hole Initiative (2016–2021). He is an elected fellow of the American Academy of Arts & Sciences\, the American Physical Society\, and the International Academy of Astronautics. Loeb is a former member of the President’s Council of Advisors on Science and Technology (PCAST) at the White House\, a former chair of the Board on Physics and Astronomy of the National Academies (2018–2021) and a current member of the Advisory Board for “Einstein: Visualize the Impossible” of the Hebrew University. He also chairs the Advisory Committee for the Breakthrough Starshot Initiative (2016–present) and serves as the Science Theory Director for all Initiatives of the Breakthrough Prize Foundation. In 2012\, TIME magazine selected Loeb as one of the 25 most influential people in space and in 2020 Loeb was selected among the 14 most inspiring Israelis of the last decade. \nClick here for Loeb’s commentaries on innovation and diversity. \nWebsite: https://www.cfa.harvard.edu/~loeb/ \nSee the Harvard Gazette article featuring Avi Loeb: “Oh\, if I could talk to the aliens” published March 8\, 2022. \nProf. Loeb’s books:\nExtraterrestrial: The First Sign of Intelligent Life Beyond Earth (2021)\nLife in the Cosmos: From Biosignatures to Technosignatures (2021) \nAvi Loeb is the head of the Galileo Project at Harvard. \n\nThe previous Yip Lecture featured Peter Galison (Harvard)\, who spoke on the EHT’s hunt for an objective image of a black hole.
URL:https://cmsa.fas.harvard.edu/event/second-annual-yip-lecture/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Event,Public Lecture,Special Lectures,Yip Lecture Series
ATTACH;FMTTYPE=image/png:https://cmsa.fas.harvard.edu/media/Yip2022_poster_web.png
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20190502T090000
DTEND;TZID=America/New_York:20190505T170000
DTSTAMP:20260426T045537
CREATED:20230715T175235Z
LAST-MODIFIED:20250328T145104Z
UID:10000115-1556787600-1557075600@cmsa.fas.harvard.edu
SUMMARY:Conference on Differential Geometry\, Calabi-Yau theory and General Relativity: A conference in honor of the 70th Birthday of Shing-Tung Yau
DESCRIPTION:On May 2-5\, 2019 the Harvard Mathematics Department hosted a Conference on Differential Geometry\, Calabi-Yau Theory and General Relativity: A conference in honor of the 70th Birthday of Shing-Tung Yau. The conference was held in the  Science Center\, Lecture Hall C.  \nOrganizers:\n\nHorng-Tzer Yau (Harvard)\nWilfried Schmid (Harvard)\nClifford Taubes (Harvard)\nCumrun Vafa (Harvard)\n\nSpeakers:\n\nLydia Bieri\, University of Michigan\nTristan Collins\, MIT\nSimon Donaldson\, Imperial College\nFan Chung Graham\, UC San Diego\nNigel Hitchin\, Oxford University\nJun Li\, Stanford University\nKefeng Liu\, UCLA\nChiu-Chu Melissa Liu\, Columbia University\nAlina Marian\, Northeastern University\nXenia de la Ossa\, Oxford University\nDuong H. Phong\, Columbia University\nRichard Schoen\, UC Irvine\nAndrew Strominger\, Harvard University\nNike Sun\, MIT\nClifford Taubes\, Harvard University\nChuu-Lian Terng\, UC Irvine\nValentino Tosatti\, Northwestern University\nKaren Uhlenbeck\, University of Texas\nCumrun Vafa\, Harvard University\nMu Tao Wang\, Columbia University\nEdward Witten\, IAS\nStephen Yau\, Tsinghua University\, P.R. China
URL:https://cmsa.fas.harvard.edu/event/conference-on-differential-geometry-calabi-yau-theory-and-general-relativity-a-conference-in-honor-of-the-70th-birthday-of-shing-tung-yau/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Conference,Event
ATTACH;FMTTYPE=image/png:https://cmsa.fas.harvard.edu/media/Yau-2-2-791x1024-2.png
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20190418T160000
DTEND;TZID=America/New_York:20190418T170000
DTSTAMP:20260426T045537
CREATED:20230715T174140Z
LAST-MODIFIED:20250328T150900Z
UID:10000113-1555603200-1555606800@cmsa.fas.harvard.edu
SUMMARY:Yip Annual Lecture
DESCRIPTION:On April 18\, 2019 Harvard CMSA hosted the inaugural Yip lecture. The Yip Lecture takes place thanks to the support of Dr. Shing-Yiu Yip. This year’s speaker was Peter Galison (Harvard Physics). \nThe lecture was held from 4:00-5:00pm in Science Center\, Hall A.
URL:https://cmsa.fas.harvard.edu/event/yip-annual-lecture/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Event,Public Lecture,Special Lectures,Yip Lecture Series
ATTACH;FMTTYPE=image/png:https://cmsa.fas.harvard.edu/media/Yip-3-1-791x1024-1-1.png
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20190409T160000
DTEND;TZID=America/New_York:20190409T170000
DTSTAMP:20260426T045537
CREATED:20240212T100146Z
LAST-MODIFIED:20250328T150617Z
UID:10001950-1554825600-1554829200@cmsa.fas.harvard.edu
SUMMARY:Math Science Lectures in Honor of Raoul Bott: Mina Aganagic
DESCRIPTION:On April 9 and 10\, 2019 the CMSA hosted two lectures by Mina Aganagic (UC Berkeley).  This was the second annual Math Science Lecture Series held in honor of Raoul Bott. \nThe lectures took place in Science Center\, Hall C \n“Two math lessons from string theory”\n\n\n\n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n \n \nApril 9\, 2019 \nLecture 1 \nTitle: “Lesson on Integrability” \nAbstract: The quantum Knizhnik-Zamolodchikov (qKZ) equation is a difference generalization of the famous Knizhnik-Zamolodchikov (KZ) equation. The problem to explicitly capture the monodromy of the qKZ equation has been open for over 25 years. I will describe the solution to this problem\, discovered jointly with Andrei Okounkov. The solution comes from the geometry of Nakajima quiver varieties and has a string theory origin. \nPart of the interest in the qKZ monodromy problem is that its solution leads to integrable lattice models\, in parallel to how monodromy matrices of the KZ equation lead to knot invariants. Thus\, our solution of the problem leads to a new\, geometric approach\, to integrable lattice models. There are two other approaches to integrable lattice models\, due to Nekrasov and Shatashvili and to Costello\, Witten and Yamazaki. I’ll describe joint work with Nikita Nekrasov which explains how string theory unifies the three approaches to integrable lattice models.\n\n\n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n  \n \n \nApril 10\, 2019 \nLecture 2 \nTitle: “Lesson on Knot Categorification” \nAbstract: An old problem is to find a unified approach to the knot categorification problem. The new string theory perspective on the qKZ equation I described in the first talk can be used to derive two geometric approaches to the problem. \nThe first approach is based on a category of B-type branes on resolutions of slices in affine Grassmannians. 
The second is based on a category of A-branes in a Landau-Ginzburg theory. The relation between them is two dimensional (equivariant) mirror symmetry. String theory also predicts that a third approach to categorification\, based on counting solutions to five dimensional Haydys-Witten equations\, is equivalent to the first two. \nThis talk is mostly based on joint work with Andrei Okounkov.\n\n\n\n  \n  \n 
URL:https://cmsa.fas.harvard.edu/event/math-science-lectures-in-honor-of-raoul-bott-mina-aganagic/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Event,Math Science Lectures in Honor of Raoul Bott,Public Lecture,Special Lectures
ATTACH;FMTTYPE=image/png:https://cmsa.fas.harvard.edu/media/Aganagic-791x1024-1-232x300-1.png
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20181116T080000
DTEND;TZID=America/New_York:20181117T170000
DTSTAMP:20260426T045537
CREATED:20230715T085736Z
LAST-MODIFIED:20241212T191652Z
UID:10000102-1542355200-1542474000@cmsa.fas.harvard.edu
SUMMARY:Current Developments In Mathematics 2018
DESCRIPTION:Current Developments in Mathematics 2018 Conference. \nFriday\, Nov. 16\, 2018 2:15 pm – 6:00 pm \nSaturday\, Nov. 17\, 2018  9:00 am – 5:00 pm \nHarvard University Science Center\, Hall B \nYoutube Playlist
URL:https://cmsa.fas.harvard.edu/event/current-developments-in-mathematics-2018/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Conference,Event
ATTACH;FMTTYPE=image/jpeg:https://cmsa.fas.harvard.edu/media/cdm-2018-poster.jpeg
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20181024T150000
DTEND;TZID=America/New_York:20181024T160000
DTSTAMP:20260426T045537
CREATED:20230715T085247Z
LAST-MODIFIED:20250328T150854Z
UID:10000101-1540393200-1540396800@cmsa.fas.harvard.edu
SUMMARY:2018 Ding Shum Lecture
DESCRIPTION:  \n \nOn October 24\, 2018\, the CMSA hosted the second annual Ding Shum lecture. This event was made possible by the generous funding of Ding Lei and Harry Shum. Last year featured Leslie Valiant\, who spoke on “learning as a Theory of Everything.” \nThis year will feature Eric Maskin\, who will speak on “How to Improve Presidential Elections: the Mathematics of Voting.” This lecture will take place from 5:00-6:00pm in Science Center\, Hall D.  \nPictures of the event can be found here.
URL:https://cmsa.fas.harvard.edu/event/2018-ding-shum-lecture/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Ding Shum Lecture,Event,Public Lecture,Special Lectures
ATTACH;FMTTYPE=image/png:https://cmsa.fas.harvard.edu/media/Ding-Shum-lecture-2018.png
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20180823T083000
DTEND;TZID=America/New_York:20180824T163000
DTSTAMP:20260426T045537
CREATED:20230715T083801Z
LAST-MODIFIED:20250415T154139Z
UID:10000086-1535013000-1535128200@cmsa.fas.harvard.edu
SUMMARY:Big Data Conference 2018
DESCRIPTION:On August 23-24\, 2018 the CMSA hosted the fourth annual Conference on Big Data. The Conference featured speakers from the Harvard community as well as scholars from across the globe\, with talks focusing on computer science\, statistics\, math and physics\, and economics. \nThe talks were held in Science Center Hall B\, 1 Oxford Street. \nSpeakers:  \n\nMohammad Akbarpour\, Stanford\nEmily Breza\, Harvard\nFrancesca Dominici\, Harvard\nChiara Farronato\, Harvard\nKobi Gal\, Ben Gurion\nJonah Kallenbach\, Reverie Labs\nSamuel Kou\, Harvard\nLaura Kreidberg\, Harvard\nDanielle Li\, MIT\nLibby Mishkin\, Uber\nJosh Speagle\, Harvard\nWilliam Stein\, University of Washington\nAlex Teyltelboym\, University of Oxford\nSergiy Verstyuk\, CMSA/Harvard\n\nOrganizers:  \n\nShing-Tung Yau\, William Caspar Graustein Professor of Mathematics\, Harvard University\nScott Duke Kominers\, MBA Class of 1960 Associate Professor\, Harvard Business\nRichard Freeman\, Herbert Ascherman Professor of Economics\, Harvard University\nJun Liu\, Professor of Statistics\, Harvard University\nHorng-Tzer Yau\, Professor of Mathematics\, Harvard University
URL:https://cmsa.fas.harvard.edu/event/2018-big-data-conference-2/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Big Data Conference,Conference,Event
ATTACH;FMTTYPE=image/png:https://cmsa.fas.harvard.edu/media/Big-Data-2018-4.png
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20180402T163000
DTEND;TZID=America/New_York:20180403T180000
DTSTAMP:20260426T045537
CREATED:20230717T174857Z
LAST-MODIFIED:20260218T203218Z
UID:10000076-1522686600-1522778400@cmsa.fas.harvard.edu
SUMMARY:Math Science Lectures in Honor of Raoul Bott\, April 2-3
DESCRIPTION:On April 2-3\, the CMSA will be hosting two lectures by Freddy Cachazo (Perimeter Institute) on “Geometry and Combinatorics in Particle Interactions.”  This will be the first of the new annual Bott Math Science Lecture Series hosted by the CMSA. \nThe lectures will take place from 4:30-5:30pm in Science Center\, Hall D. \n \n \n  \n 
URL:https://cmsa.fas.harvard.edu/event/math-science-lectures-in-honor-of-raoul-bott-april-2-3/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Event,Math Science Lectures in Honor of Raoul Bott,Special Lectures
ATTACH;FMTTYPE=image/jpeg:https://cmsa.fas.harvard.edu/media/Cachazo-e1519325938458.jpg
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20180124T090000
DTEND;TZID=America/New_York:20180125T170000
DTSTAMP:20260426T045537
CREATED:20230717T173945Z
LAST-MODIFIED:20250305T214037Z
UID:10000042-1516784400-1516899600@cmsa.fas.harvard.edu
SUMMARY:Blockchain Conference
DESCRIPTION:On January 24-25\, 2018 the Center of Mathematical Sciences and Applications will be hosting a conference on distributed-ledger (blockchain) technology. The conference is intended to cover a broad range of topics\, from abstract mathematical aspects (cryptography\, game theory\, graph theory\, theoretical computer science) to concrete applications (in accounting\, government\, economics\, finance\, management\, medicine). The talks will take place in Science Center\, Hall D. \nhttps://youtu.be/FyKCCutxMYo \nPhotos\n \nSpeakers: \n\nJoseph Abadi\, Princeton University\nBenedikt Bünz\, Stanford University\nJake Cacciapaglia\, Nebula Genomics/Harvard Medical School\nEduardo Castello\, Massachusetts Institute of Technology\nAlisa DiCaprio\, R3\nZhiguo He\, University of Chicago\nSteven Kou\, Boston University\nAnne Lafarre\, Tilburg University\nJacob Leshno\, University of Chicago\nBruce Schneier\, Harvard Kennedy School\nDavid Schwartz\, Ripple\nElaine Shi\, Cornell University/Thunder Research\nHong Wan\, NCSU
URL:https://cmsa.fas.harvard.edu/event/blockchain-conference/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Conference,Event
ATTACH;FMTTYPE=image/jpeg:https://cmsa.fas.harvard.edu/media/Blockchain-Final-scaled.jpg
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20171010T170000
DTEND;TZID=America/New_York:20171010T180000
DTSTAMP:20260426T045537
CREATED:20230717T173349Z
LAST-MODIFIED:20250328T150724Z
UID:10000038-1507654800-1507658400@cmsa.fas.harvard.edu
SUMMARY:2017 Ding Shum Lecture
DESCRIPTION:Leslie Valiant will be giving the inaugural talk of the Ding Shum Lectures on Tuesday\, October 10 at 5:00 pm in Science Center Hall D\, Cambridge\, MA. \nLearning as a Theory of Everything \nAbstract: We start from the hypothesis that all the information that resides in living organisms was initially acquired either through learning by an individual or through evolution. Then any unified theory of evolution and learning should be able to characterize the capabilities that humans and other living organisms can possess or acquire. Characterizing these capabilities would tell us about the nature of humans\, and would also inform us about feasible targets for automation. With this purpose we review some background in the mathematical theory of learning. We go on to explain how Darwinian evolution can be formulated as a form of learning. We observe that our current mathematical understanding of learning is incomplete in certain important directions\, and conclude by indicating one direction in which further progress would likely enable broader phenomena of intelligence and cognition to be realized than is possible at present. \n 
URL:https://cmsa.fas.harvard.edu/event/2017-ding-shum-lecture/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Ding Shum Lecture,Event,Public Lecture,Special Lectures
ATTACH;FMTTYPE=image/png:https://cmsa.fas.harvard.edu/media/Ding-Shum-lecture-3.png
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20171002T091500
DTEND;TZID=America/New_York:20171002T173000
DTSTAMP:20260426T045537
CREATED:20230717T172938Z
LAST-MODIFIED:20250328T150846Z
UID:10000036-1506935700-1506965400@cmsa.fas.harvard.edu
SUMMARY:The 2017 Charles River Lectures
DESCRIPTION:Charles River with Bench at Sunset \nJointly organized by Harvard University\, Massachusetts Institute of Technology\, and Microsoft Research New England\, the Charles River Lectures on Probability and Related Topics is a one-day event for the benefit of the greater Boston area mathematics community. \nThe 2017 lectures will take place 9:15am – 5:30pm on Monday\, October 2 at Harvard University  in the Harvard Science Center. \n\n\n\n*************************************************** \nUPDATED LOCATION\nHarvard University\nHarvard Science Center (Halls C & E)\n1 Oxford Street\, Cambridge\, MA 02138 (Map)\nMonday\, October 2\, 2017\n9:15 AM – 5:30 PM\n************************************************** \nPlease note that registration has closed. \nSpeakers:\n\nPaul Bourgade (Courant Institute\, NYU)\nMassimiliano Gubinelli (University of Bonn)\nAndrea Montanari (Stanford University)\nRoman Vershynin (University of California\, Irvine)\nOfer Zeitouni (Weizmann Institute)\n\nAgenda:\nIn Harvard Science Center Hall C: \n8:45 am – 9:15 am: Coffee/light breakfast \n9:15 am – 10:15 am: Ofer Zeitouni \nTitle: Noise stability of the spectrum of large matrices \nAbstract: The spectrum of large non-normal matrices is notoriously sensitive to perturbations\, as the example of nilpotent matrices shows. Remarkably\, the spectrum of these matrices perturbed by polynomially (in the dimension) vanishing additive noise is remarkably stable. I will describe some results and the beginning of a theory. \nThe talk is based on joint work with Anirban Basak and Elliot Paquette\, and earlier works with Feldheim\, Guionnet\, Paquette and Wood.\n\n10:20 am – 11:20 am: Andrea Montanari \nTitle: Algorithms for estimating low-rank matrices  \nAbstract: Many interesting problems in statistics can be formulated as follows. The signal of interest is a large low-rank matrix with additional structure\, and we are given a single noisy view of this matrix. 
We would like to estimate the low rank signal by taking into account optimally the signal structure. I will discuss two types of efficient estimation procedures based on message-passing algorithms and semidefinite programming relaxations\, with an emphasis on asymptotically exact results. \n11:20 am – 11:45 am: Break \n11:45 am – 12:45 pm: Paul Bourgade \nTitle: Random matrices\, the Riemann zeta function and trees \nAbstract: Fyodorov\, Hiary & Keating have conjectured that the maximum of the characteristic polynomial of random unitary matrices behaves like extremes of log-correlated Gaussian fields. This allowed them to predict the typical size of local maxima of the Riemann zeta function along the critical axis. I will first explain the origins of this conjecture\, and then outline the proof for the leading order of the maximum\, for unitary matrices and the zeta function. This talk is based on joint works with Arguin\, Belius\, Radziwill and Soundararajan. \n1:00 pm – 2:30 pm: Lunch \nIn Harvard Science Center Hall E: \n2:45 pm – 3:45 pm: Roman Vershynin \nTitle: Deviations of random matrices and applications \nAbstract: Uniform laws of large numbers provide theoretical foundations for statistical learning theory. This lecture will focus on quantitative uniform laws of large numbers for random matrices. A range of illustrations will be given in high dimensional geometry and data science. \n3:45 pm – 4:15 pm: Break \n4:15 pm – 5:15 pm: Massimiliano Gubinelli \nTitle: Weak universality and Singular SPDEs \nAbstract: Mesoscopic fluctuations of microscopic (discrete or continuous) dynamics can be described in terms of nonlinear stochastic partial differential equations which are universal: they depend on very few details of the microscopic model. This universality comes at a price: due to the extreme irregular nature of the random field sample paths\, these equations turn out to not be well-posed in any classical analytic sense. 
I will review recent progress in the mathematical understanding of such singular equations and of their (weak) universality and their relation with the Wilsonian renormalisation group framework of theoretical physics. \nOrganizers:\n Alexei Borodin\, Henry Cohn\, Vadim Gorin\, Elchanan Mossel\, Philippe Rigollet\, Scott Sheffield\, and H.T. Yau
URL:https://cmsa.fas.harvard.edu/event/the-2017-charles-river-lectures/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Event,Public Lecture,Special Lectures
ATTACH;FMTTYPE=image/jpeg:https://cmsa.fas.harvard.edu/media/Charles-River-Lectures-2017-pdf.jpeg
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20170818T154700
DTEND;TZID=America/New_York:20170819T154700
DTSTAMP:20260426T045537
CREATED:20230717T172600Z
LAST-MODIFIED:20250328T144515Z
UID:10000034-1503071220-1503157620@cmsa.fas.harvard.edu
SUMMARY:2017 Big Data Conference
DESCRIPTION:The Center of Mathematical Sciences and Applications will be hosting a conference on Big Data from August 18 – 19\, 2017\, in Hall D of the Science Center at Harvard University.\nThe Big Data Conference features many speakers from the Harvard community as well as scholars from across the globe\, with talks focusing on computer science\, statistics\, math and physics\, and economics. This is the third conference on Big Data the Center will host as part of our annual events\, and is co-organized by Richard Freeman\, Scott Kominers\, Jun Liu\, Horng-Tzer Yau and Shing-Tung Yau. \nConfirmed Speakers: \n\nMohammad Akbarpour\, Stanford University\nAlbert-László Barabási\, Northeastern University\nNoureddine El Karoui\, University of California\, Berkeley\nRavi Jagadeesan\, Harvard University\nLucas Janson\, Harvard University\nTracy Ke\, University of Chicago\nTze Leung Lai\, Stanford University\nAnnie Liang\, University of Pennsylvania\nMarena Lin\, Harvard University\nNikhil Naik\, Harvard University\nAlex Peysakhovich\, Facebook\nNatesh Pillai\, Harvard University\nJann Spiess\, Harvard University\nBradly Stadie\, Open AI\, University of California\, Berkeley\nZak Stone\, Google\nHau-Tieng Wu\, University of Toronto\nSifan Zhou\, Xiamen University\n\n  \nFollowing the conference\, there will be a two-day workshop from August 20-21. The workshop is organized by Scott Kominers\, and will feature: \n\nJörn Boehnke\, Harvard University\nNikhil Naik\, Harvard University\nBradly Stadie\, Open AI\, University of California\, Berkeley\n\n  \nConference Schedule \nA PDF version of the schedule below can also be downloaded here. 
\nAugust 18\, Friday (Full day)\n\n\n\nTime\nSpeaker\nTopic\n\n\n8:30 am – 9:00 am\n\nBreakfast\n\n\n9:00 am – 9:40 am\nMohammad Akbarpour \nVideo\nTitle: Information aggregation in overlapping generations and the emergence of experts \nAbstract: We study a model of social learning with “overlapping generations”\, where agents meet others and share data about an underlying state over time. We examine under what conditions the society will produce individuals with precise knowledge about the state of the world. There are two information sharing regimes in our model: Under the full information sharing technology\, individuals exchange the information about their point estimates of an underlying state\, as well as their sources (or the precision of their signals) and update their beliefs by taking a weighted average. Under the limited information sharing technology\, agents only observe the information about the point estimates of those they meet\, and update their beliefs by taking a weighted average\, where weights can depend on the sequence of meetings\, as well as the labels. Our main result shows that\, unlike most social learning settings\, using such linear learning rules do not guide the society (or even a fraction of its members) to learn the truth\, and having access to\, and exploiting knowledge of the precision of a source signal are essential for efficient social learning (joint with Amin Saberi & Ali Shameli).\n\n\n9:40 am – 10:20 am\nLucas Janson \nVideo\nTitle: Model-Free Knockoffs For High-Dimensional Controlled Variable Selection \nAbstract: Many contemporary large-scale applications involve building interpretable models linking a large set of potential covariates to a response in a nonlinear fashion\, such as when the response is binary. 
Although this modeling problem has been extensively studied\, it remains unclear how to effectively control the fraction of false discoveries even in high-dimensional logistic regression\, not to mention general high-dimensional nonlinear models. To address such a practical problem\, we propose a new framework of model-free knockoffs\, which reads from a different perspective the knockoff procedure (Barber and Candès\, 2015) originally designed for controlling the false discovery rate in linear models. The key innovation of our method is to construct knockoff variables probabilistically instead of geometrically. This enables model-free knockoffs to deal with arbitrary (and unknown) conditional models and any dimensions\, including when the dimensionality p exceeds the sample size n\, while the original knockoffs procedure is constrained to homoscedastic linear models with n greater than or equal to p. Our approach requires the design matrix be random (independent and identically distributed rows) with a covariate distribution that is known\, although we show our procedure to be robust to unknown/estimated distributions. As we require no knowledge/assumptions about the conditional distribution of the response\, we effectively shift the burden of knowledge from the response to the covariates\, in contrast to the canonical model-based approach which assumes a parametric model for the response but very little about the covariates. To our knowledge\, no other procedure solves the controlled variable selection problem in such generality\, but in the restricted settings where competitors exist\, we demonstrate the superior power of knockoffs through simulations. Finally\, we apply our procedure to data from a case-control study of Crohn’s disease in the United Kingdom\, making twice as many discoveries as the original analysis of the same data. 
\nSlides\n\n\n10:20 am – 10:50 am\n\nBreak\n\n\n10:50 am – 11:30 am\nNoureddine El Karoui \nVideo\nTitle: Random matrices and high-dimensional statistics: beyond covariance matrices \nAbstract: Random matrices have played a central role in understanding very important statistical methods linked to covariance matrices (such as Principal Components Analysis\, Canonical Correlation Analysis etc…) for several decades. In this talk\, I’ll show that one can adopt a random-matrix-inspired point of view to understand the performance of other widely used tools in statistics\, such as M-estimators\, and very common methods such as the bootstrap. I will focus on the high-dimensional case\, which captures well the situation of “moderately” difficult statistical problems\, arguably one of the most relevant in practice. In this setting\, I will show that random matrix ideas help upend conventional theoretical thinking (for instance about maximum likelihood methods) and highlight very serious practical problems with resampling methods.\n\n\n11:30 am – 12:10 pm\nNikhil Naik \nVideo\nTitle: Understanding Urban Change with Computer Vision and Street-level Imagery \nAbstract: Which neighborhoods experience physical improvements? In this work\, we introduce a computer vision method to measure changes in the physical appearances of neighborhoods from time-series street-level imagery. We connect changes in the physical appearance of five US cities with economic and demographic data and find three factors that predict neighborhood improvement. First\, neighborhoods that are densely populated by college-educated adults are more likely to experience physical improvements. Second\, neighborhoods with better initial appearances experience\, on average\, larger positive improvements. Third\, neighborhood improvement correlates positively with physical proximity to the central business district and to other physically attractive neighborhoods. 
Together\, our results illustrate the value of using computer vision methods and street-level imagery to understand the physical dynamics of cities. \n(Joint work with Edward L. Glaeser\, Cesar A. Hidalgo\, Scott Duke Kominers\, and Ramesh Raskar.)\n\n\n12:10 pm – 12:25 pm\nVideo #1 \nVideo #2\nData Science Lightning Talks\n\n\n12:25 pm – 1:30 pm\n\nLunch\n\n\n1:30 pm – 2:10 pm\nTracy Ke \nVideo\nTitle: A new SVD approach to optimal topic estimation \nAbstract: In the probabilistic topic models\, the quantity of interest—a low-rank matrix consisting of topic vectors—is hidden in the text corpus matrix\, masked by noise\, and Singular Value Decomposition (SVD) is a potentially useful tool for learning such a low-rank matrix. However\, the connection between this low-rank matrix and the singular vectors of the text corpus matrix are usually complicated and hard to spell out\, so how to use SVD for learning topic models faces challenges. \nWe overcome the challenge by revealing a surprising insight: there is a low-dimensional simplex structure which can be viewed as a bridge between the low-rank matrix of interest and the SVD of the text corpus matrix\, and which allows us to conveniently reconstruct the former using the latter. Such an insight motivates a new SVD-based approach to learning topic models. \nFor asymptotic analysis\, we show that under a popular topic model (Hofmann\, 1999)\, the convergence rate of the l1-error of our method matches that of the minimax lower bound\, up to a multi-logarithmic term. In showing these results\, we have derived new element-wise bounds on the singular vectors and several large deviation bounds for weakly dependent multinomial data. Our results on the convergence rate and asymptotical minimaxity are new. We have applied our method to two data sets\, Associated Process (AP) and Statistics Literature Abstract (SLA)\, with encouraging results. 
In particular\, there is a clear simplex structure associated with the SVD of the data matrices\, which largely validates our discovery.\n\n\n2:10 pm – 2:50 pm\nAlbert-László Barabási \nVideo\nTitle: Taming Complexity: From Network Science to Controlling Networks \nAbstract: The ultimate proof of our understanding of biological or technological systems is reflected in our ability to control them. While control theory offers mathematical tools to steer engineered and natural systems towards a desired state\, we lack a framework to control complex self-organized systems. Here we explore the controllability of an arbitrary complex network\, identifying the set of driver nodes whose time-dependent control can guide the system’s entire dynamics. We apply these tools to several real networks\, unveiling how the network topology determines its controllability. Virtually all technological and biological networks must be able to control their internal processes. Given that\, issues related to control deeply shape the topology and the vulnerability of real systems. Consequently unveiling the control principles of real networks\, the goal of our research\, forces us to address series of fundamental questions pertaining to our understanding of complex systems. \n \n\n\n2:50 pm – 3:20 pm\n\nBreak\n\n\n3:20 pm – 4:00 pm\nMarena Lin \nVideo\nTitle: Optimizing climate variables for human impact studies \nAbstract: Estimates of the relationship between climate variability and socio-economic outcomes are often limited by the spatial resolution of the data. As studies aim to generalize the connection between climate and socio-economic outcomes across countries\, the best available socio-economic data is at the national level (e.g. food production quantities\, the incidence of warfare\, averages of crime incidence\, gender birth ratios). 
While these statistics may be trusted from government censuses\, the appropriate metric for the corresponding climate or weather for a given year in a country is less obvious. For example\, how do we estimate the temperatures in a country relevant to national food production and therefore food security? We demonstrate that high-resolution spatiotemporal satellite data for vegetation can be used to estimate the weather variables that may be most relevant to food security and related socio-economic outcomes. In particular\, satellite proxies for vegetation over the African continent reflect the seasonal movement of the Intertropical Convergence Zone\, a band of intense convection and rainfall. We also show that agricultural sensitivity to climate variability differs significantly between countries. This work is an example of the ways in which in-situ and satellite-based observations are invaluable to both estimates of future climate variability and to continued monitoring of the earth-human system. We discuss the current state of these records and potential challenges to their continuity.\n\n\n4:00 pm – 4:40 pm\nAlex Peysakhovich\n Title: Building a cooperator \nAbstract: A major goal of modern AI is to construct agents that can perform complex tasks. Much of this work deals with single agent decision problems. However\, agents are rarely alone in the world. 
In this talk I will discuss how to combine ideas from deep reinforcement learning and game theory to construct artificial agents that can communicate\, collaborate and cooperate in productive positive sum interactions.\n\n\n4:40 pm – 5:20 pm\nTze Leung Lai \nVideo\nTitle: Gradient boosting: Its role in big data analytics\, underlying mathematical theory\, and recent refinements \nAbstract: We begin with a review of the history of gradient boosting\, dating back to the LMS algorithm of Widrow and Hoff in 1960 and culminating in Freund and Schapire’s AdaBoost and Friedman’s gradient boosting and stochastic gradient boosting algorithms in the period 1999-2002 that heralded the big data era. The role played by gradient boosting in big data analytics\, particularly with respect to deep learning\, is then discussed. We also present some recent work on the mathematical theory of gradient boosting\, which has led to some refinements that greatly improves the convergence properties and prediction performance of the methodology.\n\n\n\nAugust 19\, Saturday (Full day)\n\n\n\nTime\nSpeaker\nTopic\n\n\n8:30 am – 9:00 am\n\nBreakfast\n\n\n9:00 am – 9:40 am\nNatesh Pillai \nVideo\nTitle: Accelerating MCMC algorithms for Computationally Intensive Models via Local Approximations \nAbstract: We construct a new framework for accelerating Markov chain Monte Carlo in posterior sampling problems where standard methods are limited by the computational cost of the likelihood\, or of numerical models embedded therein. Our approach introduces local approximations of these models into the Metropolis–Hastings kernel\, borrowing ideas from deterministic approximation theory\, optimization\, and experimental design. Previous efforts at integrating approximate models into inference typically sacrifice either the sampler’s exactness or efficiency; our work seeks to address these limitations by exploiting useful convergence characteristics of local approximations. 
We prove the ergodicity of our approximate Markov chain\, showing that it samples asymptotically from the exact posterior distribution of interest. We describe variations of the algorithm that employ either local polynomial approximations or local Gaussian process regressors. Our theoretical results reinforce the key observation underlying this article: when the likelihood has some local regularity\, the number of model evaluations per Markov chain Monte Carlo (MCMC) step can be greatly reduced without biasing the Monte Carlo average. Numerical experiments demonstrate multiple order-of-magnitude reductions in the number of forward model evaluations used in representative ordinary differential equation (ODE) and partial differential equation (PDE) inference problems\, with both synthetic and real data.\n\n\n9:40 am – 10:20 am\nRavi Jagadeesan \nVideo\nTitle: Designs for estimating the treatment effect in networks with interference \nAbstract: In this paper we introduce new\, easily implementable designs for drawing causal inference from randomized experiments on networks with interference. Inspired by the idea of matching in observational studies\, we introduce the notion of considering a treatment assignment as a “quasi-coloring” on a graph. Our idea of a perfect quasi-coloring strives to match every treated unit on a given network with a distinct control unit that has identical number of treated and control neighbors. For a wide range of interference functions encountered in applications\, we show both by theory and simulations that the classical Neymanian estimator for the direct effect has desirable properties for our designs. This further extends to settings where homophily is present in addition to interference.\n\n\n10:20 am – 10:50 am\n\nBreak\n\n\n10:50 am – 11:30 am\nAnnie Liang \nVideo\nTitle: The Theory is Predictive\, but is it Complete? 
An Application to Human Generation of Randomness \nAbstract: When we test a theory using data\, it is common to focus on correctness: do the predictions of the theory match what we see in the data? But we also care about completeness: how much of the predictable variation in the data is captured by the theory? This question is difficult to answer\, because in general we do not know how much “predictable variation” there is in the problem. In this paper\, we consider approaches motivated by machine learning algorithms as a means of constructing a benchmark for the best attainable level of prediction.  We illustrate our methods on the task of predicting human-generated random sequences. Relative to a theoretical machine learning algorithm benchmark\, we find that existing behavioral models explain roughly 15 percent of the predictable variation in this problem. This fraction is robust across several variations on the problem. We also consider a version of this approach for analyzing field data from domains in which human perception and generation of randomness has been used as a conceptual framework; these include sequential decision-making and repeated zero-sum games. In these domains\, our framework for testing the completeness of theories provides a way of assessing their effectiveness over different contexts; we find that despite some differences\, the existing theories are fairly stable across our field domains in their performance relative to the benchmark. Overall\, our results indicate that (i) there is a significant amount of structure in this problem that existing models have yet to capture and (ii) there are rich domains in which machine learning may provide a viable approach to testing completeness (joint with Jon Kleinberg and Sendhil Mullainathan).\n\n\n11:30 am – 12:10 pm\nZak Stone \nVideo\nTitle: TensorFlow: Machine Learning for Everyone \nAbstract: We’ve witnessed extraordinary breakthroughs in machine learning over the past several years. 
What kinds of things are possible now that weren’t possible before? How are open-source platforms like TensorFlow and hardware platforms like GPUs and Cloud TPUs accelerating machine learning progress? If these tools are new to you\, how should you get started? In this session\, you’ll hear about all of this and more from Zak Stone\, the Product Manager for TensorFlow on the Google Brain team.\n\n\n12:10 pm – 1:30 pm\n\nLunch\n\n\n1:30 pm – 2:10 pm\nJann Spiess \nVideo\nTitle: (Machine) Learning to Control in Experiments \nAbstract: Machine learning focuses on high-quality prediction rather than on (unbiased) parameter estimation\, limiting its direct use in typical program evaluation applications. Still\, many estimation tasks have implicit prediction components. In this talk\, I discuss accounting for controls in treatment effect estimation as a prediction problem. In a canonical linear regression framework with high-dimensional controls\, I argue that OLS is dominated by a natural shrinkage estimator even for unbiased estimation when treatment is random; suggest a generalization that relaxes some parametric assumptions; and contrast my results with that for another implicit prediction problem\, namely the first stage of an instrumental variables regression.\n\n\n2:10 pm – 2:50 pm\nBradly Stadie\nTitle: Learning to Learn Quickly: One-Shot Imitation and Meta Learning \nAbstract: Many reinforcement learning algorithms are bottlenecked by data collection costs and the brittleness of their solutions when faced with novel scenarios.\nWe will discuss two techniques for overcoming these shortcomings. In one-shot imitation\, we train a module that encodes a single demonstration of a desired behavior into a vector containing the essence of the demo. This vector can subsequently be utilized to recover the demonstrated behavior. In meta-learning\, we optimize a policy under the objective of learning to learn new tasks quickly. 
We show meta-learning methods can be accelerated with the use of auxiliary objectives. Results are presented on grid worlds\, robotics tasks\, and video game playing tasks.\n\n\n2:50 pm – 3:20 pm\n\nBreak\n\n\n3:20 pm – 4:00 pm\nHau-Tieng Wu \nVideo\nTitle: When Medical Challenges Meet Modern Data Science \nAbstract: Adaptive acquisition of correct features from massive datasets is at the core of modern data analysis. One particular interest in medicine is the extraction of hidden dynamics from a single observed time series composed of multiple oscillatory signals\, which could be viewed as a single-channel blind source separation problem. The mathematical and statistical problems are made challenging by the structure of the signal which consists of non-sinusoidal oscillations with time varying amplitude/frequency\, and by the heteroscedastic nature of the noise. In this talk\, I will discuss recent progress in solving this kind of problem by combining the cepstrum-based nonlinear time-frequency analysis and manifold learning technique. A particular solution will be given along with its theoretical properties. I will also discuss the application of this method to two medical problems – (1) the extraction of a fetal ECG signal from a single lead maternal abdominal ECG signal; (2) the simultaneous extraction of the instantaneous heart/respiratory rate from a PPG signal during exercise; (3) (optional depending on time) an application to atrial fibrillation signals. If time permits\, the clinical trial results will be discussed.\n\n\n4:00 pm – 4:40 pm\nSifan Zhou \nVideo\nTitle: Citing People Like Me: Homophily\, Knowledge Spillovers\, and Continuing a Career in Science \nAbstract: Forward citation is widely used to measure the scientific merits of articles. 
This research studies millions of journal article citation records in life sciences from MEDLINE and finds that authors of the same gender\, the same ethnicity\, sharing common collaborators\, working in the same institution\, or being geographically close are more likely (and quickly) to cite each other than predicted by their proportion among authors working on the same research topics. This phenomenon reveals how social and geographic distances influence the quantity and speed of knowledge spillovers. Given the importance of forward citations in the academic evaluation system\, citation homophily potentially puts authors from minority groups at a disadvantage. I then show how it influences scientists’ chances to survive in academia and continue publishing. Based on joint work with Richard Freeman.\n\n\n\n  \nTo view photos and video interviews from the conference\, please visit the CMSA blog. \n\n \n\n  \n\n\n\nBig Data\,CMSA\,Harvard\,Math\nEvents\,Past Events
URL:https://cmsa.fas.harvard.edu/event/2017-big-data-conference-aug-18-19/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Big Data Conference,Conference,Event
ATTACH;FMTTYPE=image/png:https://cmsa.fas.harvard.edu/media/Big-Data-2017_2.png
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20170428T090000
DTEND;TZID=America/New_York:20170502T170000
DTSTAMP:20260426T045537
CREATED:20230717T175015Z
LAST-MODIFIED:20250305T215930Z
UID:10000030-1493370000-1493744400@cmsa.fas.harvard.edu
SUMMARY:JDG 2017 Conference
DESCRIPTION:In celebration of the Journal of Differential Geometry’s 50th anniversary\, the Harvard Math Department will be hosting the Tenth Conference on Geometry and Topology (JDG 2017) from April 28 – May 2\, 2017. \nConfirmed Speakers \n\nMina Aganagic\, UC Berkeley\nDenis Auroux\, UC Berkeley\nCaucher Birkar\, University of Cambridge\nHuai-Dong Cao\, Lehigh University\nTristan Collins\, Harvard University\nCamillo De Lellis\, ETH Zurich\nJean-Pierre Demailly\, Grenoble Alpes University\nSimon Donaldson\, Stony Brook University\nDan Freed\, University of Texas at Austin\nKenji Fukaya\, Stony Brook University\nDavid Gabai\, Princeton University\nLarry Guth\, Massachusetts Institute of Technology\nRichard Hamilton\, Columbia University\nYujiro Kawamata\, University of Tokyo\nFrances Kirwan\, Oxford University\nBlaine Lawson\, Stony Brook University\nJun Li\, Stanford University\nSi Li\, Tsinghua University\nBong Lian\, Brandeis University\nChiu-Chu Melissa Liu\, Columbia University\nCiprian Manolescu\, University of California\, Los Angeles\nFernando Marques\, Princeton University\nWilliam Meeks\, University of Massachusetts Amherst\nWilliam Minicozzi\, Massachusetts Institute of Technology\nJohn Pardon\, Princeton University\nDuong Phong\, Columbia University\nAlena Pirutka\, Courant Institute of New York University\nRichard Schoen\, University of California\, Irvine\nArtan Sheshmani\, QGM Aarhus University/Harvard University\nCliff Taubes\, Harvard University\nCumrun Vafa\, Harvard University\nMu-Tao Wang\, Columbia University\nShing-Tung Yau\, Harvard University\nSteve Zelditch\, Northwestern University\n\n* This event is co-sponsored by Lehigh University and partially supported by the National Science Foundation.
URL:https://cmsa.fas.harvard.edu/event/jdg-2017-conference-april-28-may-2-2017/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Conference,Event
ATTACH;FMTTYPE=image/jpeg:https://cmsa.fas.harvard.edu/media/JDG-2017-scaled.jpg
END:VEVENT
BEGIN:VEVENT
DTSTART;TZID=America/New_York:20160822T090000
DTEND;TZID=America/New_York:20160823T163000
DTSTAMP:20260426T045537
CREATED:20230717T171959Z
LAST-MODIFIED:20250328T144123Z
UID:10000017-1471856400-1471969800@cmsa.fas.harvard.edu
SUMMARY:2016 Big Data Conference & Workshop
DESCRIPTION:! LOCATION CHANGE: The conference will be in Science Center Hall C on Tuesday\, Aug.23\, 2016.\nThe Center of Mathematical Sciences and Applications will be hosting a workshop on Big Data from August 12 – 21\, 2016 followed by a two-day conference on Big Data from August 22 – 23\, 2016. \nBig Data Conference features many speakers from the Harvard Community as well as many scholars from across the globe\, with talks focusing on computer science\, statistics\, math and physics\, and economics. This is the second conference on Big Data the Center will host as part of our annual events. The 2015 conference was a huge success. \nThe conference will be hosted at Harvard Science Center Hall A (Monday\, Aug.22) & Hall C (Tuesday\, Aug.23): 1 Oxford Street\, Cambridge\, MA 02138. \nThe 2016 Big Data conference is sponsored by the Center of Mathematical Sciences and Applications at Harvard University and the Alfred P. Sloan Foundation. \nConference Speakers:\n\nJörn Boehnke\, Harvard CMSA\nJoan Bruna\, UC Berkeley [Video]\nTamara Broderick\, MIT [Video]\nJustin Chen\, MIT [Video]\nYiling Chen\, Harvard University [Video]\nAmir Farbin\, UT Arlington [Video]\nDoug Finkbeiner\, Harvard University [Video]\nAndrew Gelman\, Columbia University [Video]\nNina Holden\, MIT [Video]\nElchanan Mossel\, MIT\nAlex Peysakhovich\, Facebook\nAlexander Rakhlin\, University of Pennsylvania [Video]\nNeal Wadhwa\, MIT [Video]\nJun Yin\, University of Wisconsin\nHarry Zhou\, Yale University [Video]\n\nPlease click Conference Program for a downloadable schedule with talk abstracts.\nConference Schedule:\n\n\n\nAugust 22 – Day 1\n\n\n8:30am\nBreakfast\n\n\n8:55am\nOpening remarks\n\n\n9:00am – 9:50am\nYiling Chen\, “Machine Learning with Strategic Data Sources” [Video]\n\n\n9:50am – 10:40am\nAndrew Gelman\, “Taking Bayesian Inference Seriously” [Video]\n\n\n10:40am – 11:10am\nBreak\n\n\n11:10am – 12:00pm\nHarrison Zhou\, “A General Framework for Bayes Structured Linear Models” 
[Video]\n\n\n12:00pm – 1:30pm\nLunch\n\n\n1:30pm – 2:20pm\nDouglas Finkbeiner\, “Mapping the Milky Way in 3D with star colors” [Video]\n\n\n2:20pm – 3:10pm\nNina Holden\, “Sparse exchangeable graphs and their limits” [Video]\n\n\n3:10pm – 3:40pm\nBreak\n\n\n3:40pm – 4:30pm\nAlex Peysakhovich\, “How social science methods inform personalization on Facebook News Feed” [Video]\n\n\n4:30pm – 5:20pm\nAmir Farbin\, “Deep Learning in High Energy Physics” [Video]\n\n\n\n\n\nAugust 23 – Day 2\n\n\n8:45am\nBreakfast\n\n\n9:00am – 9:50am\nJoan Bruna Estrach\, “Addressing Computational and Statistical Gaps with Deep Networks” [Video]\n\n\n9:50am – 10:40am\nJustin Chen & Neal Wadhwa\, “Smaller Than the Eye Can See: Big Engineering from Tiny Motions in Video” [Video]\n\n\n10:40am – 11:10am\nBreak\n\n\n11:10am – 12:00pm\nAlexander Rakhlin\, “How to Predict When Estimation is Hard: Algorithms for Learning on Graphs” [Video]\n\n\n12:00pm – 1:30pm\nLunch\n\n\n1:30pm – 2:20pm\nTamara Broderick\, “Fast Quantification of Uncertainty and Robustness with Variational Bayes” [Video]\n\n\n2:20pm – 3:10pm\nElchanan Mossel\, “Phylogenetic Reconstruction – a Rigorous Model of Deep Learning”\n\n\n3:10pm – 3:40pm\nBreak\n\n\n3:40pm – 4:30pm\nJörn Boehnke\, “Amazon’s Price and Sales-rank Data: What can one billion prices on 150 thousand products tell us about the economy?”\n\n\n\nWorkshop Participants:\nRichard Freeman’s Group: \n\nSen Chai\, ESSEC\nBrock Mendel\, Harvard University\nRaviv Murciano-Goroff\, Stanford University\nSifan Zhou\, CMSA\n\nScott Kominers’ Group: \n\nBradly Stadie\, UC Berkeley\nNeal Wadhwa\, MIT [Video]\nJustin Chen\n\nChristopher Rogan’s Group: \n\nAmir Farbin\, UT Arlington [Video]\nPaul Jackson\, University of Adelaide\n\nFor more information about the workshops\, please reach out directly to the individual group leaders. \n* This event is sponsored by CMSA Harvard University and the Alfred P. Sloan Foundation. \n 
URL:https://cmsa.fas.harvard.edu/event/2016-big-data-conference-workshop/
LOCATION:Harvard Science Center\, 1 Oxford Street\, Cambridge\, MA\, 02138
CATEGORIES:Big Data Conference,Conference,Event,Workshop
ATTACH;FMTTYPE=image/png:https://cmsa.fas.harvard.edu/media/Big-Data_2016_2-1-2.png
END:VEVENT
END:VCALENDAR