{"AA":["*Allahu Akbar*"],"AF":["AlignmentForum.com"],"AGI":["Artificial General Intelligence"],"AIXI":["Hypothetical optimal AI agent, unimplementable in the real world"],"AIxi":["Hypothetical optimal AI agent, unimplementable in the real world"],"ANI":["Artificial Narrow Intelligence (or Narrow Artificial Intelligence)"],"ANN":["Artificial Neural Network"],"ASI":["Artificial Super-Intelligence"],"CAGIS":["Coalition for AGI Safety, \/r\/ControlProblem grassroots group"],"CEV":["Coherent Extrapolated Volition"],"CFAR":["Center for Applied Rationality"],"CHCAI":["Center for Human-Compatible AI"],"CIRL":["Co-operative Inverse Reinforcement Learning"],"CNN":["Convolutional Neural Network"],"CSER":["Center for the Study of Existential Risk"],"DL":["Deep Learning"],"DM":["(Google) DeepMind"],"DRL":["Deep Reinforcement Learning"],"DSA":["Decisive Strategic Advantage"],"EA":["Effective Altruism\/ist"],"ER":["Existential Risk"],"EY":["Eliezer Yudkowsky"],"FAI":["Friendly Artificial Intelligence"],"FHI":["Future of Humanity Institute"],"FLI":["Future of Life Institute"],"FOOM":["Local intelligence explosion (\"the AI going Foom\")"],"FRI":["Foundational Research Institute"],"Foom":["Local intelligence explosion (\"the AI going Foom\")"],"GAN":["Generative Adversarial Network"],"GCR":["Global Catastrophic Risk"],"GDM":["(Google) DeepMind"],"HLAI":["Human-Level Artificial Intelligence, also HLMI"],"HLMI":["Human-Level Machine Intelligence"],"IDA":["Iterated Distillation and Amplification (Christiano's alignment research agenda)"],"IE":["Intelligence Explosion"],"IO":["Input\/Output"],"IRL":["Inverse Reinforcement Learning"],"LSTM":["Long Short-Term Memory (a form of RNN)"],"LW":["LessWrong.com"],"MIRI":["Machine Intelligence Research Institute"],"ML":["Machine Learning"],"NB":["Nick Bostrom"],"NN":["Neural Network"],"OA":["OpenAI"],"OAI":["OpenAI"],"OL":["Omega lol"],"OPP":["Open Philanthropy Project"],"RL":["Reinforcement Learning"],"RNN":["Recurrent Neural Network"],"SPF":["Single Point of Failure"],"SPOF":["Single Point of Failure"],"SPoF":["Single Point of Failure"],"WBE":["Whole-Brain Emulation"],"XAI":["eXplainable Artificial Intelligence"],"foom":["Local intelligence explosion (\"the AI going Foom\")"],"uFAI":["unFriendly Artificial Intelligence"]}