<?xml version="1.0" encoding="utf-8"?>
<rss version="2.0">
    <channel>
        <title>OpenAI Research</title>
        <link>https://openai.com/research/index/</link>
        <description>Unofficial RSS feed for the OpenAI Research Index. Automatically generated by crawling openai.com/research/index.</description>
        <lastBuildDate>Sun, 05 Apr 2026 12:18:24 GMT</lastBuildDate>
        <docs>https://validator.w3.org/feed/docs/rss2.html</docs>
        <generator>https://github.com/jpmonette/feed</generator>
        <language>en</language>
        <copyright>Content copyright OpenAI. Feed generated by znck/rss-feeds.</copyright>
        <item>
            <title><![CDATA[Understanding Neural Networks Through Sparse Circuits]]></title>
            <link>https://openai.com/index/understanding-neural-networks-through-sparse-circuits/</link>
            <guid>https://openai.com/index/understanding-neural-networks-through-sparse-circuits/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[OpenAI is exploring mechanistic interpretability to understand how neural networks reason. Our new sparse model approach could make AI systems more transparent and support safer, more reliable behavior.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/2RB1AxHNYdAFCN2CDPxlwv/86f61efe5420d7a7daaa6e0e17a0adf3/IT_Blog_SEO_Art_Card_16x9__1_.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Improving instruction hierarchy in frontier LLMs]]></title>
            <link>https://openai.com/index/instruction-hierarchy-challenge/</link>
            <guid>https://openai.com/index/instruction-hierarchy-challenge/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[IH-Challenge trains models to prioritize trusted instructions, improving instruction hierarchy, safety steerability, and resistance to prompt injection attacks.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/1hgOrtUKV9JbWpgIZLElGG/f1e68fcb747aeb0cb87633a34bb71bb3/SEO_Card__1_.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[How Confessions Can Keep Language Models Honest]]></title>
            <link>https://openai.com/index/how-confessions-can-keep-language-models-honest/</link>
            <guid>https://openai.com/index/how-confessions-can-keep-language-models-honest/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[OpenAI researchers are testing “confessions,” a method that trains models to admit when they make mistakes or act undesirably, helping improve AI honesty, transparency, and trust in model outputs.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/6z3GibSptDf1BdP0PuokEy/20cec62d2f82200754f6403692afb44a/Confession_SEO_Card.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Why language models hallucinate]]></title>
            <link>https://openai.com/index/why-language-models-hallucinate/</link>
            <guid>https://openai.com/index/why-language-models-hallucinate/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[OpenAI’s new research explains why language models hallucinate. The findings show how improved evaluations can enhance AI reliability, honesty, and safety.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/5q3iK91iYCslMpYW0fmPNc/50776ce6fc897eacb94d2c05533dba96/oai_GA_Stories_16.9.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Reasoning Models Chain Of Thought Controllability]]></title>
            <link>https://openai.com/index/reasoning-models-chain-of-thought-controllability/</link>
            <guid>https://openai.com/index/reasoning-models-chain-of-thought-controllability/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[OpenAI introduces CoT-Control and finds reasoning models struggle to control their chains of thought, reinforcing monitorability as an AI safety safeguard.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/1uuAgWlWOgYMZuyLDaVFVL/af9711812b0868d52b15270a25f99913/OAI_Reasoning_Models_Struggle_to_Control_their_Chains_of_Thought__and_that%C3%A2__s_Good_SEO_16x9.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Defining and evaluating political bias in LLMs]]></title>
            <link>https://openai.com/index/defining-and-evaluating-political-bias-in-llms/</link>
            <guid>https://openai.com/index/defining-and-evaluating-political-bias-in-llms/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[Learn how OpenAI evaluates political bias in ChatGPT through new real-world testing methods that improve objectivity and reduce bias.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/6EXhubTc8i77O2wEh9tdyg/be8a521b0d27029559818a60dc4ae980/political-bias-in-llms-16.9.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Detecting And Reducing Scheming In Ai Models]]></title>
            <link>https://openai.com/index/detecting-and-reducing-scheming-in-ai-models/</link>
            <guid>https://openai.com/index/detecting-and-reducing-scheming-in-ai-models/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[Apollo Research and OpenAI developed evaluations for hidden misalignment (“scheming”) and found behaviors consistent with scheming in controlled tests across frontier models. The team shared concrete examples and stress tests of an early method to reduce scheming. ]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/5lYGYuDin762rL93Pdz2Bx/341a3aaca604f4244df7b9ce3b0cc91f/Anti-Scheming_Cover.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Introducing The Model Spec]]></title>
            <link>https://openai.com/index/introducing-the-model-spec/</link>
            <guid>https://openai.com/index/introducing-the-model-spec/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/2oOdYlEm4NHQCiqTsStMuD/ffe7260fb16cac11a570313b78cec8b8/DALL_E_2024-05-06_19.10.23.webp?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/webp"/>
        </item>
        <item>
            <title><![CDATA[Evaluating AI’s ability to perform scientific research tasks]]></title>
            <link>https://openai.com/index/frontierscience/</link>
            <guid>https://openai.com/index/frontierscience/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[OpenAI introduces FrontierScience, a benchmark testing AI reasoning in physics, chemistry, and biology to measure progress toward real scientific research.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/26JX57meRpqIRJOwMtIbzv/8976180403bc44232fb6159efaf3c94d/oai_forscience_frontierscience_16x9.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Gdpval]]></title>
            <link>https://openai.com/index/gdpval/</link>
            <guid>https://openai.com/index/gdpval/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[OpenAI introduces GDPval, a new evaluation that measures model performance on real-world economically valuable tasks across 44 occupations.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/65D5lQ6o5VqRjLuebLijXO/63db445c96aa2ec29d5a4e76fe20169d/GDPval__Art_Card.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Gpt 4 1]]></title>
            <link>https://openai.com/index/gpt-4-1/</link>
            <guid>https://openai.com/index/gpt-4-1/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[Introducing GPT-4.1 in the API—a new family of models with across-the-board improvements, including major gains in coding, instruction following, and long-context understanding. We’re also releasing our first nano model. Available to developers worldwide starting today.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/2C5yfJCw3N5tzKg8ZtBpw0/bcbc7022f43bca374a75dbc4a65937ef/API_GPT-4.1_Art_16.9.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Evaluating Chain Of Thought Monitorability]]></title>
            <link>https://openai.com/index/evaluating-chain-of-thought-monitorability/</link>
            <guid>https://openai.com/index/evaluating-chain-of-thought-monitorability/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[OpenAI introduces a new framework and evaluation suite for chain-of-thought monitorability, covering 13 evaluations across 24 environments. Our findings show that monitoring a model’s internal reasoning is far more effective than monitoring outputs alone, offering a promising path toward scalable control as AI systems grow more capable.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/4x3EyD3VqsjaYK1Q8wOj1W/1d939bee7051a3ccb60abbdcbef156f4/CoT_Monitorability_Art_Card.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Accelerating Biological Research In The Wet Lab]]></title>
            <link>https://openai.com/index/accelerating-biological-research-in-the-wet-lab/</link>
            <guid>https://openai.com/index/accelerating-biological-research-in-the-wet-lab/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[OpenAI introduces a real-world evaluation framework to measure how AI can accelerate biological research in the wet lab. Using GPT-5 to optimize a molecular cloning protocol, the work explores both the promise and risks of AI-assisted experimentation.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/5YLMwQ4xkbvsQaVhC5SmIW/c0e7a138494ec4d27ed4d48258411c45/oai_forscience_wetlab_16x9.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Introducing Indqa]]></title>
            <link>https://openai.com/index/introducing-indqa/</link>
            <guid>https://openai.com/index/introducing-indqa/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[OpenAI introduces IndQA, a new benchmark for evaluating AI systems in Indian languages. Built with domain experts, IndQA tests cultural understanding and reasoning across 12 languages and 10 knowledge areas.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/5RrMpGwZVvEFy6d02cXoOq/4891d9b5c952037c97d20041acd9675f/oai_IndQA_16.9.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Sora 2]]></title>
            <link>https://openai.com/index/sora-2/</link>
            <guid>https://openai.com/index/sora-2/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[Our latest video generation model is more physically accurate, realistic, and controllable than prior systems. It also features synchronized dialogue and sound effects. Create with it in the new Sora app.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/1Q8NDmnyavsoPAKDlWpW18/a1f595376097668d1772c9cac40fc8ee/16_x_9____1_.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[New Result Theoretical Physics]]></title>
            <link>https://openai.com/index/new-result-theoretical-physics/</link>
            <guid>https://openai.com/index/new-result-theoretical-physics/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[A new preprint shows GPT-5.2 proposing a new formula for a gluon amplitude, later formally proved and verified by OpenAI and academic collaborators.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/3HY1JlEmciG7RxY03ApIjC/f97bef7a902920d97e288c3276ba760f/oai_Science_Quantum_Amplitudes_SEO_Open_Graph_16x9.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Extending Single Minus Amplitudes To Gravitons]]></title>
            <link>https://openai.com/index/extending-single-minus-amplitudes-to-gravitons/</link>
            <guid>https://openai.com/index/extending-single-minus-amplitudes-to-gravitons/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[A new preprint extends single-minus amplitudes to gravitons, with GPT-5.2 Pro helping derive and verify nonzero graviton tree amplitudes in quantum gravity.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/EeranjBSfKC928pggXMwV/617a27a19ab63c20f33b076c2d7ebfc6/Graviton__SEO_card.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Gpt 5 Lowers Protein Synthesis Cost]]></title>
            <link>https://openai.com/index/gpt-5-lowers-protein-synthesis-cost/</link>
            <guid>https://openai.com/index/gpt-5-lowers-protein-synthesis-cost/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[An autonomous lab combining OpenAI’s GPT-5 with Ginkgo Bioworks’ cloud automation cut cell-free protein synthesis costs by 40% through closed-loop experimentation.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/2ABwqD8s3haMXTQz0zgXC1/ed2a59ef55cca87067ddababcbbd33a5/ginkgo_seo_card.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[First Proof Submissions]]></title>
            <link>https://openai.com/index/first-proof-submissions/</link>
            <guid>https://openai.com/index/first-proof-submissions/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[We share our AI model’s proof attempts for the First Proof math challenge, testing research-grade reasoning on expert-level problems.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/5bEu8lJDIhSisHkrCQwQ0l/84b53efb35f70a50a111d71613a1c3de/oai_Science_First_Proof_SEO_Open_Graph_16x9.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[How People Are Using Chatgpt]]></title>
            <link>https://openai.com/index/how-people-are-using-chatgpt/</link>
            <guid>https://openai.com/index/how-people-are-using-chatgpt/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[New research from the largest study of ChatGPT use shows how the tool creates economic value through both personal and professional use. Adoption is broadening beyond early users, closing gaps and making AI a part of everyday life.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/2khdUJJKQqjmai3Lv9PeOK/f8e084232f90b0278b0466d9f144bfbb/how-people-are-using-chatgpt-16.9.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Introducing Evmbench]]></title>
            <link>https://openai.com/index/introducing-evmbench/</link>
            <guid>https://openai.com/index/introducing-evmbench/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[OpenAI and Paradigm introduce EVMbench, a benchmark evaluating AI agents’ ability to detect, patch, and exploit high-severity smart contract vulnerabilities.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/1I4Pd3JdAFesSF1aKZThPM/70fc2cf4eebbfe0d1211c089f49df0dd/SEO_Card.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Why We No Longer Evaluate Swe Bench Verified]]></title>
            <link>https://openai.com/index/why-we-no-longer-evaluate-swe-bench-verified/</link>
            <guid>https://openai.com/index/why-we-no-longer-evaluate-swe-bench-verified/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[SWE-bench Verified is increasingly contaminated and mismeasures frontier coding progress. Our analysis shows flawed tests and training leakage. We recommend SWE-bench Pro.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/67cdXZRtkDCZdovtRa01iv/63c2d0c7ccda4580583cc030fb366f6d/SEO_Card.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Introducing Aardvark]]></title>
            <link>https://openai.com/index/introducing-aardvark/</link>
            <guid>https://openai.com/index/introducing-aardvark/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[OpenAI introduces Aardvark, an AI-powered security researcher that autonomously finds, validates, and helps fix software vulnerabilities at scale. The system is in private beta—sign up to join early testing.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/2Ct6JXoKb3X1fU4EXhEb0Y/219805db3eb0f0eaa6cf4b41ff770097/Aardvark_SEO_Card_16x9.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Advancing Independent Research Ai Alignment]]></title>
            <link>https://openai.com/index/advancing-independent-research-ai-alignment/</link>
            <guid>https://openai.com/index/advancing-independent-research-ai-alignment/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[OpenAI commits $7.5M to The Alignment Project to fund independent AI alignment research, strengthening global efforts to address AGI safety and security risks.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/41XPFki1MAJlOqfx50Lr9n/8e7423403b90a3d12e4d3844d83a77dd/SEO_Card__1_.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Scaling Social Science Research]]></title>
            <link>https://openai.com/index/scaling-social-science-research/</link>
            <guid>https://openai.com/index/scaling-social-science-research/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[GABRIEL is a new open-source toolkit from OpenAI that uses GPT to turn qualitative text and images into quantitative data, helping social scientists analyze research at scale.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/01c5TjWNjhHHbis8R9qvCC/34f34684e336090b1526ebfea23c64cc/Scaling-social-science.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Accelerating Science Gpt 5]]></title>
            <link>https://openai.com/index/accelerating-science-gpt-5/</link>
            <guid>https://openai.com/index/accelerating-science-gpt-5/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[OpenAI introduces the first research cases showing how GPT-5 accelerates scientific progress across math, physics, biology, and computer science. Explore how AI and researchers collaborate to generate proofs, uncover new insights, and reshape the pace of discovery.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/7rjNdDNL7iKTRnSdVeDOHg/3b690c87bda461f27d057971e72e546f/GPT5_Survey_1-1.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[The State Of Enterprise Ai 2025 Report]]></title>
            <link>https://openai.com/index/the-state-of-enterprise-ai-2025-report/</link>
            <guid>https://openai.com/index/the-state-of-enterprise-ai-2025-report/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[Key findings from OpenAI’s enterprise data show accelerating AI adoption, deeper integration, and measurable productivity gains across industries in 2025.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/1lBJclq7MALbzVZf2Wa2Xb/c3e87f506e27c000f166189f6acfb14c/state-of-enterprise-ai-2025-16_9.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Evals Drive Next Chapter Of Ai]]></title>
            <link>https://openai.com/index/evals-drive-next-chapter-of-ai/</link>
            <guid>https://openai.com/index/evals-drive-next-chapter-of-ai/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[Learn how evals help businesses define, measure, and improve AI performance—reducing risk, boosting productivity, and driving strategic advantage.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/136Vat98FEB3xuRr8VDh78/d3d153d37c3667c6ca966cc0a2052baa/Eval_Blog_SEO_Card_1920x1080.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Browsecomp]]></title>
            <link>https://openai.com/index/browsecomp/</link>
            <guid>https://openai.com/index/browsecomp/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[BrowseComp: a benchmark for browsing agents.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/5vsvsjylZKyS0lbYCdDwyI/e8b19cebbb2bbf843119bce363d5a5f5/BrowseComp_artcard_16.9.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
        <item>
            <title><![CDATA[Affective Use Study]]></title>
            <link>https://openai.com/index/affective-use-study/</link>
            <guid>https://openai.com/index/affective-use-study/</guid>
            <pubDate>Fri, 13 Mar 2026 02:28:53 GMT</pubDate>
            <description><![CDATA[An OpenAI and MIT Media Lab Research collaboration.]]></description>
            <enclosure url="https://downloads.ctfassets.net/kftzwdyauwt9/4RNiq6LSdkrzeESGCSFkil/0350a02a60eb1f716caa5609b1cae02c/OAI_Affective_Use_16.9.jpg?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/jpeg"/>
        </item>
        <item>
            <title><![CDATA[Our Approach To The Model Spec]]></title>
            <link>https://openai.com/index/our-approach-to-the-model-spec/</link>
            <guid>https://openai.com/index/our-approach-to-the-model-spec/</guid>
            <pubDate>Wed, 25 Mar 2026 18:32:25 GMT</pubDate>
            <description><![CDATA[Learn how OpenAI’s Model Spec serves as a public framework for model behavior, balancing safety, user freedom, and accountability as AI systems advance.]]></description>
            <enclosure url="https://images.ctfassets.net/kftzwdyauwt9/35bHEbm0rP6Pp4Vz11SaVW/1033fdf6bbcac970107c7503b38ddeba/oai_model_spec_16x9.png?w=1600&amp;h=900&amp;fit=fill" length="0" type="image/png"/>
        </item>
    </channel>
</rss>