<?xml version="1.0" encoding="utf-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:content="http://purl.org/rss/1.0/modules/content/">
  <channel>
    <title>Aicodegeneration on Thomas Talks AI: Claude Code, AI Agents &amp; Production Engineering</title>
    <link>https://thomasdevos.com/tags/aicodegeneration/</link>
    <description>Recent content in Aicodegeneration on Thomas Talks AI: Claude Code, AI Agents &amp; Production Engineering</description>
    <generator>Hugo -- 0.145.0</generator>
    <language>en-us</language>
    <copyright>© Thomas De Vos</copyright>
    <lastBuildDate>Mon, 04 May 2026 14:05:00 +0100</lastBuildDate>
    <atom:link href="https://thomasdevos.com/tags/aicodegeneration/index.xml" rel="self" type="application/rss+xml" />
    <item>
      <title>AI code generation gets risky when it becomes action</title>
      <link>https://thomasdevos.com/posts/2026-05-04.14.20.00-ai-code-generation-gets-risky-when-it-becomes-action/</link>
      <pubDate>Mon, 04 May 2026 14:05:00 +0100</pubDate>
      <guid isPermaLink="true">https://thomasdevos.com/posts/2026-05-04.14.20.00-ai-code-generation-gets-risky-when-it-becomes-action/</guid>
      <description>AI code generation in production gets risky when coding agents move from suggestions to actions. Here is where permissions, evidence, evals, review, and rollback matter.</description>
    </item>
  </channel>
</rss>
