[{"data":1,"prerenderedAt":659},["ShallowReactive",2],{"/en-us/the-source/authors/lee-faus":3,"footer-en-us":31,"the-source-banner-en-us":365,"the-source-navigation-en-us":371,"the-source-newsletter-en-us":394,"authors-en-us":401,"categories-en-us":440,"lee-faus-articles-list-en-us":441},{"id":4,"title":5,"body":6,"category":6,"config":7,"content":9,"description":6,"extension":22,"meta":23,"navigation":24,"path":25,"seo":26,"slug":27,"stem":28,"testContent":6,"type":29,"__hash__":30},"theSourceAuthors/en-us/the-source/authors/lee-faus.yml","Lee Faus",null,{"layout":8},"the-source",[10,20],{"componentName":11,"type":11,"componentContent":12},"TheSourceAuthorHero",{"config":13,"name":5,"role":15,"bio":16,"headshot":17},{"gitlabHandle":14},"lfaus","Global Field CTO","Lee Faus is a Global Field CTO at GitLab. Lee has been a software architect, teacher, professor, and educator for over 25 years. He leverages his experience as an educator to bring complex technology concepts into a business forum where executives gain valuable advice to positively impact their business.",{"altText":5,"config":18},{"src":19},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463398/vivhlomglvnstamj54bo.jpg",{"componentName":21,"type":21},"TheSourceArticlesList","yml",{},true,"/en-us/the-source/authors/lee-faus",{"title":5},"lee-faus","en-us/the-source/authors/lee-faus","author","7pwNlTMVB3GVMZ6nm1tNkS4ayGeq7YqqRUwwncC7JoU",{"data":32},{"text":33,"source":34,"edit":40,"contribute":45,"config":50,"items":55,"minimal":354},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":35,"config":36},"View page source",{"href":37,"dataGaName":38,"dataGaLocation":39},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":41,"config":42},"Edit this 
page",{"href":43,"dataGaName":44,"dataGaLocation":39},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":46,"config":47},"Please contribute",{"href":48,"dataGaName":49,"dataGaLocation":39},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":51,"facebook":52,"youtube":53,"linkedin":54},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[56,113,170,229,292],{"title":57,"links":58,"subMenu":74},"Pricing",[59,64,69],{"text":60,"config":61},"View plans",{"href":62,"dataGaName":63,"dataGaLocation":39},"/pricing/","view plans",{"text":65,"config":66},"Why Premium?",{"href":67,"dataGaName":68,"dataGaLocation":39},"/pricing/premium/","why premium",{"text":70,"config":71},"Why Ultimate?",{"href":72,"dataGaName":73,"dataGaLocation":39},"/pricing/ultimate/","why ultimate",[75],{"title":76,"links":77},"Contact Us",[78,83,88,93,98,103,108],{"text":79,"config":80},"Contact sales",{"href":81,"dataGaName":82,"dataGaLocation":39},"/sales/","sales",{"text":84,"config":85},"Support portal",{"href":86,"dataGaName":87,"dataGaLocation":39},"https://support.gitlab.com","support portal",{"text":89,"config":90},"Customer portal",{"href":91,"dataGaName":92,"dataGaLocation":39},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"text":94,"config":95},"Status",{"href":96,"dataGaName":97,"dataGaLocation":39},"https://status.gitlab.com/","status",{"text":99,"config":100},"Terms of use",{"href":101,"dataGaName":102,"dataGaLocation":39},"/terms/","terms of use",{"text":104,"config":105},"Privacy statement",{"href":106,"dataGaName":107,"dataGaLocation":39},"/privacy/","privacy statement",{"text":109,"config":110},"Cookie preferences",{"dataGaName":111,"dataGaLocation":39,"id":112,"isOneTrustButton":24},"cookie 
preferences","ot-sdk-btn",{"title":114,"links":115,"subMenu":126},"Product",[116,121],{"text":117,"config":118},"DevSecOps platform",{"href":119,"dataGaName":120,"dataGaLocation":39},"/platform/","devsecops platform",{"text":122,"config":123},"AI-Assisted Development",{"href":124,"dataGaName":125,"dataGaLocation":39},"/gitlab-duo/","ai-assisted development",[127],{"title":128,"links":129},"Topics",[130,135,140,145,150,155,160,165],{"text":131,"config":132},"CICD",{"href":133,"dataGaName":134,"dataGaLocation":39},"/topics/ci-cd/","cicd",{"text":136,"config":137},"GitOps",{"href":138,"dataGaName":139,"dataGaLocation":39},"/topics/gitops/","gitops",{"text":141,"config":142},"DevOps",{"href":143,"dataGaName":144,"dataGaLocation":39},"/topics/devops/","devops",{"text":146,"config":147},"Version Control",{"href":148,"dataGaName":149,"dataGaLocation":39},"/topics/version-control/","version control",{"text":151,"config":152},"DevSecOps",{"href":153,"dataGaName":154,"dataGaLocation":39},"/topics/devsecops/","devsecops",{"text":156,"config":157},"Cloud Native",{"href":158,"dataGaName":159,"dataGaLocation":39},"/topics/cloud-native/","cloud native",{"text":161,"config":162},"AI for Coding",{"href":163,"dataGaName":164,"dataGaLocation":39},"/topics/devops/ai-for-coding/","ai for coding",{"text":166,"config":167},"Agentic AI",{"href":168,"dataGaName":169,"dataGaLocation":39},"/topics/agentic-ai/","agentic ai",{"title":171,"links":172},"Solutions",[173,177,182,187,192,196,201,204,209,214,219,224],{"text":174,"config":175},"Application Security Testing",{"href":176,"dataGaName":174,"dataGaLocation":39},"/solutions/application-security-testing/",{"text":178,"config":179},"Automated software delivery",{"href":180,"dataGaName":181,"dataGaLocation":39},"/solutions/delivery-automation/","automated software delivery",{"text":183,"config":184},"Agile development",{"href":185,"dataGaName":186,"dataGaLocation":39},"/solutions/agile-delivery/","agile 
delivery",{"text":188,"config":189},"SCM",{"href":190,"dataGaName":191,"dataGaLocation":39},"/solutions/source-code-management/","source code management",{"text":131,"config":193},{"href":194,"dataGaName":195,"dataGaLocation":39},"/solutions/continuous-integration/","continuous integration & delivery",{"text":197,"config":198},"Value stream management",{"href":199,"dataGaName":200,"dataGaLocation":39},"/solutions/value-stream-management/","value stream management",{"text":136,"config":202},{"href":203,"dataGaName":139,"dataGaLocation":39},"/solutions/gitops/",{"text":205,"config":206},"Enterprise",{"href":207,"dataGaName":208,"dataGaLocation":39},"/enterprise/","enterprise",{"text":210,"config":211},"Small business",{"href":212,"dataGaName":213,"dataGaLocation":39},"/small-business/","small business",{"text":215,"config":216},"Public sector",{"href":217,"dataGaName":218,"dataGaLocation":39},"/solutions/public-sector/","public sector",{"text":220,"config":221},"Education",{"href":222,"dataGaName":223,"dataGaLocation":39},"/solutions/education/","education",{"text":225,"config":226},"Financial services",{"href":227,"dataGaName":228,"dataGaLocation":39},"/solutions/finance/","financial services",{"title":230,"links":231},"Resources",[232,237,242,247,252,257,262,267,272,277,282,287],{"text":233,"config":234},"Install",{"href":235,"dataGaName":236,"dataGaLocation":39},"/install/","install",{"text":238,"config":239},"Quick start guides",{"href":240,"dataGaName":241,"dataGaLocation":39},"/get-started/","quick setup checklists",{"text":243,"config":244},"Learn",{"href":245,"dataGaName":246,"dataGaLocation":39},"https://university.gitlab.com/","learn",{"text":248,"config":249},"Product documentation",{"href":250,"dataGaName":251,"dataGaLocation":39},"https://docs.gitlab.com/","docs",{"text":253,"config":254},"Blog",{"href":255,"dataGaName":256,"dataGaLocation":39},"/blog/","blog",{"text":258,"config":259},"Customer success 
stories",{"href":260,"dataGaName":261,"dataGaLocation":39},"/customers/","customer success stories",{"text":263,"config":264},"Remote",{"href":265,"dataGaName":266,"dataGaLocation":39},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":268,"config":269},"GitLab Services",{"href":270,"dataGaName":271,"dataGaLocation":39},"/services/","services",{"text":273,"config":274},"Community",{"href":275,"dataGaName":276,"dataGaLocation":39},"/community/","community",{"text":278,"config":279},"Forum",{"href":280,"dataGaName":281,"dataGaLocation":39},"https://forum.gitlab.com/","forum",{"text":283,"config":284},"Events",{"href":285,"dataGaName":286,"dataGaLocation":39},"/events/","events",{"text":288,"config":289},"Partners",{"href":290,"dataGaName":291,"dataGaLocation":39},"/partners/","partners",{"title":293,"links":294},"Company",[295,300,305,310,315,320,325,329,334,339,344,349],{"text":296,"config":297},"About",{"href":298,"dataGaName":299,"dataGaLocation":39},"/company/","company",{"text":301,"config":302},"Jobs",{"href":303,"dataGaName":304,"dataGaLocation":39},"/jobs/","jobs",{"text":306,"config":307},"Leadership",{"href":308,"dataGaName":309,"dataGaLocation":39},"/company/team/e-group/","leadership",{"text":311,"config":312},"Team",{"href":313,"dataGaName":314,"dataGaLocation":39},"/company/team/","team",{"text":316,"config":317},"Handbook",{"href":318,"dataGaName":319,"dataGaLocation":39},"https://handbook.gitlab.com/","handbook",{"text":321,"config":322},"Investor relations",{"href":323,"dataGaName":324,"dataGaLocation":39},"https://ir.gitlab.com/","investor relations",{"text":326,"config":327},"Sustainability",{"href":328,"dataGaName":326,"dataGaLocation":39},"/sustainability/",{"text":330,"config":331},"Diversity, inclusion and belonging (DIB)",{"href":332,"dataGaName":333,"dataGaLocation":39},"/diversity-inclusion-belonging/","Diversity, inclusion and belonging",{"text":335,"config":336},"Trust 
Center",{"href":337,"dataGaName":338,"dataGaLocation":39},"/security/","trust center",{"text":340,"config":341},"Newsletter",{"href":342,"dataGaName":343,"dataGaLocation":39},"/company/contact/#contact-forms","newsletter",{"text":345,"config":346},"Press",{"href":347,"dataGaName":348,"dataGaLocation":39},"/press/","press",{"text":350,"config":351},"Modern Slavery Transparency Statement",{"href":352,"dataGaName":353,"dataGaLocation":39},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"items":355},[356,359,362],{"text":357,"config":358},"Terms",{"href":101,"dataGaName":102,"dataGaLocation":39},{"text":360,"config":361},"Cookies",{"dataGaName":111,"dataGaLocation":39,"id":112,"isOneTrustButton":24},{"text":363,"config":364},"Privacy",{"href":106,"dataGaName":107,"dataGaLocation":39},{"visibility":24,"title":366,"button":367},"The Intelligent Software Development Era: How AI is reshaping DevSecOps teams",{"config":368,"text":370},{"href":369},"/developer-survey/","Get the research report",{"logo":372,"subscribeLink":377,"navItems":381},{"altText":373,"config":374},"the source logo",{"src":375,"href":376},"https://res.cloudinary.com/about-gitlab-com/image/upload/v1750191004/t7wz1klfb2kxkezksv9t.svg","/the-source/",{"text":378,"config":379},"Subscribe",{"href":380},"#subscribe",[382,386,390],{"text":383,"config":384},"Artificial Intelligence",{"href":385},"/the-source/ai/",{"text":387,"config":388},"Security & Compliance",{"href":389},"/the-source/security/",{"text":391,"config":392},"Platform & Infrastructure",{"href":393},"/the-source/platform/",{"title":395,"description":396,"submitMessage":397,"formData":398},"The Source Newsletter","Stay updated with insights for the future of software development.","You have successfully signed up for The Source’s 
newsletter.",{"config":399},{"formId":400,"formName":343,"hideRequiredLabel":24},1077,{"amanda-rueda":402,"andre-michael-braun":403,"andrew-haschka":404,"ayoub-fandi":405,"bob-stevens":406,"brian-wald":407,"bryan-ross":408,"chandler-gibbons":409,"cherry-han":410,"dave-steer":411,"ddesanto":412,"derek-debellis":413,"emilio-salvador":414,"erika-feldman":415,"george-kichukov":416,"gitlab":417,"grant-hickman":418,"haim-snir":419,"iganbaruch":420,"jason-morgan":421,"jessie-young":422,"jlongo":423,"joel-krooswyk":424,"josh-lemos":425,"joshua-carroll":426,"julie-griffin":427,"kristina-weis":428,"lee-faus":5,"marco-caronna":429,"michelle-gill":430,"nathen-harvey":431,"ncregan":432,"rob-smith":433,"rschulman":434,"sabrina-farmer":435,"sandra-gittlen":436,"sharon-gaudin":437,"stephen-walters":438,"taylor-mccaslin":439},"Amanda Rueda","Andre Michael Braun","Andrew Haschka","Ayoub Fandi","Bob Stevens","Brian Wald","Bryan Ross","Chandler Gibbons","Cherry Han","Dave Steer","David DeSanto","Derek DeBellis","Emilio Salvador","Erika Feldman","George Kichukov","GitLab","Grant Hickman","Haim Snir","Itzik Gan Baruch","Jason Morgan","Jessie Young","Joseph Longo","Joel Krooswyk","Josh Lemos","Joshua Carroll","Julie Griffin","Kristina Weis","Marco Caronna","Michelle Gill","Nathen Harvey","Niall Cregan","Rob Smith","Robin Schulman","Sabrina Farmer","Sandra Gittlen","Sharon Gaudin","Stephen Walters","Taylor McCaslin",{"ai":383,"platform":391,"security":387},[442,468,506,542,582,623],{"id":443,"title":444,"body":6,"category":445,"config":446,"content":450,"description":451,"extension":22,"meta":460,"navigation":24,"path":461,"seo":462,"slug":464,"stem":465,"type":466,"__hash__":467,"date":452,"timeToRead":453,"heroImage":454,"keyTakeaways":455,"articleBody":459},"theSource/en-us/the-source/ai/the-next-wave-of-devsecops-team-of-one-manager-of-many.yml","The next wave of DevSecOps: Team of one, manager of 
many","ai",{"layout":8,"template":447,"featured":24,"author":27,"sourceCTA":448,"isHighlighted":449,"authorName":5},"TheSourceArticle","global-devsecops-report-2025",false,{"title":444,"description":451,"date":452,"timeToRead":453,"heroImage":454,"keyTakeaways":455,"articleBody":459},"Understand why AI augmentation amplifies rather than replaces engineering talent when built on collaborative learning.","2025-11-18","6 min read","https://res.cloudinary.com/about-gitlab-com/image/upload/v1756302005/ntf0xsctetcx7uq1yfpy.png",[456,457,458],"DevSecOps collaboration creates knowledge mastery across domains, preparing engineers to effectively evaluate and apply AI tools in complex software delivery scenarios.","AI should augment human capability by handling routine tasks, not replace the cross-functional judgment that comes from deep collaborative learning and expertise.","Organizations that combine collaborative learning cultures with AI augmentation will outperform those viewing AI as a simple cost-reduction strategy.","AI is creating unprecedented leverage for individual engineers. Individual team members can now accomplish what once required entire teams. But here's the paradox everyone is missing: the engineers who will build these solo empires aren’t just expert coders. They've spent years in collaborative teams, absorbing knowledge across security, infrastructure, business logic, and quality assurance.\n\nThe software industry is racing toward a future of AI-augmented individual capability. Yet the foundation for this future is the very thing many organizations are abandoning: deep, cross-functional collaboration. Understanding this contradiction reveals the real role of AI in software delivery.\n\n## Collaboration as a foundation\nThe fundamental goal of DevSecOps is to establish a collaborative engineering culture that spans the entire software delivery lifecycle, from business strategy to technical implementation. 
This culture centers on reusability and best practices that directly improve developer productivity and delivery efficiency. Organizations achieve this through a dual-gate system: \n* **Human consensus-based code reviews** ensure knowledge transfer and maintain quality standards across disciplines.\n* **Automated quality and security gates** catch issues before they reach production.\n\nThis approach balances speed with control. It de-risks software change management while ensuring that acceleration doesn't come at the expense of stability or security.\n\nMost organizations stop here. They implement the processes, install the tooling, and measure the velocity improvements. But, they miss the deeper transformation happening beneath the surface.\n\n## The knowledge transfer engine\nThe collaborative model is fundamentally about learning and knowledge mastery at scale. Research in educational psychology, particularly [Bloom's Taxonomy of Learning](https://bokcenter.harvard.edu/taxonomies-learning), suggests that the highest form of mastery is achieved through teaching concepts to others. \n\nThis is where the dual-gate system reveals its deeper value. Code reviews become structured knowledge transfer sessions. Each person operates as the knowledge expert in their domain while learning from adjacent domains: \n* The security engineer reviewing code teaches secure development practices while learning about business requirements\n* The architect understands product priorities while sharing knowledge about technical constraints\n* The junior developer learns patterns from seniors while bringing fresh perspectives on tooling\n\nThis creates a network effect where each person's knowledge elevates everyone else's capabilities. Expertise flows in all directions across the organization. 
This collaborative culture fosters a learning organization in which every interaction creates opportunities for teaching and accelerated growth.\n\nWhen you view DevSecOps through this lens, code review becomes a teaching moment. Security scans are a learning opportunity. Every interaction in the system enables knowledge transfer and mastery development. This is what sets certain engineers apart: They’ve internalized knowledge from adjacent domains through years of collaborative interaction.\n\n\n## The team of one: AI as a peer, not a replacement\nThe natural evolution of this collaborative model is the \"team of one,\" a knowledge worker augmented by AI that enables unprecedented autonomy and efficiency. The promise is compelling. Every engineer gains AI peers that handle lower-level work, such as remembering, understanding, and basic application of concepts. Teaching an agent to perform these redundant tasks dramatically lowers cognitive load, freeing mental capacity for higher-order thinking, including analysis, evaluation, and creative problem-solving. \n\nThis is how AI can amplify human capabilities rather than replace them. [Recent GitLab research](https://about.gitlab.com/developer-survey/) found that although 83% of DevSecOps professionals feel that AI will significantly change their role within the next five years, 76% agree that AI will actually create the need for more engineers, not fewer.\n\nHowever, a dangerous counter-narrative is emerging in executive circles. Some leaders believe highly capable AI agents can replace knowledge workers entirely. This represents a fundamental misunderstanding of how people develop expertise. 
\n\nEven with highly capable AI, you still need human experts who can:\n* Evaluate outputs across multiple disciplines\n* Establish trust in AI recommendations\n* Provide domain-specific judgment\n* Take accountability for production systems\n\nIn fact, GitLab’s research found that 40% of DevSecOps professionals agree that Al will actually accelerate career growth for junior developers.\n\nThe argument that \"we don't need junior developers anymore\" ignores the fact that someone still needs to review, validate, and take accountability for what AI produces. Junior developers aren't just writing code — they’re learning to evaluate it across multiple domains, building the judgment needed to verify AI outputs.\n\nThe opposite argument — that AI might replace experienced architects and senior developers — is equally problematic. This logic suggests we could skip foundational learning entirely and restructure computer science education to focus only on prompting AI agents. But without understanding what good code looks like across security, infrastructure, and business domains, how would these graduates know whether AI outputs are correct? Both extremes miss the point.\n\n## The real constraint: Scarcity of collective wisdom\nThe real constraint isn't AI capability. It's the scarcity of people who can actually operate as that \"team of one.\" You need engineers with sufficient skills across multiple domains to effectively evaluate AI outputs in security, infrastructure, quality, and business logic. And you need educators who understand how to develop these multi-skilled practitioners. \n\nThe collaborative model from the original DevSecOps goal remains essential because this is the mechanism through which people develop that breadth of knowledge. The team of one isn't someone working in isolation. 
It's someone who has internalized the collective wisdom of the cross-functional team and can now operate with AI augmentation while maintaining the judgment and accountability that only human expertise provides.\n\n## The path forward\nOrganizations face a critical choice. The tempting path is to view AI as a cost-reduction strategy by replacing expensive senior talent with cheaper tools and whoever can operate them. This path leads to brittle systems, technical debt, and ultimately failure. \n\nThe sustainable path recognizes that AI is a tool that amplifies existing capability but cannot replace the judgment that comes from deep, cross-functional mastery.\n\nThe companies that will win are those that double down on collaborative learning while simultaneously investing in AI augmentation. They understand that creating a team of one requires first creating a team that teaches each individual across multiple domains. They recognize that the code review process helps to transfer the knowledge needed to use AI tools effectively. They invest in building knowledge-transfer systems that create engineers capable of operating autonomously, having learned from the collective.\n\nThis is the paradox of the AI age in software delivery. As our AI tools become increasingly capable, the value of collaborative learning becomes even more pronounced. The only way to create people capable of effectively wielding those tools is through the cross-functional knowledge transfer enabled by DevSecOps. \n\nThe goal hasn't changed. We still need to improve productivity, increase efficiency, and reduce risk. What's changed is our understanding that achieving those goals at scale requires both collaborative learning and AI augmentation, not a choice between them.\n\nThe future belongs to organizations that build cultures where everyone teaches, everyone learns, and everyone becomes capable of operating as a team of one when augmented by AI. 
Ultimately, the real competitive advantage isn't AI; it's the people who know how to effectively apply it.",{},"/en-us/the-source/ai/the-next-wave-of-devsecops-team-of-one-manager-of-many",{"config":463,"title":444,"description":451},{"noIndex":449},"the-next-wave-of-devsecops-team-of-one-manager-of-many","en-us/the-source/ai/the-next-wave-of-devsecops-team-of-one-manager-of-many","article","uB108kAT7FF5Vzz0yhqNLa2CiJ961jVzF7UcnptOwEA",{"id":469,"title":470,"body":6,"category":445,"config":471,"content":473,"description":474,"extension":22,"meta":499,"navigation":24,"path":500,"seo":501,"slug":503,"stem":504,"type":466,"__hash__":505,"date":475,"timeToRead":476,"heroImage":477,"keyTakeaways":478,"articleBody":482,"faq":483},"theSource/en-us/the-source/ai/ai-transforms-agile-planning-for-modern-development-teams.yml","AI transforms Agile planning for modern development teams",{"layout":8,"template":447,"featured":24,"author":27,"sourceCTA":472,"isHighlighted":449,"authorName":5},"source-lp-navigating-a-smooth-transition-to-agile-planning",{"title":470,"description":474,"date":475,"timeToRead":476,"heroImage":477,"keyTakeaways":478,"articleBody":482,"faq":483},"Discover how AI-powered platforms can streamline Agile workflows, reduce administrative burden, and help teams focus on delivering customer value.","2025-08-19T00:00:00.000Z","5 min read","https://res.cloudinary.com/about-gitlab-com/image/upload/v1755530469/emjkvobknbai4rceregn.png",[479,480,481],"AI can automate routine tasks like backlog grooming and sprint planning, freeing teams to focus on strategic work and customer value delivery.","Modern platforms integrate planning, coding, and delivery in one environment, enabling AI to provide real-time insights across the entire workflow.","Smart automation handles administrative burden while preserving human judgment for innovation, making Agile planning more effective at scale.","After two and a half decades, Agile planning faces a crisis. 
What started as a revolutionary approach to software development has become bogged down by complex processes and endless administrative tasks. Today’s Agile practices often feel far removed from the original vision of rapid, responsive development.\n\nAs companies tried to scale Agile across large organizations, frameworks like the [Scaled Agile Framework (SAFe)](https://framework.scaledagile.com/) emerged to bridge the gap between team-level agility and enterprise requirements. As a result, the software industry’s most dangerous phrase has evolved from “we’ve always done it this way” to “we’re agile, but…,” signaling how far we've drifted from the principles that made Agile revolutionary in the first place.\n\nDespite these challenges, the fundamental ideas behind Agile — being responsive, iterating quickly, and focusing on customer value — remain as important as ever. The real issue isn’t with Agile itself but with how we execute it in complex organizational environments. We don’t need to abandon Agile; we need better tools that actually support its core principles.\n\nArtificial intelligence (AI) offers a promising solution by automating the administrative overhead that has weighed down modern Agile practices.\n## AI and the platform revolution\nMy experience with Agile methodologies started alongside pioneers like Jon Kern, one of the original signers of the [Agile Manifesto](https://agilemanifesto.org/). He taught me that focusing on customers and delivery is more effective than creating endless documentation. This approach helped me build small, high-performing teams that achieved remarkable results through quick iterations and constant customer feedback. However, I've also seen how these principles can get lost in large enterprise settings.\n\nToday's AI-powered platforms offer a way back to those original ideals. 
Modern multi-agent collaboration platforms create integrated environments where [AI agents](https://about.gitlab.com/the-source/ai/agentic-ai-unlocking-developer-potential-at-scale/) work together to scan code, analyze customer feedback, and suggest solutions. This coordinated intelligence helps teams stay responsive to real-time insights.\n\nImagine AI systems that can analyze customer feedback, support tickets, and usage patterns, then automatically identify and group related issues into meaningful project themes without requiring lengthy planning meetings. These systems could break down large projects into appropriately sized tasks based on data about team velocity and dependencies, then assign them to sprints that optimize for both business value and technical coherence.\n\nIn this way, AI tools can transform the tedious work of manual backlog management, estimation sessions, and sprint planning into brief validation meetings where human creativity and strategic thinking help teams focus on the “why” rather than the “how.” In this world, teams spend more time delivering value than discussing how to deliver value.\n\nHere’s a real-world example: [Cube](https://about.gitlab.com/blog/how-cube-uses-gitlab-to-increase-efficiency-and-productivity/), a software development company based in the Netherlands, significantly improved both development speed and code quality by adopting a unified platform strategy that leveraged AI across all stages of the development lifecycle.\n\nThis isn’t about removing human judgment from Agile; it’s about elevating it from administrative burden to strategic guidance, allowing teams to truly embrace the responsive, value-focused delivery that Agile originally promised.\n## Streamlined planning in an AI-driven world\nMany organizations are already replacing the complicated workflows of monolithic planning tools with lightweight issue management systems that [smoothly integrate with the entire development 
lifecycle](https://about.gitlab.com/the-source/platform/devops-teams-want-to-shake-off-diy-toolchains-a-platform-is-the-answer/). When issue tracking exists alongside code repositories, CI/CD pipelines, and delivery systems, it creates an environment where AI can truly enhance our workflows.\n\nThis integrated platform approach enables a fundamental shift in how we plan and execute work. Here are some key applications:\n\n**AI-driven security remediation planning**: Instead of treating security as a separate workflow, intelligent AI tools can automatically create remediation issues from vulnerability scans, prioritize them based on risk assessment, and intelligently schedule them alongside feature work. This ensures that security debt doesn’t accumulate in forgotten backlogs while providing clear visibility into application security status.\n\n**Intelligent code review automation**: AI-powered tools can automatically analyze code changes, identify potential bugs, suggest improvements, and check for compliance with architectural patterns — all before a human reviewer even sees the code. This shifts human review time from finding basic issues to making strategic decisions about implementation approaches.\n\n**Smart cross-platform coordination**: Through agent-to-agent (A2A) communication frameworks, organizations can create powerful integrations between development platforms and planning and issue management tools. These integrations enable AI agents to automatically synchronize data across platforms, providing a comprehensive view of development activities regardless of where planning occurs. They adjust sprint allocations based on developer activity and provide early warnings when timelines or team capacity are at risk.\n\nThese capabilities, which exist today, can make developers more efficient and enable leadership to make informed decisions. 
The result is a cohesive ecosystem where information flows seamlessly between planning and execution tools, eliminating the need for developers to switch between systems.\n## What this means for your team\nThe move toward AI-enhanced Agile planning requires a practical look at your current processes and toolchain.\n\nStart by **evaluating whether your current processes create bottlenecks** between development and deployment. Look for gaps where Agile ceremonies exist, but traditional approval workflows still dominate critical decisions.\n\nNext, **assess how much time your teams spend on planning ceremonies** versus actual development work. Consider whether AI can automate administrative tasks such as backlog grooming, estimation sessions, and status updates while preserving human strategic input on priorities and technical decisions.\n\n**Examine your current toolchain** to identify where manual coordination is required between the planning, development, and deployment phases. Look for opportunities where AI can automate data synchronization and provide predictive insights about team capacity and timeline risks, reducing the context switching that fragments developer focus.\n\nFinally, **review your current planning overhead** and identify which administrative tasks can be automated, allowing your team to focus on delivering customer value and making strategic technical decisions rather than adhering to process compliance. The goal is not to eliminate human judgment but to elevate it from routine tasks to the strategic thinking that drives innovation.\n\nThe future belongs to teams that embrace lightweight, AI-enabled platforms, where planning, code, and delivery coexist in a single, integrated environment. 
When machines handle the tactical execution and administrative burden, humans can focus on innovation and customer delight — the roles that truly add value to your organization.",[484,487,490,493,496],{"header":485,"content":486},"How can AI automate routine Agile planning tasks?","AI can automate backlog grooming, estimation sessions, and sprint planning by analyzing customer feedback, support tickets, and usage patterns to automatically identify and group related issues into meaningful project themes. This transforms tedious manual work into brief validation meetings where teams focus on strategic thinking rather than administrative tasks.",{"header":488,"content":489},"What are the key benefits of AI-driven security remediation planning?","AI tools automatically create remediation issues from vulnerability scans, prioritize them based on risk assessment, and intelligently schedule them alongside feature work. This ensures security debt doesn't accumulate in forgotten backlogs while providing clear visibility into application security status without treating security as a separate workflow.",{"header":491,"content":492},"How does intelligent code review automation improve development workflows?","AI-powered tools automatically analyze code changes, identify potential bugs, suggest improvements, and check compliance with architectural patterns before human reviewers see the code. This shifts human review time from finding basic issues to making strategic decisions about implementation approaches, improving both efficiency and code quality.",{"header":494,"content":495},"What is smart cross-platform coordination in AI-enhanced Agile planning?","Through agent-to-agent (A2A) communication frameworks, AI agents automatically synchronize data across development platforms and planning tools, providing comprehensive views of development activities. 
They adjust sprint allocations based on developer activity and provide early warnings when timelines or team capacity are at risk.",{"header":497,"content":498},"How should teams evaluate their readiness for AI-enhanced Agile planning?","Teams should assess whether current processes create bottlenecks between development and deployment, evaluate time spent on planning ceremonies versus actual development work, examine toolchain gaps requiring manual coordination, and identify administrative tasks that can be automated while preserving human strategic input on priorities and technical decisions.",{},"/en-us/the-source/ai/ai-transforms-agile-planning-for-modern-development-teams",{"config":502,"ogImage":477,"title":470,"description":474},{"noIndex":449},"ai-transforms-agile-planning-for-modern-development-teams","en-us/the-source/ai/ai-transforms-agile-planning-for-modern-development-teams","jWSJCJDQ0V_nUmhr-fTugv1BxH_lLFIkIOj47UFzxUI",{"id":507,"title":508,"body":6,"category":445,"config":509,"content":511,"description":513,"extension":22,"meta":536,"navigation":24,"path":537,"seo":538,"slug":539,"stem":540,"type":466,"__hash__":541,"date":512,"timeToRead":453,"heroImage":514,"keyTakeaways":515,"articleBody":519,"faq":520},"theSource/en-us/the-source/ai/why-automotive-software-development-needs-human-centered-ai.yml","Why automotive software development needs human-centered AI",{"layout":8,"template":447,"author":27,"featured":24,"sourceCTA":510,"isHighlighted":449,"authorName":5},"source-lp-transform-automotive-devops-secure-fast-future-ready",{"title":508,"date":512,"description":513,"timeToRead":453,"heroImage":514,"keyTakeaways":515,"articleBody":519,"faq":520},"2025-06-02","Learn why balancing AI assistance with human expertise is crucial for automotive embedded systems development and creating competitive advantages.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463704/u3dshy4qn6rtrklfalx7.png",[516,517,518],"AI in automotive embedded 
software development works best as a Level 2 assistant, meaning human expertise remains essential for effective embedded development in vehicles.","The right human-AI balance varies across different automotive software domains; teams that find the right balance between AI assistance and human expertise will gain competitive advantages.","Creating effective human-AI partnerships requires intentional processes such as mandatory human review checkpoints for safety-critical systems.","Software is an essential part of modern automobiles. This year, the lines of code in the average car are expected to reach [650 million](https://www.statista.com/statistics/1370978/automotive-software-average-lines-of-codes-per-vehicle-globally/), an increase from 200 million in 2020. What’s more, we’re seeing a shift from distributed architectures for vehicle firmware toward zonal architectures with central high-performance computers (HPCs). All of this creates complexity and novel software challenges.\n\nEmbedded systems developers are trying to adapt to this complexity. At the same time, market pressures are forcing them to accelerate their development processes and ship innovation faster.\n\nArtificial intelligence (AI) can help address these challenges, but its implementation raises important questions. To what degree should AI tools autonomously generate and review code in automotive embedded systems? How much human oversight is advisable? Drawing from the automotive industry's vocabulary, I propose that embedded development requires Level 2 AI assistance - at least right now.\n\n## Understanding Level 2 automation for AI in embedded development\nIn automotive driving automation, [Level 2 systems](https://www.sae.org/blog/sae-j3016-update) represent partial automation: a carefully balanced human-machine collaboration. These systems can help control steering, acceleration, and braking in specific scenarios, but the driver must stay engaged. 
They must monitor the environment and be ready to take control at any moment. The human remains legally responsible for the vehicle's operation and must supervise the automation continually. In contrast, Level 4-5 systems aim to operate with minimal or no human oversight in defined conditions.\n\nThis framework provides a useful analogy for AI in embedded development. Current AI tools excel at providing suggestions and automating routine tasks, much like Level 2 driver assistance. They can suggest code, help with testing, and identify potential issues. However, their contextual understanding has limitations. Given the high stakes of automotive embedded systems, combining AI's capabilities with human wisdom and oversight is best.\n\n## Why AI excels as a development assistant\nAI shows remarkable capabilities across numerous areas of embedded development. Here are just a few examples from the growing list of applications:\n\nFirst, AI can [generate and complete code](https://docs.gitlab.com/user/project/repository/code_suggestions/) for common patterns in C/C++, reducing developers' time spent on routine programming tasks. And if prompted correctly, AI can respect embedded-specific constraints like memory limitations and hardware interfaces.\n\nSecond, AI can [generate tests](https://docs.gitlab.com/user/gitlab_duo_chat/examples/#write-tests-in-the-ide) that you can run on cloud-based ARM CPUs or virtual hardware. This helps teams \"shift left\" in testing their firmware and catch issues earlier in development when they're less expensive to fix. It also helps identify edge cases you might have otherwise overlooked.\n\nThird, AI can help [accelerate the remediation of security vulnerabilities](https://docs.gitlab.com/user/application_security/vulnerabilities/#explaining-a-vulnerability) in your code. AI tools can help interpret security findings from your security scanners. 
They can even suggest potential approaches to address issues, supporting development teams as they work to meet cybersecurity requirements in this highly regulated space. AI thus helps expedite remediation.\n\nBeyond these examples, AI is increasingly valuable for [root cause analysis](https://docs.gitlab.com/user/gitlab_duo_chat/examples/#troubleshoot-failed-cicd-jobs-with-root-cause-analysis) of complex issues, comprehensive [code reviews](https://docs.gitlab.com/user/project/merge_requests/duo_in_merge_requests/#have-gitlab-duo-review-your-code), automated [code refactoring](https://about.gitlab.com/blog/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo/) for optimization, [explaining](https://docs.gitlab.com/user/project/merge_requests/changes/#explain-code-in-a-merge-request) complex legacy code, and providing conversational assistance through [AI chat capabilities](https://docs.gitlab.com/user/gitlab_duo_chat/). As AI evolves, so will the ways in which it assists embedded development teams.\n\n## The essential human element\nThough these AI capabilities are quite powerful, they cannot - and should not - replace human expertise. Embedded developers bring domain knowledge that spans both software and hardware domains, understanding not just how to code, but how that code interacts with physical components under varying conditions.\n\nMoreover, embedded developers understand the intricate relationships between different vehicle subsystems. Far from replacing such expertise, AI must integrate with human beings' contextual knowledge.\n\nHumans also bring creativity and innovation to solving unique automotive challenges. When faced with conflicting requirements or novel problems, human engineers draw on experience and intuition that AI simply doesn't possess.\n\nThe human-centered approach is critical in automotive development, where safety and reliability cannot be compromised. 
Just as a driver must remain alert and ready to take control of a Level 2 automated vehicle, developers must maintain ultimate responsibility for AI-generated code. While valuable, AI suggestions require expert validation. Developers must review and verify that proposed solutions solve the problem correctly within the specific automotive context.\n\nThis human oversight becomes even more critical when considering the consequences of errors. In enterprise software, a bug might cause inconvenience; in automotive systems, it could potentially impact passenger safety. Developers bring ethical judgment and a holistic understanding of the operating environment that AI currently lacks. They can anticipate edge cases based on real-world driving conditions and evaluate AI recommendations against their practical experience with actual vehicle systems.\n\n## Creating an effective human-AI partnership\nBelow are some initial approaches to consider as you begin building productive partnerships between developers and AI.\n\nStart by identifying specific high-volume, low-risk tasks where AI can provide immediate value: unit test generation for non-safety-critical components, documentation updates, and routine code standardization are excellent entry points.\n\nImplement a tiered approach to AI integration based on system criticality. For infotainment or connectivity systems, teams might leverage more autonomous AI assistance. For safety-related systems, establish mandatory human review checkpoints with structured approval workflows. Create clear guidelines on which code components require senior engineer review versus those where junior developers can approve AI suggestions with minimal oversight.\n\nReview processes also need adaptation. Rather than having humans review AI-generated code in isolation, teams should implement collaborative workflows where AI assists with the review itself, highlighting potential issues for human evaluation. 
Consider adopting structured prompting techniques. For example, have developers specify constraints like memory requirements, coding standards, or performance parameters before generating AI suggestions.\n\nThese examples represent starting points for effective human-AI collaboration in embedded development.\n\n## Looking to the future\nThe human-AI partnership will evolve across different automotive domains as AI capabilities advance. Teams should prepare by focusing on higher-value skills that complement AI capabilities, such as systems architecture, integration expertise, and hardware-software design.\n\nThe teams that succeed will find the right balance, leveraging AI to handle routine tasks while keeping humans at the center of the development process. This is the path to realizing AI's productivity promise.\n\n_I'll be discussing topics like this and more with Dr. Felix Kortmann of Ignite by FORVIA HELLA in a webinar on June 11. The webinar will be on “Building the Future of Automotive Software.” Join us to learn how to effectively balance AI assistance with human expertise in your embedded development teams. [Register here](https://page.gitlab.com/webcasts-jun11-gitlab-ignite-by-foriva-hella-emea-amer.html?utm_medium=referral&utm_source=gmail&utm_campaign=20250611_global_cmp_webcast_speedsecurity_en_&utm_content=salespromo_x_auto)._",[521,524,527,530,533],{"header":522,"content":523},"What is Level 2 AI assistance in automotive software development?","Level 2 AI refers to a collaborative human-AI model where AI supports tasks like code generation and testing, but developers retain oversight and responsibility. Like Level 2 driving automation, the human stays in control, ensuring contextual accuracy and safety.",{"header":525,"content":526},"How does the role of AI differ across various automotive software domains?","AI adds value across all domains, but oversight levels vary. 
Safety-critical systems require stricter human validation, while infotainment systems allow more autonomous AI use. Teams should tailor AI workflows based on system risk and regulatory requirements.",{"header":528,"content":529},"How can teams establish effective AI review processes for embedded code?","Teams should use a tiered review structure. AI can perform initial quality checks — flagging syntax issues or common errors — while human experts review critical code sections and system interfaces. Clear guidelines should define when AI-generated suggestions require additional human verification or senior engineer approval to ensure safe integration within embedded systems.",{"header":531,"content":532},"What skills should embedded developers focus on as AI capabilities expand?","Embedded developers should deepen their understanding of systems architecture, hardware-software integration, and domain-specific safety requirements. Skills in prompt engineering and AI collaboration, such as framing effective prompts and interpreting model outputs, are also increasingly important. These competencies help developers remain effective evaluators and collaborators alongside AI systems.",{"header":534,"content":535},"How can AI help address the shortage of embedded software expertise in the automotive industry?","AI reduces the burden on experienced engineers by automating routine development tasks like boilerplate coding, unit testing, and documentation. This allows senior engineers to focus on high-impact projects and mentoring. 
At the same time, AI tools help junior developers ramp up faster by guiding them through embedded-specific best practices, accelerating onboarding and reducing skill barriers.",{},"/en-us/the-source/ai/why-automotive-software-development-needs-human-centered-ai",{"title":508,"description":513,"ogImage":514},"why-automotive-software-development-needs-human-centered-ai","en-us/the-source/ai/why-automotive-software-development-needs-human-centered-ai","zRQVj6LXNlMRYMBtoaG2yYr4RLoWfsu5UJJHU4pQQio",{"id":543,"title":544,"body":6,"category":545,"config":546,"content":548,"description":550,"extension":22,"meta":576,"navigation":24,"path":577,"seo":578,"slug":579,"stem":580,"type":466,"__hash__":581,"date":549,"timeToRead":476,"heroImage":551,"keyTakeaways":552,"articleBody":556,"faq":557},"theSource/en-us/the-source/security/embedding-risk-intelligence-into-your-software-supply-chain.yml","Embedding risk intelligence into your software supply chain","security",{"layout":8,"template":447,"author":27,"featured":24,"sourceCTA":547,"isHighlighted":449,"authorName":5},"source-lp-devsecops-the-key-to-modern-security-resilience",{"title":544,"date":549,"description":550,"timeToRead":476,"heroImage":551,"keyTakeaways":552,"articleBody":556,"faq":557},"2025-04-22","Transform your security strategy by embedding risk assessment into development workflows instead of treating it as a final checkpoint.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463994/rexeefvqpj1xs8vq7ugl.jpg",[553,554,555],"Focus on business impact instead of vulnerability counts by targeting security threats that pose actual danger to your business rather than trying to fix every potential issue.","Embed risk checks throughout development by adding quality metrics and automated testing early in your software pipeline to catch issues when they’re easier to fix.","Create audit trails for security decisions through “breadcrumbed” processes that document who approved changes and why, creating 
accountability and improving future decisions.","It’s a nightmare scenario for any business: Hackers have exposed the personal information of millions of your users. What if this wasn’t due to critical vulnerabilities in your application but simply poorly configured API endpoints that hackers could abuse to farm user data? That’s precisely what happened to a popular tech company in 2023, and it’s more common than you might think.\n\nSecurity resources are finite, and [not all threats pose equal business risk](https://about.gitlab.com/the-source/security/security-its-more-than-culture-addressing-the-root-cause-of-common-security/). Organizations that are laser-focused on technical severity ratings rather than actual business impact could be leaving themselves open to unanticipated risks.\n\nMeanwhile, the urgency for better risk quantification has never been greater. Threat actors can now leverage multiple AI systems to execute sophisticated, multi-pronged attacks targeting exploitable vulnerabilities. These AI-accelerated campaigns can quickly identify and exploit business-critical weaknesses that traditional security approaches might overlook or deprioritize - turning yesterday's “medium-risk” vulnerability into today’s multimillion-dollar breach.\n\nTo counter these evolving threats and navigate this growing complexity, leading organizations are fundamentally reimagining their approach. Instead of treating security as a separate function that happens after development, they’re embedding **risk intelligence** throughout their software supply chain. This approach allows them to focus resources where they matter most, reduce time-to-market for secure products, and demonstrate due diligence to regulators and customers.\n\nThe key is distinguishing between vulnerabilities that might cause harm and those that will cause damage in your specific business context. 
Companies can achieve stronger security and faster innovation by rethinking how risk is evaluated and managed across development and operations.\n\n## Limiting risk through data-driven change management\n**Risk intelligence helps you focus on threats that matter. It’s the difference between knowing you have 3,000 vulnerabilities and understanding which 50 could harm your business.**\n\nKey elements of risk intelligence include:\n\n**Exploitability assessment (reachability)**: Not all vulnerabilities can be weaponized. Risk intelligence evaluates which security findings have actual attack paths versus those that exist in code but cannot be reached by malicious actors.\n\n**Dependency context**: Risk-based security recognizes that a vulnerable package doesn’t just affect one application - it can impact dozens or hundreds across your organization. Modern approaches map dependencies across projects, enabling teams to understand the cascading impact of vulnerabilities throughout the organization. This ecosystem view provides critical context for prioritization decisions.\n\n**Continuous risk monitoring**: Instead of point-in-time assessments, risk intelligence requires ongoing monitoring that adjusts as threat landscapes evolve. A vulnerability that was low risk yesterday may become critical today based on emerging exploit techniques.\n\nSo how can you move from reactive security scanning to proactive risk intelligence? The journey begins where your software does - in the software factory itself.\n\n## The software factory: Quality gates and risk signals\nThe software factory is where code transforms from an idea to a deployable package. This phase encompasses everything from initial code commits to unit testing to packaging, creating the foundation for your entire software supply chain. By adding risk checks early, teams can find and fix issues before they spread. 
Just as critical is establishing clear attribution for every code change, knowing exactly who made each change (contractor, consultant, or employee), why, and when - creating an audit trail providing crucial risk assessment context.\n\nThe software factory offers three key opportunities to embed risk assessment into your development process:\n\n### Collaboration through quality intelligence\nEstablishing cross-functional quality metrics can help organizations create a shared understanding of risk across teams. Potential metrics include code coverage trends, security vulnerability density, technical debt accumulation, performance regression patterns, API compatibility scores, and documentation completeness.\n\n### Transparency through correlated data\nRisk intelligence requires connecting disparate data points into a comprehensive view. Quality intelligence dashboards with real-time metrics and trend visualization help teams spot emerging risk patterns, while documentation traceability creates auditable trails linking requirements, changes, and security findings. Automated data collection enables cross-system correlation between code changes and security findings, with pattern recognition algorithms identifying unusual behaviors that manual review might miss. This democratized intelligence empowers all stakeholders to make risk-informed decisions instead of siloing information within security teams.\n\n### Automation for quality assurance\nManual risk assessment can’t scale to modern development speeds. Continuous testing pipelines with automated security scans and performance tests provide early feedback on potential risks without slowing velocity. Automated quality gates enforce minimum standards throughout development, and risk threshold monitoring flags concerning trends before they become critical. 
These automated guardrails maintain consistent risk assessment while allowing development teams to maintain productivity and improve safety without sacrificing speed.\n\n## Software logistics: Risk management through team-based scorecards\nAfter code is packaged, it enters the logistics phase - provisioning, deployment, configuration, monitoring, and maintenance. Here, potential bugs meet real-world exposure. This makes assessing risk in actual operating conditions vital. However, traditional approaches to risk assessment at this stage are often inflexible and inefficient.\n\n> [Learn how effective software logistics can enable operations teams to efficiently support developers and accelerate delivery](https://about.gitlab.com/the-source/platform/why-software-logistics-is-key-to-accelerating-innovation/).\n\nEffective risk intelligence means helping teams focus on why they should deploy instead of why they shouldn’t - replacing the binary, inflexible assessment methods of the past with an automated, metrics-driven approach. Here are three critical aspects to keep in mind:\n\n### Collaborative assessment model\nModern risk approaches replace binary go/no-go decisions with multi-stakeholder evaluations, sometimes called a Change Advisory Board (CAB), incorporating diverse perspectives. Security teams evaluate vulnerability context and exploitability, operations teams assess deployment impact and rollback capabilities, and business stakeholders weigh customer impact against needs. This team-based approach builds consensus around acceptable risk rather than imposing rigid standards, allowing for nuanced decisions that balance security with business objectives.\n\n### Scorecard transparency\nEffective risk evaluation requires visible criteria that consider multiple dimensions of impact. 
Comprehensive scorecards include security risk factors that assess severity and real-world exploitability, operational metrics that evaluate system stability implications, compliance requirements for relevant regulations, and business impact on customers and revenue. This transparent approach creates a holistic risk profile that provides the context necessary for informed deployment decisions while ensuring all stakeholders understand the basis for security choices.\n\n### Automated scorecard processing\nManual risk assessment creates bottlenecks that slow deployment cycles. Modern approaches use real-time processing with automated score calculation and threshold monitoring to evaluate changes continuously. Integration with CI/CD pipelines, security tools, and compliance systems ensures risk data flows automatically between systems without manual intervention. This automation maintains consistent evaluation standards while eliminating the delays typically associated with security reviews.\n\n## From vulnerability counts to business impact: The future of software security\nThe future of application security isn’t about finding more vulnerabilities - it’s about understanding the risk those vulnerabilities pose. By embedding risk intelligence throughout your software supply chain, you can drive team collaboration to help you create secure software faster.\n\nEstablishing this risk assessment process across both your software factory and logistics phases has an added benefit: You’ll create an auditable trail that documents who made security decisions, what evidence they considered, when changes were approved, and why specific actions were taken. This transparency provides accountability across the entire software supply chain, builds institutional memory of risk management approaches, and creates data to inform future decisions. 
The resulting traceability transforms security from a point-in-time assessment to an ongoing, verifiable process demonstrating due diligence to auditors, regulators, and customers.",[558,561,564,567,570,573],{"header":559,"content":560},"What is risk intelligence in software development?","Risk intelligence is the practice of evaluating security threats based on their real-world business impact rather than just technical severity. It helps teams focus on exploitable and high-priority vulnerabilities, streamlining security efforts.",{"header":562,"content":563},"How does embedding risk checks early improve software security?","Introducing risk assessments during early development phases allows teams to catch and resolve issues sooner, reducing costs and complexity. This shift from reactive to proactive security enhances both speed and safety.",{"header":565,"content":566},"Why should organizations move beyond vulnerability counts?","Counting vulnerabilities doesn't reflect the true risk landscape. Many may be unreachable or irrelevant. Prioritizing based on exploitability and business context ensures limited security resources are used effectively.",{"header":568,"content":569},"How do audit trails contribute to better risk management?","Audit trails document who made a change, why, and when. These records provide accountability, aid compliance, and offer valuable insight for improving future decision-making and demonstrating due diligence.",{"header":571,"content":572},"What role does automation play in risk intelligence?","Automation enables consistent, scalable risk evaluation across CI/CD pipelines. It helps enforce security standards, reduces manual bottlenecks, and ensures timely responses to emerging risks without slowing development.",{"header":574,"content":575},"What’s the benefit of team-based scorecards for deployment decisions?","Team-based scorecards bring together inputs from security, operations, and business teams. 
This collaborative model replaces rigid go/no-go decisions with nuanced assessments that balance innovation and acceptable risk.",{},"/en-us/the-source/security/embedding-risk-intelligence-into-your-software-supply-chain",{"title":544,"description":550,"ogImage":551},"embedding-risk-intelligence-into-your-software-supply-chain","en-us/the-source/security/embedding-risk-intelligence-into-your-software-supply-chain","wuTFzeEd5j6NXQwECCQbvGXEJJexyGYyc9Xu2wSlYKo",{"id":583,"title":584,"body":6,"category":585,"config":586,"content":588,"description":590,"extension":22,"meta":617,"navigation":24,"path":618,"seo":619,"slug":620,"stem":621,"type":466,"__hash__":622,"date":589,"timeToRead":591,"heroImage":592,"keyTakeaways":593,"articleBody":597,"faq":598},"theSource/en-us/the-source/platform/why-software-logistics-is-key-to-accelerating-innovation.yml","Why software logistics is key to accelerating innovation","platform",{"layout":8,"template":447,"author":27,"featured":24,"sourceCTA":587,"isHighlighted":449,"authorName":5},"source-lp-building-a-resilient-software-development-practice",{"title":584,"date":589,"description":590,"timeToRead":591,"heroImage":592,"keyTakeaways":593,"articleBody":597,"faq":598},"2025-04-15","Transform deployment processes with software logistics, enabling your operations team to efficiently support developers and accelerate delivery.","4 min read","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463545/nomdlhvlawqmncg0g1p8.png",[594,595,596],"Software logistics focuses on what happens after code is packaged — provisioning, deployment, configuration, monitoring, and maintenance — optimizing the crucial second half of the software supply chain.","With typically only 1% of technical staff in operations roles, organizations need a “logistics mindset” to automate deployment processes and create better developer experiences.","Implementing a platform-as-a-product approach to software logistics enables standardization while maintaining 
flexibility, reducing security risks and accelerating deployment cycles.","Software isn’t just powering your business - it is your business. However, while organizations invest heavily in development capabilities, they often overlook a critical component: software logistics.\n\nSoftware logistics encompasses everything that happens after code is packaged for delivery: provisioning, deployment, configuration, monitoring, and maintenance. Think of it as the crucial second half of the software supply chain, where even the most brilliantly engineered solutions can falter without proper execution.\n\nThe challenge is clear: For every 100 developers in your organization, statistics show you likely have just one operations person. Those resources typically focus on network engineering, database administration, platform engineering, and site reliability. As generative AI is poised to dramatically increase the amount of code developers produce, this creates an unsustainable bottleneck in your software delivery execution.\n\n## Why traditional approaches fall short\n**Traditional approaches to this imbalance typically fall into two categories: overburdening operations teams or forcing developers to become operations experts. Neither works well.**\n\nWhen operations teams become overwhelmed, they create restrictive processes that slow down delivery. When developers are forced to handle operations, they spend less time on their core strength - solving business problems through code. Our [research shows](https://about.gitlab.com/developer-survey/2024/ai/) developers typically spend only 21% of their time writing new code, with the rest consumed by meetings, maintenance, and administrative tasks.\n\nThis inefficiency is frustrating and expensive. Every day, your innovations sit waiting for deployment, which is lost business value.\n\n## The premium delivery model for software\nWhat if you could bring guaranteed reliability and predictability to your software delivery? 
That’s the promise of effective software logistics.\n\nJust as modern logistics companies revolutionized retail by streamlining the supply chain - getting products from warehouses to customers efficiently - organizations must move software from package registries to production environments smoothly.\n\nMore and more organizations are investing in [platform engineering](https://about.gitlab.com/the-source/platform/driving-business-results-with-platform-engineering/) to accelerate software development by standardizing best practices and components for development teams. However, if your platform engineering initiatives are focused only on developer experience, you’re missing a critical piece of the puzzle. While improving developer experience is important, efficiency gains in code creation are meaningless if your organization lacks the operational maturity to deploy, configure, monitor, and maintain that code effectively.\n\nThat’s where software logistics comes in: ensuring that increased code velocity translates to actual business value rather than creating deployment bottlenecks or operational chaos.\n\n## The competitive advantage of software logistics\nAn effective software logistics strategy offers several key advantages:\n- **Accelerated delivery cycles**: Reduce the time from code completion to production deployment from weeks to days or even hours.\n- **Enhanced security posture**: Build security into development pipelines rather than treating it as a final gate, reducing vulnerabilities while maintaining velocity.\n- **Improved operational efficiency**: Enable your limited operations staff to support more developers through automation and self-service capabilities.\n- **Better resource utilization**: Focus your expensive development talent on creating business value rather than wrestling with deployment complexities.\n\n## Optimizing for effective software logistics\nIn conversations with technical leaders at organizations of all sizes, I’ve observed 
several consistent patterns that distinguish successful software logistics implementations. Here are three steps you can take to optimize your software logistics:\n\n### Build an enterprise application delivery framework\nModern software delivery requires sophisticated orchestration across diverse environments, deployment strategies, and operational concerns. An effective framework should include aspects such as **release orchestration** to coordinate the deployment of interdependent services across environments; **progressive delivery strategies** such as canary releases and feature flags that allow controlled rollouts with automated verification; and **provisioning automation** that creates the underlying infrastructure through policy-controlled interfaces while enforcing security guardrails and compliance requirements. By generating attestations at each stage, this framework creates a verifiable record of the entire delivery process and enables real-time risk assessment and compliance validation.\n\n### Adopt a platform with a unified data store\nTop-performing organizations need comprehensive metrics across their entire delivery pipeline, from code commit to production performance. You can’t manage what you don't measure - and the best teams measure everything from development velocity to operational stability to security posture. A unified data fabric serves as the nervous system for effective software logistics, connecting previously siloed information across the entire software delivery lifecycle and enabling intelligent decision-making and automation.\n\n### Boost developer autonomy through golden pipelines\nIntuitive interfaces that allow developers to initiate deployments without understanding underlying complexity, with appropriate guardrails built in, reduce the burden on operations teams while accelerating delivery cycles. 
As one platform engineering leader told me, “Our job is to make the platform so easy to use that teams can run themselves.”\n\n## Software logistics: The competitive differentiator for digital-first organizations\nAs competitive pressures accelerate, the ability to efficiently move software from testing to production becomes a critical competitive differentiator. Adopting a software logistics mindset can help your limited operations staff effectively support your development organization, accelerating innovation while maintaining security and reliability.",[599,602,605,608,611,614],{"header":600,"content":601},"What is software logistics in the context of software development?","Software logistics refers to the processes that occur after code is packaged, including provisioning, deployment, configuration, monitoring, and maintenance. It represents the second half of the software supply chain, ensuring reliable, secure, and efficient delivery to production.",{"header":603,"content":604},"Why is software logistics becoming more important now?","As generative AI accelerates the rate of code creation, organizations face growing pressure to deploy and maintain this code efficiently. With limited operations resources, effective software logistics is essential to prevent bottlenecks and turn development speed into business value.",{"header":606,"content":607},"How does poor software logistics affect delivery cycles?","Without optimized logistics, organizations experience deployment delays, inconsistent operations, and over-reliance on either stretched operations teams or developers doing ops work. This undermines innovation velocity and increases operational risk.",{"header":609,"content":610},"What role do “golden pipelines” play in software logistics?","Golden pipelines offer pre-configured, automated deployment workflows that developers can use independently. 
These pipelines enhance developer autonomy while embedding security and compliance guardrails, reducing reliance on operations teams.",{"header":612,"content":613},"How can a unified data store improve software logistics?","A unified data store connects metrics across the software delivery lifecycle, from code commit to production. This enables real-time insights, performance tracking, and automation, allowing organizations to manage delivery risk and optimize outcomes.",{"header":615,"content":616},"Why should platform engineering include a logistics focus?","While many platform engineering efforts focus on improving developer experience, the logistics side ensures that increased coding velocity translates into actual deployment efficiency. Without logistics capabilities, development speed does not equal business agility.",{},"/en-us/the-source/platform/why-software-logistics-is-key-to-accelerating-innovation",{"title":584,"description":590,"ogImage":592},"why-software-logistics-is-key-to-accelerating-innovation","en-us/the-source/platform/why-software-logistics-is-key-to-accelerating-innovation","8qhG27DWR7yd3okgX4M7Bow2_Uy9D7zZmfYt0jsGtu4",{"id":624,"title":625,"body":6,"category":585,"config":626,"content":628,"description":630,"extension":22,"meta":653,"navigation":24,"path":654,"seo":655,"slug":656,"stem":657,"type":466,"__hash__":658,"date":629,"timeToRead":476,"heroImage":631,"keyTakeaways":632,"articleBody":636,"faq":637},"theSource/en-us/the-source/platform/finops-balancing-financial-responsibility-and-innovation.yml","FinOps: Balancing financial responsibility and innovation",{"layout":8,"template":447,"author":27,"featured":449,"sourceCTA":627,"isHighlighted":449,"authorName":5},"transform-your-software-development",{"title":625,"date":629,"description":630,"timeToRead":476,"heroImage":631,"keyTakeaways":632,"articleBody":636,"faq":637},"2024-11-26","Explore how FinOps harmonizes financial accountability with business objectives, promoting 
cost-effective innovation in modern enterprises.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1751463866/i27a3wecdhplvd9wbxqr.png",[633,634,635],"FinOps fosters collaboration between finance, engineering, and business teams, aligning cloud investments with strategic business goals for maximum value.","By enhancing financial transparency, FinOps empowers organizations to make swift, data-driven decisions that reduce cloud costs.","Implementing FinOps creates a balance between innovation and cloud cost management, easing tension between product development and operations teams.","When cloud spending grows alongside your engineering teams, a painful trade-off often emerges: push developers to ship faster, or rein in escalating costs. But imagine boosting developer productivity by 30% while slashing project costs by 25%. That might sound impossible, yet it’s a realistic goal for organizations that adopt FinOps (Financial Operations) - a data-driven approach that applies DevOps principles and practices to optimize the costs associated with people, process, and technology across the software development lifecycle.\n\nI’ve seen firsthand how FinOps transforms organizations by bringing financial clarity to every aspect of software development. I recently met with the DevOps team at an insurance company that is beginning its FinOps journey. Early discussions included determining basic measurements such as cloud spend and identifying other revenue-driving, cost-reducing metrics through value stream management. These conversations are critical for FinOps practitioners to evaluate how teams and resources are organized and allocated, and which processes and tools may be implemented to drive change.\n\nFrom team structures to development processes to technology choices, this visibility helps leaders optimize investments across their entire operation. 
By bringing together finance, product, and engineering teams, FinOps enables CFOs, CPOs, and CTOs to make informed decisions - improving efficiency across the business.\n\nImproving efficiency and optimizing costs isn’t just a technical challenge - it’s also a strategic business imperative, especially as organizations invest more money in the cloud. FinOps brings necessary financial accountability to the cloud’s variable spend model. Let’s walk through the benefits of FinOps frameworks and how you can begin incorporating FinOps methodologies into your operational workflows.\n\n## What is FinOps?\nFinOps, short for Financial Operations, succeeds by transforming how finance, engineering, technology, and business teams operate. Through real-time data and analytics, teams gain immediate visibility into how they are using resources (such as cloud resources) and can take action before costs escalate. This proactive approach to financial accountability enables quick informed decisions about resource allocation, leading to measurable cost savings.\n\nAt its core, FinOps is a cultural practice that makes this transformation sustainable. By establishing clear processes and shared metrics, teams ensure their daily technology decisions support broader business objectives.\n\n## Why is FinOps so popular right now?\nAs many companies focus on generative AI and developer productivity, they need guardrails, such as automated workflows and reusable templates, on the delivery side of the equation to ensure [paved pathways](https://about.gitlab.com/the-source/platform/driving-business-results-with-platform-engineering/) are adopted. This becomes essential for organizations that are modernizing their applications and scaling cloud architectures in production.\n\nThe challenge intensifies when managing non-production costs, such as continuous integration (CI) costs. Companies that have adopted a data-driven approach can gain deep visibility into their CI costs. 
They can see the financial implications of scaling CI horizontally or vertically across different processor architectures. By implementing standards like ephemeral testing environments, they ensure code quality and security while optimizing spending.\n\nProduct owners, who normally own the budget for a product line, can also work with IT teams and engineering leads to provide transparency metrics. This collaboration allows leaders to roll up budget projections across multiple services and ensure that infrastructure resources are being leveraged at their optimal capacity. The result: finance teams can finally see which applications generate the most investment return.\n\n## Bridging technical and financial domains\nCreating a FinOps model can involve both a carrot and stick approach. The carrot approach fosters a more collaborative and transparent environment. The stick approach, on the other hand - for example, reprimanding development teams for overspending - most often leads to a breakdown in processes. You want FinOps to account for what developers need to do their work and how it impacts the company’s bottom line, not just monitor their use of cloud resources.\n\nI recently met with a major airline that was spending close to $5 million a year on CI runner fleets. Security scans, dependency scans, and token scanning all ran inside these runner fleets. They could have skipped the security step to reduce their spend, but the [potential for security issues](https://about.gitlab.com/the-source/security/how-to-strengthen-security-by-applying-devsecops-principles/) was a much bigger concern than spending the money on the runner fleets. Instead of skipping the security step, the company needed to identify ways to make their runner fleets as a whole more efficient to reduce their spend _and_ encourage developers to experiment and innovate.\n\nA successful FinOps program does not require a centralized team of full-time FinOps professionals. 
FinOps serves as a strategic liaison among cross-functional teams such as finance, product, and engineering. A typical FinOps program includes various job roles and functions, such as a CTO or VP of Engineering, a finance leader, and one or more engineering leaders who regularly collaborate to evaluate issues, identify new efficiency opportunities, and build remediation plans.\n\nAligning technical operations with financial objectives helps ensure that cloud infrastructure and software development investments yield the highest possible return. This can demonstrate to DevSecOps teams how their work contributes directly to increasing revenue, how they may be able to reduce costs, or both.\n\n## Smart financial control in developer workflows\nFinOps monitors resource consumption from both a user and operational standpoint to help optimize developer workflows. One way to accomplish this is to analyze CI jobs and identify which ones cost more than their value justifies. Every software development pipeline contains multiple jobs, each requiring an execution resource like a virtual machine (VM) or container. The longer each job takes to execute, the higher the cost. FinOps helps developers understand which jobs are performing poorly so they know which ones they need to refactor.\n\nThis creates a self-service model that frees technology teams to work within clear guidelines. For example, a policy might prohibit someone from provisioning $100,000 worth of resources on AWS, but they can spin up an EC2 image to conduct testing. However, if they can justify why they need to provision $100,000 worth of resources, they can submit a request explaining how the project will potentially generate revenue for the company. If approved, they can begin their work.\n\nHowever, I want to reassure DevSecOps professionals that FinOps isn't about restricting innovation through monitoring. 
Instead, it provides full visibility into your organization’s cloud usage and spending, helping teams identify opportunities to improve cloud productivity. In addition to fostering collaboration among finance, technology, and business teams, FinOps analyzes usage patterns and forecasts demand to anticipate whether resources need to be scaled up or down to meet future needs before overspending occurs.\n\n## Easing the tension\nThere’s a constant tug-of-war between engineering and operations teams. Engineering’s mission is to drive innovation that generates new revenue streams while creating great customer experiences. The operations team focuses on maximizing productivity while saving money. FinOps eases the tension between these groups by increasing developer productivity while reducing wasteful spending - aligning technical efficiency with financial prudence.\n\nFinOps helps business leaders think in precise numbers, not subjective costs. It’s imperative to approach software development with a clear understanding of its financial impact on the organization to make informed decisions on project continuance based on two key criteria: will the project increase revenue or reduce costs?\n\nAt its core, FinOps isn’t just about cutting cloud costs; it’s also about optimizing the entire software development lifecycle and making continuous improvements. The goal is to help engineers and operations consider financial effectiveness alongside technical innovation so they understand how their work maps to boosting the organization’s bottom line.\n\n_Read more about FinOps on the [FinOps Foundation website](https://www.finops.org/introduction/what-is-finops/)._",[638,641,644,647,650],{"header":639,"content":640},"What are the key benefits of adopting FinOps?","Adopting FinOps leads to cost savings, improved resource allocation, and better cross-team collaboration. 
Organizations gain financial transparency, allowing them to scale cloud services efficiently, reduce waste, and ensure that every technology investment supports business growth and profitability.",{"header":642,"content":643},"How does FinOps align financial objectives with DevSecOps?","FinOps helps DevSecOps teams balance security and efficiency by ensuring that cost-saving measures do not compromise essential security processes. Instead of cutting security scans to save money, organizations use FinOps to optimize infrastructure, making security operations more cost-effective while maintaining compliance.",{"header":645,"content":646},"Why is FinOps important?","FinOps, short for Financial Operations, is a framework that helps organizations balance financial accountability with innovation by optimizing cloud spending and software development costs. It enables teams to make data-driven financial decisions while maintaining agility, improving collaboration between finance, engineering, and product teams.",{"header":648,"content":649},"What role do engineering teams play in a FinOps strategy?","Engineering teams are crucial in FinOps, as they make decisions that directly impact cloud costs. FinOps encourages developers to take ownership of cost efficiency by monitoring resource consumption, refactoring inefficient CI jobs, and leveraging automation to optimize deployments. The goal is to foster a culture where financial awareness becomes a natural part of development.",{"header":651,"content":652},"How does FinOps improve cloud cost management?","FinOps provides real-time visibility into cloud spending, allowing organizations to track resource utilization and prevent unnecessary costs. 
By integrating financial insights into DevOps workflows, companies can identify inefficiencies, optimize continuous integration (CI) costs, and ensure that infrastructure investments deliver maximum value.",{},"/en-us/the-source/platform/finops-balancing-financial-responsibility-and-innovation",{"title":625,"description":630,"ogImage":631},"finops-balancing-financial-responsibility-and-innovation","en-us/the-source/platform/finops-balancing-financial-responsibility-and-innovation","4k2vmEN-3VawKsLdbq7hwFDMukpIQ2JDK9nAvZcnAjI",1772652091061]