[{"data":1,"prerenderedAt":4663},["ShallowReactive",2],{"/en-us/blog/tags/tutorial/":3,"navigation-en-us":19,"banner-en-us":437,"footer-en-us":449,"tutorial-tag-page-en-us":660},{"_path":4,"_dir":5,"_draft":6,"_partial":6,"_locale":7,"content":8,"config":10,"_id":12,"_type":13,"title":14,"_source":15,"_file":16,"_stem":17,"_extension":18},"/en-us/blog/tags/tutorial","tags",false,"",{"tag":9,"tagSlug":9},"tutorial",{"template":11},"BlogTag","content:en-us:blog:tags:tutorial.yml","yaml","Tutorial","content","en-us/blog/tags/tutorial.yml","en-us/blog/tags/tutorial","yml",{"_path":20,"_dir":21,"_draft":6,"_partial":6,"_locale":7,"data":22,"_id":433,"_type":13,"title":434,"_source":15,"_file":435,"_stem":436,"_extension":18},"/shared/en-us/main-navigation","en-us",{"logo":23,"freeTrial":28,"sales":33,"login":38,"items":43,"search":374,"minimal":405,"duo":424},{"config":24},{"href":25,"dataGaName":26,"dataGaLocation":27},"/","gitlab logo","header",{"text":29,"config":30},"Get free trial",{"href":31,"dataGaName":32,"dataGaLocation":27},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com&glm_content=default-saas-trial/","free trial",{"text":34,"config":35},"Talk to sales",{"href":36,"dataGaName":37,"dataGaLocation":27},"/sales/","sales",{"text":39,"config":40},"Sign in",{"href":41,"dataGaName":42,"dataGaLocation":27},"https://gitlab.com/users/sign_in/","sign in",[44,88,184,189,295,355],{"text":45,"config":46,"cards":48,"footer":71},"Platform",{"dataNavLevelOne":47},"platform",[49,55,63],{"title":45,"description":50,"link":51},"The most comprehensive AI-powered DevSecOps Platform",{"text":52,"config":53},"Explore our Platform",{"href":54,"dataGaName":47,"dataGaLocation":27},"/platform/",{"title":56,"description":57,"link":58},"GitLab Duo (AI)","Build software faster with AI at every stage of development",{"text":59,"config":60},"Meet GitLab Duo",{"href":61,"dataGaName":62,"dataGaLocation":27},"/gitlab-duo/","gitlab duo 
ai",{"title":64,"description":65,"link":66},"Why GitLab","10 reasons why Enterprises choose GitLab",{"text":67,"config":68},"Learn more",{"href":69,"dataGaName":70,"dataGaLocation":27},"/why-gitlab/","why gitlab",{"title":72,"items":73},"Get started with",[74,79,84],{"text":75,"config":76},"Platform Engineering",{"href":77,"dataGaName":78,"dataGaLocation":27},"/solutions/platform-engineering/","platform engineering",{"text":80,"config":81},"Developer Experience",{"href":82,"dataGaName":83,"dataGaLocation":27},"/developer-experience/","Developer experience",{"text":85,"config":86},"MLOps",{"href":87,"dataGaName":85,"dataGaLocation":27},"/topics/devops/the-role-of-ai-in-devops/",{"text":89,"left":90,"config":91,"link":93,"lists":97,"footer":166},"Product",true,{"dataNavLevelOne":92},"solutions",{"text":94,"config":95},"View all Solutions",{"href":96,"dataGaName":92,"dataGaLocation":27},"/solutions/",[98,123,145],{"title":99,"description":100,"link":101,"items":106},"Automation","CI/CD and automation to accelerate deployment",{"config":102},{"icon":103,"href":104,"dataGaName":105,"dataGaLocation":27},"AutomatedCodeAlt","/solutions/delivery-automation/","automated software delivery",[107,111,115,119],{"text":108,"config":109},"CI/CD",{"href":110,"dataGaLocation":27,"dataGaName":108},"/solutions/continuous-integration/",{"text":112,"config":113},"AI-Assisted Development",{"href":61,"dataGaLocation":27,"dataGaName":114},"AI assisted development",{"text":116,"config":117},"Source Code Management",{"href":118,"dataGaLocation":27,"dataGaName":116},"/solutions/source-code-management/",{"text":120,"config":121},"Automated Software Delivery",{"href":104,"dataGaLocation":27,"dataGaName":122},"Automated software delivery",{"title":124,"description":125,"link":126,"items":131},"Security","Deliver code faster without compromising security",{"config":127},{"href":128,"dataGaName":129,"dataGaLocation":27,"icon":130},"/solutions/security-compliance/","security and 
compliance","ShieldCheckLight",[132,135,140],{"text":133,"config":134},"Security & Compliance",{"href":128,"dataGaLocation":27,"dataGaName":133},{"text":136,"config":137},"Software Supply Chain Security",{"href":138,"dataGaLocation":27,"dataGaName":139},"/solutions/supply-chain/","Software supply chain security",{"text":141,"config":142},"Compliance & Governance",{"href":143,"dataGaLocation":27,"dataGaName":144},"/solutions/continuous-software-compliance/","Compliance and governance",{"title":146,"link":147,"items":152},"Measurement",{"config":148},{"icon":149,"href":150,"dataGaName":151,"dataGaLocation":27},"DigitalTransformation","/solutions/visibility-measurement/","visibility and measurement",[153,157,161],{"text":154,"config":155},"Visibility & Measurement",{"href":150,"dataGaLocation":27,"dataGaName":156},"Visibility and Measurement",{"text":158,"config":159},"Value Stream Management",{"href":160,"dataGaLocation":27,"dataGaName":158},"/solutions/value-stream-management/",{"text":162,"config":163},"Analytics & Insights",{"href":164,"dataGaLocation":27,"dataGaName":165},"/solutions/analytics-and-insights/","Analytics and insights",{"title":167,"items":168},"GitLab for",[169,174,179],{"text":170,"config":171},"Enterprise",{"href":172,"dataGaLocation":27,"dataGaName":173},"/enterprise/","enterprise",{"text":175,"config":176},"Small Business",{"href":177,"dataGaLocation":27,"dataGaName":178},"/small-business/","small business",{"text":180,"config":181},"Public Sector",{"href":182,"dataGaLocation":27,"dataGaName":183},"/solutions/public-sector/","public sector",{"text":185,"config":186},"Pricing",{"href":187,"dataGaName":188,"dataGaLocation":27,"dataNavLevelOne":188},"/pricing/","pricing",{"text":190,"config":191,"link":193,"lists":197,"feature":282},"Resources",{"dataNavLevelOne":192},"resources",{"text":194,"config":195},"View all resources",{"href":196,"dataGaName":192,"dataGaLocation":27},"/resources/",[198,231,254],{"title":199,"items":200},"Getting 
started",[201,206,211,216,221,226],{"text":202,"config":203},"Install",{"href":204,"dataGaName":205,"dataGaLocation":27},"/install/","install",{"text":207,"config":208},"Quick start guides",{"href":209,"dataGaName":210,"dataGaLocation":27},"/get-started/","quick setup checklists",{"text":212,"config":213},"Learn",{"href":214,"dataGaLocation":27,"dataGaName":215},"https://university.gitlab.com/","learn",{"text":217,"config":218},"Product documentation",{"href":219,"dataGaName":220,"dataGaLocation":27},"https://docs.gitlab.com/","product documentation",{"text":222,"config":223},"Best practice videos",{"href":224,"dataGaName":225,"dataGaLocation":27},"/getting-started-videos/","best practice videos",{"text":227,"config":228},"Integrations",{"href":229,"dataGaName":230,"dataGaLocation":27},"/integrations/","integrations",{"title":232,"items":233},"Discover",[234,239,244,249],{"text":235,"config":236},"Customer success stories",{"href":237,"dataGaName":238,"dataGaLocation":27},"/customers/","customer success stories",{"text":240,"config":241},"Blog",{"href":242,"dataGaName":243,"dataGaLocation":27},"/blog/","blog",{"text":245,"config":246},"Remote",{"href":247,"dataGaName":248,"dataGaLocation":27},"https://handbook.gitlab.com/handbook/company/culture/all-remote/","remote",{"text":250,"config":251},"TeamOps",{"href":252,"dataGaName":253,"dataGaLocation":27},"/teamops/","teamops",{"title":255,"items":256},"Connect",[257,262,267,272,277],{"text":258,"config":259},"GitLab 
Services",{"href":260,"dataGaName":261,"dataGaLocation":27},"/services/","services",{"text":263,"config":264},"Community",{"href":265,"dataGaName":266,"dataGaLocation":27},"/community/","community",{"text":268,"config":269},"Forum",{"href":270,"dataGaName":271,"dataGaLocation":27},"https://forum.gitlab.com/","forum",{"text":273,"config":274},"Events",{"href":275,"dataGaName":276,"dataGaLocation":27},"/events/","events",{"text":278,"config":279},"Partners",{"href":280,"dataGaName":281,"dataGaLocation":27},"/partners/","partners",{"backgroundColor":283,"textColor":284,"text":285,"image":286,"link":290},"#2f2a6b","#fff","Insights for the future of software development",{"altText":287,"config":288},"the source promo card",{"src":289},"/images/navigation/the-source-promo-card.svg",{"text":291,"config":292},"Read the latest",{"href":293,"dataGaName":294,"dataGaLocation":27},"/the-source/","the source",{"text":296,"config":297,"lists":299},"Company",{"dataNavLevelOne":298},"company",[300],{"items":301},[302,307,313,315,320,325,330,335,340,345,350],{"text":303,"config":304},"About",{"href":305,"dataGaName":306,"dataGaLocation":27},"/company/","about",{"text":308,"config":309,"footerGa":312},"Jobs",{"href":310,"dataGaName":311,"dataGaLocation":27},"/jobs/","jobs",{"dataGaName":311},{"text":273,"config":314},{"href":275,"dataGaName":276,"dataGaLocation":27},{"text":316,"config":317},"Leadership",{"href":318,"dataGaName":319,"dataGaLocation":27},"/company/team/e-group/","leadership",{"text":321,"config":322},"Team",{"href":323,"dataGaName":324,"dataGaLocation":27},"/company/team/","team",{"text":326,"config":327},"Handbook",{"href":328,"dataGaName":329,"dataGaLocation":27},"https://handbook.gitlab.com/","handbook",{"text":331,"config":332},"Investor relations",{"href":333,"dataGaName":334,"dataGaLocation":27},"https://ir.gitlab.com/","investor relations",{"text":336,"config":337},"Trust Center",{"href":338,"dataGaName":339,"dataGaLocation":27},"/security/","trust 
center",{"text":341,"config":342},"AI Transparency Center",{"href":343,"dataGaName":344,"dataGaLocation":27},"/ai-transparency-center/","ai transparency center",{"text":346,"config":347},"Newsletter",{"href":348,"dataGaName":349,"dataGaLocation":27},"/company/contact/","newsletter",{"text":351,"config":352},"Press",{"href":353,"dataGaName":354,"dataGaLocation":27},"/press/","press",{"text":356,"config":357,"lists":358},"Contact us",{"dataNavLevelOne":298},[359],{"items":360},[361,364,369],{"text":34,"config":362},{"href":36,"dataGaName":363,"dataGaLocation":27},"talk to sales",{"text":365,"config":366},"Get help",{"href":367,"dataGaName":368,"dataGaLocation":27},"/support/","get help",{"text":370,"config":371},"Customer portal",{"href":372,"dataGaName":373,"dataGaLocation":27},"https://customers.gitlab.com/customers/sign_in/","customer portal",{"close":375,"login":376,"suggestions":383},"Close",{"text":377,"link":378},"To search repositories and projects, login to",{"text":379,"config":380},"gitlab.com",{"href":41,"dataGaName":381,"dataGaLocation":382},"search login","search",{"text":384,"default":385},"Suggestions",[386,388,392,394,398,402],{"text":56,"config":387},{"href":61,"dataGaName":56,"dataGaLocation":382},{"text":389,"config":390},"Code Suggestions (AI)",{"href":391,"dataGaName":389,"dataGaLocation":382},"/solutions/code-suggestions/",{"text":108,"config":393},{"href":110,"dataGaName":108,"dataGaLocation":382},{"text":395,"config":396},"GitLab on AWS",{"href":397,"dataGaName":395,"dataGaLocation":382},"/partners/technology-partners/aws/",{"text":399,"config":400},"GitLab on Google Cloud",{"href":401,"dataGaName":399,"dataGaLocation":382},"/partners/technology-partners/google-cloud-platform/",{"text":403,"config":404},"Why GitLab?",{"href":69,"dataGaName":403,"dataGaLocation":382},{"freeTrial":406,"mobileIcon":411,"desktopIcon":416,"secondaryButton":419},{"text":407,"config":408},"Start free 
trial",{"href":409,"dataGaName":32,"dataGaLocation":410},"https://gitlab.com/-/trials/new/","nav",{"altText":412,"config":413},"Gitlab Icon",{"src":414,"dataGaName":415,"dataGaLocation":410},"/images/brand/gitlab-logo-tanuki.svg","gitlab icon",{"altText":412,"config":417},{"src":418,"dataGaName":415,"dataGaLocation":410},"/images/brand/gitlab-logo-type.svg",{"text":420,"config":421},"Get Started",{"href":422,"dataGaName":423,"dataGaLocation":410},"https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/compare/gitlab-vs-github/","get started",{"freeTrial":425,"mobileIcon":429,"desktopIcon":431},{"text":426,"config":427},"Learn more about GitLab Duo",{"href":61,"dataGaName":428,"dataGaLocation":410},"gitlab duo",{"altText":412,"config":430},{"src":414,"dataGaName":415,"dataGaLocation":410},{"altText":412,"config":432},{"src":418,"dataGaName":415,"dataGaLocation":410},"content:shared:en-us:main-navigation.yml","Main Navigation","shared/en-us/main-navigation.yml","shared/en-us/main-navigation",{"_path":438,"_dir":21,"_draft":6,"_partial":6,"_locale":7,"title":439,"button":440,"config":444,"_id":446,"_type":13,"_source":15,"_file":447,"_stem":448,"_extension":18},"/shared/en-us/banner","GitLab Duo Agent Platform is now in public beta!",{"text":67,"config":441},{"href":442,"dataGaName":443,"dataGaLocation":27},"/gitlab-duo/agent-platform/","duo banner",{"layout":445},"release","content:shared:en-us:banner.yml","shared/en-us/banner.yml","shared/en-us/banner",{"_path":450,"_dir":21,"_draft":6,"_partial":6,"_locale":7,"data":451,"_id":656,"_type":13,"title":657,"_source":15,"_file":658,"_stem":659,"_extension":18},"/shared/en-us/main-footer",{"text":452,"source":453,"edit":459,"contribute":464,"config":469,"items":474,"minimal":648},"Git is a trademark of Software Freedom Conservancy and our use of 'GitLab' is under license",{"text":454,"config":455},"View page 
source",{"href":456,"dataGaName":457,"dataGaLocation":458},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/","page source","footer",{"text":460,"config":461},"Edit this page",{"href":462,"dataGaName":463,"dataGaLocation":458},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/content/","web ide",{"text":465,"config":466},"Please contribute",{"href":467,"dataGaName":468,"dataGaLocation":458},"https://gitlab.com/gitlab-com/marketing/digital-experience/about-gitlab-com/-/blob/main/CONTRIBUTING.md/","please contribute",{"twitter":470,"facebook":471,"youtube":472,"linkedin":473},"https://twitter.com/gitlab","https://www.facebook.com/gitlab","https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg","https://www.linkedin.com/company/gitlab-com",[475,498,555,584,618],{"title":45,"links":476,"subMenu":481},[477],{"text":478,"config":479},"DevSecOps platform",{"href":54,"dataGaName":480,"dataGaLocation":458},"devsecops platform",[482],{"title":185,"links":483},[484,488,493],{"text":485,"config":486},"View plans",{"href":187,"dataGaName":487,"dataGaLocation":458},"view plans",{"text":489,"config":490},"Why Premium?",{"href":491,"dataGaName":492,"dataGaLocation":458},"/pricing/premium/","why premium",{"text":494,"config":495},"Why Ultimate?",{"href":496,"dataGaName":497,"dataGaLocation":458},"/pricing/ultimate/","why ultimate",{"title":499,"links":500},"Solutions",[501,506,509,511,516,521,525,528,532,537,539,542,545,550],{"text":502,"config":503},"Digital transformation",{"href":504,"dataGaName":505,"dataGaLocation":458},"/topics/digital-transformation/","digital transformation",{"text":133,"config":507},{"href":128,"dataGaName":508,"dataGaLocation":458},"security & compliance",{"text":122,"config":510},{"href":104,"dataGaName":105,"dataGaLocation":458},{"text":512,"config":513},"Agile development",{"href":514,"dataGaName":515,"dataGaLocation":458},"/solutions/agile-delivery/","agile 
delivery",{"text":517,"config":518},"Cloud transformation",{"href":519,"dataGaName":520,"dataGaLocation":458},"/topics/cloud-native/","cloud transformation",{"text":522,"config":523},"SCM",{"href":118,"dataGaName":524,"dataGaLocation":458},"source code management",{"text":108,"config":526},{"href":110,"dataGaName":527,"dataGaLocation":458},"continuous integration & delivery",{"text":529,"config":530},"Value stream management",{"href":160,"dataGaName":531,"dataGaLocation":458},"value stream management",{"text":533,"config":534},"GitOps",{"href":535,"dataGaName":536,"dataGaLocation":458},"/solutions/gitops/","gitops",{"text":170,"config":538},{"href":172,"dataGaName":173,"dataGaLocation":458},{"text":540,"config":541},"Small business",{"href":177,"dataGaName":178,"dataGaLocation":458},{"text":543,"config":544},"Public sector",{"href":182,"dataGaName":183,"dataGaLocation":458},{"text":546,"config":547},"Education",{"href":548,"dataGaName":549,"dataGaLocation":458},"/solutions/education/","education",{"text":551,"config":552},"Financial services",{"href":553,"dataGaName":554,"dataGaLocation":458},"/solutions/finance/","financial 
services",{"title":190,"links":556},[557,559,561,563,566,568,570,572,574,576,578,580,582],{"text":202,"config":558},{"href":204,"dataGaName":205,"dataGaLocation":458},{"text":207,"config":560},{"href":209,"dataGaName":210,"dataGaLocation":458},{"text":212,"config":562},{"href":214,"dataGaName":215,"dataGaLocation":458},{"text":217,"config":564},{"href":219,"dataGaName":565,"dataGaLocation":458},"docs",{"text":240,"config":567},{"href":242,"dataGaName":243,"dataGaLocation":458},{"text":235,"config":569},{"href":237,"dataGaName":238,"dataGaLocation":458},{"text":245,"config":571},{"href":247,"dataGaName":248,"dataGaLocation":458},{"text":258,"config":573},{"href":260,"dataGaName":261,"dataGaLocation":458},{"text":250,"config":575},{"href":252,"dataGaName":253,"dataGaLocation":458},{"text":263,"config":577},{"href":265,"dataGaName":266,"dataGaLocation":458},{"text":268,"config":579},{"href":270,"dataGaName":271,"dataGaLocation":458},{"text":273,"config":581},{"href":275,"dataGaName":276,"dataGaLocation":458},{"text":278,"config":583},{"href":280,"dataGaName":281,"dataGaLocation":458},{"title":296,"links":585},[586,588,590,592,594,596,598,602,607,609,611,613],{"text":303,"config":587},{"href":305,"dataGaName":298,"dataGaLocation":458},{"text":308,"config":589},{"href":310,"dataGaName":311,"dataGaLocation":458},{"text":316,"config":591},{"href":318,"dataGaName":319,"dataGaLocation":458},{"text":321,"config":593},{"href":323,"dataGaName":324,"dataGaLocation":458},{"text":326,"config":595},{"href":328,"dataGaName":329,"dataGaLocation":458},{"text":331,"config":597},{"href":333,"dataGaName":334,"dataGaLocation":458},{"text":599,"config":600},"Sustainability",{"href":601,"dataGaName":599,"dataGaLocation":458},"/sustainability/",{"text":603,"config":604},"Diversity, inclusion and belonging (DIB)",{"href":605,"dataGaName":606,"dataGaLocation":458},"/diversity-inclusion-belonging/","Diversity, inclusion and 
belonging",{"text":336,"config":608},{"href":338,"dataGaName":339,"dataGaLocation":458},{"text":346,"config":610},{"href":348,"dataGaName":349,"dataGaLocation":458},{"text":351,"config":612},{"href":353,"dataGaName":354,"dataGaLocation":458},{"text":614,"config":615},"Modern Slavery Transparency Statement",{"href":616,"dataGaName":617,"dataGaLocation":458},"https://handbook.gitlab.com/handbook/legal/modern-slavery-act-transparency-statement/","modern slavery transparency statement",{"title":619,"links":620},"Contact Us",[621,624,626,628,633,638,643],{"text":622,"config":623},"Contact an expert",{"href":36,"dataGaName":37,"dataGaLocation":458},{"text":365,"config":625},{"href":367,"dataGaName":368,"dataGaLocation":458},{"text":370,"config":627},{"href":372,"dataGaName":373,"dataGaLocation":458},{"text":629,"config":630},"Status",{"href":631,"dataGaName":632,"dataGaLocation":458},"https://status.gitlab.com/","status",{"text":634,"config":635},"Terms of use",{"href":636,"dataGaName":637,"dataGaLocation":458},"/terms/","terms of use",{"text":639,"config":640},"Privacy statement",{"href":641,"dataGaName":642,"dataGaLocation":458},"/privacy/","privacy statement",{"text":644,"config":645},"Cookie preferences",{"dataGaName":646,"dataGaLocation":458,"id":647,"isOneTrustButton":90},"cookie preferences","ot-sdk-btn",{"items":649},[650,652,654],{"text":634,"config":651},{"href":636,"dataGaName":637,"dataGaLocation":458},{"text":639,"config":653},{"href":641,"dataGaName":642,"dataGaLocation":458},{"text":644,"config":655},{"dataGaName":646,"dataGaLocation":458,"id":647,"isOneTrustButton":90},"content:shared:en-us:main-footer.yml","Main 
Footer","shared/en-us/main-footer.yml","shared/en-us/main-footer",{"allPosts":661,"featuredPost":4642,"totalPagesCount":4661,"initialPosts":4662},[662,689,711,735,756,780,801,820,842,863,884,904,926,947,967,987,1007,1027,1048,1069,1091,1112,1132,1153,1172,1190,1211,1232,1255,1276,1297,1317,1337,1358,1379,1399,1419,1439,1459,1480,1499,1519,1539,1559,1581,1600,1622,1642,1662,1682,1701,1721,1740,1757,1778,1797,1815,1834,1851,1872,1892,1911,1929,1948,1967,1985,2004,2024,2043,2062,2081,2100,2119,2138,2159,2179,2200,2221,2241,2262,2283,2302,2322,2341,2359,2378,2398,2417,2437,2456,2476,2496,2514,2535,2557,2576,2596,2616,2636,2656,2677,2696,2716,2737,2756,2775,2794,2813,2833,2852,2871,2890,2909,2929,2947,2967,2986,3005,3025,3044,3066,3085,3104,3124,3144,3163,3183,3203,3221,3239,3258,3275,3294,3313,3332,3352,3371,3391,3410,3430,3450,3472,3491,3510,3529,3548,3567,3585,3605,3622,3641,3661,3678,3697,3717,3736,3755,3774,3793,3812,3832,3852,3871,3889,3906,3926,3943,3961,3980,3999,4017,4036,4056,4075,4095,4114,4133,4152,4171,4190,4209,4228,4247,4265,4285,4304,4322,4341,4359,4377,4398,4416,4435,4453,4471,4490,4510,4529,4547,4565,4583,4602,4623],{"_path":663,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":664,"content":672,"config":682,"_id":685,"_type":13,"title":686,"_source":15,"_file":687,"_stem":688,"_extension":18},"/en-us/blog/4-ways-to-accelerate-embedded-development-with-gitlab",{"title":665,"description":666,"ogTitle":665,"ogDescription":666,"noIndex":6,"ogImage":667,"ogUrl":668,"ogSiteName":669,"ogType":670,"canonicalUrls":668,"schema":671},"4 ways to accelerate embedded development with GitLab","Learn how automated hardware testing, standard builds, collaborative workflows, and integrated compliance eliminate bottlenecks in firmware 
development.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659756/Blog/Hero%20Images/REFERENCE_-_display_preview_for_blog_images.png","https://about.gitlab.com/blog/4-ways-to-accelerate-embedded-development-with-gitlab","https://about.gitlab.com","article","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"4 ways to accelerate embedded development with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matt DeLaney\"},{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2025-06-05\"\n      }",{"title":665,"description":666,"authors":673,"heroImage":667,"date":676,"body":677,"category":678,"tags":679},[674,675],"Matt DeLaney","Darwin Sanoy","2025-06-05","Software in embedded systems is no longer just a part number — it's a critical differentiator. This shift has led to enormous complexity in the firmware running in our cars, airplanes, and industrial machines. The number of lines of code in the average car is expected to reach [650 million](https://www.statista.com/statistics/1370978/automotive-software-average-lines-of-codes-per-vehicle-globally/) by the end of 2025, up from 200 million just five years ago. In aerospace systems, the complexity of embedded software has nearly [doubled every four years](https://www.mckinsey.com/industries/aerospace-and-defense/our-insights/debugging-the-software-talent-gap-in-aerospace-and-defense) for the last several decades. \n\nTraditional embedded development approaches cannot effectively handle the software challenges of modern machines. 
This shortcoming slows engineers down, in part, by exacerbating challenges such as: \n\n* [Hardware testing bottlenecks](#challenge-1-hardware-testing-bottlenecks) \n* [Inconsistent build environments](#challenge-2-inconsistent-build-environments)\n* [Siloed development practices](#challenge-3-siloed-development-practices)\n* [Manual functional safety compliance processes](#challenge-4-manual-functional-safety-compliance-processes)\n\nEmbedded developers need a new approach to deal with the rapid increase in code. In this article, we’ll explain four ways you can use the GitLab AI-native DevSecOps platform to shorten feedback loops, work collaboratively and iteratively, and streamline compliance.\n\n## Challenge 1: Hardware testing bottlenecks\n\nUnlike enterprise software that can run on virtually any cloud server, embedded automotive software must be tested on specialized hardware that precisely matches production environments. Traditional hardware-in-the-loop (HIL) testing processes often follow this pattern:\n\n1. Developers write code for an embedded system (e.g., an electronic control unit)  \n2. They request access to limited, expensive hardware test benches (costing $500,000-$10M each)  \n3. They wait days or weeks for their scheduled access window  \n4. They manually deploy and test their code on physical hardware at their desks  \n5. They document results, pass the hardware to the next developer, and go to the back of the hardware testing queue\n\nThis process is extremely inefficient. Embedded developers may finish writing their code today and wait weeks to test it on a hardware target. By then, they've moved on to other tasks. This context switching drains productivity. Not only that, developers may wait weeks to learn they had a simple math error in their code. 
\n\n### Solution: Automated hardware allocation and continuous integration\n\nYou can streamline hardware testing through automation using the [GitLab On-Premises Device Cloud](https://gitlab.com/guided-explorations/embedded/ci-components/device-cloud), a CI/CD component. This lets you automate the orchestration of scarce hardware resources, turning a manual, time-intensive process into a streamlined, continuous workflow.\n\nThe On-Premises Device Cloud:\n\n1. Creates pools of shared hardware resources  \n2. Automatically — and exclusively — allocates hardware to a developer’s hardware testing pipeline tasks based on availability  \n3. Deploys and executes tests without manual intervention  \n4. Collects and reports results through integrated pipelines  \n5. Automatically deallocates hardware back into the “available” pool\n\nAfter submitting code, you’ll receive results in hours instead of days, often without ever physically touching the test hardware.\n\nWatch this video for an introduction to the GitLab On-Premises Device Cloud CI/CD Component to orchestrate the remote allocation of shared hardware for HIL:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ltr2CIM9Zag?si=NOij3t1YYz4zKajC\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nYou can also adopt multi-pronged testing strategies that balance speed and quality. 
Bring the following embedded test patterns and environments into automated GitLab CI pipelines:\n\n* **Software-in-the-loop (SIL):** Testing on virtual hardware simulators for quicker initial feedback  \n* **Processor-in-the-loop (PIL):** Testing on representative processor hardware for faster feedback at a lower cost  \n* **Hardware-in-the-loop (HIL):** Testing on full production-equivalent hardware and test benches for late-stage verification\n\nBy automating the orchestration of these tests within CI pipelines, you’ll be able to identify issues earlier, iterate faster, and accelerate time to market.\n\n## Challenge 2: Inconsistent build environments\n\nAnother significant challenge in embedded development is build environment inconsistency. Embedded developers often manually execute builds on their local machines with varying configurations, compiler versions, and dependencies. Then they’ll paste the binaries from their local build to a shared codebase.\n\nThis approach creates several problems:\n\n* **Inconsistent outputs:** Builds for the same source code produce different results on different machines  \n* **\"Works on my machine\" syndrome:** Code that builds locally fails in shared environments  \n* **Poor traceability:** Limited audit trail of who built what and when  \n* **Knowledge silos:** Build expertise becomes concentrated in a few individuals\n\nThis approach can lead to errors, bottlenecks, and costly delays. \n\n### Solution: Standardized build automation\n\nYou can address these challenges by implementing standardized build automation within CI/CD pipelines in GitLab. This approach creates consistent, repeatable, container-based build environments that eliminate machine-specific variations. 
Through the use of special Embedded Gateway Runner provisioning scripts, containers can interface with hardware for flashing and port monitoring for automated testing.\n\nKey elements of this solution include:\n\n* **Lifecycle managed environments:** Define complex embedded simulation environments as code; automatically deploy environments for testing and destroy them afterward  \n* **Containerization:** Use Docker containers to ensure identical build environments  \n* **Automated dependency management:** Control and version all dependencies  \n* **Central build execution:** Run builds on shared infrastructure rather than local machines\n\n> Follow this tutorial to learn [how to automate embedded software builds within a GitLab CI pipeline](https://gitlab.com/guided-explorations/embedded/workshops/embedded-devops-workshop-refactoring-to-ci/-/blob/main/TUTORIAL2.md%20).\n\nBy standardizing and automating the build process, you can ensure that every build follows the same steps with the same dependencies, producing consistent outputs regardless of who initiated it. This not only improves quality but also democratizes the build process, enabling more team members to participate without specialized knowledge.\n\n## Challenge 3: Siloed development practices\n\nEnterprise development teams have widely adopted collaborative practices such as DevOps, underpinned by shared source code management (SCM) and continuous integration/continuous delivery (CI/CD) systems. Embedded developers, on the other hand, have historically worked alone at their desks. There are valid technical reasons for this. \n\nFor example, consider hardware virtualization, which is a key enabler of DevOps automation. The industry has been slower to virtualize the massive range of specialized processors and boards used in embedded systems. This is due in large part to the difficulties of virtualizing production real-time systems and the associated lack of economic incentives. 
Compare that to cloud virtualization which has been commoditized and benefited enterprise SaaS development for over a decade.\n\nMany providers are now embracing virtualization-first for the sake of speeding up embedded development. If teams fail to adopt virtual testing options, however, their silos will remain and negatively impact the business through: \n\n* **Knowledge fragmentation**: Critical insights remain scattered across individuals and teams  \n* **Redundant development**: Multiple teams solve identical problems, creating inconsistencies  \n* **Late-stage discovery during big-bang integrations**: Problems are found late in the process when multiple developers integrate their code at once, when errors are more costly to fix  \n* **Stifled innovation**: Solutions from one domain rarely influence others, hampering the development of new product ideas\n\n### Solution: Collaborative engineering through a unified platform\n\nAn important step in breaking down these silos is to standardize embedded development around GitLab’s unified DevSecOps platform. In this regard, GitLab is aligned with the shift of embedded systems toward more consolidated, shared platforms on embedded devices. GitLab enables:\n\n* **Shared visibility:** Make all code, Issues, and documentation visible across teams  \n* **Collaborative workflows:** Enable peer review and knowledge sharing through merge requests  \n* **Centralized knowledge:** Maintain a single source of truth for all development artifacts  \n* **Asynchronous collaboration:** Allow teams to work together across different locations and time zones\n\nHuman-AI agent collaboration is a fundamental ingredient to fueling the customer-facing innovations that digital natives and established embedded brands desire. GitLab enables human-AI collaboration as well. By creating transparency across the development lifecycle, GitLab changes embedded development from an isolated activity to a collaborative practice. 
Engineers can see each other's work in progress, learn from collective experiences, and build upon shared solutions.\n\nWatch this presentation from Embedded World Germany 2025, which explains the power of embedded developers collaborating and sharing “work in progress”. The demo portion from 24:42 to 36:51 shows how to integrate HIL into a GitLab CI pipeline and enable collaborative development.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/F_rlOyq0hzc?si=eF4alDY6HK98uZPj\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nPerhaps most importantly, by achieving greater collaboration through DevSecOps, teams can unlock embedded systems innovations that would otherwise remain hidden. Indeed, collaboration fuels innovation. [One study](https://www.sciencedirect.com/science/article/abs/pii/S0749597800928887), for example, found that group brainstorming, when properly structured, can lead to more innovative and creative outcomes than individuals working alone. Collaborative development is crucial in the race to develop software-defined products. \n\n## Challenge 4: Manual functional safety compliance processes\n\nEmbedded systems in the automotive and aerospace industries must comply with rigorous functional safety standards, including ISO 26262, MISRA C/C++, DO-178C, and DO-254. Traditional compliance approaches involve manual reviews, extensive documentation, and separate verification activities that occur late in the development cycle. This often creates security review bottlenecks. When specialized embedded security and code quality scanners detect vulnerabilities in a developer’s code, the scan issue gets added to a pile of other issues that haven’t been resolved. Developers can’t integrate their code, and security personnel need to wade through a backlog of code violations. This creates delays and makes compliance more difficult. 
\n\nSome of the challenges can best be summed up as: \n\n* **Late-stage compliance issues**: Problems discovered after development is complete  \n* **Documentation burden**: Extensive manual effort to create and maintain compliance evidence  \n* **Process bottlenecks**: Serial compliance activities that block development progress  \n* **Expertise dependence**: Reliance on limited specialists for compliance activities\n\nAs a result, teams often need to choose between velocity and compliance — a precarious trade-off in safety-critical systems.\n\n### Solution: Automated functional safety compliance workflow building blocks\n\nRather than treating security and compliance as post-development verification activities, you can codify compliance requirements and enforce them automatically through [customizable frameworks in GitLab](https://about.gitlab.com/blog/introducing-custom-compliance-frameworks-in-gitlab/). To do this for functional safety standards, in particular, you can integrate GitLab with specialized embedded tools, which provide the depth of firmware scanning required by functional safety standards. Meanwhile, GitLab provides automated compliance checks, full audit trails, and merge request gating — all features needed to support a robust continuous compliance program. \n\nThis integrated approach includes:\n\n* **Compliance-as-code:** Define compliance requirements as automated checks  \n* **Integrated specialized tools:** Connect tools like CodeSonar into the DevSecOps platform for automotive-specific compliance  \n* **Continuous compliance verification:** Verify requirements throughout development  \n* **Automated evidence collection:** Gather compliance artifacts as a by-product of development\n\nWatch this video to learn how to use Custom Compliance Frameworks in GitLab to create your own compliance policies. 
You can create compliance policies related to any standard (e.g., ISO 26262) and automatically enforce those policies in GitLab.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/S-FQjzSyVJw?si=0UdtGNuugLPG0SLL\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nBy shifting compliance left and embedding it within normal development workflows, you can maintain safety standards without sacrificing velocity. Automated checks catch issues early when they're easier and less expensive to fix, while continuous evidence collection reduces the documentation burden.\n\n## Realizing the power of embedded DevOps\n\nEmbedded development is changing fast. Teams that remain stuck in manual processes and isolated workflows will find themselves increasingly left behind, while those that embrace automated, collaborative practices will define the future of software-defined smart systems.\n\nExplore our [Embedded DevOps Workshop](https://gitlab.com/guided-explorations/embedded/workshops/embedded-devops-workshop-refactoring-to-ci) to start automating embedded development workflows with GitLab, or [watch this presentation from GitLab's Field Chief Cloud Architect](https://content.gitlab.com/viewer/0a35252831bd130f879b0725738f70ed) to learn how leading organizations are bringing hardware-in-the-loop testing into continuous integration workflows to accelerate embedded development.\n\n## Learn more\n\n- [Why GitLab Premium with Duo for embedded systems development?](https://content.gitlab.com/viewer/438451cba726dd017da7b95fd0fb1b59)\n- [Why GitLab Ultimate with Duo for embedded systems development?](https://content.gitlab.com/viewer/87f5104c26720e2c0d73a6b377522a44)\n- [More embedded development systems presentations from GitLab](https://content.gitlab.com/viewer/e59c40099d5e3c8f9307afb27c4a923f)","product",[478,9,680,681],"features","embedded 
DevOps",{"slug":683,"featured":6,"template":684},"4-ways-to-accelerate-embedded-development-with-gitlab","BlogPost","content:en-us:blog:4-ways-to-accelerate-embedded-development-with-gitlab.yml","4 Ways To Accelerate Embedded Development With Gitlab","en-us/blog/4-ways-to-accelerate-embedded-development-with-gitlab.yml","en-us/blog/4-ways-to-accelerate-embedded-development-with-gitlab",{"_path":690,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":691,"content":697,"config":705,"_id":707,"_type":13,"title":708,"_source":15,"_file":709,"_stem":710,"_extension":18},"/en-us/blog/5-videos-and-interactive-tours-to-learn-gitlab-duo-fast",{"title":692,"description":693,"ogTitle":692,"ogDescription":693,"noIndex":6,"ogImage":694,"ogUrl":695,"ogSiteName":669,"ogType":670,"canonicalUrls":695,"schema":696},"5 videos and interactive tours to learn GitLab Duo fast","Get to know GitLab Duo's capabilities and benefits, and use these visual learning tools to understand how to incorporate AI throughout your software development lifecycle.\n","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659856/Blog/Hero%20Images/blog-hero-banner-1-0178-820x470-fy25.png","https://about.gitlab.com/blog/5-videos-and-interactive-tours-to-learn-gitlab-duo-fast","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"5 videos and interactive tours to learn GitLab Duo fast\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2024-08-28\",\n      }",{"title":692,"description":693,"authors":698,"heroImage":694,"date":700,"body":701,"category":702,"tags":703},[699],"Cesar Saavedra","2024-08-28","GitLab Duo is a suite of AI-powered features designed to assist DevSecOps teams throughout the software development lifecycle. 
Integrated seamlessly into the GitLab platform, GitLab Duo leverages artificial intelligence to enhance productivity, improve code quality, and streamline various development and security processes. This article introduces you to GitLab Duo's capabilities and benefits, and lists five videos and interactive tours to help you learn how to incorporate this AI powerhouse into your own workflow.\n\nIn this article:\n- [GitLab Duo features](#gitlab-duo-features)\n- [Benefits of GitLab Duo](#benefits-of-gitlab-duo)\n- [5 videos and interactive tours](#5-videos-and-interactive-tours-to-learn-gitlab-duo)\n\n## GitLab Duo features\n\n[GitLab Duo](https://about.gitlab.com/gitlab-duo/) offers a wide range of AI-powered capabilities to help you ship more secure software faster and deliver better results for your customers.\n\n### Feature development\n\n- **Code Suggestions:** Helps developers write code more efficiently by generating code and showing suggestions as they type.\n\n- **Chat:** A conversational interface that answers questions and assists with various tasks throughout the development process.\n\n- **Code Explanation:** Helps understand selected code by providing clear explanations.\n\n- **GitLab Duo for the CLI:** Helps discover or recall Git commands when and where you need them.\n\n-  **Merge Commit Message Generation:** Helps merge more quickly by generating meaningful commit messages.\n\n- **Test Generation:** Helps catch bugs early by automatically generating tests for selected code.\n\n### Securing applications\n\n- **Vulnerability Explanation:** Shows information about security vulnerabilities in code and explains how to fix them.\n\n- **Vulnerability Resolution:** Helps resolve a vulnerability by generating a merge request that addresses it. 
(Beta)\n\n### Facilitating collaboration\n\n- **AI Impact Dashboard:** Measures the effectiveness and impact of AI on software development lifecycle metrics.\n\n- **Code Review Summary:** Makes merge request handover to reviewers easier by summarizing all the comments in a merge request review. (Experimental)\n\n- **Discussion Summary:** Helps everyone get up to speed by summarizing lengthy conversations in an issue. (Beta)\n\n- **Issue Description Generation:** Helps populate an issue quickly by generating a more in-depth description based on a short summary. (Experimental)\n\n- **Merge Request Summary:** Helps populate a merge request more quickly by generating a description based on the code changes. (Beta)\n\n- **Product Analytics:** Processes and responds to questions about your application's usage data.\n\n### Advanced troubleshooting\n\n- **Root Cause Analysis:** Helps determine the cause of CI/CD job failures by analyzing logs.\n\nThese components work together to provide comprehensive AI-assisted support throughout the software development lifecycle.\n\n## Benefits of GitLab Duo\n\nGitLab Duo offers numerous benefits to development teams and organizations. By integrating AI-powered assistance throughout the development lifecycle, it helps increase productivity, improve code quality, and enhance security. \nDevelopers can write code faster, understand complex codebases more easily, and catch potential issues earlier in the development process.\n\nGitLab Duo also helps streamline collaboration, speed up code reviews, and provide valuable insights into the impact of AI on ROI metrics. These benefits contribute to faster delivery of high-quality, secure software.\n\n## 5 videos and interactive tours to learn GitLab Duo\n\nTo help you get acquainted with GitLab Duo and its capabilities quickly, we've compiled a list of five videos and interactive tours. 
These visual learning tools provide an in-depth look at an array of GitLab Duo features and demonstrate how they can be integrated into your development workflow.\n\n__1. GitLab Duo Overview__\n\nThis comprehensive video introduces the core concepts of GitLab Duo and showcases its integration within the GitLab platform.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/o2xmLTV1y0I?si=90yPCHS_x2zSBAqe\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n__2. Code Suggestions in Action__\n\nAn interactive tour demonstrating how GitLab Duo Code Suggestions works in real-time, helping developers write code more efficiently.\n\n\u003Ca href=\"https://gitlab.navattic.com/code-suggestions\">\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175911/Blog/b5gdnls7jdyrpeyjby5j.png\" alt=\"GitLab Duo Code Suggestions cover image\">\u003C/a>\n\n__3. Vulnerability Resolution Walkthrough__\n\nThis video guide takes you through the process of using GitLab Duo to understand and resolve security vulnerabilities in your code.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/VJmsw_C125E?si=cUmRiQNJbrv5Yd9D\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n__4. Chat Demo__\n\nAn interactive session showing how developers can leverage GitLab Duo Chat to get answers, generate code, and solve problems throughout the development process.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"\nhttps://www.youtube.com/embed/RJezT5_V6dI?si=QomHCGUKstnAwplM\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n__5. 
AI Impact Dashboard Tutorial__\n\nA detailed look at how to use and interpret the AI Impact Dashboard to measure the effectiveness of GitLab Duo in your development processes.\n\n\u003Ca href=\"https://gitlab.navattic.com/ai-impact\">\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175921/Blog/hn7gflmqswrjb33unuja.png\" alt=\"GitLab Duo AI Impact Dashboard cover image\">\u003C/a>\u003C/p>\n\n## Get started with GitLab Duo today\n\nThese videos and interactive tours offer practical insights into how GitLab Duo can enhance your development workflow. By exploring these resources, you'll gain a better understanding of how to leverage AI-powered assistance to improve productivity, code quality, and security in your projects.\n\n> #### Become a GitLab Duo expert: [Start your free, 60-day trial today!](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?type=free-trial&toggle=gitlab-duo-pro)\n\n## Read more\n- [10 best practices for using AI-powered GitLab Duo Chat](https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat/)\n- [Refactor code into modern languages with AI-powered GitLab Duo](https://about.gitlab.com/blog/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo/)\n- [Developing GitLab Duo blog series](https://about.gitlab.com/blog/developing-gitlab-duo-series/)\n- [Mastering GitLab admin tasks with GitLab Duo Chat](https://about.gitlab.com/blog/mastering-gitlab-admin-tasks-with-gitlab-duo-chat/)","ai-ml",[704,680,9,678],"AI/ML",{"slug":706,"featured":6,"template":684},"5-videos-and-interactive-tours-to-learn-gitlab-duo-fast","content:en-us:blog:5-videos-and-interactive-tours-to-learn-gitlab-duo-fast.yml","5 Videos And Interactive Tours To Learn Gitlab Duo 
Fast","en-us/blog/5-videos-and-interactive-tours-to-learn-gitlab-duo-fast.yml","en-us/blog/5-videos-and-interactive-tours-to-learn-gitlab-duo-fast",{"_path":712,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":713,"content":719,"config":729,"_id":731,"_type":13,"title":732,"_source":15,"_file":733,"_stem":734,"_extension":18},"/en-us/blog/a-beginners-guide-to-the-git-reftable-format",{"title":714,"description":715,"ogTitle":714,"ogDescription":715,"noIndex":6,"ogImage":716,"ogUrl":717,"ogSiteName":669,"ogType":670,"canonicalUrls":717,"schema":718},"A beginner's guide to the Git reftable format","In Git 2.45.0, GitLab upstreamed the reftable backend to Git, which completely changes how references are stored. Get an in-depth look at the inner workings of this new format.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664595/Blog/Hero%20Images/blog-image-template-1800x945__9_.png","https://about.gitlab.com/blog/a-beginners-guide-to-the-git-reftable-format","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A beginner's guide to the Git reftable format\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Patrick Steinhardt\"}],\n        \"datePublished\": \"2024-05-30\",\n      }",{"title":714,"description":715,"authors":720,"heroImage":716,"date":722,"body":723,"category":724,"tags":725},[721],"Patrick Steinhardt","2024-05-30","Until recently, the \"files\" format was the only way for Git to store references. With the [release of Git 2.45.0](https://about.gitlab.com/blog/whats-new-in-git-2-45-0/), Git can now store references in a \"reftable\" format. This new format is a binary format that is quite a bit more complex, but that complexity allows it to address several shortcomings of the \"files\" format. 
The design goals for the \"reftable\" format include:\n\n- Make the lookup of a single reference and iteration through ranges of references as efficient and fast as possible.\n- Support for consistent reads of references so that Git never reads an in-between state when an update to multiple references has been applied only partially.\n- Support for atomic writes such that updating multiple references can be implemented as an all-or-nothing operation.\n- Efficient storage of both refs and the reflog.\n\nIn this article, we will go under the hood of the \"reftable\" format to see exactly how it works.\n\n## How Git stores references\n\nBefore we dive into the details of the \"reftable\" format, let's quickly recap how Git has historically stored references. If you are already familiar with this, you can skip this section.\n\nA Git repository keeps track of two important data structures:\n\n- [Objects](https://git-scm.com/book/en/v2/Git-Internals-Git-Objects), which contain the actual data of your repository. This includes commits, the directory tree structure, and the blobs that contain your source code. Objects point to each other, forming an object graph. Furthermore, each object has an object ID that uniquely identifies the object.\n\n- References, such as branches and tags, which are pointers into the object graph so that you can give objects names that are easier to remember and keep track of different tracks of your development history. For example, a repository may contain a `main` branch, which is a reference named `refs/heads/main` that points to a specific commit.\n\nReferences are stored in the reference database. Until Git 2.45.0, there was only the \"files\" database format. 
In this format, every reference is stored as a normal file that contains either one of the following:\n\n- A regular reference that contains the object ID of the commit it points to.\n- A symbolic reference that contains the name of another reference, similar to how a symbolic link points to another file.\n\nAt regular intervals, these references get packed into a single `packed-refs` file to make lookups more efficient.\n\nThe following examples should give an idea of how the \"files\" format operates:\n\n```shell\n$ git init .\n$ git commit --allow-empty --message \"Initial commit\"\n[main (root-commit) 6917c17] Initial commit\n\n# HEAD is a symbolic reference pointing to refs/heads/main.\n$ cat .git/HEAD\nref: refs/heads/main\n\n# refs/heads/main is a regular reference pointing to a commit.\n$ cat .git/refs/heads/main\n6917c178cfc3c50215a82cf959204e9934af24c8\n\n# git-pack-refs(1) packs these references into the packed-refs file.\n$ git pack-refs --all\n$ cat .git/packed-refs\n# pack-refs with: peeled fully-peeled sorted\n6917c178cfc3c50215a82cf959204e9934af24c8 refs/heads/main\n```\n\n## High-level structure of reftables\n\nAssuming that you've got Git 2.45.0 or newer installed, you can create a repository with the \"reftable\" format by using the `--ref-format=reftable` switch:\n\n```shell\n$ git init --ref-format=reftable .\nInitialized empty Git repository in /tmp/repo/.git/\n$ git rev-parse --show-ref-format\nreftable\n\n# Irrelevant files have been removed for ease of understanding.\n$ tree .git\n.git\n├── config\n├── HEAD\n├── index\n├── objects\n├── refs\n│   └── heads\n└── reftable\n\t├── 0x000000000001-0x000000000002-40a482a9.ref\n\t└── tables.list\n\n4 directories, 6 files\n```\n\nFirst, looking at the repository configuration, you will see it has an `extension.refstorage` key:\n\n```shell\n$ cat .git/config\n[core]\n    repositoryformatversion = 1\n    filemode = true\n    bare = false\n    logallrefupdates = true\n[extensions]\n    refstorage = 
reftable\n```\n\nThis configuration indicates to Git that the repository has been initialized with the \"reftable\" format and tells Git to use the \"reftable\" backend to access it.\n\nWeirdly enough, the repository still has a few files that look as if the \"files\" backend was in use:\n\n- `HEAD` would usually be a symbolic reference pointing to your currently checked-out branch. While it is not used by the \"reftable\" backend, it is required for Git clients to detect the directory as a Git repository. Therefore, when using the \"reftable\" format, `HEAD` is a stub with contents `ref: refs/heads/.invalid`.\n\n- `refs/heads` is a file with contents `this repository uses the reftable format`. Git clients that do not know about the \"reftable\" format would usually expect this path to be a directory. Consequently, creating this path as a file intentionally causes such older Git clients to fail if they tried to access the repository with the \"files\" backend.\n\nThe actual references are stored in the `reftable/` directory:\n\n```shell\n$ tree .git/reftable\n.git/reftable/\n├── 0x000000000001-0x000000000001-794bd722.ref\n└── tables.list\n\n$ cat .git/reftable/tables.list\n0x000000000001-0x000000000001-794bd722.ref\n```\n\nThere are two files here:\n\n- `0x000000000001-0x000000000001-794bd722.ref` is a table containing references and the reflog data in a binary format.\n\n- `tables.list` is, well, a list of tables. In the current state of the repository, the file contains a single line, which is the name of the table. 
This file tracks the current set of active tables in the \"reftable\" database and is updated whenever new tables get added to the repository.\n\nUpdating a reference creates a new table:\n\n```shell\n$ git commit --allow-empty --message \"Initial commit\"\n[main (root-commit) 1472a58] Initial commit\n\n$ tree .git/reftable\n.git/reftable/\n├── 0x000000000001-0x000000000002-eb87d12b.ref\n└── tables.list\n\n$ cat .git/reftable/tables.list\n0x000000000001-0x000000000002-eb87d12b.ref\n```\n\nAs you can see, the previous table has been replaced with a new one. Furthermore, the `tables.list` file has been updated to contain the new table.\n\n## The structure of a table\n\nAs mentioned earlier, the actual data of the reference database is contained in tables. Roughly speaking, a table is split up into multiple sections:\n\n- The \"header\" contains metadata about the table. Along with some other information, this includes the version of the format, the block size, and the hash function used by the repository (for example, SHA1 or SHA256).\n- The \"ref\" section contains your references. These records have a key that equals the reference name and point to either an object ID for regular references, or to another reference for symbolic references.\n- The \"obj\" section contains reverse mapping from object IDs to the references that point to those object IDs. These allow Git to efficiently look up which references point to a given object ID.\n- The \"log\" section contains your reflog entries. These records have a key that equals the reference name plus an index that represents the number of the log entry. 
Furthermore, they contain the old and new object IDs as well as the message for that reflog entry.\n- The \"footer\" contains offsets to the various sections.\n\n![long table with all the reftable sections](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675179/Blog/Content%20Images/Frame_1_-_Reftable_overview.svg)\n\nEach of the section types are structured in a similar manner. Sections contain a set of records that are sorted by each record's key. For example, when you have two ref records `refs/heads/aaaaa` and `refs/heads/bbb`, you have two ref records with these reference names as their respective keys, and `refs/heads/aaaaa` would come before `refs/heads/bbb`.\n\nFurthermore, each section is divided into blocks of a fixed length. This block length is encoded in the header and serves two purposes:\n\n- Given the start of the section as well as the block size, the reader implicitly knows where each of the blocks starts. This allows Git to easily seek into the middle of a section without reading preceding blocks, which enables binary searches over blocks to speed up the lookup of records.\n- It ensures that the reader knows how much data to read from the disk at a time. Consequently, the block size is by default set to 4KiB, which is the most common sector size for hard disks. The maximum block size is 16MB.\n\nWhen we peek into, for example, a \"ref\" section, it looks roughly like the following graphic. Note how its records are ordered lexicographically inside the blocks, but also across the blocks.\n\n![reference block uncompressed](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675179/Blog/Content%20Images/Frame_2_-_Ref_block_uncompressed.svg)\n\nEquipped with the current information, we can locate a record by using the following steps:\n\n1. Perform a binary search over the blocks by looking at the keys of their respective first records, identifying the block that must contain our record.\n\n2. 
Perform a linear search over the records in that block.\n\nBoth of these steps are still somewhat inefficient. If we have many blocks we may have to read logarithmically many of them in our binary search to find the desired one. And when blocks contain many records, we potentially have to read all of them during the linear search.\n\nThe \"reftable\" format has additional built-in mechanisms to address these performance concerns. We will touch on these over the next few sections.\n\n### Prefix compression\n\nAs you may have noticed, all of the record keys share the same prefix `refs/`. This is a common thing in Git:\n\n- All branches start with `refs/heads/`.\n- All tags start with `refs/tags/`.\n\nTherefore, we expect that subsequent records will most likely share a significant prefix of their key. This is a good opportunity to save some precious disk space. Because we know that most keys will share a common prefix, it makes sense to optimize for this.\n\nThe optimization uses prefix compression. Every record encodes a prefix length that tells the reader how many bytes to reuse from the key of the preceding record. If we have two records, `refs/heads/a` and `refs/heads/b`, the latter can be encoded by specifying a prefix length of 11 and then only storing the suffix `b`. The reader will then take the first 11 bytes of `refs/heads/a`, which is `refs/heads/`, and append the suffix `b` to it.\n\n![prefix compression](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675179/Blog/Content%20Images/Frame_3_-_Ref_block_prefix_compression.svg)\n\n### Restart points\n\nAs explained earlier, the best way to search for a reference in a block with our current understanding of the \"reftable\" format is to do a linear search. This is because records do not have a fixed length, so it is impossible for us to tell where records would start without scanning through the block from the beginning. 
Also, even if records were of fixed length, we would not be able to seek into the middle of a block because the prefix compression also requires us to read preceding records.\n\nDoing a linear search would be quite inefficient because blocks may contain hundreds or even thousands of records. To address this issue, the \"reftable\" format encodes so-called restart points into every block. Restart points are uncompressed records where the prefix compression is reset. Consequently, records at restart points always contain their full key and it becomes possible to directly seek to and read the record without having to read preceding records. These restart points are listed in the footer of each block.\n\nEquipped with this information, we can avoid performing a linear search over the block. Instead, we can now do a binary search over the restart points where we search for the first restart point with a key larger than the sought-after key. From there, it follows that the desired record must be located in the section spanning from the _preceding_ restart point to the identified one.\n\nThus, our initial procedure to look up a record (binary search for the block, linear search for the record) is now:\n\n1. Perform a binary search over the blocks, identifying the block that must contain our record.\n\n2. Perform a binary search over the restart points, identifying the sub-section of the block that must contain our record.\n\n3. Perform a linear search over the records in that sub-section.\n\n![Linear search for a record](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675179/Blog/Content%20Images/Frame_4_-_Restart_points.svg)\n\n### Indices\n\nWhile the search for records inside a block is now reasonably efficient, it's still inefficient to locate the block itself. A binary search may be reasonably performant when you have a couple of blocks, but repositories with millions of references may have hundreds or even thousands of blocks. 
Without any additional data structure, this would cause logarithmically many disk seeks on average.\n\nTo avoid this, every section may be followed by an index section that provides an efficient way to look up a block. Each index record holds the following information:\n\n- The location of the block that it is indexing.\n- The key of the last record of the block that it is indexing.\n\nWith three or less blocks, a binary search will always require, at most, two disk reads to find the desired target block. This is the same number of reads we would have to do with an index: one to read the index itself and one to read the desired block. Consequently, indices are only written when they would actually save some reads, which is the case with four or more indexed blocks.\n\nNow the question is: What happens when the index itself becomes so large that it spans over multiple blocks? You might have guessed it: We write another index that indexes the index. These multi-level indices really only become necessary once you have repositories with hundreds of thousands of references.\n\nEquipped with these indices, we can now make the procedure to look up records even more efficient:\n1. Determine whether there is an index by looking at the footer of the table.\n\t- If there is one, perform a binary search over the index to find the desired block. This block may point into an index block itself, in which case we need to repeat this step until we hit a record of the desired type.\n\t- Otherwise, perform a binary search over the blocks as we did before.\n2. Perform a binary search over the restart points, identifying the sub-section of the block that must contain our record.\n3. Perform a linear search over the records in that sub-section.\n\n## Multiple tables\n\nUp to this point, we have only discussed how to read a _single_ table. 
But as the name `tables.list` indicates, you can actually have a list of tables in your \"reftable\" database.\n\nEvery time you update a reference in your repository, a new table is written and appended to `tables.list`. Thus, you will eventually end up with multiple tables:\n\n```shell\n$ tree .git/reftable/\n.git/reftable/\n├── 0x000000000001-0x000000000007-8dcd8a77.ref\n├── 0x000000000008-0x000000000008-30e0f6f6.ref\n└── tables.list\n\n$ cat .git/reftable/tables.list\n0x000000000001-0x000000000007-8dcd8a77.ref\n0x000000000008-0x000000000008-30e0f6f6.ref\n```\n\nReading the actual state of a repository requires us to merge these multiple tables into a single virtual table.\n\nYou might be wondering: If a table is written for each reference update and the same reference is updated multiple times, how does the \"reftable\" format know the most up-to-date value of a given reference? Intuitively, one could assume the value would be the one from the newest table containing the reference.\n\nIn fact, every single record has a so-called update index that encodes the \"priority\" of a record. For example, if two ref records with the same name exist, then the one with the higher update index overrides the one with the lower update index.\n\nThese update indices are visible in the file structure above. The long hex strings (for example `0x000000000001`) are the update indices, where the left-hand side of the table name is the minimum update index contained in the table and the right-hand is the maximum update index.\n\nMerging the tables then happens via a [priority queue](https://en.wikipedia.org/wiki/Priority_queue) that is ordered by the key of the ref record as well as its update index. Assuming we want to scan through all ref records, we would:\n\n1. 
For every table, add its first record to the priority queue.\n\n![Adding first record to the priority queue](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675179/Blog/Content%20Images/Frame_5_-_Priority_queue_1.svg)\n\n2. Yield the head of the priority queue. Because the queue is ordered by update index, it must be the most up-to-date version. Add the next item from that table to the priority queue.\n\n![Yielding the head of the priority queue](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675179/Blog/Content%20Images/Frame_6_-_Priority_queue_2.svg)\n\n3. Drop all records from the queue that have the same name. These records are shadowed, which means that they will not be shown. For each table for which we are dropping records, add the next record to the priority queue.\n\n![Dropping all records from queue that have the same name](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675179/Blog/Content%20Images/Frame_7_-_Priority_queue_3.svg)\n\nNow we can rinse and repeat to read records for other keys.\n\nTables may contain special \"tombstone\" records that mark a record as having been deleted. This allows us to delete records without having to rewrite all tables to not contain the record anymore.\n\n### Auto-compaction\n\nWhile the idea behind the priority queue is simple enough, it would be rather inefficient to merge together hundreds or even only dozens of tables in this way. So while it is true that every update to your references appends a new table to your `tables.list` file, it is only part of the story.\n\nThe other part is auto-compaction: After a new table has been appended to the list of tables, the \"reftable\" backend checks whether some of the tables should be merged. This is done by using a simple heuristic: We check whether the list of tables forms a [geometric sequence](https://en.wikipedia.org/wiki/Geometric_progression) with the file sizes. 
Every table `n` must be at least twice as large as the next-most-recent table `n + 1`. If that geometric sequence is violated, the backend will compact tables so that the geometric sequence is restored.\n\nOver time, this will lead to structures that look like the following:\n\n```shell\n$ du --apparent-size .git/reftable/*\n429    .git/reftable/0x000000000001-0x00000000bd7c-d9819000.ref\n101    .git/reftable/0x00000000bd7d-0x00000000c5ac-c34b88a4.ref\n32    .git/reftable/0x00000000c5ad-0x00000000cc6c-60391f53.ref\n8    .git/reftable/0x00000000cc6d-0x00000000cdc1-61c30db1.ref\n3    .git/reftable/0x00000000cdc2-0x00000000ce67-d9b55a96.ref\n1    .git/reftable/0x00000000ce68-0x00000000ce6b-44721696.ref\n1    .git/reftable/tables.list\n```\n\nNote how for every single table, the property `size(n) > size(n+1) * 2` holds.\n\nOne of the consequences of auto-compaction is that the \"reftable\" backend maintains itself. We no longer have to run `git pack-refs` in a repository.\n\n## Want to learn more?\n\nYou should now have a good understanding of how the new \"reftable\" format works under the hood. 
If you want to dive even deeper into the format, you can refer to the [technical documentation](https://git-scm.com/docs/reftable) provided by the Git project.\n\n> Read our [Git 2.45.0 recap](https://about.gitlab.com/blog/whats-new-in-git-2-45-0/) to find out what else is in this version of Git.","open-source",[726,9,727,728],"git","open source","performance",{"slug":730,"featured":90,"template":684},"a-beginners-guide-to-the-git-reftable-format","content:en-us:blog:a-beginners-guide-to-the-git-reftable-format.yml","A Beginners Guide To The Git Reftable Format","en-us/blog/a-beginners-guide-to-the-git-reftable-format.yml","en-us/blog/a-beginners-guide-to-the-git-reftable-format",{"_path":736,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":737,"content":743,"config":750,"_id":752,"_type":13,"title":753,"_source":15,"_file":754,"_stem":755,"_extension":18},"/en-us/blog/a-go-micro-language-framework-for-building-dsls",{"title":738,"description":739,"ogTitle":738,"ogDescription":739,"noIndex":6,"ogImage":740,"ogUrl":741,"ogSiteName":669,"ogType":670,"canonicalUrls":741,"schema":742},"Lingo: A Go micro language framework for building Domain Specific Languages","Design, build and integrate your own Domain Specific Language with Lingo.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682320/Blog/Hero%20Images/typeset.png","https://about.gitlab.com/blog/a-go-micro-language-framework-for-building-dsls","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Lingo: A Go micro language framework for building Domain Specific Languages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Julian Thome\"}],\n        \"datePublished\": \"2022-05-26\",\n      }",{"title":738,"description":739,"authors":744,"heroImage":740,"date":746,"body":747,"category":724,"tags":748},[745],"Julian Thome","2022-05-26","\n\nDomain Specific Languages (DSL) are small, focused languages with a 
narrow\ndomain of applicability. DSLs are tailored towards their target domain so that\ndomain experts can formalize ideas based on their knowledge and background.\n\nThis makes DSLs powerful tools that can be used for the purpose of increasing\nprogrammer efficiency by being more expressive in their target\ndomain, compared to general purpose languages, and by providing concepts to\nreduce the cognitive load on their users.\n\nConsider the problem of summing up the balances of different bank accounts in a\nCSV file. A sample CSV file is provided in the example below where the first\ncolumn contains the name of the account holder and the second column contains\nthe account balance.\n\n``` csv\nname, balance\nLisa, 100.30\nBert, 241.41\nMaria, 151.13\n```\n\nYou could solve the problem of summing up balances by using a general-purpose\nlanguage such as [Ruby](https://www.ruby-lang.org/en/) as in the code snippet\nbelow. Apart from the fact that the code below is not very robust, it contains\na lot of boilerplate that is irrelevant to the problem at hand, i.e., summing\nup the account balances.\n\n``` ruby\n#!/usr/bin/env ruby\n\nexit(1) if ARGV.empty? || !File.exist?(ARGV[0])\n\nsum = 0\nFile.foreach(ARGV[0]).each_with_index do |line, idx|\n  next if idx == 0\n  sum += Float(line.split(',')[1])\nend\n\nputs sum.round(2)\n```\n\nBelow is an example [AWK script](https://en.wikipedia.org/wiki/AWK) that solves\nthe same problem. AWK is a DSL that was specifically designed to address\nproblems related to text-processing.\n\n``` awk \n#!/usr/bin/awk -f\n\nBEGIN{FS=\",\"}{sum+=$2}END{print sum}\n```\n\nThe Ruby program has a size of 208 characters, whereas the AWK program has a size of 56. The AWK program is roughly 4x smaller than its Ruby\ncounterpart. In addition, the AWK implementation is more robust by being less\nprone to glitches that may appear in the CSV file (e.g., empty newlines,\nwrongly formatted data-fields). 
The significant difference in terms of size\nillustrates that DSLs, by being more focused on solving specific problems, can\nmake their users more productive by sparing them the burden to write\nboilerplate code and narrowing the focus of the language on the problem at\nhand.\n\nSome popular DSLs most software developers use on a regular basis include\n[Regular Expressions](https://en.wikipedia.org/wiki/Regular_expression) for\npattern matching, AWK for text\ntransformation or [Standard Query Language](https://en.wikipedia.org/wiki/SQL)\nfor interacting with databases.\n\n## Challenges when designing Domain Specific Languages\n\nPrototyping, designing and evolving DSLs is a\nchallenging process. In our experience this is an exploratory cycle where you\nconstantly prototype ideas, incorporate them into the language, try them out in\nreality, collect feedback and improve the DSL based on the feedback. \n\nWhen designing a DSL, there are many components that have to be implemented and\nevolved. At a very high level there are two main components: the language\nlexer/parser and the language processor. The lexer/parser\nis the component that accepts input as per the language definition which is\nusually specified specified by means of a language grammar. The parsing/lexing\nphase produces a syntax tree which is then passed onto the language processor.\nA language processor evaluates the syntax tree. In the example we saw earlier,\nwe ran both the Ruby and AWK interpreters providing our scripts and the CSV\nfile as input; both interpreters evaluated the scripts and this evaluation\nyielded the sum of all the account balances as a result.\n\nTools such as parser generators can significantly reduce the effort of\nlexer/parser development by means of code generation. Sophisticated DSL\nframeworks such as [JetBrains MPS](https://www.jetbrains.com/mps/) or\n[Xtext](https://www.eclipse.org/Xtext/) also provide features that help\nimplement custom language support in IDEs. 
However, if present at all, the\nsupport for building the language processors is usually limited to generating\nplaceholders functions or boilerplate code for the language components that\nhave to be filled-in by the DSL developer. Moreover, such large and powerful DSL\nframeworks usually have a fairly steep learning curve so that they are probably\na better fit for more sophisticated DSLs as opposed to small, easily\nembeddable, focused languages, which we refer to as _micro languages_.\n\nIn some situations, it may be worth considering working around these problems\nby just relying on standard data exchange formats such as `.toml`, `.yaml` or\n`.json` as a means of configuration. Similar to the parser generators, using\nsuch a format may relieve some of the burden when it comes to parser\ndevelopment effort. However, this approach does not help when it comes to the\nimplementation of the actual language processor. In addition, most standard data\nexchange formats are inherently limited to representing data in terms of simple\nconcepts (such as lists, dictionaries, strings and numbers). This limitation\ncan lead to bloated configuration files quickly as shown in the following\nexample.\n\nImagine the development of a calculator that operates on integers using\nmultiplication `*`, addition `+`. When using a data-description language like\nYAML in the example below, you can see that even a small simple term like `1 + 2 * 3 + 5` \ncan be hard to reason about, and by adding more terms the configuration\nfile would get bloated quickly.\n\n``` yaml\nterm:\n  add: \n    - 1\n    - times:\n      - 2\n      - 3\n    - 5\n```\n\nThis blog post is focused on the design of micro languages. The core idea is to\nprovide a simple, extensible language core that can be easily extended with\ncustom-types and custom functions; the language can evolve without having\nto touch the parser or the language processor. 
Instead, the DSL designer can\njust focus on the concepts that ought to be integrated into the DSL by\nimplementing interfaces and \"hooking\" them into the core language\nimplementation.\n\n## Lingo: A micro language framework for Go\n\nAt GitLab, Go is one of our main programming languages and some of the tools we\ndevelop required their own, small, embeddable DSLs so that users could properly\nconfigure and interact with them. \n\nInitially, we tried to integrate already existing, embeddable and expandable\nlanguage implementations. Our only condition was that they had to be\nembeddable natively into a Go application. We explored several great free and\nopen-source (FOSS) projects such as [go-lua](https://github.com/Shopify/go-lua)\nwhich is Lua VM implemented in Go, [go-yeagi](https://github.com/traefik/yaegi)\nwhich provides a Go interpreter with which Go can be used as a scripting\nlanguage or [go-zygomys](https://github.com/glycerine/zygomys) which is a LISP\ninterpreter written in Go. However, these packages are essentially modules to\nintegrate general-purpose languages on top of which a DSL could be built. These modules ended up being fairly complex. In contrast, we wanted to have basic support to design, implement, embed and evolve DSLs natively into a Go\napplication that is flexible, small, simple/easy to grasp, evolve and\nadapt.\n\nWe were looking for a micro language framework with the properties listed below:\n\n1. Stability: Changes applied to the DSL should neither require any changes to the core lexer/parser implementation nor to the language processor implementation.\n1. Flexibility/Composability: New DSL concepts (data-types, functions) can be integrated via a simple plug-in mechanism.\n1. Simplicity: the language framework should have just\nenough features to provide a foundation that is powerful enough to implement\nand evolve a custom DSL. 
In addition, the whole implementation of the micro\nlanguage framework should be in pure Go so that it is easily embeddable in Go\napplications.\n\nSince none of the available FOSS tools we looked at was able to\nfulfill all of those requirements, we developed our own micro language framework\nin Go called Lingo which stands for \"**L**ISP-based Domain Specific Languages\n(DSLs) **in Go**\". Lingo is completely FOSS and available in the [Lingo Git repository](https://gitlab.com/gitlab-org/vulnerability-research/foss/lingo)\nunder the free and open source space of the [Vulnerability Research Team](https://about.gitlab.com/handbook/engineering/development/sec/secure/vulnerability-research/).\n\n[Lingo](https://gitlab.com/gitlab-org/vulnerability-research/foss/lingo)\nprovides a foundation for building DSLs based on Symbolic Expressions (S-expressions), i.e.,\nexpressions provided in the form of nested lists `(f ...)` where `f` can be\nconsidered as the placeholder that represents the function symbol. Using this format,\nthe mathematical term we saw earlier could be written as S-expression `(+ 1 (* 2 3) 5)`. \n\nS-expressions are versatile and easy to process due to their uniformity. In\naddition, they can be used to represent both code and data in a consistent\nmanner.\n\nWith regards to the Stability, Flexibility and Composability properties, \n[Lingo](https://gitlab.com/gitlab-org/vulnerability-research/foss/lingo)\nprovides a simple plug-in mechanism to add new functions as well as types\nwithout having to touch the core parser or language processor. From the\nperspective of the S-expression parser, the actual function symbol is\nessentially irrelevant with regards to the S-expression parsing. The language processor is just evaluating S-expressions and dispatching the execution to the interface implementations. 
These implementations are provided by the plug-ins based on the function symbol.\n\nWith regards to Simplicity, the Lingo code base is roughly 3K lines of pure Go code including the lexer/parser, an\nengine for code transformation, and the interpreter/evaluator. The small size\nshould make it possible to understand the entirety of the implementation.  \n\nReaders that are interested in the technical details of\nLingo itself can have a look at the\n[README.md](https://gitlab.com/gitlab-org/vulnerability-research/foss/lingo/-/blob/main/README.md)\nwhere the implementation details and the used theoretical foundations are explained.\nThis blog post focuses on how\nLingo can be used to build a DSL from scratch.\n\n## Using Lingo to design a data generation engine\n\nIn this example we are designing a data-generation engine in Go using\nLingo as a foundation. Our data generation engine may be used to generate structured input\ndata for fuzzing or other application contexts. This example illustrates how\nyou can use Lingo to create a language and the corresponding language\nprocessor. Going back to the example from the beginning, let us assume we would\nlike to generate CSV files in the format we saw at the beginning covering\naccount balances.\n\n``` csv\nname, balance\nLisa, 100.30\nBert, 241.41\nMaria, 151.13\n```\n\nOur language includes the following functions:\n\n1. `(oneof s0, s1, ..., sN)`: randomly returns one of the parameter strings `sX` (0 \u003C= X \u003C= N).\n1. `(join e0, e1, ..., eN)`: joins all argument expressions and concatenates their string representation `eX` (0 \u003C= X \u003C= N).\n1. `(genfloat min max)`: generates a random float number X (0 \u003C= X \u003C= N) and returns it.\n1. 
`(times num exp)`: repeats the pattern generated by exp num times.\n\nFor this example we are using\nLingo to build the language and the language processor to automatically generate CSV\noutput which we are going to feed back into the Ruby and AWK programs we saw in\nthe introduction in order to perform a stress test on them. \n\nWe refer to our new language/tool as _Random Text Generator_ (RTG) `.rtg`.\nBelow is a sample script `script.rtg` we'd like our program to digest in order\nto randomly generate CSV files. As you can see in the example below, we are\njoining sub-strings starting with the CSV header `name, balance`\nafter which we randomly generate 10 lines of names and balance amounts. In\nbetween, we also randomly generate some empty lines.\n\n```\n(join \n  (join \"name\" \",\" \"balance\" \"\\n\")\n  (times 10 \n    '(join \n      (oneof \n        \"Jim\" \n        \"Max\" \n        \"Simone\" \n        \"Carl\" \n        \"Paul\" \n        \"Karl\" \n        \"Ines\" \n        \"Jane\" \n        \"Geralt\" \n        \"Dandelion\" \n        \"Triss\" \n        \"Yennefer\" \n        \"Ciri\") \n      \",\" \n      (genfloat 0 10000) \n      \"\\n\" \n      (oneof \"\" \"\\n\"))))\n```\n\nOur engine takes the script above written in RTG and generates random CSV\ncontent. Below is an example CSV file generated from this script.\n\n``` csv\nname,balance\nCarl,25.648205\nInes,11758.551\n\nCiri,13300.558\n...\n```\n\nFor the remainder of this section, we explore how we can implement a\ndata generation engine based on Lingo. The implementation of RTG requires\nthe two main ingredients: (1) a float data type and a result object to integrate a float\nrepresentation and (2) implementations for the `times`, `oneof`, `genfloat` and\n`join` functions.\n\n### Introducing a float data type and result objects\n\nLingo differentiates between data types and result objects. 
Data types indicate how data is\nmeant to be used and result objects are used to pass intermediate results\nbetween functions where every result has a unique type. In the code snippet\nbelow, we introduce a new `float` data type. The comments in the code snippet below\nprovide more details.\n\n``` go \n// introduce float type\nvar TypeFloatId, TypeFloat = types.NewTypeWithProperties(\"float\", types.Primitive)\n// introduce token float type for parser\nvar TokFloat = parser.HookToken(parser.TokLabel(TypeFloat.Name))\n\n// recognize (true) as boolean\ntype FloatMatcher struct{}\n\n// this function is used by the parser to \"recognize\" floats as such\nfunc (i FloatMatcher) Match(s string) parser.TokLabel {\n  if !strings.Contains(s, \".\") {\n    return parser.TokUnknown\n  }\n\n  if _, err := strconv.ParseFloat(s, 32); err == nil {\n\treturn TokFloat.Label\n  }\n\n  return parser.TokUnknown\n}\nfunc (i FloatMatcher) Id() string {\n  return string(TokFloat.Label)\n}\n\nfunc init() {\n  // hook matcher into the parser\n  parser.HookMatcher(FloatMatcher{})\n}\n```\n\nIn addition, we also require a result object which we can use to pass around\nfloat values. This is an interface implementation where most of the functions names\nare self-explanatory. 
The important bit is the `Type` function\nthat returns our custom `float` type we introduced in the last snippet.\n\n``` go\ntype FloatResult struct{ Val float32 }\n// deep copy\nfunc (r FloatResult) DeepCopy() eval.Result { return NewFloatResult(r.Val) }\n// returns the string representation of this result type\nfunc (r FloatResult) String() string {\n  return strconv.FormatFloat(float64(r.Val), 'f', -1, 32)\n}\n// returns the data type for this result type\nfunc (r FloatResult) Type() types.Type   { return custtypes.TypeFloat }\n// call-back that is cleaned up when the environment is cleaned up\nfunc (r FloatResult) Tidy()              {}\n\nfunc (r FloatResult) Value() interface{} { return r.Val }\nfunc (r *FloatResult) SetValue(value interface{}) error {\n  boolVal, ok := value.(float32)\n  if !ok {\n    return fmt.Errorf(\"invalid type for Bool\")\n  }\n  r.Val = boolVal\n  return nil\n}\nfunc NewFloatResult(value float32) *FloatResult {\n  return &FloatResult{\n    value,\n  }\n}\n```\n\n### Implementing the DSL functions\n\nSimilar to the data type and return object, implementation of a DSL function is\nas simple as implementing an interface. In the example below we implement the\n`genfloat` function as an example. The most important parts are the `Symbol()`,\n`Validate()` and `Evaluate()` functions. The `Symbol()` function returns the\nfunction symbol which is `genfloat` in this particular case. \n\nBoth, the `Validate()` and `Evaluate()` functions take the environment `env`\nand the parameter Stack `stack` as the parameter. The environment is used to store\nintermediate results which is useful when declaring/defining variables. The `stack` includes the input parameters of the function. For\n`(genfloat 0 10000)`, the stack would consist out of two `IntResult` parameters\n`0` and `10000` where `IntResult` is a standard result object already provided by the\ncore implementation of Lingo. 
`Validate()` makes sure that the parameter can be\ndigested by the function at hand, whereas `Evaluate()` actually invokes the\nfunction. In this particular case, we are generating a float value within the\nspecified range and return the corresponding `FloatResult`.\n\n``` go\ntype FunctionGenfloat struct{}\n\n// returns a description of this function\nfunc (f *FunctionGenfloat) Desc() (string, string) {\n  return fmt.Sprintf(\"%s%s %s%s\",\n    string(parser.TokLeftPar),\n    f.Symbol(),\n\t\"min max\",\n\tstring(parser.TokRightPar)),\n\t\"generate float in rang [min max]\"\n}\n\n// this is the symbol f of the function (f ...)\nfunc (f *FunctionGenfloat) Symbol() parser.TokLabel {\n  return parser.TokLabel(\"genfloat\")\n}\n\n// validates the parameters of this function which are passed in\nfunc (f *FunctionGenfloat) Validate(env *eval.Environment, stack *eval.StackFrame) error {\n  if stack.Size() != 2 {\n    return eval.WrongNumberOfArgs(f.Symbol(), stack.Size(), 2)\n  }\n\n  for idx, item := range stack.Items() {\n    if item.Type() != types.TypeInt {\n\t  return eval.WrongTypeOfArg(f.Symbol(), idx+1, item)\n\t}\n  }\n  return nil\n}\n\n// evaluates the function and returns the result\nfunc (f *FunctionGenfloat) Evaluate(env *eval.Environment, stack *eval.StackFrame) (eval.Result, error) {\n  var result float32\n  rand.Seed(time.Now().UnixNano())\n  for !stack.Empty() {\n    max := stack.Pop().(*eval.IntResult)\n    min := stack.Pop().(*eval.IntResult)\n\n\tminval := float32(min.Val)\n\tmaxval := float32(max.Val)\n\n\tresult = minval + (rand.Float32() * (maxval - minval))\n  }\n\n  return custresults.NewFloatResult(result), nil\n}\n\nfunc NewFunctionGenfloat() (eval.Function, error) {\n  fun := &FunctionGenfloat{}\n  parser.HookToken(fun.Symbol())\n  return fun, nil\n}\n```\n\n### Putting it all together\n\nAfter implementing all the functions, we only have to register/integrate them\n(`eval.HookFunction(...)`) so that Lingo properly resolves them when 
processing\nthe program. In the example below, we are registering all of the custom functions\nwe implemented, i.e., `times`, `oneof`, `join`, `genfloat`. The `main()`\nfunction in the example below includes the code required to evaluate our script\n`script.rtg`.\n\n``` go\n// register function\nfunc register(fn eval.Function, err error) {\n  if err != nil {\n    log.Fatalf(\"failed to create %s function %s:\", fn.Symbol(), err.Error())\n  }\n  err = eval.HookFunction(fn)\n  if err != nil {\n    log.Fatalf(\"failed to hook bool function %s:\", err.Error())\n  }\n}\n\nfunc main() {\n  // register custom functions\n  register(functions.NewFunctionTimes())\n  register(functions.NewFunctionOneof())\n  register(functions.NewFunctionJoin())\n  register(functions.NewFunctionGenfloat())\n  register(functions.NewFunctionFloat())\n  if len(os.Args) \u003C= 1 {\n    fmt.Println(\"No script provided\")\n    os.Exit(1)\n  }\n  // evaluate script\n  result, err := eval.RunScriptPath(os.Args[1])\n  if err != nil {\n    fmt.Println(err.Error())\n    os.Exit(1)\n  }\n\n  // print output\n  fmt.Printf(strings.ReplaceAll(result.String(), \"\\\\n\", \"\\n\"))\n\n  os.Exit(0)\n}\n```\n\nThe source code for RTG is available\n[here](https://gitlab.com/julianthome/lingo-example). You can find information\nabout how to build and run the tool in the\n[README.md](https://gitlab.com/julianthome/lingo-example/-/blob/main/README.md).\n\nWith approx. 300 lines of Go code, we have successfully designed a language and\nimplemented a language processor. We can now use RTG to test the robustness of\nthe Ruby (`computebalance.rb`) and AWK scripts (`computebalance.awk`) we used\nat the beginning to sum up account balances. 
\n\n``` bash\ntimeout 10 watch -e './rtg script.rtg > out.csv && ./computebalance.awk out.csv'\ntimeout 10 watch -e './rtg script.rtg > out.csv && ./computebalance.rb out.csv'\n```\n\nThe experiment above shows that the files generated by means of RTG can be\nproperly digested by the AWK script which is much more robust since it can cope\nwith the all generated CSV files. In contrast, executing of the Ruby script\nresults in errors because it cannot properly cope with newlines as they appear\nin the CSV file.\n\nCover image by [Charles Deluvio](https://unsplash.com/@kristianstrand) on [Unsplash](https://unsplash.com/photos/p8gzCnZf39k)\n{: .note}\n\n",[749,9,680],"collaboration",{"slug":751,"featured":6,"template":684},"a-go-micro-language-framework-for-building-dsls","content:en-us:blog:a-go-micro-language-framework-for-building-dsls.yml","A Go Micro Language Framework For Building Dsls","en-us/blog/a-go-micro-language-framework-for-building-dsls.yml","en-us/blog/a-go-micro-language-framework-for-building-dsls",{"_path":757,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":758,"content":764,"config":774,"_id":776,"_type":13,"title":777,"_source":15,"_file":778,"_stem":779,"_extension":18},"/en-us/blog/a-visual-guide-to-gitlab-ci-caching",{"title":759,"description":760,"ogTitle":759,"ogDescription":760,"noIndex":6,"ogImage":761,"ogUrl":762,"ogSiteName":669,"ogType":670,"canonicalUrls":762,"schema":763},"A visual guide to GitLab CI/CD caching","Learn cache types, as well as when and how to use them.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682443/Blog/Hero%20Images/cover.jpg","https://about.gitlab.com/blog/a-visual-guide-to-gitlab-ci-caching","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A visual guide to GitLab CI/CD caching\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matthieu Fronton\"}],\n        \"datePublished\": \"2022-09-12\",\n      
}",{"title":759,"description":760,"authors":765,"heroImage":761,"date":767,"body":768,"category":769,"tags":770},[766],"Matthieu Fronton","2022-09-12","\n\nIf you've ever worked with GitLab CI/CD you may have needed, at some point, to use a cache to share content between jobs. The decentralized nature of GitLab CI/CD is a strength that can confuse the understanding of even the best of us when we want to connect wires all together. For instance, we need to know critical information such as the difference between artifacts and cache and where/how to place setups.\n\nThis visual guide will help with both challenges.\n\n## Cache vs. artifacts\n\nThe concepts _may_ seem to overlap because they are about sharing content between jobs, but they actually are fundamentally different:\n\n- If your job does not rely on the the previous one (i.e. can produce it by itself but if content already exists the job will run faster), then use cache.\n- If your job does rely on the output of the previous one (i.e. cannot produce it by itself), then use artifacts and dependencies.\n\nHere is a simple sentence to remember if you struggle between choosing cache or artifact:\n> Cache is here to speed up your job but it may not exist, so don't rely on it.\n\nThis article will focus on **cache**.\n\n## Initial setup\n\nWe'll go with a simple representation of the GitLab CI/CD pipelining model and ignore (for now) that the jobs can be executed on any runners and hosts. 
It will help get the basics.\n\nLet's say you have:\n- 1 project with 3 branches\n- 1 host running 2 docker runners\n\n![Initial setup](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-1.png){: .shadow.center}\n\n## Local cache: Docker volume\n\nIf you want a [local cache](https://docs.gitlab.com/ee/ci/caching/index.html#where-the-caches-are-stored) between all your jobs running on the same runner, use the [cache statement](https://docs.gitlab.com/ee/ci/yaml/#cache) in your `.gitlab-ci.yml`:\n\n```yaml\ndefault:\n  cache:\n    path:\n      - relative/path/to/folder/*.ext\n      - relative/path/to/another_folder/\n      - relative/path/to/file\n```\n\n![local / container / all branches / all jobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-2.png){: .shadow.center}\n\nUsing the [predefined variable](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html) `CI_COMMIT_REF_NAME` as the [cache key](https://docs.gitlab.com/ee/ci/yaml/index.html#cachekey), you can ensure the cache is tied to a specific branch:\n\n```yaml\ndefault:\n  cache:\n    key: $CI_COMMIT_REF_NAME\n    path:\n      - relative/path/to/folder/*.ext\n      - relative/path/to/another_folder/\n      - relative/path/to/file\n```\n\n![local / container / one branch / all jobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-3.png){: .shadow.center}\n\nUsing the [predefined variable](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html) `CI_JOB_NAME` as the [cache key](https://docs.gitlab.com/ee/ci/yaml/index.html#cachekey), you can ensure the cache is tied to a specific job:\n\n![local / container / all branch / one jobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-4.png){: .shadow.center}\n\n## Local cache: Bind mount\n\nIf you don't want to use a volume for caching purposes (debugging purpose, cleanup disk space more easily, etc.), you can configure a [bind mount for Docker 
volumes](https://docs.docker.com/storage/bind-mounts/) while registering the runner. With this setup, you do not need to set up the [cache statement](https://docs.gitlab.com/ee/ci/yaml/#cache) in your `.gitlab-ci.yml`:\n\n```yaml\n#!/bin/bash\n\ngitlab-runner register                             \\\n  --name=\"Bind-Mount Runner\"                       \\\n  --docker-volumes=\"/host/path:/container/path:rw\" \\\n...\n```\n\n![local / one runners / one host / all branch / all jobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-5.png){: .shadow.center}\n\nIn fact, this setup even allows you to share a cache between jobs running on the same host without requiring you to set up a distributed cache (which we'll talk about later):\n\n```yaml\n#!/bin/bash\n\ngitlab-runner register                             \\\n  --name=\"Bind-Mount Runner X\"                     \\\n  --docker-volumes=\"/host/path:/container/path:rw\" \\\n...\n\ngitlab-runner register                                 \\\n  --name=\"Bind-Mount Runner Y\"                         \\\n  --docker-volumes=\"/host/path:/container/alt/path:rw\" \\\n...\n```\n\n![local / multiple runners / one host / all branch / all jobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-6.png){: .shadow.center}\n\n## Distributed cache\n\nIf you want to have a [shared cache](https://docs.gitlab.com/runner/configuration/autoscale.html#distributed-runners-caching) between all your jobs running on multiple runners and hosts, use the \u003Ca href=\"https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runnerscache-section\">[runner.cache]\u003Ca> section in your `config.toml`:\n\n```yaml\n[[runners]]\n  name = \"Distributed-Cache Runner\"\n...\n  [runners.cache]\n    Type = \"s3\"\n    Path = \"bucket/path/prefix\"\n    Shared = true\n    [runners.cache.s3]\n      ServerAddress = \"s3.amazonaws.com\"\n      AccessKey = \"\u003Cchangeme>\"\n      SecretKey = 
\"\u003Cchangeme>\"\n      BucketName = \"foobar\"\n      BucketLocation = \"us-east-1\"\n```\n\n![remote / multiple runners / multiple hosts / all branches / all jobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-7.png){: .shadow.center}\n\nUsing the predefined variable `CI_COMMIT_REF_NAME` as the cache key, you can ensure the cache is tied to a specific branch between multiple runners and hosts:\n\n![remote / multiple runners / multiple hosts / one branch / all jobs](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-8.png){: .shadow.center}\n\n## Real-life setup\n\nThe above assumptions allowed you to harness your understanding of the concepts and possibilities.\n\nIn real life, you'll face more complex wiring and we hope this article will help you as a visual cheatsheet along with the reference documentation.\n\nJust to give you a sneak peek, here is an exercise for you:\n\n- Set up a cache between all the jobs of a specific stage, running on any runner and any host, but only between pipelines of the same branch:\n\n![Real-life test assignment](https://about.gitlab.com/images/blogimages/visual-guide-caching/vgc-9.png){: .shadow.center}\n\nHappy caching, folks!\n\n\n\nCover image by [Alina Grubnyak](https://unsplash.com/@alinnnaaaa) on [Unsplash](https://unsplash.com)\n{: .note}\n","engineering",[771,772,773,9],"CI","CD","DevOps",{"slug":775,"featured":6,"template":684},"a-visual-guide-to-gitlab-ci-caching","content:en-us:blog:a-visual-guide-to-gitlab-ci-caching.yml","A Visual Guide To Gitlab Ci 
Caching","en-us/blog/a-visual-guide-to-gitlab-ci-caching.yml","en-us/blog/a-visual-guide-to-gitlab-ci-caching",{"_path":781,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":782,"content":788,"config":795,"_id":797,"_type":13,"title":798,"_source":15,"_file":799,"_stem":800,"_extension":18},"/en-us/blog/accelerate-code-reviews-with-gitlab-duo-and-amazon-q",{"title":783,"description":784,"ogTitle":783,"ogDescription":784,"noIndex":6,"ogImage":785,"ogUrl":786,"ogSiteName":669,"ogType":670,"canonicalUrls":786,"schema":787},"Accelerate code reviews with GitLab Duo and Amazon Q","Use AI-powered agents to optimize code reviews by automatically analyzing merge requests and providing comprehensive feedback on bugs, readability, and coding standards.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750096976/Blog/Hero%20Images/Blog/Hero%20Images/Screenshot%202024-11-27%20at%204.55.28%E2%80%AFPM_4VVz6DgGBOvbGY8BUmd068_1750096975734.png","https://about.gitlab.com/blog/accelerate-code-reviews-with-gitlab-duo-and-amazon-q","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Accelerate code reviews with GitLab Duo and Amazon Q\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2025-06-02\",\n      }",{"title":783,"description":784,"authors":789,"heroImage":785,"date":790,"body":791,"category":702,"tags":792},[699],"2025-06-02","Code reviews are critical for catching bugs, improving code readability, and maintaining coding standards, but they can also be a major bottleneck in your workflow. When you're trying to ship features quickly, waiting for multiple team members to review your code can be frustrating. 
The back-and-forth discussions, the scheduling conflicts, and the time it takes to get everyone aligned can stretch what should be a simple review into days or even weeks.\n\nHere's where [GitLab Duo with Amazon Q](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/), our new offering that delivers agentic AI throughout the software development lifecycle for AWS customers, comes in to transform your review process. This intelligent, AI-powered solution can perform comprehensive code reviews for you in a fraction of the time it would take your human colleagues. By leveraging advanced agentic AI capabilities, GitLab Duo with Amazon Q streamlines your entire review workflow without sacrificing the quality and thoroughness you need. Think of it as having an always-available, highly skilled reviewer who can instantly analyze your code and provide actionable feedback.\n\n## How it works: Launching a code review\n\nSo how does GitLab Duo with Amazon Q actually work? Let's say you've just finished working on a feature and created a merge request with multiple code updates. Instead of pinging your teammates and waiting for their availability, you simply enter a quick command in the comment section: \"/q review\". That's it – just those two words trigger the AI to spring into action.\n\n![Triggering a code review using GitLab Duo with Amazon Q](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097002/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097002096.png)\n\nOnce you've entered the command, Amazon Q Service immediately begins analyzing your code changes. 
You'll see a confirmation that the review is underway, and within moments, the AI is examining every line of your updates, checking for potential issues across multiple dimensions.\nWhen the review completes, you receive comprehensive feedback that covers all the bases: bug detection, readability improvements, syntax errors, and adherence to your team's coding standards. The AI doesn't just point out problems, it provides context and suggestions for fixing them, making it easy for you to understand what needs attention and why.\n\nThe beauty of this agentic AI approach is that it handles the heavy lifting of code review while you focus on what matters most: building great software. You get the benefits of thorough code reviews — better bug detection, consistent coding standards, and improved code quality — without the time sink. Your deployment times shrink dramatically because you're no longer waiting in review queues, and your entire team becomes more productive.\n\n## Why use GitLab Duo with Amazon Q?\n\nGitLab Duo with Amazon Q transforms your development workflow in the following ways:\n- Lightning-fast code reviews that don't compromise on quality\n- Consistent application of coding standards across your entire codebase\n- Immediate feedback that helps you fix issues before they reach production\n- Reduced deployment times that let you ship features faster\n- More time for your team to focus on creative problem-solving instead of repetitive reviews\n\nReady to see this game-changing feature in action? 
Watch how GitLab Duo with Amazon Q can revolutionize your code review process:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/4gFIgyFc02Q?si=GXVz--AIrWiwzf-I\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n> To learn more about GitLab Duo with Amazon Q visit us at an upcoming [AWS Summit in a city near you](https://about.gitlab.com/events/aws-summits/) or [reach out to your GitLab representative](https://about.gitlab.com/partners/technology-partners/aws/#form).\n> \n> And make sure to join the GitLab 18 virtual launch event to learn about our agentic AI plans and more. [Register today!](https://about.gitlab.com/eighteen/)",[704,478,793,678,680,281,794,9],"code review","AWS",{"slug":796,"featured":90,"template":684},"accelerate-code-reviews-with-gitlab-duo-and-amazon-q","content:en-us:blog:accelerate-code-reviews-with-gitlab-duo-and-amazon-q.yml","Accelerate Code Reviews With Gitlab Duo And Amazon Q","en-us/blog/accelerate-code-reviews-with-gitlab-duo-and-amazon-q.yml","en-us/blog/accelerate-code-reviews-with-gitlab-duo-and-amazon-q",{"_path":802,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":803,"config":806,"content":808,"_id":816,"_type":13,"title":817,"_source":15,"_file":818,"_stem":819,"_extension":18},"/en-us/blog/accelerate-learning-with-gitlab-duo-agent-platform",{"title":804,"description":805},"Accelerate learning with GitLab Duo Agent Platform","Learn how agentic AI helped generate comprehensive gRPC documentation in minutes, not hours.",{"slug":807,"featured":90,"template":684},"accelerate-learning-with-gitlab-duo-agent-platform",{"title":804,"description":805,"authors":809,"heroImage":811,"date":812,"category":702,"tags":813,"body":815},[810],"Halil Coban","Blog/Hero%20Images/Workflow_1800x945.png","2025-07-07",[702,9,678,814],"security","At GitLab, we continue to expand our AI capabilities so I often find myself 
learning and working in new codebases. Whether I'm debugging issues, implementing new features, or onboarding to different projects, understanding system architecture quickly is crucial. But let's be honest — manually tracing through complex communication flows, especially gRPC connections, can eat up hours of productive development time.\n\nThis is exactly the type of tedious, yet necessary, work [GitLab Duo Agent Platform](https://about.gitlab.com/blog/gitlab-duo-agent-platform-what-is-next-for-intelligent-devsecops/) is designed to handle. Instead of replacing developers, it amplifies our capabilities by automating routine tasks so we can focus on creative problem solving and strategic technical work.\n\nLet me show you how I used [Duo Agent Platform](https://about.gitlab.com/gitlab-duo/agent-platform/) to generate comprehensive documentation for a Golang project's gRPC communication flow — and how it transformed hours of code analysis into a few minutes of guided interaction.\n\nYou can follow along with this video:\n\n\u003Cdiv style=\"padding:75% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1098569263?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"AI Agent Generates Complete gRPC Documentation in Minutes | GitLab Duo Agent Platform Demo\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## The challenge: Understanding gRPC communication flows\n\nI was working with a project called \"Duo Workflow Executor\" that communicates with a gRPC server. 
Rather than spending my afternoon manually tracing through the codebase to understand the communication patterns, I decided to let Duo Agent Platform handle the heavy lifting.\n\nMy goal was simple: generate a clear diagram showing how the gRPC communication works, including what payloads are received, what actions are executed, and what responses are sent back.\n\nWorking in VS Code with the GitLab Workflow extension installed, I opened the project and crafted a specific prompt for Duo Agent Platform:\n\n\"Can you prepare a mermaid diagram that shows the gRPC connection between duo-workflow-service and this project. It should show what this project receives in gRPC payload, and what actions it executes based on the payload, and what it sends back. Study internal/services/runner/runner.go, especially the Run method, and write the mermaid output to a grpc.md file.\"\n\nDuo Agent Platform didn't just blindly execute my request — it began intelligently gathering context to create a comprehensive execution plan. The platform automatically:\n\n* Searched through relevant Go files in the project  \n* Read the specific file I mentioned (runner.go)  \n* Identified additional files that would provide necessary context  \n* Analyzed the codebase structure to understand the gRPC implementation\n\nThis contextual awareness is what sets agentic AI tools with great context apart from simple code generation tools. Instead of working in isolation, Duo Agent Platform understands the relationships between different components of your project.\n\n## Collaborative refinement\n\nRather than making assumptions, Duo Agent Platform is designed to keep a human in the loop at critical moments in task execution to deliver better results. In this scenario, the platform paused to ask clarifying questions about the level of detail I wanted. \n\n**Agent:** \"Should I include error handling details in the diagram?\" \n\n**Me:** \"Don't focus on error handling. Include all actions. 
Focus on gRPC payload.\"\n\nThis back-and-forth refinement is crucial. The agent isn't just executing commands — it's working with me to ensure the deliverable meets my specific requirements.\n\n## Execution and results\n\nWith the clarified requirements, Duo Agent Platform finalized its execution plan and requested confirmation before proceeding. I could have modified the approach at this stage, but the plan was perfectly aligned with what I needed.\n\nThe agent then executed each task in sequence, providing real-time progress updates. Within minutes, it had:\n\n* Analyzed all relevant files across the project  \n* Created comprehensive documentation with a detailed mermaid diagram  \n* Saved everything to the requested grpc.md file  \n* Included valuable context about outgoing payloads and connection management\n\nThe resulting documentation wasn't just a basic diagram — it was thorough, accurate, and immediately useful for understanding the system architecture.\n\n## Reviewing the output\n\nHere's the actual mermaid diagram that Duo Agent Platform created, showing the complete gRPC communication flow:\n\n```mermaid\nsequenceDiagram\nparticipant DWS as duo-workflow-service\nparticipant DWE as duo-workflow-executor\nparticipant Runner as ExecutionRunner\nparticipant ActionExecutor as Action Executor\nparticipant FileSystem as File System/Commands\n\n\nNote over DWS, DWE: gRPC Bidirectional Streaming (ExecuteWorkflow)\n\n%% Connection Establishment\nDWE->>DWS: gRPC Connection with Headers\nNote over DWE, DWS: Headers: authorization (Bearer token)\u003Cbr/>x-gitlab-authentication-type: oidc\u003Cbr/>x-gitlab-realm, x-gitlab-global-user-id\u003Cbr/>x-gitlab-oauth-token, x-gitlab-base-url\u003Cbr/>x-gitlab-instance-id, x-request-id\u003Cbr/>x-gitlab-namespace-id, x-gitlab-project-id\n\n\n%% Workflow Start Request\nDWE->>DWS: ClientEvent{StartWorkflowRequest}\nNote over DWE, DWS: StartWorkflowRequest:\u003Cbr/>- ClientVersion\u003Cbr/>- WorkflowDefinition\u003Cbr/>- 
Goal\u003Cbr/>- WorkflowID\u003Cbr/>- WorkflowMetadata\u003Cbr/>- ClientCapabilities[]\n\n\n%% Action Processing Loop\nloop Action Processing\n    DWS->>DWE: Action Message\n    Note over DWS, DWE: Action Types:\u003Cbr/>- Action_RunCommand {program, flags[], arguments[]}\u003Cbr/>- Action_RunGitCommand {command, arguments[], repositoryUrl}\u003Cbr/>- Action_RunReadFile {filepath}\u003Cbr/>- Action_RunWriteFile {filepath, contents}\u003Cbr/>- Action_RunEditFile {filepath, oldString, newString}\u003Cbr/>- Action_RunHTTPRequest {method, path, body}\u003Cbr/>- Action_ListDirectory {directory}\u003Cbr/>- Action_FindFiles {namePattern}\u003Cbr/>- Action_Grep {searchDirectory, pattern, caseInsensitive}\u003Cbr/>- Action_NewCheckpoint {}\u003Cbr/>- Action_RunMCPTool {}\n\n\n    DWE->>Runner: Receive Action\n    Runner->>Runner: processWorkflowActions()\n    Runner->>ActionExecutor: executeAction(ctx, action)\n    \n    alt Action_RunCommand\n        ActionExecutor->>FileSystem: Execute Shell Command\n        Note over ActionExecutor, FileSystem: Executes: program + flags + arguments\u003Cbr/>in basePath directory\n        FileSystem-->>ActionExecutor: Command Output + Exit Code\n    \n    else Action_RunReadFile\n        ActionExecutor->>FileSystem: Read File\n        Note over ActionExecutor, FileSystem: Check gitignore rules\u003Cbr/>Read file contents\n        FileSystem-->>ActionExecutor: File Contents\n    \n    else Action_RunWriteFile\n        ActionExecutor->>FileSystem: Write File\n        Note over ActionExecutor, FileSystem: Check gitignore rules\u003Cbr/>Create/overwrite file\n        FileSystem-->>ActionExecutor: Success/Error Message\n    \n    else Action_RunEditFile\n        ActionExecutor->>FileSystem: Edit File\n        Note over ActionExecutor, FileSystem: Read → Replace oldString with newString → Write\u003Cbr/>Check gitignore rules\n        FileSystem-->>ActionExecutor: Edit Result Message\n    \n    else Action_RunGitCommand\n        
ActionExecutor->>FileSystem: Execute Git Command \n        Note over ActionExecutor, FileSystem: Git operations with authentication\u003Cbr/>Uses provided git config\n        FileSystem-->>ActionExecutor: Git Command Output\n    \n    else Action_RunHTTPRequest\n        ActionExecutor->>DWS: HTTP Request to GitLab API\n        Note over ActionExecutor, DWS: Method: GET/POST/PUT/DELETE\u003Cbr/>Path: API endpoint\u003Cbr/>Body: Request payload\u003Cbr/>Headers: Authorization\n        DWS-->>ActionExecutor: HTTP Response\n    \n    else Action_ListDirectory\n        ActionExecutor->>FileSystem: List Directory Contents\n        Note over ActionExecutor, FileSystem: Respect gitignore rules\n        FileSystem-->>ActionExecutor: Directory Listing\n    \n    else Action_FindFiles\n        ActionExecutor->>FileSystem: Find Files by Pattern\n        Note over ActionExecutor, FileSystem: Recursive search with name pattern\u003Cbr/>Respect gitignore rules\n        FileSystem-->>ActionExecutor: File Paths List\n    \n    else Action_Grep\n        ActionExecutor->>FileSystem: Search Text Pattern\n        Note over ActionExecutor, FileSystem: Recursive text search\u003Cbr/>Case sensitive/insensitive option\n        FileSystem-->>ActionExecutor: Search Results\n    \n    else Action_NewCheckpoint/Action_RunMCPTool\n        ActionExecutor->>ActionExecutor: No-op Action\n        Note over ActionExecutor: Returns empty success result\n    end\n\n\n    ActionExecutor-->>Runner: Action Result (string)\n    \n    alt Result Size Check\n        Runner->>Runner: Check if result > 4MB\n        Note over Runner: If result exceeds MaxMessageSize (4MB)\u003Cbr/>Replace with error message about size limit\n    end\n\n\n    Runner->>DWE: ActionResponse\n    DWE->>DWS: ClientEvent{ActionResponse}\n    Note over DWE, DWS: ActionResponse:\u003Cbr/>- RequestID (matches Action.RequestID)\u003Cbr/>- Response (execution result string)\nend\n\n\n%% Workflow Completion\nDWE->>DWS: CloseSend()\nNote 
over DWE, DWS: Signal end of workflow execution\n\n\n%% Analytics and Cleanup\nRunner->>Runner: Send Analytics Event (Finish)\nDWE->>DWE: Token Revocation (if enabled)\nDWE->>DWS: Close gRPC Connection\n```\n\n\nThis diagram reveals several important architectural insights that would have taken considerable time to extract manually:\n\n* **Bidirectional communication:** The workflow executor both initiates requests and responds to service actions. \n* **Rich payload structure:** Each action type has specific parameters and expected responses.  \n* **Multiple integration points:** The executor interacts with local filesystem, Git repositories, and GitLab APIs.  \n* **Comprehensive action set:** Nine different action types handle everything from file operations to HTTP requests.  \n* **Proper lifecycle management:** Clear connection establishment and teardown patterns.\n\nWhat impressed me most was how the agent automatically included the detailed payload structures for each action type. This level of detail transforms the diagram from a high-level overview into actionable documentation that other developers can immediately use.\n\n## Looking ahead\n\nThis demonstration represents just one use case for GitLab Duo Agent Platform. 
The same contextual understanding and collaborative approach that made documentation generation seamless can be applied to:\n\n* **Code reviews:** Agents can analyze merge requests with full project context  \n* **Testing:** Generate comprehensive test suites based on actual usage patterns  \n* **Debugging:** Trace issues across multiple services and components  \n* **Security scanning:** Identify vulnerabilities with understanding of your specific architecture  \n* **CI/CD optimization:** Improve pipeline performance based on historical data\n\nGitLab Duo Agent Platform will enter public beta soon so [join the wait list today](https://about.gitlab.com/gitlab-duo/agent-platform/).\n\nStay tuned to the [GitLab Blog](https://about.gitlab.com/blog/) and social channels for additional updates. GitLab Duo Agent Platform is evolving rapidly with specialized agents, custom workflows, and community-driven extensions on the roadmap.\n\n## Learn more\n\n- [Agentic AI guides and resources](https://about.gitlab.com/blog/agentic-ai-guides-and-resources/)\n- [GitLab Duo Agent Platform: What’s next for intelligent DevSecOps](https://about.gitlab.com/blog/gitlab-duo-agent-platform-what-is-next-for-intelligent-devsecops/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n- [From vibe coding to agentic AI: A roadmap for technical leaders](https://about.gitlab.com/the-source/ai/from-vibe-coding-to-agentic-ai-a-roadmap-for-technical-leaders/)\n","content:en-us:blog:accelerate-learning-with-gitlab-duo-agent-platform.yml","Accelerate Learning With Gitlab Duo Agent 
Platform","en-us/blog/accelerate-learning-with-gitlab-duo-agent-platform.yml","en-us/blog/accelerate-learning-with-gitlab-duo-agent-platform",{"_path":821,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":822,"content":828,"config":836,"_id":838,"_type":13,"title":839,"_source":15,"_file":840,"_stem":841,"_extension":18},"/en-us/blog/actioning-security-vulnerabilities-in-gitlab-premium",{"title":823,"description":824,"ogTitle":823,"ogDescription":824,"noIndex":6,"ogImage":825,"ogUrl":826,"ogSiteName":669,"ogType":670,"canonicalUrls":826,"schema":827},"How to action security vulnerabilities in GitLab Premium","Learn step-by-step how to process detected vulnerabilities and spawn merge request approval rules from critical vulnerabilities.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099637/Blog/Hero%20Images/Blog/Hero%20Images/security-pipelines_security-pipelines.jpg_1750099637178.jpg","https://about.gitlab.com/blog/actioning-security-vulnerabilities-in-gitlab-premium","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to action security vulnerabilities in GitLab Premium\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sam Morris\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2023-03-13\",\n      }",{"title":823,"description":824,"authors":829,"heroImage":825,"date":832,"body":833,"category":814,"tags":834},[830,831],"Sam Morris","Noah Ing","2023-03-13","\n\nGitLab Premium features several security scanners you can leverage to detect vulnerabilities. However, when you incorporate the scanners into your project pipelines and the scanning job succeeds, you'll want feedback on whether you are introducing vulnerabilities into the codebase. 
This tutorial provides a mechanism to require a merge request approval if a scanner available on GitLab Premium finds a critical vulnerability.\n\n*While this tutorial shows how to add some process around actioning vulnerabilities, we have more robust, governed, and user-friendly functionality available in GitLab Ultimate called a [Scan Result Policy](https://docs.gitlab.com/ee/user/application_security/policies/scan-result-policies.html). The solution outlined here does not seek to replace that functionality, but rather augment the scan results available in GitLab Premium. If you are an Ultimate user or if you want to compare the two experiences, then you should check out [this video introduction](https://www.youtube.com/watch?v=w5I9gcUgr9U&ab_channel=GitLabUnfiltered) instead.*\n\nLearn how to do the following:\n\n1. Set up a .gitlab-ci.yml\n2. Add in a vulnerability processing script\n3. Require approval if vulnerabilities are found \n\n### Prerequisites\n\n- A project with GitLab Premium\n- A gitlab-ci.yml\n- A project access token\n- Basic knowledge of Python\n- 5 minutes (or less)\n\n## Setup the gitlab-ci.yml \n\nThis is how the GitLab CI pipeline of our test project looks visually. 
Below we will break down the individual stages.\n\nAdd the following to your .gitlab-ci.yml:\n\n```yaml\nsecret_detection:\n  artifacts:\n    paths:\n      - gl-secret-detection-report.json\n\nprocess_secret_detection:\n   image: python:3.7-alpine3.9\n   stage: process_vulns\n   needs:\n    - job: secret_detection\n      artifacts: true\n   before_script:\n     - pip install python-gitlab\n   script:\n     - python3 process_vulns.py gl-secret-detection-report.json $PROJECT_ACCESS_TOKEN $CI_PROJECT_ID $CI_COMMIT_SHA\n```\n\nA breakdown of what is going on above:\n- gl-secret-detection-report.json needs to be overridden so it’s being stored as an artifact in the secret_detection job.\n- The process_secret_detection job is dependent on secret_detection's artifact so we have added a needs keyword requiring successful completion of the secret_detection job.\n- pip installs the python-gitlab dependency so that the process_vulns.py can leverage GitLab API calls.\n- The process_vulns.py is taking in four arguments:\n   - gl-secret-detection-report.json is the JSON report produced from the secret_detection scanner. If you would like to take in another report this will need to be modified.\n   - $PROJECT_ACCESS_TOKEN needs to be added; review the instructions on creating a project access token in the next step.\n   - $CI_PROJECT_ID and $CI_COMMIT_SHA are both GitLab CI environment variables that will automatically be inferred.\n\n### Create a project access token\n\nTo create a project access token:\n1. On the top bar, select Main menu > Projects and find your project.\n2. On the left sidebar, select Settings > Access Tokens.\n3. Enter a name. The token name is visible to any user with permissions to view the project.\n4. Optional. Enter an expiry date for the token. The token expires on that date at midnight UTC. An instance-wide maximum lifetime setting can limit the maximum allowable lifetime in self-managed instances.\n5. Select a role for the token.\n6. 
Select the desired scopes.\n7. Select Create project access token.\n8. Add this newly created project access token to your CI/CD variables in your project settings!\n\n## Add in the vulnerability processing script\n\n[The process_vulns.py script can be found here.](https://gitlab.com/gl-demo-premium-smorris/secure-premium-app/-/blob/main/process_vulns.py) Copy that file into your project.\n\nThe goal of this script is to require approval from an author (or group of authors) if a critical vulnerability is found.\n\n**Note:** You will need to [change the user ID in the process_vulns.py](https://gitlab.com/gl-demo-premium-smorris/secure-premium-app/-/blame/main/process_vulns.py#L40) to match the user ID of your designated Approver at your organization.\n\nThe following is a breakdown of what the script is doing:\n\n- JSON security reports are loaded in; if there are any vulnerabilities, they are parsed.\n- An authentication with GitLab is run using the project access token to interact with the project.\n- If vulnerabilities are not found, then it will print to the GitLab CI Logs: “No vulnerabilities are found.”\n- If a critical vulnerability is found, then it will require an approval.\n\nRun the pipeline and voila! 
Your pipeline now requires approvers if a critical vulnerability is found!\n\n### Demo\n\nWatch a video demonstration of how to action security vulnerabilities in GitLab Premium, presented by Sam Morris:\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/Cld36OZrLFo\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\" allowfullscreen>\u003C/iframe>\n\n#### Caveats\n- This is mimicking a Scan Result Policy; it is not a replacement.\n- This currently only requires approval for a critical vulnerability, and each new rule would have to be added to the script.\n- This script lives within the same location as your project, so there is no restriction on who can modify the script, breaking separation of duties at scale.\n- Approval rules are not removed once the vulnerability is fixed.\n- Approvers' IDs need to be hardcoded and maintained in the script file.\n- Since there is no vulnerability record generated, you cannot track the vulnerabilities over time in your application.\n- Vulnerabilities are not fed into a report or security dashboard, so this only reports merge request vulnerabilities.\n\n## References\n- [Create a project access token](https://docs.gitlab.com/ee/user/project/settings/project_access_tokens.html#create-a-project-access-token)\n- [Setting up CI/CD variables](https://docs.gitlab.com/ee/ci/variables/#define-a-cicd-variable-in-the-ui)\n- [Secure Premium app project](https://gitlab.com/gl-demo-premium-smorris/secure-premium-app/-/blob/main/process_vulns.py)\n\n## Related posts\n- [GitLab's commitment to enhanced application security in the modern DevOps world](/blog/security-gitlab-15/)\n- [How to become more productive with Gitlab CI](/blog/how-to-become-more-productive-with-gitlab-ci/)\n- [GitLab CI DRY Development](/blog/keeping-your-development-dry/)\n\n_Cover image by [Christopher 
Burns](https://unsplash.com/@christopher__burns?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://www.unsplash.com)._",[814,835,771,772,9],"DevSecOps",{"slug":837,"featured":6,"template":684},"actioning-security-vulnerabilities-in-gitlab-premium","content:en-us:blog:actioning-security-vulnerabilities-in-gitlab-premium.yml","Actioning Security Vulnerabilities In Gitlab Premium","en-us/blog/actioning-security-vulnerabilities-in-gitlab-premium.yml","en-us/blog/actioning-security-vulnerabilities-in-gitlab-premium",{"_path":843,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":844,"content":850,"config":857,"_id":859,"_type":13,"title":860,"_source":15,"_file":861,"_stem":862,"_extension":18},"/en-us/blog/agentic-ai-guides-and-resources",{"title":845,"description":846,"ogTitle":845,"ogDescription":846,"noIndex":6,"ogImage":847,"ogUrl":848,"ogSiteName":669,"ogType":670,"canonicalUrls":848,"schema":849},"Agentic AI guides and resources","Learn everything you need to know about agentic AI, including what it is, how it works, why it levels up your DevSecOps environment, and best practices for implementation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749658912/Blog/Hero%20Images/blog-image-template-1800x945__20_.png","https://about.gitlab.com/blog/agentic-ai-guides-and-resources","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Agentic AI guides and resources\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2025-05-07\",\n      }",{"title":845,"description":846,"authors":851,"heroImage":847,"date":853,"body":854,"category":702,"tags":855,"updatedDate":856},[852],"GitLab","2025-05-07","## Defining agentic AI\n\nAgentic AI is a type of artificial intelligence that leverages advanced language models and natural language processing to take independent action. 
Unlike traditional generative AI tools that require constant human direction, these systems can understand requests, make decisions, and execute multi-step plans to achieve goals. They tackle complex tasks by breaking them into manageable steps and employ adaptive learning to modify their approach when facing challenges.\n\n[Learn more about agentic AI](https://about.gitlab.com/topics/agentic-ai/)\n\n## Agentic AI insights\n- [GitLab Duo Agent Platform Public Beta: Next-gen AI orchestration and more](https://about.gitlab.com/blog/gitlab-duo-agent-platform-public-beta/) — Introducing the DevSecOps orchestration platform designed to unlock asynchronous collaboration between developers and AI agents.\n- [GitLab Duo Agent Platform: What's next for intelligent DevSecOps](https://about.gitlab.com/blog/gitlab-duo-agent-platform-what-is-next-for-intelligent-devsecops/) — GitLab Duo Agent Platform, a DevSecOps orchestration platform for humans and AI agents, leverages agentic AI for collaboration across the software development lifecycle.\n- [From vibe coding to agentic AI: A roadmap for technical leaders](https://about.gitlab.com/the-source/ai/from-vibe-coding-to-agentic-ai-a-roadmap-for-technical-leaders/) — Discover how to implement vibe coding and agentic AI in your development process to increase productivity while maintaining code quality and security.\n- [Emerging agentic AI trends reshaping software development](https://about.gitlab.com/the-source/ai/emerging-agentic-ai-trends-reshaping-software-development/) — Discover how agentic AI transforms development from isolated coding to intelligent workflows that enhance productivity while maintaining security.\n- [Agentic AI: Unlocking developer potential at scale](https://about.gitlab.com/the-source/ai/agentic-ai-unlocking-developer-potential-at-scale/) — Explore how agentic AI is transforming software development, moving beyond code completion to create AI partners that proactively tackle complex tasks.\n- [Agentic AI, 
self-hosted models, and more: AI trends for 2025](https://about.gitlab.com/the-source/ai/ai-trends-for-2025-agentic-ai-self-hosted-models-and-more/) — Discover key trends in AI for software development, from on-premises model deployments to intelligent, adaptive AI agents.\n- [How agentic AI unlocks platform engineering potential](https://about.gitlab.com/the-source/ai/how-agentic-ai-unlocks-platform-engineering-potential/) — Explore how agentic AI elevates platform engineering by automating complex workflows and scaling standardization.\n\n## The agentic AI ecosystem\n- [AI-driven code analysis: The new frontier in code security](https://about.gitlab.com/topics/agentic-ai/ai-code-analysis/) \n- [DevOps automation & AI agents](https://about.gitlab.com/topics/agentic-ai/devops-automation-ai-agents/)\n- [AI-augmented software development: Agentic AI for DevOps](https://about.gitlab.com/topics/agentic-ai/ai-augmented-software-development/)\n\n## Best practices for implementing agentic AI\n\n- [Implementing effective guardrails for AI agents](https://about.gitlab.com/the-source/ai/implementing-effective-guardrails-for-ai-agents/) — Discover essential security guardrails for AI agents in DevSecOps, from compliance controls and infrastructure protection to user access management.\n\n## GitLab's agentic AI offerings\n\n### GitLab Duo with Amazon Q\n\n- [GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/) — The comprehensive AI-powered DevSecOps platform combined with the deepest set of cloud computing capabilities speeds dev cycles, increases automation, and improves code quality.\n- [DevSecOps + Agentic AI: Now on GitLab Self-Managed Ultimate on AWS](https://about.gitlab.com/blog/devsecops-agentic-ai-now-on-gitlab-self-managed-ultimate-on-aws/) — Start using AI-powered, DevSecOps-enhanced agents in your AWS GitLab Self-Managed Ultimate instance. 
Enjoy the benefits of GitLab Duo and Amazon Q in your organization.\n- [GitLab Duo with Amazon Q partner page](https://about.gitlab.com/partners/technology-partners/aws/)\n\nWatch GitLab Duo with Amazon Q in action:\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1075753390?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Technical Demo: GitLab Duo with Amazon Q\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n#### Guided tour\nClick on the image to start a tour of GitLab Duo with Amazon Q:\n\n[![GitLab Duo with Amazon Q interactive tour](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673568/Blog/Content%20Images/Screenshot_2025-05-07_at_7.24.45_AM.png)](https://gitlab.navattic.com/duo-with-q)\n\n#### GitLab Duo with Amazon Q tutorials\n- [Enhance application quality with AI-powered test generation](https://about.gitlab.com/blog/enhance-application-quality-with-ai-powered-test-generation/) — Learn how GitLab Duo with Amazon Q improves the QA process by automatically generating comprehensive unit tests.\n- [GitLab Duo + Amazon Q: Transform ideas into code in minutes](https://about.gitlab.com/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes/) — The new GitLab Duo with Amazon Q integration analyzes your issue descriptions and automatically generates complete working code solutions, accelerating development workflows.\n- [Accelerate code reviews with GitLab Duo and Amazon Q](https://about.gitlab.com/blog/accelerate-code-reviews-with-gitlab-duo-and-amazon-q/) — Use AI-powered agents to optimize code reviews by automatically analyzing merge requests and providing comprehensive feedback on bugs, readability, and coding standards.\n- [Speed up 
code reviews: Let AI handle the feedback implementation](https://about.gitlab.com/blog/speed-up-code-reviews-let-ai-handle-the-feedback-implementation/) — Discover how GitLab Duo with Amazon Q automates the implementation of code review feedback through AI, transforming a time-consuming manual process into a streamlined workflow.\n\n### GitLab Duo Agentic Platform\n- [GitLab Duo Chat gets agentic AI makeover](https://about.gitlab.com/blog/gitlab-duo-chat-gets-agentic-ai-makeover/) — Our new Duo Chat experience, currently an experimental release, helps developers onboard to projects, understand assignments, implement changes, and more.\nWatch GitLab Duo Agent Platform in action:\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1095679084?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media; web-share\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Agent Platform Demo Clip\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n#### GitLab Agent Platform (and Duo Workflow) tutorials and use cases\n- [Accelerate learning with GitLab Duo Agent Platform](https://about.gitlab.com/blog/accelerate-learning-with-gitlab-duo-agent-platform/) — Learn how agentic AI helped generate comprehensive gRPC documentation in minutes, not hours.\n- [Fast and secure AI agent deployment to Google Cloud with GitLab](https://about.gitlab.com/blog/fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab/)\n\n- [Refactoring JavaScript to TypeScript with GitLab Duo Workflow](https://about.gitlab.com/blog/refactoring-javascript-to-typescript-with-gitlab-duo-workflow/)\n\n- [Automating tedious coding tasks with GitLab Duo Workflow](https://about.gitlab.com/blog/automate-tedious-coding-tasks-with-gitlab-duo-workflow/) — See how agentic AI can reduce 
time spent on repetitive tasks, freeing you up to focus on developing innovative solutions and shipping the next big thing.\n\n- [Use GitLab Duo Workflow to improve application quality assurance](https://about.gitlab.com/blog/use-gitlab-duo-workflow-to-improve-application-quality-assurance/) — Learn step-by-step how to add unit tests to a Java application using agentic AI (includes a video tutorial).\n\n- [Solving complex challenges with GitLab Duo Workflow](https://about.gitlab.com/blog/solving-complex-challenges-with-gitlab-duo-workflow/) — Learn how a member of the GitLab Customer Success Management team uses agentic AI for real-world problem-solving, including addressing Helm chart limits in the package registry.\n\n## Learn more with GitLab University\n\n- [Get Started with GitLab Duo coursework](https://university.gitlab.com/pages/ai)\n- [GitLab Duo Enterprise Learning Path](https://university.gitlab.com/learning-paths/gitlab-duo-enterprise-learning-path)\n\n## More AI resources\n\n- [2024 Global DevSecOps Survey: Navigating AI maturity in DevSecOps](https://about.gitlab.com/developer-survey/2024/ai/)\n- [The Role of AI in DevOps](https://about.gitlab.com/topics/devops/the-role-of-ai-in-devops/)\n- [The latest AI/ML articles from GitLab](https://about.gitlab.com/blog/categories/ai-ml/)\n- [GitLab Duo](https://about.gitlab.com/gitlab-duo/)",[704,478,9],"2025-06-10",{"slug":858,"featured":90,"template":684},"agentic-ai-guides-and-resources","content:en-us:blog:agentic-ai-guides-and-resources.yml","Agentic Ai Guides And 
Resources","en-us/blog/agentic-ai-guides-and-resources.yml","en-us/blog/agentic-ai-guides-and-resources",{"_path":864,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":865,"content":871,"config":878,"_id":880,"_type":13,"title":881,"_source":15,"_file":882,"_stem":883,"_extension":18},"/en-us/blog/amazon-linux-2-support-and-distro-specific-packages",{"title":866,"description":867,"ogTitle":866,"ogDescription":867,"noIndex":6,"ogImage":868,"ogUrl":869,"ogSiteName":669,"ogType":670,"canonicalUrls":869,"schema":870},"Amazon Linux 2 support and distro-specific packages for GitLab","Learn how to do early testing as well as how to peg your automation to the EL 7 packages until you are able to properly integrate the changes into your automation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682299/Blog/Hero%20Images/gitlab-blog-banner.png","https://about.gitlab.com/blog/amazon-linux-2-support-and-distro-specific-packages","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Amazon Linux 2 support and distro-specific packages for GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2022-05-02\",\n      }",{"title":866,"description":867,"authors":872,"heroImage":868,"date":873,"body":874,"category":875,"tags":876},[675],"2022-05-02","\n\nGitLab’s Distribution Engineering team has been hard at work getting Amazon Linux 2 distro-specific packages ready in preparation for GitLab’s official support of Amazon Linux 2. Starting with Version 15.0 of GitLab, Amazon Linux 2 is a supported distro and packages are available for both x86 and Graviton ARM architectures.\n\n## What is Amazon Linux 2?\n\nAmazon Linux 2 is the next-generation Amazon Linux operating system that provides a modern application environment with the most recent enhancements from the Linux community alongside long-term support. 
Amazon Linux 2 is accessible as a virtual machine image for on-premises development and testing. This lets you easily develop, test, and certify your applications right from your local development environment. \n\nAccording to the AWS FAQ page for Amazon Linux 2, the primary elements of this latest version of the operating system include:\n\n1. A Linux kernel tuned for performance on Amazon EC2.\n\n2. A set of core packages including systemd, GCC 7.3, Glibc 2.26, Binutils 2.29.1 that receive Long Term Support (LTS) from [AWS](/blog/deploy-aws/).\n\n3. An extras channel for rapidly evolving technologies that are likely to be updated frequently and outside the Long Term Support (LTS) model.\n\nAmazon Linux 2 has a support lifespan through June 20, 2024, to allow enough time for users to migrate to Amazon Linux 2022.\n\n\n## Safely moving forward to Amazon Linux 2 packages from EL7\n\nWhile Amazon Linux 2 has not been officially supported before 15.0, as a convenience to customers who wanted to use yum and RPM packages to install the EL7 packages, GitLab configured a workaround in our packaging services to direct Amazon Linux 2 yum requests to the EL7 packages. 
If you’ve been using GitLab’s yum repo registration script, you many not know that you were using EL7 packages and not distro-specific packages.\n\nThis workaround will be deprecated and requests from Amazon Linux 2 will get the distro-specific packages with the release of GitLab 15.3.0 on August 22, 2022.\n\nAs a convenience for those of you who have automation that depends directly on this workaround, we wanted to provide you with information on how to do early testing as well as how to peg your automation to the EL 7 packages until you are able to properly integrate the changes into your automation.\n\nGitLab documentation demonstrates how to call our managed yum repository setup scripts by downloading the latest copy and running it directly in [the instructions for installing instances](https://about.gitlab.com/install/#centos-7) and [the instructions for installing runners](https://docs.gitlab.com/runner/install/linux-repository.html).\n\nAny organization using GitLab’s EL 7 packages for Amazon Linux 2 will want to test with - and update to - the distro-specific packages as soon as possible as GitLab will only be testing Amazon Linux 2 with the Amazon Linux 2 specific packages going forward.\n\nWe also understand that the timing of the testing and migration to these packages must be done in a coordinated cutover so that the package type does not change in your existing stacks without you having made any changes. This can be more important if a GitLab stack has undergone platform qualification for compliance purposes.\n\nAmazon Linux 2 specific packages are only available for GitLab 14.9.0 and later. If your automation depends directly on GitLab’s repo configuration script and it is still pegged to a GitLab version prior to 14.9.0 when this change becomes GA, then action must be taken to prevent breaking that automation. 
We have devised an idempotent two-line script solution that you can put in place now to prevent disruption if you are still on a pre-14.9.0 version at the time the new behavior of `script.rpm.sh` becomes GA on August 22, 2022 with the release of GitLab 15.3.0.\n\nGitLab rake-based backup and restore will continue to work seamlessly across the distro-specific package changes if you have to restore to your Amazon Linux 2 built stack from an EL7 backup. If you are using third-party backup, you may wish to trigger a new backup immediately after transitioning to the new distro packages to avoid the scenario altogether.\n\n## Amazon Linux 2 packages for building GitLab instances before 15.3.0\n\nThe following code inserts two lines of code between those originally outlined in [the instructions for installing using RPM packages](/install/#centos-7). The first one (starts with `sed`) splices in the Amazon Linux 2 yum repo endpoint edits into the repository configuration file created by script.rpm.sh. The second one (starts with `if yum`) cleans the yum cache if the package was already installed so that the new location will be used.\n\n> Sudo note: If you are using these commands interactively under the default SSH or SSM session manager user, then using `sudo su` before running this code is necessary. If you are using these commands in Infrastructure as Code (e.g. CloudFormation userdata scripts), then sudo may cause ‘command not found’ errors when the user running automation is already root equivalent. 
Be mindful about using interactively tested commands directly in your automation.\n\n```bash\n#Existing packaging script from https://about.gitlab.com/install/#centos-7\ncurl https://packages.gitlab.com/install/repositories/gitlab/gitlab-ee/script.rpm.sh | sudo bash\n\n#Patch to preview and/or peg Amazon Linux 2 specific packages\nsed -i \"s/\\/el\\/7/\\/amazon\\/2/g\" /etc/yum.repos.d/gitlab_gitlab*.repo\n\n#Reset the cache if the package was previously installed (not needed for installs onto a clean machine)\nif yum list installed gitlab-ee; then yum clean all ; yum makecache; fi\n\n#Existing install command (remove \"-y\" to validate package and arch mapping before install)\nyum install gitlab-ee -y\n```\n\n> Notice in this output that the **Version** ends in `.amazon2`. In this case the **Arch** is `aarch64` - indicating 64-bit Graviton ARM.\n\n![Resolved GitLab Dependencies](https://about.gitlab.com/images/blogimages/2022-04-amazon-linux-2/gl-instance-dependencies-resolved.png)\n\n### Moving to Amazon Linux 2 packages early for a seamless post-GA transition\n\nWhen the script.rpm.sh script is cut over to always point Amazon Linux 2 to the new distro-specific packages, the sed command will no longer be necessary. However, sed is also idempotent and will not make edits if the search text is not found. 
This means you can use the sed command to switch over early, but not have to worry about a breaking change when the `script.rpm.sh` is updated.\n\n### Pegging EL 7 and/or a GitLab version prior to 14.9.0 for a seamless post-GA transition\n\nIf your automation is pegged to an earlier version of GitLab, you will need to keep using EL7 packages, and, in fact, after the cutover you would need to implement the opposite command (which is also idempotent to be implemented now).\n\n```bash\n#Patch to peg GitLab Version to EL 7 Packages (only does something after GA of gitlab repo script)\nsed -i \"s/\\/amazon\\/2/\\/el\\/7/g\" /etc/yum.repos.d/gitlab_gitlab*.repo\n\n#Reset the cache if the package was previously installed (not needed for installs onto a clean machine)\nif yum list installed gitlab-ee; then yum clean all ; yum makecache; fi\n```\n\nJust like the sed command for taking distro-specific packages early, this command can be implemented immediately with no bad effects - which will seamlessly keeping your automation pegged to the EL 7 packages when `script.rpm.sh` is updated.\n\n## Amazon Linux 2 package for building GitLab Runners before 15.3.0\n\nThe following code inserts two lines of code between those originally [outlined in the instructions](https://docs.gitlab.com/runner/install/linux-repository.html). The first one (starts with `sed`) splices in the Amazon Linux 2 yum repo endpoint edits into the repository configuration file created by script.rpm.sh. The second one (starts with `if yum`) cleans the yum cache if the package was already installed so that the new location will be used.\n\n> Sudo note: If you are using these commands interactively under the default SSH or SSM session manager user, then using `sudo su` before running this code is necessary. If you are using these commands in Infrastructure as Code (e.g. CloudFormation userdata scripts), then sudo may cause ‘command not found’ errors when the user running automation is already root equivalent. 
Be mindful about using interactively tested commands directly in your automation.\n\n```bash\n#Existing packaging script from https://docs.gitlab.com/runner/install/linux-repository.html\ncurl -L \"https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.rpm.sh\" | sudo bash\n\n#Patch to test or peg Amazon Linux 2 specific packages\nsed -i \"s/\\/el\\/7/\\/amazon\\/2/g\" /etc/yum.repos.d/runner_gitlab*.repo\n\n#Reset the cache if the package was previously installed (not needed for installs onto a clean machine)\nif yum list installed gitlab-runner; then yum clean all ; yum makecache; fi\n\n#Existing install command (remove \"-y\" to validate package and arch mapping before install)\nyum install gitlab-runner -y\n```\n\n> Notice in this output that **Version** is not distro-specific. In this case the **Arch** is `aarch64` - indicating 64-bit Graviton ARM.\n\n![Resolved GitLab Runner Dependencies](https://about.gitlab.com/images/blogimages/2022-04-amazon-linux-2/gl-runner-dependencies-resolved.png)\n\n## Pegging to EL 7 and/or a GitLab Runner version prior to 14.9.1 for a seamless post-GA transition\n\nThe underlying package for EL 7 and Amazon Linux 2 is literally a copy of the same package. However, the Amazon Linux 2 endpoint for Runner RPM packages have only been uploaded from GitLab Runner 14.9.1 and later, so if you have runners that need to be on an earlier version, you would need to stay pointed at EL 7 for those packages to continue to resolve as available. 
The following code shows how to do that for GitLab Runner.\n\n```bash\n#Patch to peg GitLab Version to EL 7 Packages (only does something after GA of gitlab repo script)\nsed -i \"s/\\/amazon\\/2/\\/el\\/7/g\" /etc/yum.repos.d/runner_gitlab*.repo\n\n#Reset the cache if the package was previously installed (not needed for installs onto a clean machine)\nif yum list installed gitlab-runner; then yum clean all ; yum makecache; fi\n```\n\n## Need-to-know takeaways\n\n- Amazon Linux 2 is a supported distro for GitLab instances and runner as of the release of version 15.0 on May 22, 2022.\n- Amazon Linux 2 packages are available for x86 and ARM for GitLab Version 14.9.0 and higher. (Prior to 14.9.0 the EL7 packages must be used as they have a long version history).\n- This is the first availability of ARM RPM packages of GitLab for Amazon Linux 2.\n- In 15.3 (August 22, 2022), the script.rpm.sh will automatically start directing to the Amazon Linux 2 packages where it had previously directed Amazon Linux 2 yum requests to the EL7 packages.\n- It is common to have taken a dependency directly on the latest version of this GitLab script in other automation.\n- Before the GA cutover date of August 22, 2022 (15.3.0 GitLab Release), for these scripts, you have the opportunity to pre-test these packages and determine whether they create any issues with your automation or GitLab configuration.\n- You can also peg to the Amazon Linux 2 packages early or peg to the EL7 packages in advance if you find problems that you need more time to resolve. Both of these pegging types are idempotent, meaning the code changes do not do anything that causes problems after the change over happens.\n- Existing Amazon Linux 2 installations that were installed using the EL7 packages can use a regular yum upgrade command to start using the new Amazon Linux 2 packages. This operation may also be an upgrade of the product version at the same time. 
For existing installations you will need to patch the yum repo files as explained in this article in order to upgrade directly to Amazon Linux 2 from EL7 using packages. \n\n> **Note**\n> This blog post and linked pages contain information related to upcoming products, features, and functionality. It is important to note that the information presented is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog post and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab Inc.\n\n![AWS Partner Logo](https://about.gitlab.com/images/blogimages/2022-04-amazon-linux-2/awsgravitonready.png){: .right}\n","news",[877,771,772,9,794],"releases",{"slug":879,"featured":6,"template":684},"amazon-linux-2-support-and-distro-specific-packages","content:en-us:blog:amazon-linux-2-support-and-distro-specific-packages.yml","Amazon Linux 2 Support And Distro Specific Packages","en-us/blog/amazon-linux-2-support-and-distro-specific-packages.yml","en-us/blog/amazon-linux-2-support-and-distro-specific-packages",{"_path":885,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":886,"content":892,"config":898,"_id":900,"_type":13,"title":901,"_source":15,"_file":902,"_stem":903,"_extension":18},"/en-us/blog/android-cicd-with-gitlab",{"title":887,"description":888,"ogTitle":887,"ogDescription":888,"noIndex":6,"ogImage":889,"ogUrl":890,"ogSiteName":669,"ogType":670,"canonicalUrls":890,"schema":891},"Tutorial: Android CI/CD with GitLab","Learn how to create an automated Android CI/CD pipeline using GitLab and fastlane.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669316/Blog/Hero%20Images/angela-compagnone-4Iyg6cNU7sI-unsplash.jpg","https://about.gitlab.com/blog/android-cicd-with-gitlab","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Android CI/CD with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darby Frey\"}],\n        \"datePublished\": \"2023-06-13\",\n      }",{"title":887,"description":888,"authors":893,"heroImage":889,"date":895,"body":896,"category":769,"tags":897},[894],"Darby Frey","2023-06-13","\n\nMention the word keystore and all Android developers in a 5km radius will suddenly have a small feeling of panic. Attempting to automate a [CI/CD](https://docs.gitlab.com/ee/ci/) pipeline to deploy an app can be frustrating, and configuring Google Play access and code signing is at the heart of the problem.\n\nBut fear not! GitLab Mobile DevOps is here to make this process easier and faster, and I am here to guide you.\n\n[GitLab Mobile DevOps](https://docs.gitlab.com/ee/ci/mobile_devops.html) is a collection of features built right into GitLab to solve the biggest challenges mobile teams face in establishing a DevOps practice.\n\nIn this blog post, I’ll demonstrate how to set up an automated CI/CD pipeline using GitLab and [fastlane](https://fastlane.tools/).\n\n## Prerequisites \nTo get started, there are a few prerequisites you’ll need:\n\n* A Google Play developer account - [https://play.google.com/console](https://play.google.com/console)\n* Ruby and Android Studio installed on your local machine [https://docs.fastlane.tools/getting-started/android/setup/](https://docs.fastlane.tools/getting-started/android/setup/)\n\n> Try your hand at the [iOS CI/CD for GitLab tutorial](https://about.gitlab.com/blog/ios-cicd-with-gitlab/)\n\n## Reference project\nFor this tutorial, we’ll use the Android demo project for reference: [https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/android_demo](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/android_demo).\n\n## Install fastlane\nIf you haven’t done so yet, the first step will 
be to install fastlane. Do this by creating a file in the root of your project called `Gemfile`. Give it the following contents:\n\n```ruby\nsource \"https://rubygems.org\"\n\ngem \"fastlane\"\n```\n\nThen, from the terminal in your project, run:\n\n```\nbundle install.\n```\n\nThis command will install fastlane, and all of its related dependencies.\n\n## Initialize fastlane\nNow that fastlane is installed, we can set it up for our project. Run the following command from the terminal in your project. You’ll be asked to enter your package name, so enter that. When prompted for the JSON secret file, you can skip that for now, and you can answer \"no\" to the questions about metadata management.\n\n```\nbundle exec fastlane init\n```\n\n![Initialize fastlane](https://about.gitlab.com/images/blogimages/2023-06-13-android-cicd-with-gitlab/fastlane-init.png)\n\nRunning this command will create a new folder in your project called `fastlane`. This folder will contain two files `Appfile` and `Fastfile`.\n\nThe Appfile contains the configuration information for the app, and the Fastfile has some sample code that we will replace later. See the fastlane docs for more information about the configuration details in the Appfile: [https://docs.fastlane.tools/advanced/Appfile/](https://docs.fastlane.tools/advanced/Appfile/).\n\n## Code signing\nNext are the steps for code signing.\n\n### Create a keystore\nThe next step is to create a keystore and properties files for code signing. 
Run the following command to generate a keystore in the project root called `release-keystore.jks`:\n\n```\nkeytool -genkey -v -keystore release-keystore.jks -storepass password -alias release -keypass password -keyalg RSA -keysize 2048 -validity 10000\n```\n\n![Create a keystore](https://about.gitlab.com/images/blogimages/2023-06-13-android-cicd-with-gitlab/keytool-genkey.png)\n\nMore information is available in the [keytool docs](https://download.java.net/java/early_access/loom/docs/specs/man/keytool.html).\n\nThe next step is to create a properties file to be used by [Gradle](https://gradle.org/_). Create a file in the project root called `release-keystore.properties`, with the following contents:\n\n```\nstoreFile=../release-keystore.jks\nkeyAlias=release\nkeyPassword=password\nstorePassword=password\n```\n\nAlso, be sure to add both files to your `.gitignore` file so they aren't committed to version control.\n\n### Configure Gradle\nNext, configure Gradle to use the newly created keystore. In the `app/build.gradle` file, add the following:\n\n**1.** Right after the plugins section, add:\n\n```\ndef keystoreProperties = new Properties()\ndef keystorePropertiesFile = rootProject.file('release-keystore.properties')\nif (keystorePropertiesFile.exists()) {\n    keystoreProperties.load(new FileInputStream(keystorePropertiesFile))\n}\n```\n\n**2.** Before Build Types, add:\n\n```\nsigningConfigs {\n    release {\n   \t keyAlias keystoreProperties['keyAlias']\n   \t keyPassword keystoreProperties['keyPassword']\n   \t storeFile keystoreProperties['storeFile'] ? file(keystoreProperties['storeFile']) : null\n   \t storePassword keystoreProperties['storePassword']\n    }\n}\n```\n\n**3.** Lastly, add the signingConfig to the release build type:\n\n```\nsigningConfig signingConfigs.release\n```\n\n## Upload keystore to GitLab secure files\nNext, upload your keystore files to GitLab so they can be used in CI/CD jobs. \n\n1. 
On the top bar, select **Menu > Projects** and find your project.\n1. On the left sidebar, select **Settings > CI/CD**.\n1. In the Secure Files section, select **Expand**.\n1. Select **Upload File**.\n1. Find the file to upload, select **Open**, and the file upload begins immediately. The file shows up in the list when the upload is complete.\n\nDo this for both the `release-keystore.jks` file and the `release-keystore.properties` file.\n\n![Upload Secure File](https://about.gitlab.com/images/blogimages/2023-06-13-android-cicd-with-gitlab/upload-secure-file.png)\n\n![List Secure Files](https://about.gitlab.com/images/blogimages/2023-06-13-android-cicd-with-gitlab/list-secure-files.png)\n\n## Create a CI/CD pipeline\n\nWith the configuration in place, now copy the contents of the .gitlab-ci.yml and fastlane/Fastfile below to the project.\n\nThis [.gitlab-ci.yml](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/android_demo/-/blob/main/.gitlab-ci.yml) has all the configuration needed to run the test, build, and beta jobs.\nThe [fastlane/Fastfile](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/android_demo/-/blob/main/fastlane/Fastfile) is an example that can be customized to specific project settings.\n\nNote: This fastlane configuration uses plugins. See the [docs](https://docs.fastlane.tools/plugins/using-plugins/) for instructions on how to configure your project for fastlane plugins.\n\n## Create an app in the Google Play Console\nNext, generate a build of your app locally and upload it to seed a new app entry in the Google Play Console. 
Run the following command locally:\n\n```\nbundle exec fastlane build\n```\n\nThis command will create a signed build of the app at\n\n```\nbuild/outputs/bundle/release/app-release.aab\n```\n\nWith the signed build ready to go, log in to the [Google Play Console](https://play.google.com/console) and create a new app and seed it with the initial build.\n\n## Configure Google Play integration\nThe last thing to set up is the Google Play integration in GitLab. To do so, first, create a Google service account.\n\n### Create a Google service account\nFollow the [instructions](https://docs.fastlane.tools/actions/supply/#setup) for setting up a service account in Google Cloud Platform and granting that account access to the project in Google Play.\n\n### Enable Google Play integration\nFollow the [instructions](https://docs.gitlab.com/ee/user/project/integrations/google_play.html) for configuring the Google Play integration by providing a package name and the JSON key file just generated for the service account.\n\nThis is a simplified CI/CD configuration that created three CI/CD jobs to run each of the lanes in fastlane on the GitLab Runners. The test and build jobs will run for all CI/CD pipelines, and the beta job will only be run on CI/CD pipelines on the main branch. The beta job is manually triggered, so you can control when the beta release is pushed to Google Play. \n\nWith these configurations in place, commit all of these changes and push them up to your project. 
The CI/CD pipeline will kick off, and you can see these jobs in action.\n",[773,108,9],{"slug":899,"featured":6,"template":684},"android-cicd-with-gitlab","content:en-us:blog:android-cicd-with-gitlab.yml","Android Cicd With Gitlab","en-us/blog/android-cicd-with-gitlab.yml","en-us/blog/android-cicd-with-gitlab",{"_path":905,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":906,"content":912,"config":920,"_id":922,"_type":13,"title":923,"_source":15,"_file":924,"_stem":925,"_extension":18},"/en-us/blog/annotate-container-images-with-build-provenance-using-cosign-in-gitlab-ci-cd",{"title":907,"description":908,"ogTitle":907,"ogDescription":908,"noIndex":6,"ogImage":909,"ogUrl":910,"ogSiteName":669,"ogType":670,"canonicalUrls":910,"schema":911},"Container image provenance with Cosign in GitLab CI/CD","Use GitLab pipelines to automate building, signing, and annotating Docker images. This tutorial shares code to show you how. Try it out in your own organization.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098395/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2823%29_2w6waL76KROjhJHM2vXet6_1750098395162.png","https://about.gitlab.com/blog/annotate-container-images-with-build-provenance-using-cosign-in-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Annotate container images with build provenance using Cosign in GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"João Pereira\"},{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2024-09-04\",\n      }",{"title":913,"description":908,"authors":914,"heroImage":909,"date":917,"body":918,"category":814,"tags":919},"Annotate container images with build provenance using Cosign in GitLab CI/CD",[915,916],"João Pereira","Tim Rizzi","2024-09-04","Container security has become a critical concern in software development. 
As organizations increasingly rely on containerized applications, ensuring the integrity and traceability of container images is paramount. Enhancing the security and traceability of your container images directly in your GitLab CI/CD pipeline can streamline your development process while significantly boosting your security posture.\n\nThis tutorial demonstrates setting up a GitLab pipeline to automate the process of building, signing, and annotating Docker images using Cosign and the GitLab container registry. By integrating these practices, you'll secure your images and ensure that each one is easily traceable, aligning with best practices in DevSecOps.\n\n## Background on container image security\n\nBefore we dive into the technical details, it's crucial to understand why container image security is so important. In [microservices](https://about.gitlab.com/topics/microservices/) and cloud-native applications, containers have become the standard for packaging and deploying software. However, this widespread adoption has also made containers an attractive target for cyber attacks.\n\nContainer image security is a vital component of the broader [software supply chain security](https://about.gitlab.com/blog/the-ultimate-guide-to-software-supply-chain-security/) concept. This encompasses all the tools, processes, and practices that ensure your software's integrity, authenticity, and security from development to deployment. By securing your container images, you're protecting your application and your entire software supply chain.\n\n## Introduction to Cosign\n\nEnter [Cosign](https://about.gitlab.com/blog/keyless-signing-with-cosign/), a tool designed to address these security concerns. Cosign is part of the Sigstore project, an open-source initiative aimed at improving the security of the software supply chain. 
Cosign allows developers to sign and verify container images, ensuring their integrity and authenticity.\n\nKey benefits of Cosign include:\n\n- easy integration with existing CI/CD pipelines\n- support for various signing methods, including keyless signing\n- ability to attach and verify arbitrary metadata to container images\n\nBy incorporating Cosign into your GitLab CI/CD pipeline, you're taking a significant step towards robust [DevSecOps](https://about.gitlab.com/topics/devsecops/) practices.\n\n## Benefits of image signing and annotation\n\nImage signing serves as a seal of authenticity for your container images. It helps prevent tampering and ensures that the image deployed in your production environment is precisely the one that passed through your secure build process.\n\nAnnotations, on the other hand, provide valuable metadata about the build process. This information is used for auditing and traceability. In a security incident, having detailed provenance data can significantly speed up the investigation and remediation process.\n\n## GitLab CI/CD pipeline configuration\n\nLet's look at an example `.gitlab-ci.yml` file that outlines the process of building, signing, and annotating a Docker image using Cosign:\n\n```yaml\nstages:\n  - build\n\nbuild_and_sign:\n  stage: build\n  image: docker:latest\n  services:\n    - docker:dind  # Enable Docker-in-Docker service to allow Docker commands inside the container\n  variables:\n    IMAGE_TAG: $CI_COMMIT_SHORT_SHA  # Use the commit short SHA as the image tag\n    IMAGE_URI: $CI_REGISTRY_IMAGE:$IMAGE_TAG  # Construct the full image URI with the registry, project path, and tag\n    COSIGN_YES: \"true\"  # Automatically confirm actions in Cosign without user interaction\n    FF_SCRIPT_SECTIONS: \"true\"  # Enables GitLab's CI script sections for better multi-line script output\n  id_tokens:\n    SIGSTORE_ID_TOKEN:\n      aud: sigstore  # Provide an OIDC token for keyless signing with Cosign\n  before_script:\n  
  - apk add --no-cache cosign jq  # Install Cosign (mandatory) and jq (optional)\n    - docker login -u \"gitlab-ci-token\" -p \"$CI_JOB_TOKEN\" \"$CI_REGISTRY\"  # Log in to the Docker registry using GitLab CI token\n  script:\n    # Build the Docker image using the specified tag and push it to the registry\n    - docker build --pull -t \"$IMAGE_URI\" .\n    - docker push \"$IMAGE_URI\"\n\n    # Retrieve the digest of the pushed image to use in the signing step\n    - IMAGE_DIGEST=$(docker inspect --format='{{index .RepoDigests 0}}' \"$IMAGE_URI\")\n\n    # Sign the image using Cosign with annotations that provide metadata about the build and tag annotation to allow verifying\n    # the tag->digest mapping (https://github.com/sigstore/cosign?tab=readme-ov-file#tag-signing)\n    - |\n      cosign sign \"$IMAGE_DIGEST\" \\\n        --annotations \"com.gitlab.ci.user.name=$GITLAB_USER_NAME\" \\\n        --annotations \"com.gitlab.ci.pipeline.id=$CI_PIPELINE_ID\" \\\n        --annotations \"com.gitlab.ci.pipeline.url=$CI_PIPELINE_URL\" \\\n        --annotations \"com.gitlab.ci.job.id=$CI_JOB_ID\" \\\n        --annotations \"com.gitlab.ci.job.url=$CI_JOB_URL\" \\\n        --annotations \"com.gitlab.ci.commit.sha=$CI_COMMIT_SHA\" \\\n        --annotations \"com.gitlab.ci.commit.ref.name=$CI_COMMIT_REF_NAME\" \\\n        --annotations \"com.gitlab.ci.project.path=$CI_PROJECT_PATH\" \\\n        --annotations \"org.opencontainers.image.source=$CI_PROJECT_URL\" \\\n        --annotations \"org.opencontainers.image.revision=$CI_COMMIT_SHA\" \\\n        --annotations \"tag=$IMAGE_TAG\"\n\n    # Verify the image signature using Cosign to ensure it matches the expected annotations and certificate identity\n    - |\n      cosign verify \\\n        --annotations \"tag=$IMAGE_TAG\" \\\n        --certificate-identity \"$CI_PROJECT_URL//.gitlab-ci.yml@refs/heads/$CI_COMMIT_REF_NAME\" \\\n        --certificate-oidc-issuer \"$CI_SERVER_URL\" \\\n        \"$IMAGE_URI\" | jq .  
# Use jq to format the verification output for easier readability\n```\n\nLet's break down this pipeline configuration and understand each part in detail.\n\n## Detailed explanation of the pipeline\n\n### 1. Setup and prerequisites\n\nThe pipeline starts by setting up the necessary environment:\n\n* It uses the `docker:latest` image and enables Docker-in-Docker service, allowing Docker commands to be run within the CI job.\n* It defines variables for the image tag and URI using GitLab CI/CD predefined variables.\n* It sets up an OIDC token for keyless signing with Cosign.\n* In the `before_script` section, it installs Cosign and jq (for JSON processing) and logs into the GitLab container registry.\n\n### 2. Building and pushing the image\n\nThe first step in the script is to build the Docker image and push it to the GitLab container registry:\n\n```yaml\n- docker build --pull -t \"$IMAGE_URI\" .\n- docker push \"$IMAGE_URI\"\n```\n\nThis creates the image using the current directory's Dockerfile and pushes it to the registry.\n\n### 3. Signing the image with Cosign\n\nAfter building and pushing the image, the pipeline signs it using Cosign:\n\n```yaml\n- IMAGE_DIGEST=$(docker inspect --format='{{index .RepoDigests 0}}' \"$IMAGE_URI\")\n- |\n  cosign sign \"$IMAGE_DIGEST\" \\\n    --annotations \"com.gitlab.ci.user.name=$GITLAB_USER_NAME\" \\\n    --annotations \"com.gitlab.ci.pipeline.id=$CI_PIPELINE_ID\" \\\n    # ... (other annotations) ...\n    --annotations \"tag=$IMAGE_TAG\"\n```\n\nThis step first retrieves the image digest and then uses Cosign to sign the image, adding several annotations.\n\n## Verifying the signature and annotations\n\nAfter signing the image, it's crucial to verify the signature and the annotations we've added. 
This verification step ensures that the provenance data attached to the image is correct and hasn't been tampered with.\n\nIn our pipeline, we've included a verification step using the `cosign verify` command:\n\n```yaml\n- |\n  cosign verify \\\n    --annotations \"tag=$IMAGE_TAG\" \\\n    --certificate-identity \"$CI_PROJECT_URL//.gitlab-ci.yml@refs/heads/$CI_COMMIT_REF_NAME\" \\\n    --certificate-oidc-issuer \"$CI_SERVER_URL\" \\\n    \"$IMAGE_URI\" | jq .\n```\n\nThis command verifies the signature and checks the annotations. Its output will show all the annotations we've added to the image during the signing process.\n\nHere's what you might see in your pipeline logs after running this command:\n\n![verifying the signature and checking annotations](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098404/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098404260.png)\n\nIn this output, you should see all the annotations we added earlier, including:\n\n* GitLab CI user name\n* Pipeline ID and URL\n* Job ID and URL\n* Commit SHA and reference name\n* Project path\n* Image source and revision\n\nBy verifying these annotations, you can ensure that the image's provenance data is intact and matches what you expect based on your build process. This verification step is crucial for maintaining the integrity of your software supply chain. It allows you to confirm that the image you're about to deploy has gone through your secure build process and has not been modified since it was signed.\n\n## Summary\n\nBy integrating Cosign into your GitLab CI/CD pipeline, you've taken a significant step toward securing your software supply chain. 
This setup not only automates securing and annotating your container images with build metadata but also ensures a transparent and traceable build process.\n\nThe benefits of this approach are numerous:\n\n- enhanced security through image signing\n- improved traceability with detailed build provenance data\n- automated verification process\n- alignment with DevSecOps best practices\n\nAs container security continues to be a critical concern in the software development lifecycle, implementing these practices puts you ahead of potential security threats and demonstrates a commitment to software integrity.\n\n## Try it in your organization\n\nNow that you've seen how to enhance your container security using Cosign in GitLab CI/CD, it's time to put this knowledge into practice:\n\n1. **Implement in your projects**: Adapt the provided `.gitlab-ci.yml` file to fit your specific needs.\n2. **Explore further**: Dive deeper into Cosign's capabilities. Consider exploring advanced features like policy enforcement or integration with vulnerability scanning tools.\n3. **Share your experience**: After implementing this in your projects, share your experience with your team or the wider GitLab community. Your insights could help others enhance their security practices.\n4. **Stay updated**: Container security is an evolving field. Check GitLab's blog and documentation for new features and best practices updates.\n5. **Contribute**: If you find ways to improve this process or encounter any issues, consider contributing to the GitLab or Cosign open-source projects.\n\nRemember, security is a journey, not a destination. By taking these steps, you're securing your containers and contributing to a more secure software ecosystem for everyone.\n\nStart implementing these practices in your GitLab projects today, and take your container security to the next level!\n\n> Get started today! 
Sign up for a [free 30-day trial of GitLab Ultimate](https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/blog&glm_content=default-saas-trial)!\n\n## Read more\n\n- [Next-generation GitLab container registry goes GA](https://about.gitlab.com/blog/next-generation-gitlab-container-registry-goes-ga/)\n- [A beginner's guide to container security](https://about.gitlab.com/topics/devsecops/beginners-guide-to-container-security/)\n- [DevSecOps basics, including security](https://about.gitlab.com/topics/devsecops/)\n- [What is CI/CD?](https://about.gitlab.com/topics/ci-cd/)\n",[814,9,678,680],{"slug":921,"featured":6,"template":684},"annotate-container-images-with-build-provenance-using-cosign-in-gitlab-ci-cd","content:en-us:blog:annotate-container-images-with-build-provenance-using-cosign-in-gitlab-ci-cd.yml","Annotate Container Images With Build Provenance Using Cosign In Gitlab Ci Cd","en-us/blog/annotate-container-images-with-build-provenance-using-cosign-in-gitlab-ci-cd.yml","en-us/blog/annotate-container-images-with-build-provenance-using-cosign-in-gitlab-ci-cd",{"_path":927,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":928,"content":934,"config":941,"_id":943,"_type":13,"title":944,"_source":15,"_file":945,"_stem":946,"_extension":18},"/en-us/blog/automate-tedious-coding-tasks-with-gitlab-duo-workflow",{"title":929,"description":930,"ogTitle":929,"ogDescription":930,"noIndex":6,"ogImage":931,"ogUrl":932,"ogSiteName":669,"ogType":670,"canonicalUrls":932,"schema":933},"Automate tedious coding tasks with GitLab Duo Workflow","See how agentic AI can reduce time spent on repetitive tasks, freeing you up to focus on developing innovative solutions and shipping the next big thing.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662465/Blog/Hero%20Images/GitLab_Duo_Workflow_Unified_Data_Store__1_.png","https://about.gitlab.com/blog/automate-tedious-coding-tasks-with-gitlab-duo-workflow","\n                        {\n        
\"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automate tedious coding tasks with GitLab Duo Workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jeff Park\"}],\n        \"datePublished\": \"2025-05-06\",\n      }",{"title":929,"description":930,"authors":935,"heroImage":931,"date":937,"body":938,"category":702,"tags":939},[936],"Jeff Park","2025-05-06","Working with large codebases often means spending significant time on repetitive tasks that, while necessary, don't really push your projects forward. The good news is that these tasks are great candidates to be completed with AI. Reducing the time spent on them will free you up to work on more important problems that you’re actually excited to tackle. With GitLab Duo Workflow, the time spent on these tasks will go from hours to minutes. \n\n[Duo Workflow](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/) is a powerful new agentic solution, currently in private beta, that lives in VS Code and is designed to help you complete complex development tasks. 
While many AI coding assistants are focused on helping developers write code, Duo Workflow understands your project structure, reads your files, and can make coordinated changes across your entire codebase.\n\nI created a demonstration that showcases how Duo Workflow can transform a tedious coding task into a streamlined process that saves you time and mental energy.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1081627484?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Automate tedious coding tasks with GitLab Duo Workflow\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## The challenge: Implementing a new lint rule\n\nIn this demo, we tackle a common scenario that many developers face: implementing a new lint rule and then updating multiple files across the codebase to comply with this rule. The specific issue involves validation errors occurring in several project files that need to be addressed consistently.\n\nRather than manually identifying and modifying each affected file one by one – a process that could take hours depending on the size of your codebase – we'll see how Duo Workflow can:\n\n1. Read and understand the details from an issue   \n2. Analyze the project structure to identify affected files  \n3. Create a comprehensive plan to implement the necessary changes  \n4. Draft a new lint rule to prevent future occurrences  \n5. Make consistent code changes across all relevant files  \n6. Stage the changes for your review before any commits are made\n\nA simple prompt initiates the process: \n\n\"Read through issue #1 in this project and submit code changes to resolve it. 
Be sure to look at each tool file and make all appropriate changes.\"\n\nFrom there, Duo Workflow takes over – reading the issue, analyzing the files, creating a plan, and implementing the solution – all while keeping me informed of its progress and reasoning.\n\n## Why this matters for your development process\n\nWhat's particularly powerful about Duo Workflow is how it maintains awareness of this wider context throughout the entire process. It's not just making text replacements based on a large language model's training data – it's understanding the code, making intelligent decisions, and proposing a complete solution that you maintain full control over.\n\nThis approach offers several key benefits:\n\n* **Consistency in implementation:** Apply changes uniformly across files  \n* **Time savings:** Focus your energy on creative problem-solving rather than repetitive tasks  \n* **Reduced context switching:** Complete complex tasks without leaving your IDE  \n* **Keeping a human in the loop:** Review all proposed modifications before committing\n\n## What's next\n\nGitLab Duo Workflow is part of our work to bring AI-powered capabilities to every stage of the software development lifecycle. While this demo focuses on code editing, the same approach can be applied to various development tasks:\n\n* Implementing new features based on issue descriptions  \n* Fixing bugs with comprehensive test coverage  \n* Refactoring legacy code to modern standards  \n* Creating documentation from codebase analysis\n\nWe believe that by automating repetitive tasks, Duo Workflow helps you focus on what matters most – solving interesting problems and creating innovative solutions for your users.\n\n> GitLab Duo Workflow is currently available in private beta for GitLab Ultimate customers. 
[Sign up for the waitlist today!](https://about.gitlab.com/gitlab-duo/workflow/)\n\n## Learn more\n- [Use GitLab Duo Workflow to improve application quality assurance](https://about.gitlab.com/blog/use-gitlab-duo-workflow-to-improve-application-quality-assurance/)\n- [Solving complex challenges with GitLab Duo Workflow](https://about.gitlab.com/blog/solving-complex-challenges-with-gitlab-duo-workflow/)\n- [GitLab Duo Workflow: Enterprise visibility and control for agentic AI](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/)\n- [Emerging agentic AI trends reshaping software development](https://about.gitlab.com/the-source/ai/emerging-agentic-ai-trends-reshaping-software-development/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n",[704,478,680,678,9,940],"workflow",{"slug":942,"featured":90,"template":684},"automate-tedious-coding-tasks-with-gitlab-duo-workflow","content:en-us:blog:automate-tedious-coding-tasks-with-gitlab-duo-workflow.yml","Automate Tedious Coding Tasks With Gitlab Duo Workflow","en-us/blog/automate-tedious-coding-tasks-with-gitlab-duo-workflow.yml","en-us/blog/automate-tedious-coding-tasks-with-gitlab-duo-workflow",{"_path":948,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":949,"content":955,"config":961,"_id":963,"_type":13,"title":964,"_source":15,"_file":965,"_stem":966,"_extension":18},"/en-us/blog/automating-a-twitter-bot-using-gitlab-cicd",{"title":950,"description":951,"ogTitle":950,"ogDescription":951,"noIndex":6,"ogImage":952,"ogUrl":953,"ogSiteName":669,"ogType":670,"canonicalUrls":953,"schema":954},"How to automate a Twitter bot using GitLab CI/CD","This tutorial shows how to use the DevSecOps platform to create a set-and-forget Twitter bot.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749661856/Blog/Hero%20Images/ci-cd-demo.jpg","https://about.gitlab.com/blog/automating-a-twitter-bot-using-gitlab-cicd","\n                        {\n       
 \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to automate a Twitter bot using GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Siddharth Mathur\"}],\n        \"datePublished\": \"2023-03-21\",\n      }",{"title":950,"description":951,"authors":956,"heroImage":952,"date":958,"body":959,"category":769,"tags":960},[957],"Siddharth Mathur","2023-03-21","\n\nGitLab's CI/CD pipelines are great for automating many things, like deployments to Google Kubernetes Engine and security scans. But did you know that you could use GitLab CI/CD pipelines to run a set-and-forget Twitter bot?\n\nMany organizations today are leveraging Twitter's API to [understand customer sentiment](https://developer.twitter.com/en/blog/success-stories/target), [track public health data](https://developer.twitter.com/en/blog/success-stories/penn), [perform financial analysis](https://developer.twitter.com/en/blog/success-stories/likefolio), and more. While these bots may be running on self-managed infrastructure or external services, you can simplify and consolidate your tooling by leveraging GitLab instead, making your bot easier to manage.\n\nWith GitLab's [Free tier](/pricing/), you can leverage 400 minutes of CI/CD run time per month to automatically analyze and post tweets. With GitLab [Premium](/pricing/premium) and [Ultimate](/pricing/ultimate), you'll get even more pipeline minutes to tweet more, run longer natural language processing analyses, or for other projects.\n\nSetting up a Twitter bot using GitLab is pretty simple. 
At the end of this blog, you'll have a project that looks like [this](https://gitlab.com/smathur/twitter-bot), and a Twitter account that automatically posts a simple tweet.\n\nTo get started, you'll need these prerequisites:\n- GitLab account (self-hosted with GitLab Runner(s) set up or on GitLab.com)\n- Twitter API credentials\n\nOnce you've generated your Twitter API credentials, we can start building out our bot in GitLab. In this blog, we'll leverage GitLab's Web IDE based on Visual Studio Code, but feel free to use a code editor of your choice.\n\n## Step 1: Write a Python script to post tweets\n\n![Navigate to the Web IDE](https://about.gitlab.com/images/blogimages/2023-03-10-automating-a-twitter-bot-using-gitlab-cicd/web-ide.png){: .shadow}\n\nCreate a new blank project in GitLab, and click the \"Web IDE\" button to start writing some code. In the Web IDE, create a new file called `run_bot.py`, and paste the following code (this is where you interact with the Twitter API):\n\n```python\nimport tweepy\nimport config\n\ndef set_up():\n\tauth = tweepy.OAuthHandler(config.consumer_key, config.consumer_secret_key)\n\tauth.set_access_token(config.access_token, config.access_token_secret)\n\tapi = tweepy.API(auth)\n\treturn api\n\ndef run(tweet):\n\tapi = set_up()\n\tapi.update_status(tweet)\n\nrun('It\\'s Tanuki time')\n```\n\n**Note:** If you're familiar with Python, you'll notice that we're importing a file called `config` with some variables that we're using. 
This `config` file doesn't exist yet, but we'll create it from within a GitLab pipeline, leveraging CI/CD variables to securely store and use our Twitter API credentials.\n\nCreate another file called `requirements.txt`, and paste the following line:\n\n```\ntweepy\n```\n\nChanges to files in the Web IDE will be automatically saved, so switch to the Git tab and commit your changes.\n\n## Step 2: Create a CI/CD pipeline to run your Python script\n\nNext, we'll create a CI/CD pipeline script to run our Twitter bot and post a tweet every time the pipeline is run. To do this, you can:\n1. Create a new file using the Web IDE called `.gitlab-ci.yml`, or\n2. Head to your GitLab project, and from the sidebar, click CI/CD > Editor.\n\nIf you see some default text in the pipeline configuration, delete everything to start with a clean slate.\n\nIn the pipeline YAML file, we'll first specify the Docker image we want to run the bot on:\n\n```yaml\nimage: python:latest\n```\n\n**Note:** Normally in a pipeline, we would define stages first and then write jobs that are each assigned to a specific stage. Since we're only running one job in this pipeline, we don't need to specify stages at the top of our pipeline configuration file.\n\nNext, we'll add a job called `run` that runs the Python script we created in the previous step. Inside this job, we'll add a `script` section to run some commands that will execute our Python script.\n\n```yaml\nrun:\n  script:\n    - echo \"consumer_key = '$CONSUMER_KEY'\" >> config.py\n    - echo \"consumer_secret_key = '$CONSUMER_SECRET'\" >> config.py\n    - echo \"access_token = '$ACCESS_TOKEN'\" >> config.py\n    - echo \"access_token_secret = '$ACCESS_SECRET'\" >> config.py\n    - pip install -r requirements.txt\n    - python3 run_bot.py\n```\n\nCommit your changes. The pipeline will automatically run, since you just made a change to the project files, but it will fail. 
This is because we are calling some CI/CD variables in the pipeline, which we haven't set yet. Let's go ahead and do that!\n\n## Step 3: Set CI/CD variables to store API tokens\n\nHead to your GitLab project and from the sidebar, go to Settings > CI/CD.\n\nExpand the \"Variables\" section and add the `ACCESS_SECRET`, `ACCESS_TOKEN`, `CONSUMER_KEY`, and `CONSUMER_SECRET` variables as shown below (these are your Twitter API credentials):\n\n![CI/CD variables](https://about.gitlab.com/images/blogimages/2023-03-10-automating-a-twitter-bot-using-gitlab-cicd/ci-cd-variables.png){: .shadow}\n\nNote that the secrets are masked to prevent them from showing up in job logs (check the \"Mask variable\" box when creating/editing the variable).\n\n## Step 4: Test and schedule your Twitter bot\n\nNow that we've got everything set up, all we need to do is run the bot. Go to CI/CD > Pipelines, and click \"Run pipeline\". Click \"Run pipeline\" again, and wait for the `run` job to finish. If you've set up your Twitter credentials correctly, you should see that the pipeline successfully ran, and a tweet was posted on your bot account!\n\n![Schedule a pipeline](https://about.gitlab.com/images/blogimages/2023-03-10-automating-a-twitter-bot-using-gitlab-cicd/schedule-pipeline.png){: .shadow}\n\nOnce you've verified that your pipeline runs successfully, schedule your pipeline to automatically run at a regular interval. Go to CI/CD > Schedules, and click \"New schedule\". Feel free to use one of the default provided intervals, or use cron to set a custom schedule. Specify a timezone, and ensure that the \"Active\" checkbox is checked. Finally, click \"Save pipeline schedule\". You'll see that your pipeline has been scheduled to run, and when it will run next.\n\nAnd that's it! You now have a fully-functional Twitter bot running on GitLab, using CI/CD pipelines to automatically post tweets. 
While this demo Twitter bot simply posts a specified text message, you can add your own logic to [generate sentences using AI](https://linguatools.org/language-apis/sentence-generating-api/), [perform sentiment analysis on other users' tweets](https://www.analyticsvidhya.com/blog/2021/06/twitter-sentiment-analysis-a-nlp-use-case-for-beginners/), and more. Running a Twitter bot is just one of the many ways you can leverage pipelines in GitLab, and you can also check out some other [interesting use cases](https://docs.gitlab.com/ee/ci/examples/).\n",[835,771,772,9],{"slug":962,"featured":6,"template":684},"automating-a-twitter-bot-using-gitlab-cicd","content:en-us:blog:automating-a-twitter-bot-using-gitlab-cicd.yml","Automating A Twitter Bot Using Gitlab Cicd","en-us/blog/automating-a-twitter-bot-using-gitlab-cicd.yml","en-us/blog/automating-a-twitter-bot-using-gitlab-cicd",{"_path":968,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":969,"content":975,"config":981,"_id":983,"_type":13,"title":984,"_source":15,"_file":985,"_stem":986,"_extension":18},"/en-us/blog/automating-agile-workflows-with-the-gitlab-triage-gem",{"title":970,"description":971,"ogTitle":970,"ogDescription":971,"noIndex":6,"ogImage":972,"ogUrl":973,"ogSiteName":669,"ogType":670,"canonicalUrls":973,"schema":974},"Automating Agile workflows with the gitlab-triage gem","Learn how to automate repetitive tasks like triaging issues and merge requests to free up valuable developer time in our \"Getting Started with GitLab\" series.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659525/Blog/Hero%20Images/blog-getting-started-with-gitlab-banner-0497-option4-fy25.png","https://about.gitlab.com/blog/automating-agile-workflows-with-the-gitlab-triage-gem","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automating Agile workflows with the gitlab-triage gem\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2025-03-13\",\n      }",{"title":970,"description":971,"authors":976,"heroImage":972,"date":977,"body":978,"category":678,"tags":979},[852],"2025-03-13","*Welcome to our \"Getting started with GitLab\" series, where we help newcomers get familiar with the GitLab DevSecOps platform.*\n\nThis post dives into the [`gitlab-triage`](https://gitlab.com/gitlab-org/ruby/gems/gitlab-triage) gem, a powerful tool that lets you create bots to automate your Agile workflow. Say goodbye to manual tasks and hello to streamlined efficiency.\n\n## Why automate your workflow?\n\nEfficiency is key in software development. Automating repetitive tasks like triaging issues and merge requests frees up valuable time for your team to focus on what matters most: building amazing software.\n\nWith `gitlab-triage`, you can:\n\n* **Ensure consistency:** Apply labels and assign issues automatically based on predefined rules.  \n* **Improve response times:** Get immediate feedback on new issues and merge requests.  \n* **Reduce manual effort:** Eliminate the need for manual triage and updates.  \n* **Boost productivity:** Free up your team to focus on coding and innovation.\n\n## Introducing the `gitlab-triage` gem\n\nThe `gitlab-triage` gem is a Ruby library that allows you to create bots that interact with your GitLab projects. These bots can automatically perform a wide range of actions, including:\n\n* **Labeling:** Automatically categorize issues and merge requests.  \n* **Commenting:** Provide updates, request information, or give feedback.  \n* **Assigning:** Assign issues and merge requests to the appropriate team members.  \n* **Closing:** Close stale or resolved issues and merge requests.  \n* **Creating:** Generate new issues based on specific events or conditions.  \n* **And much more!**\n\nCheck out the [`gitlab-triage` gem repository](https://gitlab.com/gitlab-org/ruby/gems/gitlab-triage). 
\n\n## Setting up your triage bot\n\nLet's get your first triage bot up and running!\n\n1. Install the gem. (Note: The gem command is available with Ruby programming language installed.)\n\n```bash\ngem install gitlab-triage\n```\n\n2. Get your GitLab API token.\n\n* Go to your GitLab [profile settings](https://gitlab.com/-/profile/preferences).  \n* Navigate to **Access Tokens**.  \n* Create a new token with the `api` scope.  \n* **Keep your token secure and set an expiration date for it based on when you will be done with this walkthrough!**\n\n3. Define your triage policies.\n\nCreate a file named `.triage-policies.yml` in your project's root directory. This file will contain the rules that govern your bot's behavior. Here's a simple example:\n\n```yaml\n\n---\n- name: \"Apply 'WIP' label\"\n  condition:\n    draft: true\n  action:\n    labels:\n      - status::wip\n\n- name: \"Request more information on old issue\"\n  condition:\n   date:\n    attribute: updated_at\n    condition: older_than\n    interval_type: months\n    interval: 12\n  action:\n    comment: |\n      {{author}} This issue has been open for more than 12 months, is this still an issue?\n```\n\nThis configuration defines two policies:\n\n* The first policy applies the `status::wip` label to any issue that is in draft.  \n* The second policy adds a comment to an issue that the issue has not been updated in 12 months.\n\n4. Run your bot.\n\nYou can run your bot manually using the following command:\n\n```bash\ngitlab-triage -t \u003Cyour_api_token> -p \u003Cyour_project_id>\n```\n\nReplace `\u003Cyour_api_token>` with your GitLab API token and `\u003Cyour_project_id>` with the [ID of your GitLab project](https://docs.gitlab.com/user/project/working_with_projects/#access-a-project-by-using-the-project-id). 
If you would like to see the impact of actions before they are taken, you can add the `-n` or `--dry-run` to test out the policies first.\n\n## Automating with GitLab CI/CD\n\nTo automate the execution of your triage bot, integrate it with [GitLab CI/CD](https://about.gitlab.com/blog/ultimate-guide-to-ci-cd-fundamentals-to-advanced-implementation/). Here's an example `.gitlab-ci.yml` configuration:\n\n```yaml\n\ntriage:\n  script:\n    - gem install gitlab-triage\n    - gitlab-triage -t $GITLAB_TOKEN -p $CI_PROJECT_ID\n  only:\n    - schedules\n```\n\nThis configuration defines a job named \"triage\" that installs the `gitlab-triage` gem and runs the bot using the `$GITLAB_TOKEN` (a predefined [CI/CD variable](https://docs.gitlab.com/ci/variables/)) and the `$CI_PROJECT_ID` variable. The `only: schedules` clause ensures that the job runs only on a schedule.\n\nTo create a [schedule](https://docs.gitlab.com/ee/ci/pipelines/schedules.html), go to your project's **CI/CD** settings and navigate to **Schedules**. Create a new schedule and define the frequency at which you want your bot to run (e.g., daily, hourly).\n\n## Advanced triage policies\n\n`gitlab-triage` offers a range of advanced features for creating more complex triage policies:\n\n* **Regular expressions:** Use regular expressions for more powerful pattern matching.  \n* **Summary policies:** Consolidate related issues into a single summary issue.  \n* **Custom actions:** Define custom actions using [Ruby code blocks](https://gitlab.com/gitlab-org/ruby/gems/gitlab-triage#can-i-customize) to perform more complex operations using the GitLab API.\n\nHere are two advanced real-world examples from the triage bot used by the Developer Advocacy team at GitLab. You can view the full policies in [this file](https://gitlab.com/gitlab-da/projects/devrel-bot/-/blob/master/.triage-policies.yml?ref_type=heads).\n\n```yaml\n- name: Issues where DA team member is an assignee outside DA-Meta project i.e. 
DevRel-Influenced\n  conditions:\n    assignee_member:\n      source: group\n      condition: member_of\n      source_id: 1008\n    state: opened\n    ruby: get_project_id != 18 \n    forbidden_labels:\n      - developer-advocacy\n  actions:   \n    labels:\n      - developer-advocacy\n      - DevRel-Influenced\n      - DA-Bot::Skip\n```\n\nThis example for issues across a group, excluding those in the project with the ID of 18, have assignees who are members of the group with ID of 1008 and do not have the label `developer-advocacy` on them. This policy helps the Developer Advocacy team at GitLab to find issues members of the team are assigned to but are not in their team’s project. This helps the team identify and keep track of contributions made outside of the team by adding the teams’ labels.\n\n```\n- name: Missing Due Dates\n  conditions:\n    ruby: missing_due_date\n    state: opened\n    labels:\n      - developer-advocacy\n    forbidden_labels:\n      - DA-Due::N/A\n      - DA-Bot::Skip\n      - DA-Status::FYI\n      - DA-Status::OnHold\n      - CFP\n      - DA-Bot::Triage\n  actions:\n    labels:\n      - DA-Bot-Auto-Due-Date\n    comment: |\n      /due #{get_current_quarter_last_date}\n```\n\nThis second example checks for all issues with the `developer-advocacy` label, which do not include labels in the forbidden labels list and when their due dates have passed. It updates the due dates automatically by commenting on the issue with a slash command and a date that is generated using Ruby.\n\nThe Ruby scripts used in the policies are defined in a separate file as shown below. This feature allows you to be flexible in working with your filters and actions. You can see functions are created for different Ruby commands that we used in our policies. 
\n\n```\nrequire 'json'\nrequire 'date'\nrequire \"faraday\"\nrequire 'dotenv/load'\n\nmodule DATriagePlugin\n  def last_comment_at\n    conn = Faraday.new(\n      url: notes_url+\"?sort=desc&order_by=created_at&pagination=keyset&per_page=1\",\n      headers: {'PRIVATE-TOKEN' => ENV.fetch(\"PRIV_KEY\"), 'Content-Type' => 'application/json' }\n    )\n\n    response = conn.get()\n    if response.status == 200\n      jsonData = JSON.parse(response.body)\n      if jsonData.length > 0\n        Date.parse(jsonData[0]['created_at'])\n      else\n        Date.parse(resource[:created_at])\n      end\n    else\n      Date.parse(resource[:created_at])\n    end\n  end\n\n  def notes_url\n    resource[:_links][:notes]\n  end\n\n  def get_project_id\n    resource[:project_id]\n  end\n\n  def get_current_quarter_last_date()\n    yr = Time.now.year\n    case Time.now.month\n    when 2..4\n      lm = 4\n    when 5..7\n      lm = 7\n    when 8..10\n      lm = 10\n    when 11..12\n      lm = 1\n      yr = yr + 1\n    else\n      lm = 1    \n    end\n\n    return Date.new(yr, lm, -1) \n  end\n\n  def one_week_to_due_date\n    if(resource[:due_date] == nil)\n      false\n    else\n      days_to_due = (Date.parse(resource[:due_date]) - Date.today).to_i\n      if(days_to_due > 0 && days_to_due \u003C 7)\n        true\n      else\n        false\n      end\n    end\n  end\n\n  def due_date_past\n    if(resource[:due_date] == nil)\n      false\n    else\n      Date.today > Date.parse(resource[:due_date])\n    end\n  end\n\n  def missing_due_date\n    if(resource[:due_date] == nil)\n      true\n    else\n      false\n    end\n  end\n\nend\n\nGitlab::Triage::Resource::Context.include DATriagePlugin\n\n```\nThe triage bot is executed using the command:\n\n``` \n`gitlab-triage -r ./triage_bot/issue_triage_plugin.rb --debug --token $PRIV_KEY --source-id gitlab-com --source groups`  \n```\n\n- `-r`: Passes in a  file of requirements for the performing triage. 
In this case we are passing in our Ruby functions.  \n- `--debug`: Prints debugging information as part of the output.  \n- `--token`: Is used to pass in a valid GitLab API token.  \n- `--source`: Specifies if the sources of the issues it will search is within a group or a project.  \n- `--source-id`: Takes in the ID of the selected source type – in this case, a group.\n\nThe GitLab [triage-ops](https://gitlab.com/gitlab-org/quality/triage-ops) project is another real-world example that is more complex and you can learn how to build your own triage bot.\n\n## Best practices\n\n* **Start simple:** Begin with basic policies and gradually increase complexity as needed. \n* **Test thoroughly:** Test your policies in a staging environment before deploying them to production.  \n* **Monitor regularly:** Monitor your bot's activity to ensure it's behaving as expected. \n* **Use descriptive names:** Give your policies clear and descriptive names for easy maintenance. \n* **Be mindful of the scope of your filters:** You might be tempted to filter issues across groups where thousands of issues exist. However, this can slow down the triage and also make the process fail due to rate limitations against the GitLab API.  \n* **Prioritize using labels for triages:** To avoid spamming other users, labels are a good way to perform triages without cluttering comments and issues.\n\n## Take control of your workflow\n\nWith the `gitlab-triage` gem, you can automate your GitLab workflow and unlock new levels of efficiency. Start by creating simple triage bots and gradually explore the more advanced features. You'll be amazed at how much time and effort you can save\\!\n\n> #### Want to take your learning to the next level? [Sign up for GitLab University courses](https://university.gitlab.com/). 
Or you can get going right away with a [free 60-day trial of GitLab Ultimate](https://about.gitlab.com/free-trial/).\n\n## \"Getting started with GitLab\" series\nRead more articles in our \"Getting started with GitLab\" series:\n\n- [How to manage users](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-manage-users/)\n- [How to import your projects to GitLab](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-import-your-projects-to-gitlab/)  \n- [Mastering project management](https://about.gitlab.com/blog/getting-started-with-gitlab-mastering-project-management/)\n- [Understanding CI/CD](https://about.gitlab.com/blog/getting-started-with-gitlab-understanding-ci-cd/)\n- [Working with CI/CD variables](https://about.gitlab.com/blog/getting-started-with-gitlab-working-with-ci-cd-variables/)\n",[478,9,678,980,108],"agile",{"slug":982,"featured":6,"template":684},"automating-agile-workflows-with-the-gitlab-triage-gem","content:en-us:blog:automating-agile-workflows-with-the-gitlab-triage-gem.yml","Automating Agile Workflows With The Gitlab Triage Gem","en-us/blog/automating-agile-workflows-with-the-gitlab-triage-gem.yml","en-us/blog/automating-agile-workflows-with-the-gitlab-triage-gem",{"_path":988,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":989,"content":995,"config":1001,"_id":1003,"_type":13,"title":1004,"_source":15,"_file":1005,"_stem":1006,"_extension":18},"/en-us/blog/automating-container-image-migration-from-amazon-ecr-to-gitlab",{"title":990,"description":991,"ogTitle":990,"ogDescription":991,"noIndex":6,"ogImage":992,"ogUrl":993,"ogSiteName":669,"ogType":670,"canonicalUrls":993,"schema":994},"Automating container image migration from Amazon ECR to GitLab","When platform teams move their CI/CD to GitLab, migrating container images shouldn't be the bottleneck. 
Follow this step-by-step guide to automate the pipeline migration process.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663129/Blog/Hero%20Images/blog-image-template-1800x945__28_.png","https://about.gitlab.com/blog/automating-container-image-migration-from-amazon-ecr-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automating container image migration from Amazon ECR to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2025-02-13\",\n      }",{"title":990,"description":991,"authors":996,"heroImage":992,"date":997,"body":998,"category":769,"tags":999},[916],"2025-02-13","\"We need to migrate hundreds of container images from Amazon Elastic Container Registry (ECR) to GitLab. Can you help?\" This question kept coming up in conversations with platform engineers. They were modernizing their DevSecOps toolchain with GitLab but got stuck when faced with moving their container images. While each image transfer is simple, the sheer volume made it daunting.\n\nOne platform engineer perfectly said, \"I know exactly what needs to be done – pull, retag, push. But I have 200 microservices, each with multiple tags. I can't justify spending weeks on this migration when I have critical infrastructure work.\"\n\n## The challenge\n\nThat conversation sparked an idea. What if we could automate the entire process? When platform teams move their [CI/CD](https://about.gitlab.com/topics/ci-cd/) to GitLab, migrating container images shouldn't be the bottleneck. The manual process is straightforward but repetitive – pull each image, retag it, and push it to GitLab's Container Registry. Multiply this by dozens of repositories and multiple tags per image, and you're looking at days or weeks of tedious work.\n\n## The solution\n\nWe set out to create a GitLab pipeline that would automatically do all this heavy lifting. 
The goal was simple: Give platform engineers a tool they could set up in minutes and let run overnight, waking up to find all their images migrated successfully.\n\n### Setting up access\n\nFirst things first – security. We wanted to ensure teams could run this migration with minimal AWS permissions. Here's the read-only identity and access management (IAM) policy you'll need:\n\n```json\n{\n    \"Version\": \"2012-10-17\",\n    \"Statement\": [\n        {\n            \"Effect\": \"Allow\",\n            \"Action\": [\n                \"ecr:GetAuthorizationToken\",\n                \"ecr:BatchCheckLayerAvailability\",\n                \"ecr:GetDownloadUrlForLayer\",\n                \"ecr:DescribeRepositories\",\n                \"ecr:ListImages\",\n                \"ecr:DescribeImages\",\n                \"ecr:BatchGetImage\"\n            ],\n            \"Resource\": \"*\"\n        }\n    ]\n}\n```\n\n### GitLab configuration\n\nWith security handled, the next step is setting up GitLab. We kept this minimal - you'll need to configure these variables in your CI/CD settings:\n\n```\nAWS_ACCOUNT_ID: Your AWS account number\nAWS_DEFAULT_REGION: Your ECR region\nAWS_ACCESS_KEY_ID: [Masked]\nAWS_SECRET_ACCESS_KEY: [Masked]\nBULK_MIGRATE: true\n```\n\n### The migration pipeline\n\nNow for the interesting part. We built the pipeline using Docker-in-Docker to handle all the image operations reliably:\n\n```yaml\nimage: docker:20.10\nservices:\n  - docker:20.10-dind\n\nbefore_script:\n  - apk add --no-cache aws-cli jq\n  - aws sts get-caller-identity\n  - aws ecr get-login-password | docker login --username AWS --password-stdin\n  - docker login -u ${CI_REGISTRY_USER} -p ${CI_REGISTRY_PASSWORD} ${CI_REGISTRY}\n```\n\nThe pipeline works in three phases, each building on the last:\n\n1. Discovery\n\nFirst, it finds all your repositories:\n\n```bash\nREPOS=$(aws ecr describe-repositories --query 'repositories[*].repositoryName' --output text)\n```\n\n2. 
Tag enumeration\n\nThen, for each repository, it gets all the tags:\n\n```bash\nTAGS=$(aws ecr describe-images --repository-name $repo --query 'imageDetails[*].imageTags[]' --output text)\n```\n\n3. Transfer\n\nFinally, it handles the actual migration:\n\n```bash\ndocker pull ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/${repo}:${tag}\ndocker tag ${AWS_ACCOUNT_ID}.dkr.ecr.${AWS_DEFAULT_REGION}.amazonaws.com/${repo}:${tag} ${CI_REGISTRY_IMAGE}/${repo}:${tag}\ndocker push ${CI_REGISTRY_IMAGE}/${repo}:${tag}\n```\n\n## What you get\n\nRemember that platform engineer who didn't want to spend weeks on migration? Here's what this solution delivers:\n\n- automated discovery and migration of all repositories and tags\n- consistent image naming between ECR and GitLab\n- error handling for failed transfers\n- clear logging for tracking progress\n\nInstead of writing scripts and babysitting the migration, the platform engineer could focus on more valuable work.\n\n## Usage\n\nGetting started is straightforward:\n\n1. Copy the `.gitlab-ci.yml` to your repository.\n2. Configure the AWS and GitLab variables.\n3. Set `BULK_MIGRATE` to \"true\" to start the migration.\n\n## Best practices\n\nThrough helping teams with their migrations, we've learned a few things:\n\n- Run during off-peak hours to minimize the impact on your team.\n- Keep an eye on the pipeline logs - they'll tell you if anything needs attention.\n- Don't decommission ECR until you've verified all images transferred successfully.\n- For very large migrations, consider adding rate limiting to avoid overwhelming your network\n\nWe've open-sourced this pipeline in our public GitLab repository because we believe platform engineers should spend time building valuable infrastructure, not copying container images. 
Feel free to adapt it for your needs or ask questions about implementation.\n\n> #### Get started with this and other package components with our [CI/CD Catalog documentation](https://gitlab.com/explore/catalog/components/package).",[108,794,9,478,678,1000],"solutions architecture",{"slug":1002,"featured":90,"template":684},"automating-container-image-migration-from-amazon-ecr-to-gitlab","content:en-us:blog:automating-container-image-migration-from-amazon-ecr-to-gitlab.yml","Automating Container Image Migration From Amazon Ecr To Gitlab","en-us/blog/automating-container-image-migration-from-amazon-ecr-to-gitlab.yml","en-us/blog/automating-container-image-migration-from-amazon-ecr-to-gitlab",{"_path":1008,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1009,"content":1015,"config":1021,"_id":1023,"_type":13,"title":1024,"_source":15,"_file":1025,"_stem":1026,"_extension":18},"/en-us/blog/automating-cybersecurity-threat-detections-with-gitlab-ci-cd",{"title":1010,"description":1011,"ogTitle":1010,"ogDescription":1011,"noIndex":6,"ogImage":1012,"ogUrl":1013,"ogSiteName":669,"ogType":670,"canonicalUrls":1013,"schema":1014},"Automating cybersecurity threat detections with GitLab CI/CD","Discover how GUARD automates cybersecurity threat detections through the use of GitLab CI/CD and how it ensures high-quality detections.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663239/Blog/Hero%20Images/AdobeStock_1023776629.jpg","https://about.gitlab.com/blog/automating-cybersecurity-threat-detections-with-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automating cybersecurity threat detections with GitLab CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mitra Jozenazemian\"}],\n        \"datePublished\": \"2025-01-29\",\n      
}",{"title":1010,"description":1011,"authors":1016,"heroImage":1012,"date":1018,"body":1019,"category":814,"tags":1020},[1017],"Mitra Jozenazemian","2025-01-29","*This blog post is the second post in a series about [GitLab Universal Automated Response and Detection (GUARD)](https://about.gitlab.com/blog/unveiling-the-guard-framework-to-automate-security-detections-at-gitlab/).*\n\nWriting and deploying security threat detections in an organization’s security information event management platform (SIEM) is a critical component of a successful cybersecurity program. Moving from manual detection engineering to a fully automated process by implementing Detections as Code (DaC) ensures detection consistency, quality, auditing, and automated testing. At GitLab, we’ve embedded DaC capabilities into GUARD, our fully automated detection and response framework. \n\n## The problem: Source control and automated tests\n\nThe [Signals Engineering](https://handbook.gitlab.com/handbook/security/security-operations/signals-engineering/) and [SIRT](https://handbook.gitlab.com/handbook/security/security-operations/sirt/) team at GitLab share the responsibility to create, update, and decommission threat detections in our SIEM. Maintaining a single source of truth for detections is critical to ensure detection consistency and quality standards are met. Our teams made the conscious decision to abstract the detection creation process from our SIEM, improving our issue tracking, consistency, roll-back process, and metrics. Additionally, conducting pre-commit detection tests outside of our SIEM ensured that newly created detections didn’t introduce overly false positive heavy alerts, which would require tuning or disablement while the alert was fixed. 
\n\n## The Solution: Leverage GitLab CI/CD for detection testing and validation\n\nTo address these challenges, we developed an efficient workflow using GitLab [CI/CD](https://about.gitlab.com/topics/ci-cd/), resulting in a streamlined and secure SIEM detection deployment process.\n\n### Key components of the GUARD DaC pipeline \n\n__1. Detections stored in JSON format in a GitLab project__\n\nGitLab uses the JSON format for our threat detections. The template includes essential information such as SIEM query logic, detection title, and description along with runbook page link, MITRE tactic and technique related to the detection, and other necessary details.\n\n__2. Initiating merge requests__\n\nWhen a GitLab team member intends to create a new threat detection, update an existing one, or delete a current detection, they initiate the process by submitting a merge request (MR) in the DaC project containing the detection JSON template. Creating the MR automatically triggers a CI/CD pipeline.\n\n__3. Automated validation with CI/CD jobs__\n\nEach MR contains a number of automated checks via GitLab CI/CD:   \n* Query format validation queries SIEM API to ensure detection query is valid  \n* JSON Detection fields validation validates all required fields are present, and are in the correct format   \n* New detections and detection modification trigger a number of SIEM API calls to ensure the detection does not have any errors and that no issues will be introduced into our production detection rules   \n* Detection deletion MRs trigger the pipeline to issue a SIEM API query to ensure the detection to be deleted is still active and can be deleted \n\n__4. Peer review and approval__\n\nWhen a detection MR job completes successfully, a peer review is required to review and confirm the MR meets required quality and content standards before the detection MR can be merged. 
[Merge request approval rules](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/rules.html) are used to trigger the peer review process. \n\n__5. Merge and final deployment__\n\nAfter the MR is approved, it is merged into the main branch. As part of the CI/CD pipeline, an automated job executes a SIEM API command in order to perform two tasks:   \n* Create the new detection or update/delete the existing detection if needed.   \n* Extract the MITRE ATT&CK tactic and technique information related to the alert from the JSON files and transmit these details to a lookup table within the SIEM. This lookup table plays an important role in mapping our alerts to MITRE tactics and techniques, helping us improve our threat analysis and identify gaps in our detection capabilities in alignment with the MITRE framework.\n\n**Note:** The necessary credentials for these actions are securely stored in [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/) to ensure the process remains confidential and secure.\n\nBelow is a template GitLab CI/CD `gitlab-ci.yml` configuration file for a DaC pipeline: \n\n```\n\n# --------------------------------------------------------------------------- #\n# GitLab CI/CD Pipeline for SIEM Detection Management\n# --------------------------------------------------------------------------- #\n\nimage: python:3.12\n\n# --------------------------------------------------------------------------- #\n# Global Configuration\n# --------------------------------------------------------------------------- #\n\nbefore_script:\n  - apt-get update && apt-get install -y jq\n  - pip install --upgrade pip\n  - pip install -r requirements.txt\n\n# --------------------------------------------------------------------------- #\n\nstages:\n  - fetch\n  - test\n  - process\n  - upload\n\n# --------------------------------------------------------------------------- #\n# Fetch Stage\n# 
--------------------------------------------------------------------------- #\n\nfetch_changed_files:\n  stage: fetch\n  Script:\n    - echo \"Fetching changed files...\"\n    - git branch\n    - git fetch origin $CI_DEFAULT_BRANCH:$CI_DEFAULT_BRANCH --depth 2000\n    - |\n      if [[ \"$CI_COMMIT_BRANCH\" == \"$CI_DEFAULT_BRANCH\" ]]; then\n        git diff --name-status HEAD^1...HEAD > changed-files-temp.txt\n      else\n        git fetch origin $CI_COMMIT_BRANCH:$CI_COMMIT_BRANCH --depth 2000\n        git diff --name-status ${CI_DEFAULT_BRANCH}...${CI_COMMIT_SHA} > changed-files-temp.txt\n      fi\n    - grep -E '\\.json$' changed-files-temp.txt > changed-files.txt || true\n    - flake8 .\n    - pytest\n  artifacts:\n    paths:\n      - changed-files.txt\n    expose_as: 'changed_files'\n\n# --------------------------------------------------------------------------- #\n# Test Stage\n# --------------------------------------------------------------------------- #\n\nflake8:\n  stage: test\n  script:\n    - echo \"Running Flake8 for linting...\"\n    - flake8 .\n\npytest:\n  stage: test\n  script:\n    - echo \"Running Pytest for unit tests...\"\n    - pytest\n  artifacts:\n    when: always\n    reports:\n      junit: report.xml\n\n# --------------------------------------------------------------------------- #\n# Process Stage\n# --------------------------------------------------------------------------- #\n\nprocess_files:\n  stage: process\n  script:\n    - echo \"Processing changed files...\"\n    - git clone --depth 2000 --branch $CI_DEFAULT_BRANCH $CI_REPOSITORY_URL\n    - mkdir -p modified_rules delete_file new_file\n    - python3 move-files.py -x changed-files.txt\n    - python3 check-alerts-format.py\n  artifacts:\n    paths:\n      - modified_rules\n      - delete_file\n      - new_file\n# --------------------------------------------------------------------------- #\n# Upload Stage\n# 
--------------------------------------------------------------------------- #\n\nupdate_rules:\n  stage: upload\n  script:\n    - echo \"Uploading updated rules and lookup tables...\"\n    - git fetch origin $CI_DEFAULT_BRANCH:$CI_DEFAULT_BRANCH --depth 2000\n    - git clone --depth 2000 --branch $CI_DEFAULT_BRANCH $CI_REPOSITORY_URL \n    - python3 update-rules.py\n    - python3 update-exceptions.py\n    - python3 create_ttps_layers.py\n  rules:\n    - if: $CI_COMMIT_BRANCH == \"main\" && $CI_PIPELINE_SOURCE != \"schedule\"\n      changes:\n        - detections/**/*\n        - exceptions/**/*\n```\n\nThe diagram below illustrates the workflow of the CI/CD process described above.\n\n```mermaid\ngraph TD;\n    fetch[Fetch Stage: Identify Changed Files] --> test[Test Stage: Run Linting and Tests];\n    test --> process[Process Stage: Categorize Files];\n    process --> upload[Upload Stage: Update Rules and Lookup Tables];\n    fetch --> fetch_details[Details: Filter JSON files, Output 'changed-files.txt'];\n    test --> test_details[Details: Run Flake8 for linting, Pytest for testing];\n    process --> process_details[Details: Categorize into 'modified', 'new', 'deleted', Prepare for upload];\n    upload --> upload_details[Details: Update repo, Update detections in SIEM and SIEM lookup table];\n```\n\n## Benefits and outcomes\n\nAutomating our detections lifecycle through a DaC CI/CD-powered workflow introduces numerous benefits to our threat detection deployment process:\n\n* Automation: Automating the creation and validation of SIEM detections reduces manual errors and saves time.\n* Enhanced security: The CI-driven workflow enforces a \"least privilege\" policy, ensuring consistency, peer reviews, and quality standards for creating, updating, or deleting threat detections. 
\n* Efficiency: The standardized JSON detection format and automated creation expedite the deployment process.\n* Collaboration: The MR and review process fosters collaboration and knowledge sharing among GitLab team members.\n* Version control: Treating threat detection as code abstracts the detections from the SIEM platform they are ultimately stored in. This abstraction provides a historical record of changes, facilitates collaboration, and enables rollbacks to previous configurations if issues arise.\n\n## Get started with DaC\n\nUsing GitLab CI/CD and a \"least privilege\" policy has made our SIEM detection and alert management easier and more secure. Automation has improved efficiency and reduced risks, providing a helpful example for others wanting to improve their security and compliance. You can try this tutorial by signing up for a [free 60-day trial of GitLab Ultimate](https://about.gitlab.com/free-trial/).",[814,9,835,478,108],{"slug":1022,"featured":6,"template":684},"automating-cybersecurity-threat-detections-with-gitlab-ci-cd","content:en-us:blog:automating-cybersecurity-threat-detections-with-gitlab-ci-cd.yml","Automating Cybersecurity Threat Detections With Gitlab Ci Cd","en-us/blog/automating-cybersecurity-threat-detections-with-gitlab-ci-cd.yml","en-us/blog/automating-cybersecurity-threat-detections-with-gitlab-ci-cd",{"_path":1028,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1029,"content":1035,"config":1042,"_id":1044,"_type":13,"title":1045,"_source":15,"_file":1046,"_stem":1047,"_extension":18},"/en-us/blog/automating-with-gitlab-duo-part-1-generating-tests",{"title":1030,"description":1031,"ogTitle":1030,"ogDescription":1031,"noIndex":6,"ogImage":1032,"ogUrl":1033,"ogSiteName":669,"ogType":670,"canonicalUrls":1033,"schema":1034},"Automating with GitLab Duo, Part 1: Generating tests","Learn how we used the AI-driven DevSecOps platform to generate automated tests and improve our development speed and 
quality.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097480/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%284%29_3LZkiDjHLjhqEkvOvBsVKp_1750097480784.png","https://about.gitlab.com/blog/automating-with-gitlab-duo-part-1-generating-tests","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Automating with GitLab Duo, Part 1: Generating tests\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Byron Boots\"}],\n        \"datePublished\": \"2024-12-02\",\n      }",{"title":1030,"description":1031,"authors":1036,"heroImage":1032,"date":1038,"body":1039,"category":702,"tags":1040},[1037],"Byron Boots","2024-12-02","Automated testing is time-consuming and can feel like it’s not moving a project forward. However, as many developers have likely experienced, automated testing provides an overall positive return on investment. In building a custom module (we'll call it gitlab-helper for this article), this was particularly true.\n\nOur initial development focused on migrating tried and used functionality from existing scripts to a new module whose sole purpose was to serve as a baseline for future functionality. Although existing scripts lacked automated testing, their consistent usage was strong anecdotal evidence the functionality worked as expected.\n\nOur objective was to deliver a more mature solution to this problem, so automated testing became a necessity. This introduced the challenge of building efficiently, while balancing the time to test and ensure a robust product; and with a total of three team members, this was no small bottleneck. Therefore, the team decided to take advantage of [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI capabilities, for test generation, improving speed and quality of the delivered product.\n\nIn this three-part series on automating with GitLab Duo, we will cover:\n\n1. 
How we used GitLab Duo to generate tests for our code  \n2. How we worked interactively with GitLab Duo for more complex situations  \n3. The results we were able to achieve (Spoiler: 1 developer + GitLab Duo = 84% coverage in 2 days)\n\n## Using GitLab Duo to generate tests for code\n\nWhile functionality is available across tools, this article will cover using GitLab Duo in VS Code, with the [GitLab Workflow extension for VS Code](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow) to generate tests. Links to other GitLab Duo options are available in the [references](#references) below.\n\n### Install and enable GitLab Duo\n\nAs a prerequisite to using GitLab Duo, we ensured we had a GitLab Duo-enabled account. If you don't have GitLab Duo, you can [sign up for a free 60-day trial](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?type=free-trial).\n\nTo use GitLab Duo Chat in VS Code, we followed the [instructions for installation](https://docs.gitlab.com/ee/user/gitlab_duo_chat/#use-gitlab-duo-chat-in-vs-code). Then, we were able to see the GitLab Duo Chat extension on the sidebar and open the Chat window.\n\n![Ask a question window](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097489/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097488918.png)\n\n### Generate tests with Chat\n\ngitlab-helper is a custom module built for standardizing interaction with the GitLab API across the team's work and extends other library functionalities to simplify development and scripting work. 
Once a method or feature was migrated to gitlab-helper and appeared to be implemented appropriately, the process to generate tests for it was simple:\n- Select the method, class, or entire file in the IDE.\n- Right-click on the selected code.\n- Under **GitLab Duo Chat**, select **Generate tests**.\n\n![Sequence to generate tests, including drop-down for generate tests](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097489/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097488919.png)\n\nWithin a few seconds, tests were generated and presented in the GitLab Duo Chat window. These tests can be reviewed and or added to the codebase, via copy/paste, into existing or new test files. As is the case with most natural language processing generations today, particularly around context, some of the initial tests created by GitLab Duo failed, thus requiring finetuning (for instance, when dealing with nested dependencies).\n\n> **Pro tip:** GitLab Duo does not auto-create files to add generated tests to. We found it was helpful to create new test files and add a `# Tests Generated by Duo` comment at the top of them and suffix them with `_duo.py` to indicate where the tests came from.\n\nGitLab Duo provided a great starting point for building out gitlab-helper’s automated testing and greatly improved test writing efficiency and code coverage, speeding up the development process substantially. 
Alongside GitLab Duo, numerous iterations of valuable tests were introduced into the gitlab-helper module with human oversight.\n\nRead the next installment in this series where we share [what we learned while using GitLab Duo for generating automated tests](https://about.gitlab.com/blog/automating-with-gitlab-duo-part-2-complex-testing/) and working interactively with AI for more complex situations.\n\n## References\n\nThere’s more than one way to use GitLab Duo to generate tests, check out the other options below:\n\n* The GitLab UI  \n* [The GitLab Web IDE (VS Code in the cloud)](https://docs.gitlab.com/ee/user/project/web_ide/index.html)  \n* VS Code, with the [GitLab Workflow extension for VS Code](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow)  \n* JetBrains IDEs, with the [GitLab Duo Plugin for JetBrains](https://plugins.jetbrains.com/plugin/22325-gitlab-duo)  \n* Visual Studio for Windows, with the [GitLab Extension for Visual Studio](https://marketplace.visualstudio.com/items?itemName=GitLab.GitLabExtensionForVisualStudio)\n",[704,9,1041,478,680],"testing",{"slug":1043,"featured":6,"template":684},"automating-with-gitlab-duo-part-1-generating-tests","content:en-us:blog:automating-with-gitlab-duo-part-1-generating-tests.yml","Automating With Gitlab Duo Part 1 Generating Tests","en-us/blog/automating-with-gitlab-duo-part-1-generating-tests.yml","en-us/blog/automating-with-gitlab-duo-part-1-generating-tests",{"_path":1049,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1050,"content":1056,"config":1063,"_id":1065,"_type":13,"title":1066,"_source":15,"_file":1067,"_stem":1068,"_extension":18},"/en-us/blog/aws-fargate-codebuild-build-containers-gitlab-runner",{"title":1051,"description":1052,"ogTitle":1051,"ogDescription":1052,"noIndex":6,"ogImage":1053,"ogUrl":1054,"ogSiteName":669,"ogType":670,"canonicalUrls":1054,"schema":1055},"Building containers with GitLab Runner & AWS Fargate executor","Build containers with the AWS 
Fargate Custom Executor for GitLab Runner and AWS CodeBuild","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667132/Blog/Hero%20Images/build-container-image-runner-fargate-codebuild-cover.jpg","https://about.gitlab.com/blog/aws-fargate-codebuild-build-containers-gitlab-runner","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to build containers with the AWS Fargate Custom Executor for GitLab Runner and AWS CodeBuild\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Elliot Rushton\"}],\n        \"datePublished\": \"2020-07-31\",\n      }",{"title":1057,"description":1052,"authors":1058,"heroImage":1053,"date":1060,"body":1061,"category":769,"tags":1062},"How to build containers with the AWS Fargate Custom Executor for GitLab Runner and AWS CodeBuild",[1059],"Elliot Rushton","2020-07-31","\n\nAWS Fargate does not allow containers to run in privileged mode. This means Docker-in-Docker (DinD), which enables the building and running of container images inside of containers, does not work with the [AWS Fargate Custom Executor driver for GitLab Runner](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/fargate). The good news is that users don't have to be blocked by this and may use a cloud-native approach to build containers, effectively leveraging a seamless integration with AWS CodeBuild in the [CI/CD pipeline](/topics/ci-cd/).\n\nWe provide in-depth instructions on how to autoscale GitLab CI on AWS Fargate in [GitLab Runner's documentation](https://docs.gitlab.com/runner/configuration/runner_autoscale_aws_fargate/index.html). 
In this blog post, we explain how to instrument CI containers and source repositories to trigger AWS CodeBuild and use it to build container images.\n\n## Architecture overview\n\n![AWS Fargate + CodeBuild: a cloud-native approach to build containers with GitLab Runner](https://about.gitlab.com/images/blogimages/build-container-image-runner-fargate-codebuild.png)\nHow distinct CI workloads run on Fargate.\n{: .note.text-center}\n\nThe picture above illustrates distinct GitLab CI workloads running on Fargate. The container identified by `ci-coordinator (001)` is running a typical CI job which does not build containers, so it does not require additional configuration or dependencies. The second container, `ci-coordinator (002)`, illustrates the problem to be tackled in this post: The CI container includes the AWS CLI in order to send content to an Amazon S3 Bucket, trigger the AWS CodeBuild job, and fetch logs.\n\n## Prerequisites\n\nOnce these prerequisites are configured, you can dive into the six-step process to configure CI containers and source repositories to trigger AWS CodeBuild and use it to build container images.\n\n- The [AWS Fargate Custom Executor driver for GitLab Runner](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/fargate) must be set-up appropriately.\n- Ensure the AWS IAM user permissions include the ability to create and configure S3 and CodeBuild resources.\n- AWS IAM user or service role with permissions to upload files to S3, start CodeBuild jobs, and read CloudWatch Logs.\n- AWS IAM user with permissions to create and configure IAM Policies and Users.\n\n## Step 1: Create an AWS S3 bucket\n\n1. In the top menu of [AWS Management Console](https://aws.amazon.com/console/) click Services.\n1. In the Storage section, select `S3`.\n1. Click `Create bucket`.\n1. Choose a descriptive name (`ci-container-build-bucket` will be used as example) and select your preferred region.\n1. 
Leave all other fields with default values and click `Create bucket`.\n1. In the Buckets list, click the name of the bucket you created.\n1. Click `Create folder`.\n1. Give it the `gitlab-runner-builds` name.\n1. Click `Save`.\n\n## Step 2: Create an AWS CodeBuild Project\n\n1. Using the AWS Console, click `Services` in the top menu\n1. Select `CodeBuild` in the Developer Tools section\n1. Click `Create build project`\n1. In `Project Name` enter `ci-container-build-project`\n1. In `Source provider` select `Amazon S3`\n1. In `Bucket` select the `ci-container-build-bucket` created in step one\n1. In S3 object key or S3 folder enter `gitlab-runner-builds/build.zip`\n1. In `Environment image`, select `Managed image`\n1. For `Operating system` select your preferred OS from the available options\n1. For `Runtime(s)`, choose `Standard`.\n1. For `Image`, select `aws/codebuild/standard:4.0`\n1. For `Image version`, select `Always use the latest image for this runtime version`\n1. For `Environment type` select `Linux`\n1. Check the `Privileged` flag\n1. For the `Service role` select `New service role` and note the sugggested `Role name`\n1. For `Build specifications` select `Use a buildspec file`\n1. Scroll down to the bottom of the page and click \"Create build project\"\n\n## Step 3: Build the CI container image\n\nAs stated in Autoscaling GitLab CI on AWS Fargate, a [custom container is required](https://docs.gitlab.com/runner/configuration/runner_autoscale_aws_fargate/index.html#step-1-prepare-a-base-container-image-for-the-aws-fargate-task) to run GitLab CI jobs on Fargate. Since the solution relies on communicating with S3 and CodeBuild, you'll need to [have the AWS CLI tool](https://docs.aws.amazon.com/cli/latest/userguide/install-cliv2.html) available in the CI container.\n\nInstall the `zip` tool to make S3 communication smoother. 
As an example of a Ubuntu-based container, the lines below must be added to the CI container's `Dockerfile`:\n\n```dockerfile\nRUN apt-get update -qq -y \\\n    && apt-get install -qq -y curl unzip zip \\\n    && curl -Lo awscliv2.zip https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip \\\n    && unzip awscliv2.zip \\\n    && ./aws/install\n```\n\n## Step 4: Add CodeBuild configuration to the repository\n\nBy default, CodeBuild looks for a file named `buildspec.yml` in the build source. This file will instruct CodeBuild on how to build and publish the resulting container image. Create this file with the content below and commit it to the git repository (_if you changed the **Buildspec name** when configuring the CodeBuild project [in Step 2](#buildspec), please create the file accordingly_):\n\n```yaml\nversion: 0.2\n\nphases:\n  install:\n    commands:\n      - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://127.0.0.1:2375 --storage-driver=overlay2&\n      - timeout 15 sh -c \"until docker info; do echo .; sleep 1; done\"\n  build:\n    commands:\n      - echo Build started on `date`\n      - docker -v\n      - docker build -t \u003CIMAGE-TAG> .\n      - echo Build completed on `date`\n```\n\n## Step 5: Set up the GitLab CI job\n\nNow we will set up the GitLab CI job that will pull everything together.\n\n### Interacting with CodeBuild through the AWS CLI\n\nThe CI job will need to interact with AWS Cloud to start CodeBuild jobs, poll the status of the jobs, and fetch logs. 
Commands such as `aws codebuild` and `aws logs` help to tackle this, so let's use them in a script, `codebuild.sh`:\n\n```bash\n#!/bin/bash\n\nbuild_project=ci-container-build-project\nbuild_id=$(aws codebuild start-build --project-name $build_project --query 'build.id' --output text)\nbuild_status=$(aws codebuild batch-get-builds --ids $build_id --query 'builds[].buildStatus' --output text)\n\nwhile [ $build_status == \"IN_PROGRESS\" ]\ndo\n    sleep 10\n    build_status=$(aws codebuild batch-get-builds --ids $build_id --query 'builds[].buildStatus' --output text)\ndone\n\nstream_name=$(aws codebuild batch-get-builds --ids $build_id --query 'builds[].logs.streamName' --output text)\ngroup_name=$(aws codebuild batch-get-builds --ids $build_id --query 'builds[].logs.groupName' --output text)\n\naws logs get-log-events --log-stream-name $stream_name --log-group-name $group_name --query 'events[].message' --output text\necho Codebuild completed with status $build_status\n```\n\n### Add a job to build the resulting container\n\nOnce the steps one through five are complete, the source repository will be structured as follows:\n\n```plaintext\n/sample-repository\n  ├── .gitlab-ci.yml\n  ├── buildspec.yml\n  ├── codebuild.sh\n  ├── Dockerfile\n  ├── \u003CAPPLICATION-FILES>\n```\n\nThe final step to build the container is to add a job to `.gitlab-ci.yml`:\n\n```yaml\ndockerbuild:\n  stage: deploy\n  script:\n    - zip build.zip buildspec.yml Dockerfile \u003CAPPLICATION-FILES>\n    - aws configure set default.region \u003CREGION>\n    - aws s3 cp build.zip s3://ci-container-build-bucket/gitlab-runner-builds/build.zip\n    - bash codebuild.sh\n```\n\nBelow are some definitions from terms in the script:\n\n- `\u003CAPPLICATION-FILES>` is a placeholder for the files that will be required to successfully build the resulting container image using the `Dockerfile`, e.g., `package.json` and `app.js` in a Node.js application\n- `Dockerfile` is used to build the resulting image. 
_Note: It is not the same file used to build the CI container image, mentioned in [Step 3: Build the CI container image](#step-3-build-the-ci-container-image)_\n- Zip and AWS CLI must be installed in the CI container to make the script work – refer to [Step 3: Build the CI container image](#step-3-build-the-ci-container-image) for details\n\n## Step 6: Set up AWS credentials\n\nThe final step is to set up the AWS credentials. As we already mentioned, the CI job will interact with AWS through the AWS CLI to perform a number of operations, and to do that, the AWS CLI needs to authenticate as an IAM user with the permissions listed below. We recommend you create a new user and grant it minimal privileges instead of using your personal AWS user account. For the sake of simplicity, we suggest this approach to complete this walk-through guide.\n\nThis AWS user only needs programmatic access and do not forget to make note of its Access key ID and Secret access key – they will be needed later. A simple way to grant only the minimal privileges for the new user is to create a customer managed policy since it can be directly attached to the user. 
A group might also be used to grant the same privileges for more users, but it is not mandatory for running the sample workflow.\n\n- S3\n\n  ```json\n  {\n    \"Effect\": \"Allow\",\n    \"Action\": \"s3:PutObject\",\n    \"Resource\": \"arn:aws:s3:::ci-container-build-bucket/gitlab-runner-builds/*\"\n  }\n  ```\n\n- CodeBuild\n\n  ```json\n  {\n    \"Effect\": \"Allow\",\n    \"Action\": [\"codebuild:StartBuild\", \"codebuild:BatchGetBuilds\"],\n    \"Resource\": \"arn:aws:codebuild:\u003CREGION>:\u003CACCOUNT-ID>:project/ci-container-build-project\"\n  }\n  ```\n\n- CloudWatch Logs\n\n  ```json\n  {\n    \"Effect\": \"Allow\",\n    \"Action\": \"logs:GetLogEvents\",\n    \"Resource\": \"arn:aws:logs:\u003CREGION>:\u003CACCOUNT-ID>:log-group:/aws/codebuild/ci-container-build-project:log-stream:*\"\n  }\n  ```\n\nThe access credentials can be provided to AWS CLI through GitLab CI environment variables. Please go to your GitLab Project's **CI/CD Settings**, click **Expand** in the **Variables** section, add `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` with the values you got from the AWS Management Console after creating the IAM user. See the image below for the result you can expect:\n\n![Providing AWS credentials for GitLab Runner](https://about.gitlab.com/images/blogimages/build-container-image-runner-fargate-codebuild-credentials.png)\n\nUsing an IAM Role and [Amazon ECS temporary/unique security credentials](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html) is also possible, but not covered in this tutorial.\n{: .note.text-center}\n\n## Step 7: It's showtime\n\nWith all configurations in place, commit the changes and trigger a new pipeline to watch the magic happen!\n\n### Just need the highlights?\n\n1. The CI job script added in [Step 5](#add-a-job-to-build-the-resulting-container) compresses the resulting container image build files into `build.zip`\n1. 
`build.zip` is then uploaded to the S3 Bucket we created in [Step 1: Create an Amazon S3 Bucket](#step-1-create-an-amazon-s3-bucket)\n1. Next, `codebuild.sh` starts a CodeBuild job based on the project created in [Step 2: Create an AWS CodeBuild Project](#step-2-create-an-aws-codebuild-project) (Note: that project has an S3 object as its source provider)\n1. Finally, the CodeBuild job downloads `gitlab-runner-builds/build.zip` from S3, decompresses it and – from `buildspec.yml`– builds the resulting container image\n\nA sample repository, demonstrating everything described in the article is available [here](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/codebuild-on-fargate-example/).\n\n## Cleanup\n\nIf you want to perform a cleanup after testing the custom executor with AWS Fargate and CodeBuild, you should remove the following objects:\n\n- AWS S3 bucket created in [Step 1](#step-1-create-an-amazon-s3-bucket)\n- AWS CodeBuild project created in [Step 2](#step-2-create-an-aws-codebuild-project)\n- `RUN` command added to the CI container image in [Step 3](#step-3-build-the-ci-container-image)\n- The `buildspec.yml` file created in [Step 4](#step-4-add-codebuild-configuration-to-the-repository)\n- The `codebuild.sh` file created in [Step 5](#step-5-set-up-the-gitlab-ci-job)\n- The `dockerbuild` job added to `.gitlab-ci.yml` in [Step 5](#step-5-set-up-the-gitlab-ci-job)\n- IAM policy, user (and maybe group) created in [Step 6](#step-6-set-up-aws-credentials)\n- GitLab CI/CD variables in [Step 6](#step-6-set-up-aws-credentials)\n\nRead more about GitLab and AWS:\n-[How autoscaling GitLab CI works on AWS Fargate](/blog/introducing-autoscaling-gitlab-runners-on-aws-fargate/)\n-[GitLab 12.10 released with Requirements Management and Autoscaling CI on AWS Fargate](/releases/2020/04/22/gitlab-12-10-released/)\n-[Announcing 32/64-bit Arm Runner Support for AWS Graviton2](/blog/gitlab-arm-aws-graviton2-solution/)\n\nCover image by [Lucas van 
Oort](https://unsplash.com/@switch_dtp_fotografie) on [Unsplash](https://unsplash.com)\n{: .note}\n",[108,230,9],{"slug":1064,"featured":6,"template":684},"aws-fargate-codebuild-build-containers-gitlab-runner","content:en-us:blog:aws-fargate-codebuild-build-containers-gitlab-runner.yml","Aws Fargate Codebuild Build Containers Gitlab Runner","en-us/blog/aws-fargate-codebuild-build-containers-gitlab-runner.yml","en-us/blog/aws-fargate-codebuild-build-containers-gitlab-runner",{"_path":1070,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1071,"content":1077,"config":1085,"_id":1087,"_type":13,"title":1088,"_source":15,"_file":1089,"_stem":1090,"_extension":18},"/en-us/blog/basics-of-gitlab-ci-updated",{"title":1072,"description":1073,"ogTitle":1072,"ogDescription":1073,"noIndex":6,"ogImage":1074,"ogUrl":1075,"ogSiteName":669,"ogType":670,"canonicalUrls":1075,"schema":1076},"Running CI jobs in sequential, parallel, and custom orders","New to continuous integration? Learn how to build your first CI pipeline with GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662061/Blog/Hero%20Images/cicdcover.png","https://about.gitlab.com/blog/basics-of-gitlab-ci-updated","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The basics of CI: How to run jobs sequentially, in parallel, or out of order\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2020-12-10\",\n      }",{"title":1078,"description":1073,"authors":1079,"heroImage":1074,"date":1081,"body":1082,"category":769,"tags":1083,"updatedDate":1084},"The basics of CI: How to run jobs sequentially, in parallel, or out of order",[1080],"Itzik Gan Baruch","2020-12-10","Let's assume that you don't know anything about [continuous integration (CI)](/topics/ci-cd/) and [why it's needed](/blog/how-to-keep-up-with-ci-cd-best-practices/) in the software development 
lifecycle.\n\nImagine that you work on a project, where all the code consists of two text files. Moreover, it is super critical that the concatenation of these two files contains the phrase \"Hello, world.\"\n\nIf it's not there, the whole development team won't get paid that month. Yeah, it is that serious!\n\nThe most responsible software developer wrote a small script to run every time we are about to send our code to customers.\n\nThe code is pretty sophisticated:\n\n```bash\ncat file1.txt file2.txt | grep -q \"Hello world\"\n```\n\nThe problem is that there are 10 developers on the team, and, you know, human factors can hit hard.\n\nA week ago, a new guy forgot to run the script and three clients got broken builds. So you decided to solve the problem once and for all. Luckily, your code is already on GitLab, and you remember that there is [built-in CI](/solutions/continuous-integration/). Moreover, you heard at a conference that people use CI to run tests...\n\n## Let's run our first test inside CI\n\nAfter taking a couple of minutes to find and read the docs, it seems like all we need is these two lines of code in a file called `.gitlab-ci.yml`:\n\n```yaml\ntest:\n  script: cat file1.txt file2.txt | grep -q 'Hello world'\n```\n\nWe commit it, and hooray! Our build is successful:\n\n![build succeeded](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674096/Blog/Content%20Images/build_succeeded.png)\n\nLet's change \"world\" to \"Africa\" in the second file and check what happens:\n\n![build failed](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674096/Blog/Content%20Images/build_failed.png)\n\nThe build fails as expected!\n\nOK, we now have automated tests here! 
GitLab CI will run our test script every time we push new code to the source code repository in the DevOps environment.\n\n**Note:** In the above example, we assume that file1.txt and file2.txt exist in the runner host.\n\nTo run this example in GitLab, use the below code that first will create the files and then run the script.\n\n```yaml\ntest:\nbefore_script:\n      - echo \"Hello \" > | tr -d \"\\n\" | > file1.txt\n      - echo \"world\" > file2.txt\nscript: cat file1.txt file2.txt | grep -q 'Hello world'\n```\n\nFor the sake of compactness, we will assume that these files exist in the host, and will not create them in the following examples.\n\n## Make results of builds downloadable\n\nThe next business requirement is to package the code before sending it to our customers. Let's automate that part of the software development process as well!\n\nAll we need to do is define another job for CI. Let's name the job \"package\":\n\n```yaml\ntest:\n  script: cat file1.txt file2.txt | grep -q 'Hello world'\n\npackage:\n  script: cat file1.txt file2.txt | gzip > package.gz\n```\n\nWe have two tabs now:\n\n![Two tabs - generated from two jobs](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674096/Blog/Content%20Images/two_tabs.png)\n\nHowever, we forgot to specify that the new file is a build _artifact_, so that it could be downloaded. We can fix it by adding an `artifacts` section:\n\n```yaml\ntest:\n  script: cat file1.txt file2.txt | grep -q 'Hello world'\n\npackage:\n  script: cat file1.txt file2.txt | gzip > packaged.gz\n  artifacts:\n    paths:\n    - packaged.gz\n```\n\nChecking... it is there:\n\n![Checking the download button](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674096/Blog/Content%20Images/artifacts.png)\n\nPerfect, it is! 
However, we have a problem to fix: The jobs are running in parallel, but we do not want to package our application if our tests fail.\n\n## Run jobs sequentially\n\nWe only want to run the 'package' job if the tests are successful. Let's define the order by specifying `stages`:\n\n```yaml\nstages:\n  - test\n  - package\n\ntest:\n  stage: test\n  script: cat file1.txt file2.txt | grep -q 'Hello world'\n\npackage:\n  stage: package\n  script: cat file1.txt file2.txt | gzip > packaged.gz\n  artifacts:\n    paths:\n    - packaged.gz\n```\n\nThat should be good!\n\nAlso, we forgot to mention, that compilation (which is represented by concatenation in our case) takes a while, so we don't want to run it twice. Let's define a separate step for it:\n\n```yaml\nstages:\n  - compile\n  - test\n  - package\n\ncompile:\n  stage: compile\n  script: cat file1.txt file2.txt > compiled.txt\n  artifacts:\n    paths:\n    - compiled.txt\n\ntest:\n  stage: test\n  script: cat compiled.txt | grep -q 'Hello world'\n\npackage:\n  stage: package\n  script: cat compiled.txt | gzip > packaged.gz\n  artifacts:\n    paths:\n    - packaged.gz\n```\n\nLet's take a look at our artifacts:\n\n![Unnecessary artifact](https://about.gitlab.com/images/blogimages/the-basics-of-gitlab-ci/clean-artifacts.png)\n\nHmm, we do not need that \"compile\" file to be downloadable. Let's make our temporary artifacts expire by setting `expire_in` to '20 minutes':\n\n```yaml\ncompile:\n  stage: compile\n  script: cat file1.txt file2.txt > compiled.txt\n  artifacts:\n    paths:\n    - compiled.txt\n    expire_in: 20 minutes\n```\n\nNow our config looks pretty impressive:\n\n- We have three sequential stages to compile, test, and package our application.\n- We pass the compiled app to the next stages so that there's no need to run compilation twice (so it will run faster).\n- We store a packaged version of our app in build artifacts for further usage.\n\n## Learning which Docker image to use\n\nSo far, so good. 
However, it appears our builds are still slow. Let's take a look at the logs.\n\n![ruby3.1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674096/Blog/Content%20Images/ruby-31.png)\n\nWait, what is this? Ruby 3.1?\n\nWhy do we need Ruby at all? Oh, GitLab.com uses Docker images to [run our builds](/blog/shared-runners/), and [by default](https://docs.gitlab.com/ee/user/gitlab_com/#shared-runners) it uses the [`ruby:3.1`](https://hub.docker.com/_/ruby/) image. For sure, this image contains many packages we don't need. After a minute of Googling, we figure out that there's an image called [`alpine`](https://hub.docker.com/_/alpine/), which is an almost blank Linux image.\n\nOK, let's explicitly specify that we want to use this image by adding `image: alpine` to `.gitlab-ci.yml`.\n\nNow we're talking! We shaved nearly three minutes off:\n\n![Build speed improved](https://about.gitlab.com/images/blogimages/the-basics-of-gitlab-ci/speed.png)\n\nIt looks like there are a lot of public images around:\n- [mysql](https://hub.docker.com/_/mysql/)\n- [Python](https://hub.docker.com/_/python/)\n- [Java](https://hub.docker.com/_/java/)\n- [php](https://hub.docker.com/_/php/)\n\nSo we can just grab one for our technology stack. It makes sense to specify an image that contains no extra software because it minimizes download time.\n\n## Dealing with complex scenarios\n\nSo far, so good. However, let's suppose we have a new client who wants us to package our app into `.iso` image instead of `.gz`. Since CI does all the work, we can just add one more job to it. ISO images can be created using the [mkisofs](http://www.w3big.com/linux/linux-comm-mkisofs.html) command. Here's how our config should look:\n\n```yaml\nimage: alpine\n\nstages:\n  - compile\n  - test\n  - package\n\n# ... 
\"compile\" and \"test\" jobs are skipped here for the sake of compactness\n\npack-gz:\n  stage: package\n  script: cat compiled.txt | gzip > packaged.gz\n  artifacts:\n    paths:\n    - packaged.gz\n\npack-iso:\n  stage: package\n  script:\n  - mkisofs -o ./packaged.iso ./compiled.txt\n  artifacts:\n    paths:\n    - packaged.iso\n```\n\nNote that job names shouldn't necessarily be the same. In fact, if they were the same, it wouldn't be possible to make the jobs run in parallel inside the same stage of the software development process. Hence, think of same names of jobs and stages as coincidence.\n\nAnyhow, the build is failing:\n\n![Failed build because of missing mkisofs](https://about.gitlab.com/images/blogimages/the-basics-of-gitlab-ci/mkisofs.png)\n\nThe problem is that `mkisofs` is not included in the `alpine` image, so we need to install it first.\n\n## Dealing with missing software/packages\n\nAccording to the [Alpine Linux website](https://pkgs.alpinelinux.org/contents?file=mkisofs&path=&name=&branch=edge&repo=&arch=) `mkisofs` is a part of the `xorriso` and `cdrkit` packages. These are the magic commands that we need to run to install a package:\n\n```bash\necho \"ipv6\" >> /etc/modules  # enable networking\napk update                   # update packages list\napk add xorriso              # install package\n```\n\nFor CI, these are just like any other commands. The full list of commands we need to pass to `script` section should look like this:\n\n```yml\nscript:\n- echo \"ipv6\" >> /etc/modules\n- apk update\n- apk add xorriso\n- mkisofs -o ./packaged.iso ./compiled.txt\n```\n\nHowever, to make it semantically correct, let's put commands related to package installation in `before_script`. Note that if you use `before_script` at the top level of a configuration, then the commands will run before all jobs. 
In our case, we just want it to run before one specific job.\n\n## Directed Acyclic Graphs: Get faster and more flexible pipelines\n\nWe defined stages so that the package jobs will run only if the tests passed. What if we want to break the stage sequencing a bit, and run a few jobs earlier, even if they are defined in a later stage? In some cases, the traditional stage sequencing might slow down the overall pipeline execution time.\n\nImagine that our test stage includes a few more heavy tests that take a lot of time to execute, and that those tests are not necessarily related to the package jobs. In this case, it would be more efficient if the package jobs don't have to wait for those tests to complete before they can start. This is where Directed Acyclic Graphs (DAG) come in: To break the stage order for specific jobs, you can define job dependencies which will skip the regular stage order.\n\nGitLab has a special keyword `needs`, which creates dependencies between jobs, and allows jobs to run earlier, as soon as their dependent jobs complete.\n\nIn the below example, the pack jobs will start running as soon as the test job completes, so if, in future, someone adds more tests in the test stage, the package jobs will start to run before the new test jobs complete:\n\n```yaml\npack-gz:\n  stage: package\n  script: cat compiled.txt | gzip > packaged.gz\n  needs: [\"test\"]\n  artifacts:\n    paths:\n    - packaged.gz\n\npack-iso:\n  stage: package\n  before_script:\n  - echo \"ipv6\" >> /etc/modules\n  - apk update\n  - apk add xorriso\n  script:\n  - mkisofs -o ./packaged.iso ./compiled.txt\n  needs: [\"test\"]\n  artifacts:\n    paths:\n    - packaged.iso\n```\n\nOur final version of `.gitlab-ci.yml`:\n\n```yaml\nimage: alpine\n\nstages:\n  - compile\n  - test\n  - package\n\ncompile:\n  stage: compile\n  before_script:\n      - echo \"Hello  \" | tr -d \"\\n\" > file1.txt\n      - echo \"world\" > file2.txt\n  script: cat file1.txt file2.txt > compiled.txt\n  
artifacts:\n    paths:\n    - compiled.txt\n    expire_in: 20 minutes\n\ntest:\n  stage: test\n  script: cat compiled.txt | grep -q 'Hello world'\n\npack-gz:\n  stage: package\n  script: cat compiled.txt | gzip > packaged.gz\n  needs: [\"test\"]\n  artifacts:\n    paths:\n    - packaged.gz\n\npack-iso:\n  stage: package\n  before_script:\n  - echo \"ipv6\" >> /etc/modules\n  - apk update\n  - apk add xorriso\n  script:\n  - mkisofs -o ./packaged.iso ./compiled.txt\n  needs: [\"test\"]\n  artifacts:\n    paths:\n    - packaged.iso\n```\n\nWow, it looks like we have just created a pipeline! We have three sequential stages, the jobs `pack-gz` and `pack-iso`, inside the `package` stage, are running in parallel:\n\n![Pipelines illustration](https://about.gitlab.com/images/blogimages/the-basics-of-gitlab-ci/pipeline.png)\n\n## Elevating your pipeline\n\nHere is how to elevate your pipeline.\n\n### Incorporating automated testing into CI pipelines\n\nIn DevOps, a key software development strategy rule is making really great apps with amazing user experience. So, let's add some tests in our CI pipeline to catch bugs early in the entire process. This way, we fix issues before they get big and before we move on to work on a new project.\n\nGitLab makes our lives easier by offering out-of-the-box templates for various [tests](https://docs.gitlab.com/ee/ci/testing/). 
All we need to do is include these templates in our CI configuration.\n\nIn this example, we will include [accessibility testing](https://docs.gitlab.com/ee/ci/testing/accessibility_testing.html):\n\n```yaml\nstages:\n  - accessibility\n\nvariables:\n  a11y_urls: \"https://about.gitlab.com https://www.example.com\"\n\ninclude:\n  - template: \"Verify/Accessibility.gitlab-ci.yml\"\n```\n\nCustomize the `a11y_urls` variable to list the URLs of the web pages to test with [Pa11y](https://pa11y.org/) and [code quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html).\n\n```yaml\n   include:\n   - template: Jobs/Code-Quality.gitlab-ci.yml\n```\n\nGitLab makes it easy to see the test report right in the merge request widget area. Having the code review, pipeline status, and test results in one spot makes everything smoother and more efficient.\n\n![Accessibility report](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674096/Blog/Content%20Images/Screenshot_2024-04-02_at_10.56.41.png)\n\u003Ccenter>\u003Ci>Accessibility merge request widget\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\n![Code quality widget in MR](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674096/Blog/Content%20Images/Screenshot_2024-04-02_at_11.00.25.png)\n\u003Ccenter>\u003Ci>Code quality merge request widget\u003C/i>\u003C/center>\n\n### Matrix builds\n\nIn some cases, we will need to test our app in different configurations, OS versions, programming language versions, etc. For those cases, we'll use the [parallel:matrix](https://docs.gitlab.com/ee/ci/yaml/#parallelmatrix) build to test our application across various combinations in parallel using one job configuration. 
In this blog, we'll test our code with different Python versions using the matrix keyword.\n\n```yaml\npython-req:\n  image: python:$VERSION\n  stage: lint\n  script:\n    - pip install -r requirements_dev.txt\n    - chmod +x ./build_cpp.sh\n    - ./build_cpp.sh\n  parallel:\n    matrix:\n      - VERSION: ['3.8', '3.9', '3.10', '3.11']   # https://hub.docker.com/_/python\n```\n\nDuring pipeline execution, this job will run in parallel four times, each time using different Python image as shown below:\n\n![Matrix job running](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674096/Blog/Content%20Images/Screenshot_2024-04-02_at_11.12.48.png)\n\n### Unit testing\n\n#### What are unit tests?\n\nUnit tests are small, targeted tests that check individual components or functions of software to ensure they work as expected. They are essential for catching bugs early in the software development process and verifying that each part of the code performs correctly in isolation.\n\nExample: Imagine you're developing a calculator app. A unit test for the addition function would check if 2 + 2 equals 4. If this test passes, it confirms that the addition function is working correctly.\n\n#### Unit testing best practices\n\nIf the tests fail, the pipeline fails and users get notified. The developer needs to check the job logs, which usually contain thousands of lines, and see where the tests failed so that they can fix them. This check is time-consuming and inefficient.\n\nYou can configure your job to use [unit test reports](https://docs.gitlab.com/ee/ci/testing/unit_test_reports.html). 
GitLab displays reports on the merge request and on the pipelines details page, making it easier and faster to identify the failure without having to check the entire log.\n\n##### JUnit test report\n\nThis is a sample JUnit test report:\n\n![pipelines JUnit test report v13 10](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674097/Blog/Content%20Images/pipelines_junit_test_report_v13_10.png){: .shadow.center}\n\n### Integration and end-to-end testing strategies\n\nIn addition to our regular development routine, it's super important to set up a special pipeline just for integration and end-to-end testing. This checks that all the different parts of our code work together smoothly, including those [microservices](https://about.gitlab.com/topics/microservices/), UI testing, and any other components.\n\nWe run these tests [nightly](https://docs.gitlab.com/ee/ci/pipelines/schedules.html). We can set it up so that the [results automatically get sent to a special Slack channel](https://docs.gitlab.com/ee/user/project/integrations/gitlab_slack_application.html#notification-events). This way, when developers come in the next day, they can quickly spot any issues. It's all about catching and fixing problems early on!\n\n### Test environment\n\nFor some of the tests, we may need a test environment to properly test our apps. With GitLab CI/CD, we can automate the deployment of testing environments and save a ton of time. Since this blog mostly focuses on CI, I won't elaborate on this, but you can refer to this section in the [GitLab documentation](https://docs.gitlab.com/ee/topics/release_your_application.html).\n\n## Implementing security scans in CI pipelines\n\nHere are the ways to implement security scans in CI pipelines.\n\n### SAST and DAST integration\n\nWe're all about keeping our code safe. If there are any vulnerabilities in our latest changes, we want to know ASAP. That's why it's a good idea to add security scans to your pipeline. 
They'll check the code with every commit and give you a heads up about any risks. We've put together a product tour to walk you through adding scans, including static application security testing ([SAST](https://docs.gitlab.com/ee/user/application_security/sast/)) and dynamic application security testing ([DAST](https://docs.gitlab.com/ee/user/application_security/dast/)), to your CI pipeline.\n\n__Click__ the image below to start the tour.\n\n[![Scans product tour](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674096/Blog/Content%20Images/Screenshot_2024-04-14_at_13.44.42.png)](https://gitlab.navattic.com/gitlab-scans)\n\nPlus, with AI, we can dig even deeper into vulnerabilities and get suggestions on how to fix them. Check out this demo for more info.\n\n__Click__ the image below to start the tour.\n\n[![product tour explain vulnerability ](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674096/Blog/Content%20Images/Screenshot_2024-04-14_at_13.50.24.png)](https://tech-marketing.gitlab.io/static-demos/pt-explain-vulnerability.html)\n\n## Recap\n\nThere's much more to cover but let's stop here for now. All examples were made intentionally trivial so that you could learn the concepts of GitLab CI without being distracted by an unfamiliar technology stack. Let's wrap up what we have learned:\n\n1. To delegate some work to GitLab CI you should define one or more [jobs](https://docs.gitlab.com/ee/ci/jobs/) in `.gitlab-ci.yml`.\n2. Jobs should have names and it's your responsibility to come up with good ones.\n3. Every job contains a set of rules and instructions for GitLab CI, defined by [special keywords](#keywords).\n4. Jobs can run sequentially, in parallel, or out of order using [DAG](https://docs.gitlab.com/ee/ci/directed_acyclic_graph/index.html).\n5. You can pass files between jobs and store them in build artifacts so that they can be downloaded from the interface.\n6. 
Add [tests and security scans](https://docs.gitlab.com/ee/development/integrations/secure.html) to the CI pipeline to ensure the quality and security of your app.\n\nBelow are more formal descriptions of the terms and keywords we used, as well as links to the relevant documentation.\n\n### Keyword descriptions and documentation\n\n{: #keywords}\n\n| Keyword/term       | Description |\n|---------------|--------------------|\n| [.gitlab-ci.yml](https://docs.gitlab.com/ee/ci/yaml/) | File containing all definitions of how your project should be built |\n| [script](https://docs.gitlab.com/ee/ci/yaml/#script)        | Defines a shell script to be executed |\n| [before_script](https://docs.gitlab.com/ee/ci/yaml/#before_script) | Used to define the command that should be run before (all) jobs |\n| [image](https://docs.gitlab.com/ee/ci/docker/using_docker_images.html#what-is-image) | Defines what Docker image to use |\n| [stages](https://docs.gitlab.com/ee/ci/yaml/#stages)         | Defines a pipeline stage (default: `test`) |\n| [artifacts](https://docs.gitlab.com/ee/ci/yaml/#artifacts)     | Defines a list of build artifacts |\n| [artifacts:expire_in](https://docs.gitlab.com/ee/ci/yaml/#artifactsexpire_in) | Used to delete uploaded artifacts after the specified time |\n| [needs](https://docs.gitlab.com/ee/ci/yaml/#needs) | Used to define dependencies between jobs and allows to run jobs out of order |\n| [pipelines](https://about.gitlab.com/topics/ci-cd/cicd-pipeline/) | A pipeline is a group of builds that get executed in stages (batches) |\n\n## More on CI/CD\n\n- [GitLab’s guide to CI/CD for beginners](/blog/beginner-guide-ci-cd/)\n- [Get faster and more flexible pipelines with a Directed Acyclic Graph](/blog/directed-acyclic-graph/)\n- [Decrease build time with custom Docker image](http://beenje.github.io/blog/posts/gitlab-ci-and-conda/)\n- [Introducing the GitLab CI/CD Catalog Beta](https://about.gitlab.com/blog/introducing-the-gitlab-ci-cd-catalog-beta/)\n\n## 
FAQ\n\n### How do you choose between running CI jobs sequentially vs. in parallel?\n\nConsiderations for choosing between running CI jobs sequentially or in parallel include job dependencies, resource availability, execution times, potential interference, test suite structure, and cost considerations. For example, if you have a build job that must finish before a deployment job can start, you would run these jobs sequentially to ensure the correct order of execution. On the other hand, tasks such as unit testing and integration testing can typically run in parallel since they are independent and don't rely on each other's completion.\n\n### What are directed Acyclic Graphs in GitLab CI, and how do they improve pipeline flexibility?\n\nA Directed Acyclic Graph (DAG) in GitLab CI breaks the linear order of pipeline stages. It lets you set dependencies between jobs, so jobs in later stages start as soon as earlier stage jobs finish. This reduces overall pipeline execution time, improves efficiency, and lets some jobs complete earlier than in a regular order.\n\n### What is the importance of choosing the right Docker image for CI jobs in GitLab?\n\nGitLab utilizes Docker images to execute jobs. The default image is ruby:3.1. Depending on your job's requirements, it's crucial to choose the appropriate image. Note that jobs first download the specified Docker image, and if the image contains additional packages beyond what's necessary, it will increase download and execution times. 
Therefore, it's important to ensure that the chosen image contains only the packages essential for your job to avoid unnecessary delays in execution.\n\n## Next steps\n\nAs a next step and to further modernize your software development practice, check out the [GitLab CI/CD Catalog](https://docs.gitlab.com/ee/architecture/blueprints/ci_pipeline_components/) to learn how to standardize and reuse CI/CD components.",[771,9],"2024-04-24",{"slug":1086,"featured":6,"template":684},"basics-of-gitlab-ci-updated","content:en-us:blog:basics-of-gitlab-ci-updated.yml","Basics Of Gitlab Ci Updated","en-us/blog/basics-of-gitlab-ci-updated.yml","en-us/blog/basics-of-gitlab-ci-updated",{"_path":1092,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1093,"content":1099,"config":1106,"_id":1108,"_type":13,"title":1109,"_source":15,"_file":1110,"_stem":1111,"_extension":18},"/en-us/blog/beginner-guide-python-programming",{"title":1094,"description":1095,"ogTitle":1094,"ogDescription":1095,"noIndex":6,"ogImage":1096,"ogUrl":1097,"ogSiteName":669,"ogType":670,"canonicalUrls":1097,"schema":1098},"How to get started with Python programming","Python is increasingly popular, and for good reason. Here's our beginner's guide.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664962/Blog/Hero%20Images/python.jpg","https://about.gitlab.com/blog/beginner-guide-python-programming","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to get started with Python programming\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2021-10-21\",\n      }",{"title":1094,"description":1095,"authors":1100,"heroImage":1096,"date":1101,"body":1102,"category":1103,"tags":1104},[852],"2021-10-21","Are you a programming enthusiast who wants to learn Python? Are you new to coding? Do you need help deciding where to begin with Python? 
If you are looking for answers to these questions, then you are in the right place.\n\n## How to start writing code with Python\n\nPython is an easy-to-learn, easy-to-use and easy-to-deploy programming language, with rampant usage in building web and desktop applications, analyzing data and performing [DevOps](https://about.gitlab.com/topics/devops/) tasks. It is a free, open-source, object-oriented coding language used to write simple scripts and complex programs. Of the almost 700 programming languages, Python is considered one of the best to learn first.\n\n## Installing Python\n\nBefore discussing the basics of Python, it is essential to download and install Python on your desktop/laptop. Python works on multiple platforms, including Linux, Windows and Mac. It comes preinstalled on most Mac and Linux systems; however, you should download the latest version from the official Python website.\n\nTo check the current Python version on your system, open the command line and type “python -V”. \n\n![command prompt](https://about.gitlab.com/images/blogimages/python1.png){: .shadow}\n\nIf you have an outdated version, download either the 32- or 64-bit setup from the website based on your system requirements.\n\nThere are other alternatives for downloading the setup: for Windows, you can install it directly from Microsoft. For Linux, install it using the package manager. For macOS, you can download it from Homebrew.\n\nOnce the setup is downloaded, run the file installer, and click on “Install Now”. Once the installation is complete, you are ready to go. Below is an example of a Python installation for Windows.\n\n![install Python](https://about.gitlab.com/images/blogimages/python2.png){: .shadow}\n\n## Running Python in command prompt\n\nTo verify Python is installed and working correctly in Windows, open the command prompt and enter “python”, which will invoke the interpreter. You can directly execute Python codes in it.  For example, type “2*5+1” and press “enter”. 
You will see “11” as the output. Entering “quit ()” will exit the interpreter.\n\n![Python interpreter](https://about.gitlab.com/images/blogimages/python3.png){: .shadow}\n\n## Running Python in IDE\n\nWith the latest Python installed, you are now ready to start programming in Python. When writing long scripts or programs in Python, use Python’s built-in Integrated Development and Learning Environment (IDLE).\n\nStart the IDLE and then, from the File dropdown, select “New File”, which opens a new editing window. So now, on your screen, you have two windows: a Python shell and an untitled file.\n\n![Python shell and untitled file](https://about.gitlab.com/images/blogimages/python4.png){: .shadow}\n\nThe Python shell is a REPL environment, which is shorthand for \"read-eval-print loop\". It runs snippets of the code, usually one statement at a time. For example, by repeating the same calculation “2*5+1” that we did in the command prompt, you can see how a Python shell can function as a calculator.\n\n![Python as a calculator](https://about.gitlab.com/images/blogimages/python5.png){: .shadow}\n\nThe untitled window is a text editing window for writing complete programs. The shell displays its output. For example, the conventional first program of Python for beginners is printing “Hello World!”. Make sure you save the text editor before running it by pressing “F5”.\n\n![Hello World](https://about.gitlab.com/images/blogimages/python61.png){: .shadow}\n\n## The basics of Python\n\nWe know you can’t wait to start writing long scripts for games and websites, but you still have a long way to get there. Just like with learning any other language, you must first understand the basics of Python. \n\nThe **print()** function, as seen in the Hello World! example, prints a value on the output window. A value is the most basic thing a program uses. It can be a string, a numeric value or any other Python object. Any object within single/double quotations is called a string. 
For instance, the “Hello World!” that is printed in the above program is also of the type string. Numeric values like 4 and 4.5 are the types of integers and floats, respectively. You can change an integer or float into a string and vice versa using the built-in functions **int()**, **float()** and **str()**.\n\n![value in an output window](https://about.gitlab.com/images/blogimages/python7.png){: .shadow}\n\n## Python’s vocabulary\n\nPython is the simplest coding language. It is easy to read and understand. Unlike human languages, Python has a small vocabulary or reserved words holding special meaning. Terms other than this reserved vocabulary hold meaning only to you and are called variables. These 35 reserved words are:\n\n![Python terms](https://about.gitlab.com/images/blogimages/python8.png){: .shadow}\n\nMake sure you use these words for their specified purpose to avoid confusing the Python interpreter and causing a syntax error.\n\n### Naming variables\n\nSometimes you want to store values in your code for retrieving them later, which you can do by giving them symbolic names called variables. As seen below, we ask Python to store 5 and 6 with labels x and y, respectively, and then retrieve them later to find their sum.\n\n![storing variables](https://about.gitlab.com/images/blogimages/python9.png){: .shadow}\n\nThere are rules for choosing a name for a variable; failing to follow these gives a syntax error. A few mandatory rules are narrated below:\n\n1. The name can contain both letters and numbers, but it can’t start with a number.\n1. An underscore can appear in the name to separate multiple words.\n1. Special symbols like @#$ are illegal and should not appear in the name.\n1. Python keywords should not be used as names for variables.\n\n### Understanding operators and operands\n\nPython uses special symbols called “operators” for representing basic mathematical computation. The values to which these operators are applied are called operands. 
The symbols used as operators for subtraction, addition, division, multiplication and exponentiation are  -,+, /, * and **, respectively. \n\n![symbols for operators](https://about.gitlab.com/images/blogimages/python10.png){: .shadow}\n\nThe modulus operator (%) outputs the remainder of the first operand divided by the second operand. It is useful in checking whether a number is divisible by another and extracting the rightmost digit/digits of a number.\n\n![modulus operator](https://about.gitlab.com/images/blogimages/python11.png){: .shadow}\n\n### Using expressions\n\nA combination of values, variables and operators is called an expression. An expression typed in the shell gets evaluated, and the answer is displayed. However, in a script, an expression doesn't do anything on its own.\n\nPython uses the mathematical convention PEMDAS for the operators, which means that P for Parentheses has the highest precedence, then Exponentiation, Multiplication and Division, which have the same priority. Addition and Subtraction come next and also have the same precedence. Operators that have the same preference are also evaluated from left to right.\n\n![PEMDAS](https://about.gitlab.com/images/blogimages/python12.png){: .shadow}\n\nThe Addition and Multiplication operators also work with strings for concatenation and repeating a string, respectively.\n\n![addition and multiplication operators](https://about.gitlab.com/images/blogimages/python13.png){: .shadow}\n\nPython also allows you to take the value for a variable from the user via their keyboard. This can be done using a built-in function called **input**.\n\n![input](https://about.gitlab.com/images/blogimages/python14.png){: .shadow}\n\n## Write your first program\n\nNow it's time to write a short program using everything you've learned here. Write a script that takes two numbers as input and adds them. 
Do this on your own and see the code below to tally your work.\n\n![write a short program](https://about.gitlab.com/images/blogimages/python15.png){: .shadow}\n\n**Congratulations!** You just wrote your first program.\n\nLearning Python is easy and fun. We just helped you make it through the basics. To become a professional Python Programmer, you still have a lot to learn and practice. Good luck on your journey to becoming an expert coder.\n\nPhoto by \u003Ca href=\"https://unsplash.com/@davidclode?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">David Clode\u003C/a> on \u003Ca href=\"https://unsplash.com/s/photos/python?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>","devsecops",[773,1105,9],"careers",{"slug":1107,"featured":6,"template":684},"beginner-guide-python-programming","content:en-us:blog:beginner-guide-python-programming.yml","Beginner Guide Python Programming","en-us/blog/beginner-guide-python-programming.yml","en-us/blog/beginner-guide-python-programming",{"_path":1113,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1114,"content":1120,"config":1126,"_id":1128,"_type":13,"title":1129,"_source":15,"_file":1130,"_stem":1131,"_extension":18},"/en-us/blog/build-a-new-website-in-a-few-easy-steps-with-gitlab-pages",{"title":1115,"description":1116,"ogTitle":1115,"ogDescription":1116,"noIndex":6,"ogImage":1117,"ogUrl":1118,"ogSiteName":669,"ogType":670,"canonicalUrls":1118,"schema":1119},"Build a new website in a few easy steps with GitLab Pages ","This tutorial shows you how to create and host your personal website using GitLab Pages with a ready-to-use template that you can customize in minutes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097716/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%281%29_7c3TDgNgct9xQbmTJSw0de_1750097716096.png","https://about.gitlab.com/blog/build-a-new-website-in-a-few-easy-steps-with-gitlab-pages","\n                     
   {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Build a new website in a few easy steps with GitLab Pages \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Alex Fracazo\"}],\n        \"datePublished\": \"2025-03-03\",\n      }",{"title":1115,"description":1116,"authors":1121,"heroImage":1117,"date":1123,"body":1124,"category":678,"tags":1125},[1122],"Alex Fracazo","2025-03-03","A personal website is more than just a utility for digital creators and professionals in tech. It's a representation of your brand. But creating one from scratch can be time-consuming and expensive.\n\nWith [GitLab Pages](https://docs.gitlab.com/user/project/pages/), you can host your website with built-in features, including SSL certificates and a GitLab-provided domain. All of this is available on GitLab's free tier, making it an efficient solution for hosting your professional presence.\n\nWe're going to take you on a fun journey to craft a stunning personal website using GitLab Pages! We’ve got a super simple, versatile template that you can easily jazz up to reflect your unique style. So grab your favorite snack, get comfy, and let’s turn your online presence into something truly fabulous!\n\n## Prerequisites\n\nYou will need the following prerequisites before getting started:\n\n* A GitLab account (the [free tier](https://about.gitlab.com/pricing/) is sufficient)  \n* Basic familiarity with HTML/CSS  \n* Content and images you want to add to your website (optional)\n\nOnce you’re set up with a GitLab account and have your content handy, you can move on to the next steps.\n\n## Step 1: Create a new project\n\n1. Sign on to your GitLab account and create a project.\n\n![GitLab Pages tutorial - welcome screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097724/Blog/Content%20Images/Blog/Content%20Images/Capture-2025-02-27-183716_aHR0cHM6_1750097724662.png)\n\n2. 
Click **Create blank project**.\n\n![GitLab Pages tutorial - Create new project screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097725/Blog/Content%20Images/Blog/Content%20Images/Capture-2025-02-27-183814_aHR0cHM6_1750097724663.png)\n\n3. Fill in your project details:\n    * Name your project `yourusername.gitlab.io`. Replace `yourusername` with your GitLab username. **Tip:** The project name determines your website’s URL. If you name your project `yourusername.gitlab.io`, your website will be available at `https://yourusername.gitlab.io` with no additional path. However, if you use any other project name, your site will be available at `https://yourusername.gitlab.io/project-name`.\n    * Make the project public.\n4. Click **Create project**.\n\n![GitLab Pages tutorial - Create blank project screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097725/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097724666.png)\n\n![GitLab Pages tutorial - customized get started page](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097725/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097724668.png)\n\n## Step 2: Add the template files\n\nStart by creating two new files in your repository:\n\n![GitLab Pages tutorial - Add new files to personal page](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097725/Blog/Content%20Images/Blog/Content%20Images/image13_aHR0cHM6_1750097724669.png)\n\n1. First, create `index.html`:\n    * In your project, click the **+** button and select **New file**.\n    * Name the file `index.html`.\n![GitLab Pages tutorial - new file page](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097725/Blog/Content%20Images/Blog/Content%20Images/image14_aHR0cHM6_1750097724671.png)\n    * Add your HTML content.\n        * Use the example HTML provided below. 
(Pro tip: Users can ask GitLab Duo Chat to generate HTML for enhanced functionality.)\n\n```    \n\u003C!DOCTYPE html>\n\u003Chtml>\n\u003Chead>\n    \u003Cmeta charset=\"utf-8\"/>\n    \u003Ctitle>[Your Name] - [Your Title]\u003C/title>\n    \u003Cmeta name=\"description\" content=\"[Your Name] is a [Your Title].\"/>\n    \u003Cmeta name=\"author\" content=\"[Your Name]\"/>\n    \u003Cmeta property=\"og:title\" content=\"[Your Name]\" />\n    \u003Cmeta property=\"og:description\" content=\"[Your Title]\" />\n    \u003Cmeta property=\"og:image\" content=\"og.png\" />\n    \u003Cmeta name=\"viewport\" content=\"width=device-width,initial-scale=1\"/>\n    \u003Clink href=\"https://unpkg.com/basscss@8.0.2/css/basscss.min.css\" rel=\"stylesheet\">\n    \u003Clink href=\"style.css\" rel=\"stylesheet\">\n    \u003Clink rel=\"shortcut icon\" type=\"image/png\" href=\"favicon.png\"/>\n\u003C/head>\n\u003Cbody>\n\u003Cdiv class=\"content\" id=\"content\">\n  \u003Cdiv class=\"p2 sm-p4 mt2 sm-mt4 mb2 sm-mb4\">  \n  \u003Cdiv class=\"fade mt3\">\n    \u003Ca target=\"_new\" href=\"[Your Linkedin URL]\">\n      \u003Cimg class=\"photo\" src=\"profile.png\" width=\"64\" height=\"64\">\n    \u003C/a>\n  \u003C/div>\n  \u003Ch2 class=\"mb0 mt4 fade\">\n    Hello, I'm [Your Name] \n    \u003Cspan class=\"smallcaps\">(\u003C/span>\n    \u003Ca target=\"_new\" href=\"[Your Linkedin URL]\">@[Your Handle]\u003C/a>\n    \u003Cspan class=\"smallcaps\">)\u003C/span>\n  \u003C/h2>\n  \u003Ch2 class=\"mt0 mb4 fade gray\">\n    I'm a [Your Title]\n  \u003C/h2>\n  \u003Cp class=\"mb4 fade\">\n    I'm a [Your Role] at [Your Company], [Brief company description].\n  \u003C/p>\n  \u003Cdiv class=\"fade\">\n    \u003Cp class=\"fade mb4\">\n      Your personal statement about what you do and what you're interested in. 
Add your contact preferences here.\n    \u003C/p>\n  \u003C/div>\n  \u003Cp class=\"fade mb4\">\n    \u003Cspan class=\"gray\">—\u003C/span> \n    [Your Name] \n    \u003Cspan class=\"smallcaps>(\u003C/span>\n    \u003Ca target=\"_new\" href=\"[Your Linkedin URL]\">@[Your Handle]\u003C/a>\n    \u003Cspan class=\"smallcaps\">)\u003C/span>\n  \u003C/p>\n  \u003C/div>\n\u003C/div>\n\u003C/body>\n\u003C/html> \n```\n\n* Add a commit message (e.g., \"Added index.html\").\n  * Click **Commit changes**.\n\n2. Create `style.css` (follow same steps above).\n\n```\nbody {\n  margin: 0;\n  padding: 0;\n  background: #000;\n  color: #f4f4f4;\n  font-family: \"Graphik Web\", system-ui, -apple-system, BlinkMacSystemFont, \"Helvetica Neue\", \"Helvetica\", \"Segoe UI\", Roboto, Ubuntu, sans-serif;\n  font-weight: 400;\n  font-smooth: antialiased;\n  -webkit-font-smoothing: antialiased;\n  -moz-osx-font-smoothing: grayscale;\n}\n\na {\n  color: #ff310a;\n  text-decoration: none;\n}\n\na:hover {\n  color: #CFEF54\n}\n\n.content {\n  max-width: 40rem;\n  margin: 0 auto;\n}\n\nimg.photo {\n  border-radius: 50%;\n}\n\np {\n  font-size: 1.5rem;\n  line-height: 1.4;\n  margin: 0;\n  letter-spacing: -0.05rem;\n}\n\nh2 {\n  font-weight: 400;\n  line-height: 1.3;\n  letter-spacing: -0.05rem;\n}\n\n.smallcaps {\n  font-variant: small-caps;\n  color:#333;\n}\n\n.gray{\n  color: #999;\n}\n\n.preloader {\n  display: flex;\n  justify-content: center;\n  align-items: center;\n  height: 100vh;\n  height: -moz-available;\n  height: -webkit-fill-available;\n  height: fill-available;\n  width: 100%;\n  background: #000;\n  position: fixed;\n  top: 0;\n  left: 0;\n  z-index: 9999;\n  transition: opacity 0.3s linear;\n  transform: translate3d(0, 0, 0);\n}\n\nbody.loaded .preloader {\n  opacity: 0;\n}\n\n.fade {\n  animation: fadeIn 1s ease-in-out both;\n}\n\n.fade:nth-child(2) {\n\tanimation-delay: 1s;\n}\n\n.fade:nth-child(3) {\n\tanimation-delay: 2s;\n}\n\n.fade:nth-child(4) {\n\tanimation-delay: 
3s;\n}\n\n.fade:nth-child(5) {\n\tanimation-delay: 4s;\n}\n\n.fade:nth-child(6) {\n\tanimation-delay: 5s;\n}\n\n.fade:nth-child(7) {\n\tanimation-delay: 6s;\n}\n\n.fade:nth-child(8) {\n\tanimation-delay: 7s;\n}\n\n.fade:nth-child(9) {\n\tanimation-delay: 8s;\n}\n\n.fade:nth-child(10) {\n\tanimation-delay: 9s;\n}\n\n.fade:nth-child(11) {\n\tanimation-delay: 10s;\n}\n\n.fade:nth-child(12) {\n\tanimation-delay: 11s;\n}\n\n.fade:nth-child(13) {\n\tanimation-delay: 12s;\n}\n\n@keyframes fadeIn {\n\tfrom {\n\t\topacity: 0;\n\t\ttransform: translate3d(0, 0%, 0);\n\t}\n\tto {\n\t\topacity: 1;\n\t\ttransform: translate3d(0, 0, 0);\n\t}\n} \n\n```\n\n## Step 3: Configure GitLab CI file\n\nThere are two ways to create the GitLab CI configuration file that tells GitLab how to build and deploy your site:\n\n![GitLab Pages tutorial - optimize your workflow with CI/CD pipelines screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097725/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097724672.png)\n\n**Option 1: Use Pipeline Editor (recommended)**\n\n1. Go to your project's **Build > Pipeline Editor**.\n\n![GitLab Pages tutorial - pipeline editor/main branch](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097725/Blog/Content%20Images/Blog/Content%20Images/image12_aHR0cHM6_1750097724673.png)\n\n2. The `.gitlab-ci.yml` file will be automatically created. \n3. Copy and paste the following configuration: \n\n```\npages:\n  stage: deploy\n  script:\n    - mkdir .public\n    - cp -r * .public\n    - mv .public public\n  artifacts:\n    paths:\n      - public\n  only:\n    - main\n```\n\n![GitLab Pages Tutorial - New file in window](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097725/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097724674.png)\n\n**Option 2: Manual creation**\n\nIf you prefer to create the file manually: \n1. Create a new file named `.gitlab-ci.yml`. \n2. 
Add the following configuration:\n\n```\npages:\n  stage: deploy\n  script:\n    - mkdir .public\n    - cp -r * .public\n    - mv .public public\n  artifacts:\n    paths:\n      - public\n  only:\n    - main\n```\n\nThe key to getting your site running is the GitLab CI configuration file. This file tells GitLab how to build and deploy your site.\n\nLet's break down what each part does:\n\n**The script part**\n\n```\nscript:\n  - mkdir .public\n  - cp -r * .public\n  - mv .public public\n```\n\nThis creates a folder called `public` and copies all your website files into it. GitLab Pages uses this folder to serve your website by default, though you can [customize the publishing folder](https://docs.gitlab.com/user/project/pages/introduction/#customize-the-default-folder) if needed.\n\n**The only part**\n\n```\nonly:\n  - main\n\n```\n\nThis tells GitLab to only update your website when changes are made to the main branch. This helps prevent accidental updates from experimental changes.\n\n## Step 4: Watch the magic happen\n1. Commit all your changes.\n2. Go to **Build > Pipelines** to watch your deployment.\n3. Wait for the pipeline to complete successfully (indicated by a green checkmark).\n\n![GitLab Pages tutorial - pipeline running for new page](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097725/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097724676.png)\n\n![GitLab Pages tutorial - pipeline passed for new page](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097725/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097724677.png)\n\n## Step 5: Access your website\n\nOnce the pipeline completes successfully, your website will be available at: **https://[yourusername].gitlab.io/** .\n\nYou can find an overview of your deployed website and additional settings in your project's **Deploy > Pages** section. Here you'll find useful information. 
including: \n\n* Your website's access URLs   \n* Domain settings  \n  * By default GitLab enables **Unique domain**. Make sure to disable it if you want to use the GitLab-provided domain. Learn more with the [unique domain documentation](https://docs.gitlab.com/ee/user/project/pages#unique-domains).  \n* HTTPS certificates status   \n* Recent deployments   \n* Additional configuration options\n* Custom domains\n\nThis section is particularly helpful when setting up custom domains or troubleshooting deployment issues.\n\n**Customize your site**\n\n![GitLab Pages tutorial - customize site](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097725/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750097724678.png)\n\n1. Replace all “Your ...” placeholders in `index.html` with your information.\n\n![GitLab Pages tutorial - upload file to customize page](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097725/Blog/Content%20Images/Blog/Content%20Images/image11_aHR0cHM6_1750097724679.png)\n\n2. Add your images:\n    - profile.png - your profile photo (64x64px)\n    - favicon.png - your site favicon (32x32px)\n    - Og.png - OpenGraph image for social media preview (1200x630px)\n\n**See it in action**\n\nIf you're familiar with GitLab, feel free to [fork my repository](https://gitlab.com/fracazo/fracazo.gitlab.io) to get started quickly. \n\nHere is the final result:\n[https://fracazo.gitlab.io/](https://fracazo.gitlab.io/)\n\n**Common issues and solutions**\n- By default, GitLab enables \"Unique domain\" for Pages projects. To use the simpler GitLab-provided domain (like `username.gitlab.io`), go to **Deploy > Pages** and disable the \"Use unique domain\" option. 
While unique domains offer some technical advantages, like better asset path handling, you might prefer the cleaner URL structure for a personal website.\n- If your pipeline fails, check that you're using `main` instead of `master` in your `.gitlab-ci.yml` file.\n- Ensure your group and project are public for GitLab Pages to work.\n- If any jobs fail in your pipeline, you can check the job log for detailed error messages to help with troubleshooting.\n\nWith GitLab Pages and this template, you can have a professional/personal website up and running in minutes. The template is clean, responsive, and easy to customize. As you grow professionally, you can easily update your site directly through GitLab. \n\nYou can automate the deployment process by leveraging GitLab's CI/CD capabilities and focusing on creating great content.\n\nThe best part? All of this is available on GitLab's free tier, making it an excellent option for free hosting of your personal projects, documentation sites, or even small business websites. For more advanced features and configurations, check out our [Pages documentation](https://docs.gitlab.com/ee/user/project/pages/).\n\n## What’s next for GitLab Pages?\nWe're constantly working to make GitLab Pages even better for creators and developers. Here are some exciting improvements coming soon: \n\n### Simplified domain management \nWe have some exciting updates coming to GitLab Pages that will make managing your domains even easier and more fun! You can look forward to a streamlined dashboard that brings all your domain settings together in one friendly space, making everything easily accessible. \n\nYou’ll stay informed with real-time updates on your DNS and SSL certificate statuses, helping you keep your domains secure and running smoothly. \n\n### Custom domain setup\nSetting up custom domains will be a breeze with our easy-to-follow process, guiding you every step of the way. 
Plus, you'll be able to set up your custom domains to automatically redirect visitors from your old website address to your new one – perfect for when you want all your traffic to go to one main website. Learn more about [custom domains](https://docs.gitlab.com/ee/user/project/pages/custom_domains_ssl_tls_certification/index.html#set-up-a-custom-domain).\n\n> Get started with GitLab Pages today with [GitLab's free tier](https://about.gitlab.com/pricing/)! \n\n## Learn more\n- [GitLab Pages features review apps and multiple website deployment](https://about.gitlab.com/blog/gitlab-pages-features-review-apps-and-multiple-website-deployment/)\n- [GitLab Pages: Multiple website deployment documentation](https://docs.gitlab.com/user/project/pages/#parallel-deployments)\n- [GitLab Pages examples](https://gitlab.com/pages)",[9,478],{"slug":1127,"featured":6,"template":684},"build-a-new-website-in-a-few-easy-steps-with-gitlab-pages","content:en-us:blog:build-a-new-website-in-a-few-easy-steps-with-gitlab-pages.yml","Build A New Website In A Few Easy Steps With Gitlab Pages","en-us/blog/build-a-new-website-in-a-few-easy-steps-with-gitlab-pages.yml","en-us/blog/build-a-new-website-in-a-few-easy-steps-with-gitlab-pages",{"_path":1133,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1134,"content":1140,"config":1147,"_id":1149,"_type":13,"title":1150,"_source":15,"_file":1151,"_stem":1152,"_extension":18},"/en-us/blog/build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow",{"title":1135,"description":1136,"ogTitle":1135,"ogDescription":1136,"noIndex":6,"ogImage":1137,"ogUrl":1138,"ogSiteName":669,"ogType":670,"canonicalUrls":1138,"schema":1139},"Build an ML app pipeline with GitLab Model Registry using MLflow","Learn how to manage your ML apps entirely through GitLab with this tutorial. 
Also discover the role machine learning operations, or MLOps, plays in automating the DevSecOps lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749660151/Blog/Hero%20Images/blog-image-template-1800x945__26_.png","https://about.gitlab.com/blog/build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Build an ML app pipeline with GitLab Model Registry using MLflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Gufran Yeşilyurt, OBSS\"},{\"@type\":\"Person\",\"name\":\"Péter Bozsó\"}],\n        \"datePublished\": \"2024-09-17\",\n      }",{"title":1135,"description":1136,"authors":1141,"heroImage":1137,"date":1144,"body":1145,"category":702,"tags":1146},[1142,1143],"Gufran Yeşilyurt, OBSS","Péter Bozsó","2024-09-17","__*Editor's note: From time to time, we invite members of our partner community to contribute to the GitLab Blog. Thanks to Gufran Yeşilyurt, a DevOps consultant at OBSS Technology, for co-creating with us.*__\n\nThis tutorial will walk you through setting up an MLOps pipeline with GitLab Model Registry, utilizing MLflow. This will be a great starting point to manage your ML apps entirely through GitLab. But first, it is crucial to understand why we need MLOps and what GitLab offers.\n\n[MLOps](https://about.gitlab.com/direction/modelops/mlops/#overview), or machine learning operations, is a critical practice for managing and automating the lifecycle of machine learning models, from development to deployment and maintenance. 
Its importance lies in addressing the complexity and dynamism of machine learning workflows, which involve not just software development but also data management, model training, testing, deployment, and continuous monitoring.\n\nMLOps ensures that models are reproducible, scalable, and maintainable, facilitating collaboration between data scientists, machine learning engineers, and operations teams. By incorporating MLOps, organizations can streamline the deployment process, reduce time to market, and improve the reliability and performance of their machine learning applications.\n\nThe necessity of MLOps arises from the unique challenges posed by machine learning projects. Unlike traditional software development, machine learning involves handling large datasets, experimenting with various models, and continuously updating models based on new data and feedback.\n\nWithout proper operations, managing these aspects becomes cumbersome, leading to potential issues like model drift, where the model's performance degrades over time due to changes in the underlying data. MLOps provides a structured approach to monitor and manage these changes, ensuring that models remain accurate and effective. Moreover, it introduces automation in various stages, such as data preprocessing, model training, and deployment, thereby reducing manual errors and enhancing efficiency.\n\nGitLab's features play a pivotal role in implementing MLOps effectively. GitLab provides an integrated platform that combines source code management, [CI/CD pipelines](https://about.gitlab.com/topics/ci-cd/), tracking and collaboration tools, making it ideal for managing machine learning projects.\n\nWith GitLab, teams can leverage version control to track changes in both code and data, ensuring reproducibility and transparency. The CI/CD pipelines in GitLab automate the testing and deployment of machine learning models, allowing for continuous integration and continuous delivery. 
This automation not only speeds up the deployment process but also ensures consistency and reliability in the models being deployed. \n\nAdditionally, GitLab's collaboration features, such as merge requests and code reviews, facilitate better communication and coordination among team members, ensuring that everyone is aligned and any issues are promptly addressed.\n\nPrerequisites:\n- basic knowledge of GitLab pipelines\n- basic knowledge of MLflow\n- a Kubernetes cluster\n- Dockerfile\n\nThis tutorial includes instructions to:\n- [Set up environment variables of MLflow](#set-up-environment-variables-of-mlflow)\n- [Train and log candidates at merge request](#train-and-log-candidates-at-merge-request)\n- [Register the most successful candidate](#register-the-most-successful-candidate)\n- [Dockerize and deploy an ML app with the registered model](#dockerize-and-deploy-an-ml-app-with-the-registered-model)\n\nIn this example, to decide whether to provide the user a loan, we make use of Random Forest Classifier, Decision Tree, and Logistic Regression. At the end of this showcase, we will have a web application that utilizes machine learning to respond to the user.\n\nTo reproduce this example in your own GitLab environment, you can read the rest of this article or follow the video below. You can find the source code of this example in [these OBSS repositories](https://gitlab.com/gitlab-partners-public/obss).\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/grNJAp1xAi0?si=Bf9CAP9lB1uWErOZ\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Set up environment variables of MLflow\n\nOn the host where the code is executed, set the environment variables for tracking URI and token. This might be a remote host, CI pipeline, or your local environment. 
When they are set, you can call `mlflow.set_experiment(\"\u003Cexperiment_name>\")`. As a reference:\n\n```\nexport MLFLOW_TRACKING_URI=\"\u003Cyour gitlab endpoint>/api/v4/projects/\u003Cyour project id>/ml/mlflow\"\nexport MLFLOW_TRACKING_TOKEN=\"\u003Cyour_access_token>\"\n```\n\n**Note:** If the training code contains the call to `mlflow.set_tracking_uri()`, remove it.\n\n## Train and log candidates at merge request\n\nIn your model train code, you can use MLflow methods to log metrics, artifacts, and parameters. You can also divide the train steps into pipeline stages if you are comfortable with that part. In this example, one Python file will be used for both training and report generation.\n\n```\nmlflow.log_params(params)\nmlflow.log_metrics(metrics_data)\nmlflow.log_artifact(artifacts)\n```\n\nYou can then create the necessary pipeline to train the experiment. By adding the relevant rules, you can trigger this pipeline manually in merge requests and observe the report generated as MR Note.\n\nWhen the pipeline is finished, you can see the details about the candidate in **Analyze > Model Experiments**.\n\n![details about the candidate in the finished pipeline](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676127/Blog/Content%20Images/Screenshot_1.png)\n\n## Register the most successful candidate\n\nAccording to the measurements you have made, we can register the most successful candidate (may be the one with the highest accuracy value) with the Run ID of the candidate.\n\nBut first, we need to create a model and its version in Registry. I created these steps in separate stages and components (because I may need these steps in other projects). 
You should be careful to use semantic versioning when versioning.\n\n### Register source model parameters and metrics\n\n```\nsource_candidate = client.get_run(source_candidate_id)\nparams = { k: v for k, v in source_candidate.data.params.items() }\nmetric = { k: v for k, v in source_candidate.data.metrics.items() }\n\nmodel_version = client.get_model_version(model_name, version)\nrun_id = model_version.run_id\nmodel_class = \"\"\nfor name, value in params.items():\n    client.log_param(run_id, name, value)\n    if name == \"Class\":\n        model_class = value\n\nfor name, value in metric.items():\n    client.log_metric(run_id, name, value)\n\n```\n\nAfter logging the parameters and metrics, you can [register the artifacts](https://gitlab.com/gitlab-partners-public/obss/mlops-loan-prediction/-/blob/main/register_candidate.py) as you did in the train step.\n\nYou may want to manually enter the inputs of the relevant steps as [a variable in the pipeline](https://gitlab.com/gitlab-partners-public/obss/components/-/blob/main/templates/register-candidate.yml).\n\n## CI/CD components\n\nI have used [CI/CD components](https://docs.gitlab.com/ee/ci/components/) because they provide a structured environment for managing machine learning workflows. 
These components enable reusability by allowing teams to store and share standardized scripts, models, and datasets, ensuring that previous work can be easily accessed, modified, and redeployed in future projects, thus accelerating development and reducing redundancy.\n\n> [Learn more about CI/CD components and the CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/).\n\n## Dockerize and deploy an ML app with the registered model\n\nIn this project, while registering the model, I also register the pkl file as an artifact and then create the docker image with that artifact and send it to [GitLab Container Registry](https://about.gitlab.com/blog/next-generation-gitlab-container-registry-goes-ga/).\n\nYou can now access your Docker image from the Container Registry and deploy it to your environment with the method you want.\n\n## Resources\n- [Model experiments](https://docs.gitlab.com/ee/user/project/ml/experiment_tracking/)\n- [MLflow client compatibility](https://docs.gitlab.com/ee/user/project/ml/experiment_tracking/mlflow_client.html)\n- [CI/CD components](https://docs.gitlab.com/ee/ci/components/)\n- [Building GitLab with GitLab: Why there is no MLOps without DevSecOps](https://about.gitlab.com/blog/there-is-no-mlops-without-devsecops/)\n\n***Credits:**\nThis tutorial and the corresponding sample projects were created and generously shared with the community by [OBSS](https://obss.tech/en/). OBSS is an EMEA-based channel partner of GitLab. 
They have deep expertise across the whole DevSecOps lifecycle and amongst many other things, they are more than happy to support customers with migrating their MLOps workloads to GitLab.*\n",[704,9,108,281],{"slug":1148,"featured":90,"template":684},"build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow","content:en-us:blog:build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow.yml","Build An Ml App Pipeline With Gitlab Model Registry Using Mlflow","en-us/blog/build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow.yml","en-us/blog/build-an-ml-app-pipeline-with-gitlab-model-registry-using-mlflow",{"_path":1154,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1155,"content":1161,"config":1166,"_id":1168,"_type":13,"title":1169,"_source":15,"_file":1170,"_stem":1171,"_extension":18},"/en-us/blog/build-and-run-containers-in-remote-development-workspaces",{"title":1156,"description":1157,"ogTitle":1156,"ogDescription":1157,"noIndex":6,"ogImage":1158,"ogUrl":1159,"ogSiteName":669,"ogType":670,"canonicalUrls":1159,"schema":1160},"Build and run containers in Remote Development workspaces","Use this easy-to-follow tutorial to create a secure, ephemeral, reproducible development environment in GitLab that can replace your local environments.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663857/Blog/Hero%20Images/blog-image-template-1800x945__12_.png","https://about.gitlab.com/blog/build-and-run-containers-in-remote-development-workspaces","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Build and run containers in Remote Development workspaces\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vishal Tak\"}],\n        \"datePublished\": \"2025-03-03\",\n      }",{"title":1156,"description":1157,"authors":1162,"heroImage":1158,"date":1123,"body":1164,"category":678,"tags":1165},[1163],"Vishal Tak","Development environments often 
require the ability to build and run containers as part of their local development. Securely running containers within containers can be challenging. This article will provide a step-by-step guide to securely build and run containers in a workspace.\n\nYou will learn how to:\n- [Create a Kubernetes cluster on AWS EKS](#create-a-kubernetes-cluster-on-aws-eks)\n- [Configure Sysbox](#configure-sysbox)\n- [Configure GitLab agent for Kubernetes and GitLab Workspaces Proxy](#configure-gitlab-agent-for-kubernetes-and-gitlab-workspaces-proxy)\n- [Configure sudo access for a workspace with Sysbox](#configure-sudo-access-for-a-workspace-with-sysbox)\n- [Configure Ingress Controller](#configure-ingress-controller)\n- [Build containers inside a workspace](#build-containers-inside-a-workspace)\n- [Run containers inside a workspace](#run-containers-inside-a-workspace)\n- [Get started today](#get-started-today)\n\n## Create a Kubernetes cluster on AWS EKS\nInstall the [AWS CLI](https://docs.aws.amazon.com/cli/latest/userguide/getting-started-install.html) on your local machine. Next, configure a [named profile](https://docs.aws.amazon.com/cli/latest/reference/configure/) and export it to ensure all the following `aws` commands use the set credentials.\n\n```\naws configure --profile gitlab-workspaces-container-demo\nexport AWS_PROFILE=gitlab-workspaces-container-demo\n```\n\nInstall [eksctl](https://eksctl.io/installation/), a CLI to interact with AWS EKS. Let’s now create a Kubernetes 1.31 cluster on AWS EKS with 1 node of Ubuntu 22.04 of `c5.2xlarge` instance type. The nodes can autoscale from 0-20 nodes and each node will have a label `sysbox-install: yes` . 
This will be explained later in the article.\n\n```\nexport CLUSTER_NAME=\"gitlab-workspaces-container-demo-eks-sysbox\"\n\neksctl create cluster \\\n  --name \"${CLUSTER_NAME}\" \\\n  --version 1.31 \\\n  --node-ami-family=Ubuntu2204 \\\n  --nodes=1 \\\n  --nodes-min=0 \\\n  --nodes-max=20 \\\n  --instance-types=c5.2xlarge \\\n  --node-labels \"sysbox-install=yes\" \\\n  --asg-access \\\n  --external-dns-access \\\n  --full-ecr-access\n```\n\nCreate an [IAM OIDC](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html) provider for your cluster.\n\n```\neksctl utils associate-iam-oidc-provider --cluster \"${CLUSTER_NAME}\" --approve\n```\n\nCreate IAM role for [EBS add-on](https://docs.aws.amazon.com/eks/latest/userguide/ebs-csi.html) for EKS.\n\n```\neksctl create iamserviceaccount \\\n  --name ebs-csi-controller-sa \\\n  --namespace kube-system \\\n  --cluster \"${CLUSTER_NAME}\" \\\n  --role-name \"AmazonEKS_EBS_CSI_DriverRole_${CLUSTER_NAME}\" \\\n  --role-only \\\n  --attach-policy-arn arn:aws:iam::aws:policy/service-role/AmazonEBSCSIDriverPolicy \\\n  --approve\n```\n\nCreate Amazon EBS CSI driver add-on for Amazon EKS cluster.  
\n\n```\neksctl utils describe-addon-versions --kubernetes-version 1.31 | grep aws-ebs-csi-driver\n\nexport AWS_ACCOUNT_ID=\"UPDATE_ME\"\n\neksctl create addon \\\n  --cluster \"${CLUSTER_NAME}\" \\\n  --name aws-ebs-csi-driver \\\n  --version latest \\\n  --service-account-role-arn \"arn:aws:iam::${AWS_ACCOUNT_ID}:role/AmazonEKS_EBS_CSI_DriverRole_${CLUSTER_NAME}\" \\\n  --force\n```\n\nInstall [kubectl](https://kubernetes.io/docs/reference/kubectl/), a command line tool for communicating with a Kubernetes cluster's control plane, using the Kubernetes API.\n\nLet’s get the [kubeconfig](https://kubernetes.io/docs/concepts/configuration/organize-cluster-access-kubeconfig/) of the created cluster.\n\n```\naws eks update-kubeconfig --name \"${CLUSTER_NAME}\"\n```\n\n## Configure Sysbox\n\n[Sysbox](https://github.com/nestybox/sysbox) is a container runtime that improves container isolation and enables containers to run the same workloads as virtual machines.\n\n[Install](https://github.com/nestybox/sysbox#installation) Sysbox on the Kubernetes cluster using the `sysbox-deploy-k8s daemonset`.\n\n```\ncurl https://raw.githubusercontent.com/nestybox/sysbox/refs/tags/v0.6.6/sysbox-k8s-manifests/sysbox-install.yaml -o sysbox-install.yaml\n```\n\nBecause of how Sysbox releases itself, it first created a git tag, which runs a pipeline to build assets after which the YAML files for the `sysbox-deploy-k8s daemonset` are updated. 
Thus, we need to update the DaemonSet's `spec.template.spec.containers[0].image` to [registry.nestybox.com/nestybox/sysbox-deploy-k8s:v0.6.6-0](https://github.com/nestybox/sysbox/blob/46ba726e8e894aa22e20465a32d22dfa2863ec12/sysbox-k8s-manifests/sysbox-install.yaml#L66) .\n\n```\nnew_image_value=\"registry.nestybox.com/nestybox/sysbox-deploy-k8s:v0.6.6-0\"\ntemp_file=$(mktemp)\nsed -E \"s|^([[:space:]]*image:)[[:space:]]*.*|\\1 $new_image_value|\" \"sysbox-install.yaml\" > \"$temp_file\"\nmv \"$temp_file\" \"sysbox-install.yaml\"\n```\n\nApply the YAML file to Kubernetes and ensure all the pods of the DaemonSet are running.\n\n```\nkubectl apply -f sysbox-install.yaml\nkubectl get pod -A\nkubectl -n kube-system get daemonset\n```\n\nVerify the installation by creating a pod which uses Sysbox container runtime.\n\n```\ncat \u003C\u003CEOF | kubectl apply -f -\napiVersion: v1\nkind: Pod\nmetadata:\n  name: sysbox-verification-pod\n  namespace: default\n  annotations:\n    io.kubernetes.cri-o.userns-mode: \"auto:size=65536\"\nspec:\n  runtimeClassName: sysbox-runc\n  containers:\n  - image: \"hello-world\"\n    imagePullPolicy: Always\n    name: main\n  restartPolicy: Always\nEOF\n\nkubectl -n default get pod sysbox-verification-pod\nkubectl exec -it sysbox-verification-pod -- echo \"Pod is running successfully on a Kubernetes cluster configured with Sysbox.\"\nkubectl -n default delete pod sysbox-verification-pod\n```\n\n## Configure GitLab agent for Kubernetes and GitLab Workspaces Proxy\n\nFollow our [documentation tutorial](https://docs.gitlab.com/ee/user/workspace/set_up_gitlab_agent_and_proxies.html) to set up GitLab agent and GitLab Workspaces Proxy.  
\n\n## Configure sudo access for a workspace with Sysbox\n\nFollow our [documentation](https://docs.gitlab.com/ee/user/workspace/configuration.html#with-sysbox) to configure sudo access for a workspace with Sysbox.\n\n## Configure Ingress Controller\n\nSetup [Ingress NGINX Controller for Kubernetes](https://github.com/kubernetes/ingress-nginx)\n\n```\nhelm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx --force-update\nhelm repo update\n\nhelm upgrade --install \\\n  ingress-nginx ingress-nginx/ingress-nginx \\\n  --namespace ingress-nginx \\\n  --create-namespace \\\n  --version 4.11.1 \\\n  --timeout=600s --wait --wait-for-jobs\n\nkubectl -n ingress-nginx get pod\n```\n\n## Build containers inside a workspace\n\nWe’ll use [example-go-http-app](https://gitlab.com/gitlab-org/workspaces/examples/example-go-http-app) as the project to create a workspace from. Open the workspace, start a terminal, and install [Docker](https://docs.docker.com/engine/install/).\n\n```\n# Add Docker's official GPG key:\nsudo apt-get update\nsudo apt-get install ca-certificates curl\nsudo install -m 0755 -d /etc/apt/keyrings\nsudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc\nsudo chmod a+r /etc/apt/keyrings/docker.asc\n\n# Add the repository to Apt sources:\necho \\\n  \"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \\\n  $(. 
/etc/os-release && echo \"${UBUNTU_CODENAME:-$VERSION_CODENAME}\") stable\" | \\\n  sudo tee /etc/apt/sources.list.d/docker.list > /dev/null\nsudo apt-get update\nsudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin\n\n# Start the Docker Daemon\nsudo dockerd\n```\n\nBuild the container image.\n\n```\nsudo docker build -t workspaces-golang-server .\n```\n\n## Run containers inside a workspace\n\nLet’s run the container built above and expose port 3000 from the container onto the host (workspace).\n\n```\nsudo docker run -p 3000:3000 workspaces-golang-server\n```\n\nThe port `3000` is exposed in the [.devfile.yaml](https://gitlab.com/gitlab-org/workspaces/examples/example-go-http-app/-/blob/dd3dbb38cdce1143f7ed023980f34630cea991a5/.devfile.yaml#L15) used to create the workspace. Access the server running inside the container from the browser. Here is a video clip.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/JQErF0U6oFk?si=6oiK48q5ghZq312g\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Get started today\n\nFrom GitLab 17.4, you can build and run containers securely in GitLab Workspaces. See our [documentation](https://docs.gitlab.com/ee/user/workspace/configuration.html#build-and-run-containers-in-a-workspace) for more information. Replace your local development environments to GitLab Workspaces for a secure, ephemeral, reproducible development environment. 
\n\n## Read more\n\n- [Enable secure sudo access for GitLab Remote Development workspaces](https://about.gitlab.com/blog/enable-secure-sudo-access-for-gitlab-remote-development-workspaces/)\n- [Quickstart guide for GitLab Remote Development workspaces](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/)\n- [Create a workspace quickly with the GitLab default devfile](https://about.gitlab.com/blog/create-a-workspace-quickly-with-the-gitlab-default-devfile/)\n- [Contributor how-to: Remote Development workspaces and GitLab Developer Kit](https://about.gitlab.com/blog/gitlab-gdk-remote-development/)\n",[9,478,680,678],{"slug":1167,"featured":90,"template":684},"build-and-run-containers-in-remote-development-workspaces","content:en-us:blog:build-and-run-containers-in-remote-development-workspaces.yml","Build And Run Containers In Remote Development Workspaces","en-us/blog/build-and-run-containers-in-remote-development-workspaces.yml","en-us/blog/build-and-run-containers-in-remote-development-workspaces",{"_path":1173,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1174,"content":1179,"config":1184,"_id":1186,"_type":13,"title":1187,"_source":15,"_file":1188,"_stem":1189,"_extension":18},"/en-us/blog/building-a-gitlab-ci-cd-pipeline-for-a-monorepo-the-easy-way",{"title":1175,"description":1176,"ogTitle":1175,"ogDescription":1176,"noIndex":6,"ogImage":1137,"ogUrl":1177,"ogSiteName":669,"ogType":670,"canonicalUrls":1177,"schema":1178},"Building a GitLab CI/CD pipeline for a monorepo the easy way","Learn how to create a GitLab CI/CD pipeline for a monorepo to host multiple applications in one repository.","https://about.gitlab.com/blog/building-a-gitlab-ci-cd-pipeline-for-a-monorepo-the-easy-way","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building a GitLab CI/CD pipeline for a monorepo the easy way\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sam 
Morris\"}],\n        \"datePublished\": \"2024-07-30\",\n      }",{"title":1175,"description":1176,"authors":1180,"heroImage":1137,"date":1181,"body":1182,"category":769,"tags":1183},[830],"2024-07-30","Monorepos allow you to host multiple applications’ code in a single repository. In GitLab, that involves placing disparate application source code in separate directories in one project. While this strategy allows for version controlled storage of your code, it was tricky leveraging the full power of GitLab’s [CI/CD](https://about.gitlab.com/topics/ci-cd/) pipeline capabilities… until now!\n\n## The ideal case: CI/CD in a monorepo\n\nSince you have more than one application’s code living in your repository, you will want to have more than one pipeline configuration. For example, if you have a .NET application and a Spring application in one project, each application may have different build and test jobs to complete. Ideally, you can completely decouple the pipelines and only run each pipeline based on changes to that specific application’s source code.\n\nThe technical approach for this would be to have a project-level `.gitlab-ci.yml` pipeline configuration file that includes a specific YAML file based on changes in a certain directory. The `.gitlab-ci.yml` pipeline serves as the control plane that triggers the appropriate pipeline based on the changes made to the code.\n\n## The legacy approach\n\nPrior to GitLab 16.4, we were not able to include a YAML file based on changes to a directory or file in a project. However, we could accomplish this functionality via a workaround. \n\nIn our monorepo project, we have two directories for different applications. In this example, there are `java` and `python` directories representing a Java and Python app, respectively. Each directory has an application-specific YAML file to build each app. 
In the project’s pipeline file, we simply include both application pipeline files, and do the logic handling in those files directly.\n\n`.gitlab-ci.yml`:\n\n```\nstages:\n  - build\n  - test\n  - deploy\n\ntop-level-job:\n  stage: build\n  script:\n    - echo \"Hello world...\"\n\ninclude:\n  - local: '/java/j.gitlab-ci.yml'\n  - local: '/python/py.gitlab-ci.yml'\n\n```\n\nIn each application-specific pipeline file, we create a hidden job named .java-common or .python-common that only runs if there are changes to that app’s directory. [Hidden jobs](https://docs.gitlab.com/ee/ci/jobs/#hide-jobs) do not run by default, and are often utilized to reuse specific job configurations. Each pipeline extends that hidden job to inherit the rules defining which files to watch for changes, which would then initiate the pipeline job. \n\n`j.gitlab-ci.yml`:\n\n```\nstages:\n  - build\n  - test\n  - deploy\n\n.java-common:\n  rules:\n    - changes:\n      - '../java/*'\n\njava-build-job:\n  extends: .java-common\n  stage: build\n  script:\n    - echo \"Building Java\"\n\njava-test-job:\n  extends: .java-common\n  stage: test\n  script:\n    - echo \"Testing Java\"\n\n```\n\n`py.gitlab-ci.yml`:\n\n```\nstages:\n  - build\n  - test\n  - deploy\n\n.python-common:\n  rules:\n    - changes:\n      - '../python/*'\n\npython-build-job:\n  extends: .python-common\n  stage: build\n  script:\n    - echo \"Building Python\"\n\npython-test-job:\n  extends: .python-common\n  stage: test\n  script:\n    - echo \"Testing Python\"\n\n```\n\nThere are some downsides to this, including having to extend the job for each other job in the YAML file to ensure it complies with the rules, creating a lot of redundant code and room for human error. Additionally, extended jobs cannot have duplicate keys, so you could not define your own `rules` logic in each job since there would be a collision in the keys and their [values are not merged](https://docs.gitlab.com/ee/ci/yaml/index.html#extends). 
\n\nThis results in a pipeline running that includes the j.gitlab-ci.yml jobs when `java/` is updated, and py.gitlab-ci.yml when `python/` is updated. \n\n## The new approach: Conditionally include pipeline files\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/6phvk8jioAo?si=y6ztZODvUtM-cHmZ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nIn GitLab 16.4, we introduced [`include` with `rules:changes` for pipelines](https://docs.gitlab.com/ee/ci/yaml/includes.html#include-with-ruleschanges). Previously, you could `include` with `rules:if`, but not `rules:changes` making this update extremely powerful. Now, you can simply use the `include` keyword and define the monorepo rules in your project pipeline configuration. \n\nNew `.gitlab-ci.yml`:\n\n```\nstages:\n  - build\n  - test\n\ntop-level-job:\n  stage: build\n  script:\n    - echo \"Hello world...\"\n\ninclude:\n  - local: '/java/j.gitlab-ci.yml'\n    rules:\n      - changes:\n        - 'java/*'\n  - local: '/python/py.gitlab-ci.yml'\n    rules:\n      - changes:\n        - 'python/*'\n\n```\n\nThen each application’s YAML can just focus on building and testing that application’s code, without extending a hidden job repeatedly. 
This allows for more flexibility in job definitions and reduces code rewriting for engineers.\n\nNew `j.gitlab-ci.yml`:\n\n```\nstages:\n  - build\n  - test\n  - deploy\n\njava-build-job:\n  stage: build\n  script:\n    - echo \"Building Java\"\n\njava-test-job:\n  stage: test\n  script:\n    - echo \"Testing Java\"\n\n```\n\nNew `py.gitlab-ci.yml`:\n```\nstages:\n  - build\n  - test\n  - deploy\n\npython-build-job:\n  stage: build\n  script:\n    - echo \"Building Python\"\n\npython-test-job:\n  stage: test\n  script:\n    - echo \"Testing Python\"\n\n```\n\nThis accomplishes the same task of including the Java and Python jobs only when their directories are modified. Something to consider in your implementation is that [jobs can run unexpectedly when using `changes`](https://docs.gitlab.com/ee/ci/jobs/job_troubleshooting.html#jobs-or-pipelines-run-unexpectedly-when-using-changes). The changes rule always evaluates to true when pushing a new branch or a new tag to GitLab, so all jobs included will run upon first push to a branch regardless of the `rules:changes` definition. You can mitigate this experience by creating your feature branch first and then opening a merge request to begin your development, since the first push to the branch when it is created will force all jobs to run.\n\nUltimately, monorepos are a strategy that can be used with GitLab and CI/CD, and, with our new `include` with `rules:changes` feature, we have a better best practice for using GitLab CI with monorepos. 
To get started with monorepos, take out a free GitLab Ultimate trial today.
}",{"title":1199,"description":1194,"authors":1200,"heroImage":1195,"date":1202,"body":1203,"category":702,"tags":1204},"Explore the Dragon Realm: Build a C++ adventure game with a little help from AI",[1201],"Fatima Sarah Khalid","2023-08-24","\nLearning, for me, has never been about reading a textbook or sitting in on a lecture - it's been about experiencing and immersing myself in a hands-on challenge. This is particulary true for new programming languages. With [GitLab Duo Code Suggestions](https://about.gitlab.com/gitlab-duo/), artificial intelligence (AI) becomes my interactive guide, providing an environment for trial, error, and growth. In this tutorial, we will build a text-based adventure game in C++ by using Code Suggestions to learn the programming language along the way.\n\nYou can use this table of contents to navigate into each section. It is recommended to read top-down for the best learning experience.\n\n- [Setup](#setup)\n  - [Installing VS Code](#installing-vs-code)\n  - [Installing Clang as a compiler](#installing-clang-as-a-compiler)\n  - [Setting up VS Code](#setting-up-vs-code)\n- [Getting started](#getting-started)\n  - [Compiling and running your program](#compiling-and-running-your-program)\n- [Setting the text adventure stage](#setting-the-adventure-stage)\n- [Defining the adventure: Variables](#defining-the-adventure-variables)\n- [Crafting the adventure: Making decisions with conditionals](#crafting-the-adventure-making-decisions-with-conditionals)\n- [Structuring the narrative: Characters](#structuring-the-narrative-characters)\n- [Structuring the narrative: Items](#structuring-the-narrative-items)\n- [Applying what we've learned at the Grand Library](#applying-what-weve-learned-at-the-grand-library)\n- [See you next time in the Dragon Realm](#see-you-next-time-in-the-dragon-realm)\n- [Share your feedback](#share-your-feedback)\n\n> Download [GitLab Ultimate for free](https://about.gitlab.com/gitlab-duo/) for a 30-day trial of GitLab 
Duo Code Suggestions.\n\n## Setup\nYou can follow this tutorial in your [preferred and supported IDE](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-in-other-ides-and-editors). Review the documentation to enable Code Suggestions for [GitLab.com SaaS](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-gitlab-saas) or [GitLab self-managed instances](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-self-managed-gitlab).\n\nThese installation instructions are for macOS Ventura on M1 Silicon. \n\n### Installing VS Code\n\n* Download and install [VS Code](https://code.visualstudio.com/download).\n* Alternatively, you can also install it as a Homebrew cask: `brew install --cask visual-studio-code`.\n\n### Installing Clang as a compiler\n\n* On macOS, you'll need to install some developer tools. Open your terminal and type:\n\n```\nxcode-select --install\n```\n\nThis will prompt you to install Xcode's command line tools, which include the [Clang C++ compiler](https://clang.llvm.org/get_started.html).\n\nAfter the installation, you can check if `clang++` is installed by typing:\n\n```\nclang++ --version\n```\n\nYou should see an output that includes some information about the Clang version you have installed. \n\n### Setting up VS Code\n\n* Launch VS Code.\n* Install and configure [the GitLab Workflow extension](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow).\n* Optionally, in VS Code, install the [C/C++ Intellisense extension](https://marketplace.visualstudio.com/items?itemName=ms-vscode.cpptools), which helps with debugging C/C++. \n\n## Getting started\nNow, let's start building this magical adventure with C++. We'll start with a \"Hello World\" example.\n\nCreate a new project `learn-ai-cpp-adventure`. In the project root, create `adventure.cpp`. 
The first part of every C++ program is the `main()` function. It's the entry point of the program.\n\nWhen you start writing `int main() {`, Code Suggestions will help autocomplete the function with some default parameters.\n\n![adventure.cpp with a hello world implementation suggested by Code Suggestions](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/0-helloworld.png){: .shadow}\n\n```cpp\nint main()\n{\n    cout \u003C\u003C \"Hello World\" \u003C\u003C endl;\n    return 0;\n}\n```\n\nWhile this is a good place to start, we need to add an include and update the output statement:\n\n```cpp\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n// Main function, the starting point of the program\nint main()\n{\n    // Print \"Hello World!\" to the console\n    std::cout \u003C\u003C \"Hello World!\" \u003C\u003C std::endl;\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n```\n\nThe program prints \"Hello World!\" to the console when executed.\n\n* `#include \u003Ciostream>`: Because we are building a text-based adventure, we will rely on input from the player using input and output operations (I/O) in C++. This include is a preprocessor directive that tells our program to include the `iostream` library, which provides facilities to use input and output streams, such as `std::cout` for output.\n\n* You might find that Code Suggestions suggests `int main(int argc, char* argv[])` as the definition of our main function. The parameters `(int argc, char* argv[])` are used to pass command-line arguments to the program. Code Suggestions added them as default parameters, but they are not needed if you're not using command-line arguments. In that case, we can also define the main function as `int main()`.\n\n* `std::cout \u003C\u003C \"Hello World!\" \u003C\u003C std::endl;`: outputs \"Hello World\" to the console. 
The stream operator `\u003C\u003C` is used to send the string to output. `std::endl` is an end-line character.\n\n* `return 0;`: we use `return 0;` to indicate the end of the `main()` function and return a value of 0. In C++, it is good practice to return 0 to indicate the program has completed successfully.\n\n### Compiling and running your program\nNow that we have some code, let's review how we'll compile and run this program. \n* Open your terminal or use the terminal in VSCode (View -> Terminal).\n* Navigate to your project directory.\n* Compile your program by typing:\n\n```bash\nclang++ adventure.cpp -o adventure\n```\n\nThis command tells the Clang++ compiler to compile adventure.cpp and create an executable named adventure. After this, run your program by typing:\n\n```\n./adventure\n```\n\nYou should see \"Hello World!\" printed in the terminal. \n\nBecause our tutorial uses a single source file `adventure.cpp`, we can use the compiler directly to build our program. In the future, if the program grows beyond a file, we'll set up additional configurations to handle compilation. \n\n## Setting the text adventure stage\nBefore we get into more code, let's set the stage for our text adventure.\n\nFor this text adventure, players will explore the Dragon Realm. The Dragon Realm is full of mountains, lakes, and magic. Our player will enter the Dragon Realm for the first time, explore different locations, meet new characters, collect magical items, and journal their adventure. At every location, they will be offered choices to decide the course of their journey.\n\nTo kick off our adventure into the Dragon Realm, let's update our `adventure.cpp main()` function to be more specific. 
As you update the welcome message, you might find that Code Suggestions already knows we're building a game.\n\n![adventure.cpp - Code Suggestions offers suggestion of welcoming users to the Dragon Realm and knows its a game](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/1-welcome-to-the-realm.png){: .shadow}\n\n```cpp\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n// Main function, the starting point of the program\nint main()\n{\n    // Print \"Hello World!\" to the console\n    std::cout \u003C\u003C \"Welcome to the Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n```\n\n## Defining the adventure: Variables\nA variable stores data that can be used throughout the program scope in the `main()` function. A variable is defined by a type, which indicates the kind of data it can hold.\n\nLet's create a variable to hold our player's name and give it the type `string`. 
A `string` is designed to hold a sequence of characters so it's perfect for storing our player's name.\n\n```cpp\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n// Main function, the starting point of the program\nint main()\n{\n    // Print \"Hello World!\" to the console\n    std::cout \u003C\u003C \"Welcome to the Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Declare a string variable to hold the player's name\n    std::string playerName;\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n```\n\nAs you do this, you may notice that Code Suggestions knows what's coming next - prompting the user for their player's name.\n\n![adventure.cpp - Code Suggestions suggests welcoming the player with the playerName variable](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/2-player-name-variable.png){: .shadow}\n\nWe may be able to get more complete and specific Code Suggestions by providing comments about what we'd like to do with the name - personally welcome the player to the game. Start by adding our plan of action in comments.\n\n```cpp\n    // Declare a string variable to hold the player's name\n    std::string playerName;\n\n    // Prompt the user to enter their player name\n\n    // Display a personalized welcome message to the player with their name\n```\n\nTo capture the player's name from input, we need to use the `std::cin` object from the `iostream` library to fetch input from the player using the extraction operator `>>`. 
If you start typing `std::` to start prompting the user, Code Suggestions will make some suggestions to help you gather user input and save it to our `playerName` variable.\n\n![adventure.cpp - Code Suggestions prompts the user to input their player name](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/2.1-player-name-input.png){: .shadow}\n\nNext, to welcome our player personally to the game, we want to use `std::cout` and the `playerName` variable together:\n\n```cpp\n    // Declare a string variable to store the player name\n    std::string playerName;\n\n    // Prompt the user to enter their player name\n    std::cout \u003C\u003C \"Please enter your name: \";\n    std::cin >> playerName;\n\n    // Display a personalized welcome message to the player with their name\n    std::cout \u003C\u003C \"Welcome \" \u003C\u003C playerName \u003C\u003C \" to The Dragon Realm!\" \u003C\u003C std::endl;\n```\n\n## Crafting the adventure: Making decisions with conditionals\nIt's time to introduce our player to the different locations in tbe Dragon Realm they can visit. To prompt our player with choices, we use conditionals. Conditionals allow programs to take different actions based on criteria, such as user input.\n\nLet's offer the player a selection of locations to visit and capture their choice as an `int` value that corresponds to the location they picked.\n\n```cpp\n// Display a personalized welcome message to the player with their name\nstd::cout \u003C\u003C \"Welcome \" \u003C\u003C playerName \u003C\u003C \" to The Dragon Realm!\" \u003C\u003C std::endl;\n\n// Declare an int variable to capture the user's choice\nint choice;\n```\n\nThen, we want to offer the player the different locations that are possible for that choice. 
Let's start with a comment and prompt Code Suggestions with `std::cout` to fill out the details for us.\n\n![adventure.cpp - Code Suggestions suggests a multiline output for all the locations listed in the code below](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3-setup-location-choice.png){: .shadow}\n\nAs you accept the suggestions, Code Suggestions will help build out the output and ask the player for their input.\n\n![adventure.cpp - Code Suggestions suggests a multiline output for all the locations listed in the code below and asks for player input](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.1-capture-player-location-choice.png){: .shadow}\n\n```cpp\n    // Declare an int variable to capture the user's choice\n    int choice;\n\n    // Offer the player a choice of 3 locations: 1 for Moonlight Markets, 2 for Grand Library, and 3 for Shimmer Lake.\n    std::cout \u003C\u003C \"Where will \" \u003C\u003C playerName \u003C\u003C \" go?\" \u003C\u003C std::endl;\n    std::cout \u003C\u003C \"1. Moonlight Markets\" \u003C\u003C std::endl;\n    std::cout \u003C\u003C \"2. Grand Library\" \u003C\u003C std::endl;\n    std::cout \u003C\u003C \"3. Shimmer Lake\" \u003C\u003C std::endl;\n    std::cout \u003C\u003C \"Please enter your choice: \";\n    std::cin >> choice;\n```\n\nOnce you start typing `std::cin >>` or accept the prompt for asking the player for their choice, Code Suggestions might offer a suggestion for building out your conditional flow. 
AI is non-deterministic: One suggestion can involve if/else statements while another solution uses a switch statement.\n\nTo give Code Suggestions a nudge, we'll add a comment and start typing out an if statement: `if (choice ==)`.\n\n![adventure.cpp - Code Suggestions suggests using an if statement to manage choice of locations](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.2-if-statement-locations.png){: .shadow}\n\nAnd if you keep accepting the subsequent suggestions, Code Suggestions will autocomplete the code using if/else statements.\n\n![adventure.cpp - Code Suggestions helps the user fill out the rest of the if/else statements for choosing a location](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.2.1-if-statement-locations-continued.png){: .shadow}\n\n```cpp\n    // Check the user's choice and display the corresponding messages\n    if (choice == 1) {\n        std::cout \u003C\u003C \"You chose Moonlight Markets\" \u003C\u003C std::endl;\n    }\n    else if (choice == 2) {\n        std::cout \u003C\u003C \"You chose Grand Library\" \u003C\u003C std::endl;\n    }\n    else if (choice == 3) {\n        std::cout \u003C\u003C \"You chose Shimmer Lake\" \u003C\u003C std::endl;\n    }\n    else {\n        std::cout \u003C\u003C \"Invalid choice\" \u003C\u003C std::endl;\n    }\n```\n\n`if/else` is a conditional statement that allows a program to execute code based on whether a condition, in this case the player's choice, is true or false. 
If the condition evaluates to true, the code inside the braces is executed.\n\n* `if (condition)`: used to check if the condition is true.\n* `else if (another condition)`: if the previous condition isn't true, the programs checks this condition.\n* `else`: if none of the previous conditions are true.\n\nAnother way of managing multiple choices like this example is using a `switch()` statement. A `switch` statement allows our program to jump to different sections of code based on the value of an expression, which, in this case, is the value of `choice`.\n\nWe are going to replace our `if/else` statements with a `switch` statement. You can comment out or delete the `if/else` statements and prompt Code Suggestions starting with `switch(choice) {`.\n\n![adventure.cpp - Code Suggestions helps the user handle the switch statement for the locations](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.3-conditional-switch-locations.png){: .shadow}\n\n![adventure.cpp - Code Suggestions helps the user handle the switch statement for the locations](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.3.1-conditional-switch-locations-continued.png){: .shadow}\n\n```cpp\n    // Evaluate the player's decision\n    switch(choice) {\n        // If 'choice' is 1, this block is executed.\n        case 1:\n            std::cout \u003C\u003C \"You chose Moonlight Markets.\" \u003C\u003C std::endl;\n            break;\n        // If 'choice' is 2, this block is executed.\n        case 2:\n            std::cout \u003C\u003C \"You chose Grand Library.\" \u003C\u003C std::endl;\n            break;\n        // If 'choice' is 3, this block is executed.\n        case 3:\n            std::cout \u003C\u003C \"You chose Shimmer Lake.\" \u003C\u003C std::endl;\n            break;\n        // If 'choice' is not 1, 2, or 3, this block is executed.\n        
default:\n            std::cout \u003C\u003C \"You did not enter 1, 2, or 3.\" \u003C\u003C std::endl;\n    }\n```\n\nEach case represents a potential value that the variable or expression being switched on (in this case, choice) could have. If a match is found, the code for that case is executed. We use the `default` case to handle any input errors in case the player enters a value that isn't accounted for.\n\nLet's build out what happens when our player visits the Shimmering Lake. I've added some comments after the player's arrival at Shimmering Lake to prompt Code Suggestions to help us build this out:\n\n```cpp\n    // If 'choice' is 3, this block is executed.\n    case 3:\n        std::cout \u003C\u003C \"You chose Shimmering Lake.\" \u003C\u003C std::endl;\n        // The player arrives at Shimmering Lake. It is one of the most beautiful lakes the player has ever seen.\n        // The player hears a mysterious melody from the water.\n        // They can either 1. Stay quiet and listen, or 2. Sing along with the melody.\n\n        break;\n```\n\nNow, if you start writing `std::cout` to begin offering the player this new decision point, Code Suggestions will help fill out the output code.\n\n![adventure.cpp - Code Suggestions helps fill out the output code based on the comments about the interaction at the Lake](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.4-case-3-output.png){: .shadow}\n\nYou might find that the code provided by Code Suggestions is very declarative. Once I've accepted the suggestion, I personalize the code as needed. 
For example in this case, including the melody the player heard and using the player's name instead of \"you\":\n\n![adventure.cpp - I added the playerName to the output and then prompted Code Suggestions to continue the narrative based on the comments above](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.4.1-customizing-output.png){: .shadow}\n\nI also wanted Code Suggestions to offer suggestions in a specific format, so I added an end line:\n\n![adventure.cpp - I added an end line to prompt Code Suggestions to break the choices into end line outputs](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.4.2-customizing-output-endline.png){: .shadow}\n\n![adventure.cpp - I added an endline to prompt Code Suggestions to break the choices into end line outputs](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.4.3-sub-choices-output.png){: .shadow}\n\nNow, we'd like to offer our player a nested choice in this scenario. Before we can define the new choices, we need a variable to store this nested choice. Let's define a new variable `int nestedChoice` in our `main()` function, outside of the `switch()` statement we set up. 
You can put it after our definition of the `choice` variable.\n\n```cpp\n    // Declare an int variable to capture the user's choice\n    int choice;\n    // Declare an int variable to capture the user's nested choice\n    int nestedChoice;\n```\n\nNext, returning to the `if/else` statement we were working on in `case 3`, we want to prompt the player for their decision and save it in `nestedChoice`.\n\n![adventure.cpp - I added an end line to prompt Code Suggestions to break the choices into end line outputs](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.4.4-capture-nested-choice.png){: .shadow}\n\nAs you can see, Code Suggestions wants to go ahead and handle the user's choice using another `switch` statement. I would prefer to use an `if/else` statement to handle this decision point.\n\nFirst, let's add some comments to give context:\n\n```cpp\n    // Capture the user's nested choice\n    std::cin >> nestedChoice;\n\n    // If the player chooses 1 and remains silent, they hear whispers of the merfolk below, but nothing happens.\n    // If the player chooses 2 and sings along, a merfolk surfaces and gifts them a special blue gem as a token of appreciation for their voice.\n\n    // Evaluate the user's nestedChoice\n```\n\nThen, start typing `if (nestedChoice == 1)` and Code Suggestions will start to offer suggestions:\n\n![adventure.cpp - Code Suggestions starts to build out an if statement to handle the nestedChoice](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.5-nested-choice-if.png){: .shadow}\n\nIf you tab to accept them, Code Suggestions will continue to fill out the rest of the nested `if/else` statements.\n\nSometimes, while you're customizing the suggestions that Code Suggestions gives, you may even discover that it would like to make creative suggestions, too!\n\n![adventure.cpp - Code Suggestions makes a 
creative suggestion to end the interaction with the merfolk by saying \"You are now free to go\" after you receive the gem.](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.5.2-nested-cs-creative-suggestion.png){: .shadow}\n\nHere's the code for `case 3` for the player's interaction at Shimmering Lake with the nested decision. I've updated some of the narrative dialogue player's name.\n```\n    // Handle the Shimmering Lake scenario.\n    case 3:\n        std::cout \u003C\u003C playerName \u003C\u003C \" arrives at Shimmering Lake. It is one of the most beautiful lakes that\" \u003C\u003C playerName \u003C\u003C \" has seen. They hear a mysterious melody from the water. They can either: \" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"1. Stay quiet and listen\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"2. Sing along with the melody\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Please enter your choice: \";\n\n        // Capture the user's nested choice\n        std::cin >> nestedChoice;\n\n        // If the player chooses to remain silent\n        if (nestedChoice == 1)\n        {\n            std::cout \u003C\u003C \"Remaining silent, \" \u003C\u003C playerName \u003C\u003C \" hears whispers of the merfolk below, but nothing happens.\" \u003C\u003C std::endl;\n        }\n        // If the player chooses to sing along with the melody\n        else if (nestedChoice == 2)\n        {\n            std::cout \u003C\u003C \"Singing along, a merfolk surfaces and gifts \" \u003C\u003C playerName\n                    \u003C\u003C \" a special blue gem as a token of appreciation for their voice.\"\n                    \u003C\u003C std::endl;\n        }\n        break;\n```\n\nOur player isn't limited to just exploring Shimmering Lake. 
There's a whole realm to explore and they might want to go back and explore other locations.\n\nTo facilitate this, we can use a `while` loop. A loop is a type of conditional that allows a specific section of code to be executed multiple times based on a condition. For the `condition` that allows our `while` loop to run multiple times, let's use a `boolean` to initialize the loop condition.\n\n```cpp\n    // Initialize a flag to control the loop and signify the player's intent to explore.\n    bool exploring = true;\n    // As long as the player wishes to keep exploring, this loop will run.\n    while(exploring) {\n        // wrap the code for switch(choice)\n    }\n```\n\nWe also need to move our location prompt inside the `while` loop so that the player can visit more than one location at the time.\n\n![adventure.cpp - CS helps us write a go next prompt for the locations](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.6-while-loop-go-next.png){: .shadow}\n\n```cpp\n    // Initialize a flag to control the loop and signify the player's intent to explore.\n    bool exploring = true;\n    // As long as the player wishes to keep exploring, this loop will run.\n    while(exploring) {\n\n        // If still exploring, ask the player where they want to go next\n        std::cout \u003C\u003C \"Where will \" \u003C\u003C playerName \u003C\u003C \" go next?\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"1. Moonlight Markets\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"2. Grand Library\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"3. 
Shimmering Lake\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Please enter your choice: \";\n        // Update value of choice\n        std::cin >> choice;\n\n        // Respond based on the player's main choice\n        switch(choice) {\n```\n\nOur `while` loop will keep running as long as `exploring` is `true`, so we need a way for the player to have the option to exit the game. Let's add a case 4 that allows the player to exit by setting `exploring = false`. This will exit the loop and take the player back to the original choices.\n\n```cpp\n    // Option to exit the game\n    case 4:\n        exploring = false;\n        break;\n```\n\n**Async exercise**: Give the player the option to exit the game instead of exploring a new decision.\n\nWe also need to update the error handling for invalid inputs in the `switch` statement. You can decide whether to end the program or use the `continue` statement to start a new loop iteration.\n\n```cpp\n        default:\n            std::cout \u003C\u003C \"You did not enter a valid choice.\" \u003C\u003C std::endl;\n            continue; // Errors continue with the next loop iteration\n```\n\nUsing I/O and conditionals is at the core of text-based adventure games and helps make these games interactive. 
We can combine user input, display output, and implement our narrative into decision-making logic to create an engaging experience.\n\nHere's what our `adventure.cpp` looks like now with some comments:\n\n```cpp\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n// Main function, the starting point of the program\nint main()\n{\n    std::cout \u003C\u003C \"Welcome to the Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Declare a string variable to store the player name\n    std::string playerName;\n\n    // Prompt the user to enter their player name\n    std::cout \u003C\u003C \"Please enter your name: \";\n    std::cin >> playerName;\n\n    // Display a personalized welcome message to the player with their name\n    std::cout \u003C\u003C \"Welcome \" \u003C\u003C playerName \u003C\u003C \" to The Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Declare an int variable to capture the user's choice\n    int choice;\n    // Declare an int variable to capture the user's nested choice\n    int nestedChoice;\n\n    // Initialize a flag to control the loop and signify the player's intent to explore.\n    bool exploring = true;\n    // As long as the player wishes to keep exploring, this loop will run.\n    while(exploring) {\n\n        // If still exploring, ask the player where they want to go next\n        std::cout \u003C\u003C \"Where will \" \u003C\u003C playerName \u003C\u003C \" go next?\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"1. Moonlight Markets\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"2. Grand Library\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"3. 
Shimmering Lake\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Please enter your choice: \";\n        // Update value of choice\n        std::cin >> choice;\n\n        // Respond based on the player's main choice\n        switch(choice) {\n            //  Handle the Moonlight Markets scenario\n            case 1:\n                std::cout \u003C\u003C \"You chose Moonlight Markets.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Grand Library scenario.\n            case 2:\n                std::cout \u003C\u003C \"You chose Grand Library.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Shimmering Lake scenario.\n            case 3:\n                std::cout \u003C\u003C playerName \u003C\u003C \" arrives at Shimmering Lake. It is one of the most beautiful lakes that\" \u003C\u003C playerName \u003C\u003C \" has seen. They hear a mysterious melody from the water. They can either: \" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"1. Stay quiet and listen\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"2. 
Sing along with the melody\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"Please enter your choice: \";\n\n                // Capture the user's nested choice\n                std::cin >> nestedChoice;\n\n                // If the player chooses to remain silent\n                if (nestedChoice == 1)\n                {\n                    std::cout \u003C\u003C \"Remaining silent, \" \u003C\u003C playerName \u003C\u003C \" hears whispers of the merfolk below, but nothing happens.\" \u003C\u003C std::endl;\n                }\n                // If the player chooses to sing along with the melody\n                else if (nestedChoice == 2)\n                {\n                    std::cout \u003C\u003C \"Singing along, a merfolk surfaces and gifts \" \u003C\u003C playerName\n                            \u003C\u003C \" a special blue gem as a token of appreciation for their voice.\"\n                            \u003C\u003C std::endl;\n                }\n                break;\n            // Option to exit the game\n            case 4:\n                exploring = false;\n                break;\n            // If 'choice' is not 1, 2, or 3, this block is executed.\n            default:\n                std::cout \u003C\u003C \"You did not enter a valid choice.\" \u003C\u003C std::endl;\n                continue; // Errors continue with the next loop iteration\n        }\n    }\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n```\n\nHere's what the build output looks like if we run `adventure.cpp` and the player heads to the Shimmering Lake.\n\n![adventure.cpp build output - the player is called sugaroverflow and heads to the Shimmering Lake and receives a gem](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/3.6.1-full-case-3-output.png){: .shadow}\n\n## Structuring the narrative: Characters\nOur player can now explore the world. 
Soon, our player will also be able to meet people and collect objects. Before we can do that, let's organize the things our player can do by creating some structure for the player character.\n\nIn C++, a `struct` is used to group different data types. It's helpful in creating a group of items that belong together, such as our player's attributes and inventory, into a single unit. `struct` objects are defined globally, which means at the top of the file, before the `main()` function.\n\nIf you start typing `struct Player {`, Code Suggestions will help you out with a sample definition of a player struct.\n\n![adventure.cpp - Code Suggestions helps with setting up the struct definition for the player](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/4-player-struct-definition.png){: .shadow}\n\nAfter accepting this suggestion, you might find that Code Suggestions is eager to define some functions to make this game more fun, such as hunting for treasure.\n\n![adventure.cpp - Code Suggestions provides a suggestion for creating functions to hunt for treasure.](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/4.1-player-struct-treasure-suggestion.png){: .shadow}\n\n```cpp\n// Define a structure for a Player in the game.\nstruct Player{\n    std::string name;  // The name of the player.\n    int health;        // The current health of the player.\n    int xp;            // Experience points gained by the player. Could be used for leveling up or other game mechanics.\n};\n```\n\nGiving the player experience points was not in my original plan for this text adventure game, but Code Suggestions makes an interesting suggestion. We could use `xp` for leveling up or for other game mechanics as our project grows.\n\n`struct Player` provides a blueprint for creating a player and details the attributes that make up a player. 
To use our player in our code, we must instantiate, or create, an object of the `Player` struct within our `main()` function. Objects in C++ are instances of structures that contain attributes. In our example, we're working with the `Player` struct, which has attributes like name, health, and xp.\n\nAs you're creating a `Player` object, you might find that Code Suggestions wants to name the player \"John.\"\n\n![adventure.cpp - code suggestions suggests naming the new Player object John.](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/4.2-player-struct-instance-john.png){: .shadow}\n\n```cpp\nint main() {\n    // Create an instance of the Player struct\n    Player player;\n    player.health = 100; // Assign a default value for HP\n```\n\nInstead of naming our player \"John\" for everyone, we'll use the `Player` object to set the attribute for name. When we want to interact with or manipulate an attribute of an object, we use the dot operator `.`. The dot operator allows us to access specific members of the object. We can set the player's name using the dot operator with `player.name`.\n\nNote that we need to replace other mentions of `playerName` the variable with `player.name`, which allows us to access the player object's name directly.\n\n* Search for all occurrences of the `playerName` variable, and replace it with `player.name`.\n* Comment/Remove the unused `std::string playerName` variable after that.\n\nWhat your `adventure.cpp` will look like now:\n\n```cpp\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n// Define a structure for a Player in the game.\nstruct Player{\n    std::string name;  // The name of the player.\n    int health;        // The current health of the player.\n    int xp;            // Experience points gained by the player. 
Could be used for leveling up or other game mechanics.\n};\n\n// Main function, the starting point of the program\nint main()\n{\n    std::cout \u003C\u003C \"Welcome to the Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Create an instance of the Player struct\n    Player player;\n    player.health = 100; // Assign a default value for HP\n\n    // Prompt the user to enter their player name\n    std::cout \u003C\u003C \"Please enter your name: \";\n    std::cin >> player.name;\n\n    // Display a personalized welcome message to the player with their name\n    std::cout \u003C\u003C \"Welcome \" \u003C\u003C player.name \u003C\u003C \" to The Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Declare an int variable to capture the user's choice\n    int choice;\n    // Declare an int variable to capture the user's nested choice\n    int nestedChoice;\n\n    // Initialize a flag to control the loop and signify the player's intent to explore.\n    bool exploring = true;\n    // As long as the player wishes to keep exploring, this loop will run.\n    while(exploring) {\n\n        // If still exploring, ask the player where they want to go next\n        std::cout \u003C\u003C \"Where will \" \u003C\u003C player.name \u003C\u003C \" go next?\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"1. Moonlight Markets\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"2. Grand Library\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"3. 
Shimmering Lake\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Please enter your choice: \";\n        // Update value of choice\n        std::cin >> choice;\n\n        // Respond based on the player's main choice\n        switch(choice) {\n            //  Handle the Moonlight Markets scenario\n            case 1:\n                std::cout \u003C\u003C \"You chose Moonlight Markets.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Grand Library scenario.\n            case 2:\n                std::cout \u003C\u003C \"You chose Grand Library.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Shimmering Lake scenario.\n            case 3:\n                std::cout \u003C\u003C player.name \u003C\u003C \" arrives at Shimmering Lake. It is one of the most beautiful lakes that\" \u003C\u003C player.name \u003C\u003C \" has seen. They hear a mysterious melody from the water. They can either: \" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"1. Stay quiet and listen\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"2. 
Sing along with the melody\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"Please enter your choice: \";\n\n                // Capture the user's nested choice\n                std::cin >> nestedChoice;\n\n                // If the player chooses to remain silent\n                if (nestedChoice == 1)\n                {\n                    std::cout \u003C\u003C \"Remaining silent, \" \u003C\u003C player.name \u003C\u003C \" hears whispers of the merfolk below, but nothing happens.\" \u003C\u003C std::endl;\n                }\n                // If the player chooses to sing along with the melody\n                else if (nestedChoice == 2)\n                {\n                    std::cout \u003C\u003C \"Singing along, a merfolk surfaces and gifts \" \u003C\u003C player.name\n                            \u003C\u003C \" a special blue gem as a token of appreciation for their voice.\"\n                            \u003C\u003C std::endl;\n                }\n                break;\n            // Option to exit the game\n            case 4:\n                exploring = false;\n                break;\n            // If 'choice' is not 1, 2, or 3, this block is executed.\n            default:\n                std::cout \u003C\u003C \"You did not enter a valid choice.\" \u003C\u003C std::endl;\n                continue; // Errors continue with the next loop iteration\n        }\n    }\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n```\n\n## Structuring the narrative: Items\nAn essential part of adventure games is a player's inventory - the collection of items they acquire and use during their journey. For example, at Shimmering Lake, the player acquired a blue gem.\n\nLet's update our Player `struct` to include an inventory using an array. In C++, an `array` is a collection of elements of the same type that can be identified by an index. When creating an array, you need to specify its type and size. 
Start by adding `std::string inventory` to the Player `struct`:\n\n![adventure.cpp - Code Suggestions shows us how to add an array of strings to the player struct to use as the players inventory](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5-add-inventory-player-struct.png){: .shadow}\n\nYou might find that Code Suggestions wants our player to be able to carry some gold, but we don't need that for now. Let's also add `int inventoryCount;` to keep track of the number of items in our player's inventory.\n\n![adventure.cpp - Code Suggestions shows us how to add an integer for inventoryCount to the player struct](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5.1-add-inventory-count-player-struct.png){: .shadow}\n\n```cpp\n// Define a structure for a Player in the game.\nstruct Player{\n    std::string name;  // The name of the player.\n    int health;        // The current health of the player.\n    int xp;            // Experience points gained by the player. Could be used for leveling up or other game mechanics.\n    std::string inventory[10];  // An array of strings for the player's inventory.\n    int inventoryCount = 0;  // The number of items in the player's inventory.\n};\n```\nIn our Player `struct`, we have defined an array for our inventory that can hold the names of 10 items (type:string, size: 10). As the player progresses through our story, we can assign new items to the inventory array based on the player's actions using the array index.\n\nSometimes Code Suggestions gets ahead of me and tries to add more complexity to the game by suggesting that we need to create a `struct` for some Monsters. 
Maybe later, Code Suggestions!\n\n![adventure.cpp - Code Suggestions wants to add a struct for Monsters we can battle](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5.2-suggestion-gets-distracted-by-monsters.png\n){: .shadow}\n\nBack at the Shimmering Lake, the player received a special blue gem from the merfolk. Let's update the code in `case 2` for the Shimmering Lake to add the gem to our player's inventory.\n\nYou can start by accessing the player's inventory with `player.inventory` and Code Suggestions will help add the gem.\n\n![adventure.cpp - Code Suggestions shows us how to add a gem to the player's inventory using a post-increment operation and the inventory array from the struct object](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5.3-add-gem-to-inventory.png){: .shadow}\n\n```cpp\n    // If the player chooses to sing along with the melody\n    else if (nestedChoice == 2)\n    {\n        std::cout \u003C\u003C \"Singing along, a merfolk surfaces and gifts \" \u003C\u003C player.name\n                \u003C\u003C \" a special blue gem as a token of appreciation for their voice.\"\n                \u003C\u003C std::endl;\n        player.inventory[player.inventoryCount] = \"Blue Gem\";\n        player.inventoryCount++;\n    }\n```\n\n* `player.inventory`: accesses the inventory attribute of the player object\n* `player.inventoryCount`: accesses the integer that keeps track of how many items are currently in the player's inventory. This also represents the next available index in our inventory array where an item can be stored.\n* `player.inventoryCount++`: increments the value of inventoryCount by 1. This is a post-increment operation. 
We are adding “Blue Gem” to the next available slot in the inventory array and incrementing the array for the newly added item.\n\nOnce we've added something to our player's inventory, we may also want to be able to look at everything in the inventory. We can use a `for` loop to iterate over the inventory array and display each item.\n\nIn C++, a `for` loop allows code to be repeatedly executed a specific number of times. It's different from the `while` loop we used earlier because the `while` executes its body based on a condition, whereas a `for` loop iterates over a sequence or range, usually with a known number of times.\n\nAfter adding the gem to the player's inventory, let's display all the items it has. Try starting a for loop with `for ( ` to display the player's inventory and Code Suggestions will help you with the syntax.\n\n![adventure.cpp - Code Suggestions demonstrates how to write a for loop to loop through the players inventory](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5.4-loop-over-players-inventory.png){: .shadow}\n\n```cpp\nstd::cout \u003C\u003C player.name \u003C\u003C \"'s Inventory:\" \u003C\u003C std::endl;\n// Loop through the player's inventory up to the count of items they have\nfor (int i = 0; i \u003C player.inventoryCount; i++)\n{\n    // Output the item in the inventory slot\n    std::cout \u003C\u003C \"- \" \u003C\u003C player.inventory[i] \u003C\u003C std::endl;\n}\n```\n\nA `for` loop consists of 3 main parts:\n\n* `int i = 0`: is the initialization where you set up your loop variable. Here, we start counting from 0.\n* `i \u003C player.inventoryCount`: is the condition we're looping on, our loop checks if `i`, the current loop variable, is less than the number of items in our inventory. It will keep going until this is true.\n* `i++`: is the iteration. 
This updates the loop variable each time the loop runs.\n\nTo make sure that our loop doesn't encounter an error, let's add some error handling to make sure the inventory is not empty when we try to output it.\n\n```\nstd::cout \u003C\u003C player.name \u003C\u003C \"'s Inventory:\" \u003C\u003C std::endl;\n// Loop through the player's inventory up to the count of items they have\nfor (int i = 0; i \u003C player.inventoryCount; i++)\n{\n    // Check if the inventory slot is not empty.\n    if (!player.inventory[i].empty())\n    {\n        // Output the item in the inventory slot\n        std::cout \u003C\u003C \"- \" \u003C\u003C player.inventory[i] \u003C\u003C std::endl;\n    }\n}\n```\n\nWith our progress so far, we've successfully established a persistent `while` loop for our adventure, handled decisions, crafted a `struct` for our player, and implemented a simple inventory system. Now, let's dive into the next scenario, the Grand Library, applying the foundations we've learned.\n\n**Async exercise**: Add more inventory items found in different locations.\n\nHere's what we have for `adventure.cpp` so far:\n\n```cpp\n#include \u003Ciostream> // Include the I/O stream library for input and output\n\n// Define a structure for a Player in the game.\nstruct Player{\n    std::string name;  // The name of the player.\n    int health;        // The current health of the player.\n    int xp;            // Experience points gained by the player. 
Could be used for leveling up or other game mechanics.\n    std::string inventory[10];  // An array of strings for the player's inventory.\n    int inventoryCount = 0;\n};\n\n// Main function, the starting point of the program\nint main()\n{\n    std::cout \u003C\u003C \"Welcome to the Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Create an instance of the Player struct\n    Player player;\n    player.health = 100; // Assign a default value for HP\n\n    // Prompt the user to enter their player name\n    std::cout \u003C\u003C \"Please enter your name: \";\n    std::cin >> player.name;\n\n    // Display a personalized welcome message to the player with their name\n    std::cout \u003C\u003C \"Welcome \" \u003C\u003C player.name \u003C\u003C \" to The Dragon Realm!\" \u003C\u003C std::endl;\n\n    // Declare an int variable to capture the user's choice\n    int choice;\n    // Declare an int variable to capture the user's nested choice\n    int nestedChoice;\n\n    // Initialize a flag to control the loop and signify the player's intent to explore.\n    bool exploring = true;\n    // As long as the player wishes to keep exploring, this loop will run.\n    while(exploring) {\n\n        // If still exploring, ask the player where they want to go next\n        std::cout \u003C\u003C \"--------------------------------------------------------\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Where will \" \u003C\u003C player.name \u003C\u003C \" go next?\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"1. Moonlight Markets\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"2. Grand Library\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"3. 
Shimmering Lake\" \u003C\u003C std::endl;\n        std::cout \u003C\u003C \"Please enter your choice: \";\n        // Update value of choice\n        std::cin >> choice;\n\n        // Respond based on the player's main choice\n        switch(choice) {\n            //  Handle the Moonlight Markets scenario\n            case 1:\n                std::cout \u003C\u003C \"You chose Moonlight Markets.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Grand Library scenario.\n            case 2:\n                std::cout \u003C\u003C \"You chose Grand Library.\" \u003C\u003C std::endl;\n                break;\n            // Handle the Shimmering Lake scenario.\n            case 3:\n                std::cout \u003C\u003C player.name \u003C\u003C \" arrives at Shimmering Lake. It is one of the most beautiful lakes that\" \u003C\u003C player.name \u003C\u003C \" has seen. They hear a mysterious melody from the water. They can either: \" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"1. Stay quiet and listen\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"2. 
Sing along with the melody\" \u003C\u003C std::endl;\n                std::cout \u003C\u003C \"Please enter your choice: \";\n\n                // Capture the user's nested choice\n                std::cin >> nestedChoice;\n\n                // If the player chooses to remain silent\n                if (nestedChoice == 1)\n                {\n                    std::cout \u003C\u003C \"Remaining silent, \" \u003C\u003C player.name \u003C\u003C \" hears whispers of the merfolk below, but nothing happens.\" \u003C\u003C std::endl;\n                }\n                // If the player chooses to sing along with the melody\n                else if (nestedChoice == 2)\n                {\n                    std::cout \u003C\u003C \"Singing along, a merfolk surfaces and gifts \" \u003C\u003C player.name\n                            \u003C\u003C \" a special blue gem as a token of appreciation for their voice.\"\n                            \u003C\u003C std::endl;\n                    player.inventory[player.inventoryCount] = \"Blue Gem\";\n                    player.inventoryCount++;\n\n                    std::cout \u003C\u003C player.name \u003C\u003C \"'s Inventory:\" \u003C\u003C std::endl;\n                    // Loop through the player's inventory up to the count of items they have\n                    for (int i = 0; i \u003C player.inventoryCount; i++)\n                    {\n                        // Check if the inventory slot is not empty.\n                        if (!player.inventory[i].empty())\n                        {\n                            // Output the item in the inventory slot\n                            std::cout \u003C\u003C \"- \" \u003C\u003C player.inventory[i] \u003C\u003C std::endl;\n                        }\n                    }\n\n                }\n                break;\n            // Option to exit the game\n            case 4:\n                exploring = false;\n                break;\n            // If 'choice' is not 1, 2, 
or 3, this block is executed.\n            default:\n                std::cout \u003C\u003C \"You did not enter a valid choice.\" \u003C\u003C std::endl;\n                continue; // Errors continue with the next loop iteration\n        }\n    }\n\n    // Return 0 to indicate successful execution\n    return 0;\n}\n```\n\n![adventure.cpp - A full output of the game at the current state - our player sugaroverflow visits the Lake, receives the gem, adds it to their inventory, and we display the inventory before returning to the loop](https://about.gitlab.com/images/blogimages/2023-08-21-building-a-text-adventure-using-cplusplus-and-code-suggestions/5.5-full-output-shimmering-lake.png){: .shadow}\n",[478,704,940,835,9],{"slug":1206,"featured":6,"template":684},"building-a-text-adventure-using-cplusplus-and-code-suggestions","content:en-us:blog:building-a-text-adventure-using-cplusplus-and-code-suggestions.yml","Building A Text Adventure Using Cplusplus And Code Suggestions","en-us/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions.yml","en-us/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions",{"_path":1212,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1213,"content":1219,"config":1226,"_id":1228,"_type":13,"title":1229,"_source":15,"_file":1230,"_stem":1231,"_extension":18},"/en-us/blog/building-build-images",{"title":1214,"description":1215,"ogTitle":1214,"ogDescription":1215,"noIndex":6,"ogImage":1216,"ogUrl":1217,"ogSiteName":669,"ogType":670,"canonicalUrls":1217,"schema":1218},"Getting [meta] with GitLab CI/CD: Building build images","Let's talk about building build images with GitLab CI/CD. 
The power of Docker as a build platform is unleashed when you get meta.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678567/Blog/Hero%20Images/building-blocks.jpg","https://about.gitlab.com/blog/building-build-images","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting [meta] with GitLab CI/CD: Building build images\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2019-08-28\",\n      }",{"title":1214,"description":1215,"authors":1220,"heroImage":1216,"date":1222,"body":1223,"category":769,"tags":1224},[1221],"Brendan O'Leary","2019-08-28","\n> An alternative title for this post could have been:\n>\n> I heard you liked Docker, so I put [dind](https://hub.docker.com/_/docker/).\n\n## Getting started\nIt should be clear by now that I love building stuff with GitLab CI/CD. From\n[DNS](https://medium.com/gitlab-magazine/ci-cd-all-the-things-pihole-625a0ceaf12)\nto [breakfast](/blog/introducing-auto-breakfast-from-gitlab/) GitLab CI/CD\noffers a pretty wide range. However, past those \"fun\" use cases, I also like\nto share some ~~best~~ practices I have acquired during my years of using [GitLab\nCI/CD](/solutions/continuous-integration/), both for software and non-software projects alike.\n\nI crossed out \"best\" above because I don't really like the term \"best practices.\" It\nimplies that there is only one right answer to a given question – which is the\nopposite of the point of computer science. Sure there are better and worse ways to\ndo something – but like many things in life, you have to find what works for\nyou. \"[The best camera is the one you have with you](https://www.amazon.com/Best-Camera-One-Thats-You/dp/0321684788)\"\ncomes to mind when building CI/CD for projects. 
Something that works is better than something that's pretty.\n\nBut, enough of my digression, let's get to the practice I wanted to share in this\npost: Building build images as part of the build process. Yes, it is precisely as meta as it sounds.\n\n## Why?\n\nOften when building a particular project, you may have several unique build dependencies.\nIn many languages, package managers solve for the majority if not all of these\ndependencies – at least for build time (think [npm](https://www.npmjs.com), [RubyGems](https://rubygems.org/),\n[Maven](https://maven.apache.org/what-is-maven.html)). However, when we are building and\ndeploying (CI/**CD** let's remember) from a machine that is not our own, that may not\nbe enough. There may be a few dependencies we might need from elsewhere.\n\nThe language libraries themselves are one such dependency – to build Java I'm going to need\nthe JDK or JRE. To build Node, I'll need... well Node, etc. In a Docker-based environment,\nthose languages and dependencies typically have an official image on Docker\nHub ([JRE from Oracle](https://hub.docker.com/_/oracle-serverjre-8) or\n[Node from Node.js](https://hub.docker.com/_/node) for instance). Assume, however, that\nI may need a few other things not included in **either** those official Docker images or\nthe package manager I'm using. For instance, maybe I need a CLI tool for\ndeploy ([AWS](https://aws.amazon.com/cli/), [Heroku](https://devcenter.heroku.com/articles/heroku-cli),\n[Firebase](https://firebase.google.com/docs/cli), etc.). We also might need a testing\nframework or tool like [Selenium](https://www.seleniumhq.org) or\n[headless Chrome](https://developers.google.com/web/updates/2017/04/headless-chrome).\nOr I may need other tools for packaging, testing, or deployment.\n\nSometimes there is a Docker image on Docker Hub for these combinations – or some of\nthem – but not always a maintained version. 
One easy solution to this could be to\njust run the install of the tools before every job that needs it. This can\neven be \"automated\" using something like\nthe [before_script](https://docs.gitlab.com/ee/ci/yaml/#before_script-and-after_script) syntax.\nHowever, this adds time to our pipeline and seems inefficient: Is there a better way?\n\n## Enter the GitLab Docker registry\nSince GitLab is a single application for the entire [DevOps](/topics/devops/) lifecycle – it actually\nships out of the box with a built-in\n[Docker registry](https://docs.gitlab.com/ee/user/packages/container_registry/index.html).\nThis can be a useful tool when deploying code in a containerized environment. We can\nbuild our application into a container and send it off into Kubernetes or some\nother Docker orchestrator.\n\nHowever, I also see this registry as an opportunity to save time in my\npipeline (and save round trips to Docker hub and back every time). For builds that require\nsome of these extra dependencies, I like to build a \"build\" Docker image.\nThat way, I have an image with all of those baked right in. Then, as part of my\npipeline, I can build the image at the start (only when changes are made or every time).\nAnd the rest of the pipeline can consume that image as the base image.\n\n## Putting it in practice\nFor example, let's see what it looks like to build a simple Docker image to use with\ndeploying to [Google Firebase](https://firebase.google.com/).\n\nFirebase is a \"backend as a service\" tool that provides a database, authentication,\nand other services across platforms (web, iOS, and Android). It also includes web hosting\nand several other items that can be deployed through [a CLI](https://firebase.google.com/docs/cli).\nThis tool makes getting started really easy. 
You can deploy the whole stack with\n`firebase deploy.` Alternatively, you can deploy a part (like [serverless](/topics/serverless/) functions)\nwith a command like `firebase deploy --only functions.`\n\nMaking this work in a CI/CD world requires a few extra steps though. We'll need a Node\nDocker image that has the firebase CLI in it, so let's make a simple Dockerfile to do that.\n\n> Putting this Dockerfile in `.meta/Dockerfile`\n\n```dockerfile\nFROM node:10\n\nRUN npm install -g firebase-tools\n```\n\nNext, I'll add a job to the front of my pipeline.\n\n> Added to the front of my `.gitlab-ci.yml`\n\n```yaml\nmeta-build-image:\n  image: docker:stable\n  services:\n    - docker:dind\n  stage: prepare\n  script:\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - cd .meta\n    - docker build -t $CI_REGISTRY/group/project/buildimage:latest .\n    - docker push $CI_REGISTRY/group/project/buildimage:latest\n  only:\n    refs:\n      - main\n    changes:\n      - .meta/Dockerfile\n```\n\nLet's break down that job:\n1. We use the `docker:stable` image and a service of `docker:dind`\n1. The stage is my first stage called `prepare`\n1. In the script, we login to the GitLab registry with the built-in variables and build the\nimage. For more details see the [GitLab documentation for building Docker images](https://docs.gitlab.com/ee/ci/docker/using_docker_build.html).\n1. We only run this on `main` and only when the `.meta/Dockerfile` changes. This makes\nsure we are specific about when we change the Docker image. 
We could also use the\ncommit hash or other methods here to make the image more fungible.\n\nNow, in further jobs down the pipeline, I can use the latest build of the Docker image like this:\n\n```yaml\nfirestore:\n  image: registry.gitlab.com/group/project/buildimage\n  stage: deploy\n  script:\n    - firebase deploy --only firestore\n  only:\n    changes:\n      - .firebase-config/firestore.rules\n      - .firebase-config/firestore.indexes.json\n```\n\nIn this job, we only run the job if something about\nthe [Firestore](https://firebase.google.com/docs/firestore) (the database from Firebase)\nconfiguration changes. And when it does, we run the `firestore deploy` command in CI. I\nalso added a token for deploy as a [GitLab CI/CD variable](https://docs.gitlab.com/ee/ci/variables/)\nbased off the Firebase documentation\nfor [using firebase with CI](https://firebase.google.com/docs/cli#admin-commands).\n\n## Summary\nIn the end, this helps speed up pipelines by ensuring that you have a custom-built build\nimage that you control. You don't have to rely on unstable or unmaintained Docker Hub\nimages or even have a Docker Hub account yourself to get started.\n\nTo learn more about GitLab CI/CD you can [read the GitLab website](/solutions/continuous-integration/)\nor the [CI/CD docs](https://docs.gitlab.com/ee/ci/introduction/). 
Also, there's a lot more to\nlearn about the [GitLab Docker registry](https://docs.gitlab.com/ee/user/packages/container_registry/index.html).\n\nCover image by [Hack Capital](https://unsplash.com/@markusspiske?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/search/photos/build?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText).\n{: .note}\n",[108,1225,9],"kubernetes",{"slug":1227,"featured":6,"template":684},"building-build-images","content:en-us:blog:building-build-images.yml","Building Build Images","en-us/blog/building-build-images.yml","en-us/blog/building-build-images",{"_path":1233,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1234,"content":1240,"config":1249,"_id":1251,"_type":13,"title":1252,"_source":15,"_file":1253,"_stem":1254,"_extension":18},"/en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features",{"title":1235,"description":1236,"ogTitle":1235,"ogDescription":1236,"noIndex":6,"ogImage":1237,"ogUrl":1238,"ogSiteName":669,"ogType":670,"canonicalUrls":1238,"schema":1239},"Building GitLab with GitLab: A multi-region service to deliver AI features","Discover how we built our first multi-region deployment for teams at GitLab using the platform's many features, helping create a frictionless developer experience for GitLab Duo users.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098664/Blog/Hero%20Images/Blog/Hero%20Images/building-gitlab-with-gitlab-no-type_building-gitlab-with-gitlab-no-type.png_1750098663794.png","https://about.gitlab.com/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building GitLab with GitLab: A multi-region service to deliver AI features\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chance 
Feick\"},{\"@type\":\"Person\",\"name\":\"Sam Wiskow\"}],\n        \"datePublished\": \"2024-09-12\"\n      }",{"title":1235,"description":1236,"authors":1241,"heroImage":1237,"date":1244,"body":1245,"category":769,"tags":1246},[1242,1243],"Chance Feick","Sam Wiskow","2024-09-12","For GitLab Duo, real-time AI-powered capabilities like [Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/) need low-latency response times for a frictionless developer experience. Users don’t want to interrupt their flow and wait for a code suggestion to show up. To ensure GitLab Duo can provide the right suggestion at the right time and meet high performance standards for critical AI infrastructure, GitLab recently launched our first multi-region service to deliver AI features.\n\nIn this article, we will cover the benefits of multi-region services, how we built an internal platform codenamed ‘Runway’ for provisioning and deploying multi-region services using GitLab features, and the lessons learned migrating to multi-region in production.\n\n## Background on the project\n\nRunway is GitLab’s internal platform as a service (PaaS) for provisioning, deploying, and operating containerized services. Runway's purpose is to enable GitLab service owners to self-serve infrastructure needs with production readiness out of the box, so application developers can focus on providing value to customers. 
As part of [our corporate value of dogfooding](https://handbook.gitlab.com/handbook/values/#results), the first iteration was built in 2023 by the Infrastructure department on top of core GitLab capabilities, such as continuous integration/continuous delivery ([CI/CD](https://about.gitlab.com/topics/ci-cd/)), environments, and deployments.\n\nBy establishing automated GitOps best practices, Runway services use infrastructure as code (IaC), merge requests (MRs), and CI/CD by default.\n\nGitLab Duo is primarily powered by [AI Gateway](https://gitlab.com/gitlab-org/modelops/applied-ml/code-suggestions/ai-assist), a satellite service written in Python outside of GitLab’s modular monolith written in Ruby. In cloud computing, a region is a geographical location of data centers operated by cloud providers.\n\n## Defining a multi-region strategy\n\nDeploying in a single region is a good starting point for most services, but can come with downsides when you are trying to reach a global audience. Users who are geographically far from where your service is deployed may experience different levels of service and responsiveness than those who are closer. This can lead to a poor user experience, even if your service is well built in all other respects.\n\nFor AI Gateway, it was important to meet global customers wherever they are located, whether on GitLab.com or self-managed instances using Cloud Connector. When a developer is deciding to accept or reject a code suggestion, milliseconds matter and can define the user experience.\n\n### Goals\n\nMulti-region deployments require more infrastructure complexity, but for use cases where latency is a core component of the user experience, the benefits often outweigh the downsides. First, multi-region deployments offer increased responsiveness to the user. By serving requests from locations closest to end users, latency can be significantly reduced. Second, multi-region deployments provide greater availability. 
With fault tolerance, services can fail over during a regional outage. There is a much lower chance of a service failing completely, meaning users should not be interrupted even in partial failures.\n\nBased on our goals for performance and availability, we used this opportunity to create a scalable multi-region strategy in Runway, which is built leveraging GitLab features.\n\n### Architecture\n\nIn SaaS platforms, GitLab.com’s infrastructure is hosted on Google Cloud Platform (GCP). As a result, Runway’s first supported platform runtime is Cloud Run. The initial workloads deployed on Runway are stateless satellite services (e.g., AI Gateway), so Cloud Run services are a good fit that provide a clear migration path to more complex and flexible platform runtimes, e.g. Kubernetes.\n\nBuilding Runway on top of GCP Cloud Run using GitLab has allowed us to iterate and tease out the right level of abstractions for service owners as part of a platform play in the Infrastructure department.\n\nTo serve traffic from multiple regions in Cloud Run, the multi-region deployment strategy must support global load balancing, and the provisioning and configuration of regional resources. Here’s a simplified diagram of the proposed architecture in GCP:\n\n![simplified diagram of the proposed architecture in GCP](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098671/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098671612.png)\n\nBy replicating Cloud Run services across multiple regions and configuring the existing global load balancing with serverless network endpoint group (NEG) backends, we’re able to serve traffic from multiple regions. 
For the remainder of the article, we’ll focus less on specifics of Cloud Run and more on how we’re building with GitLab.\n\n## Building a multi-region platform with GitLab\n\nNow that you have context about Runway, let's walk through how to build a multi-region platform using GitLab features.\n\n### Provision\n\nWhen building an internal platform, the first challenge is provisioning infrastructure for a service. In Runway, Provisioner is the component that is responsible for maintaining a service inventory and managing IaC for GCP resources using Terraform.\n\nTo provision a service, an application developer will open an MR to add a service project to the inventory using git, and Provisioner will create required resources, such as service accounts and identity and access management policies. When building this functionality with GitLab, Runway leverages [OpenID Connect (OIDC) with GCP Workload Identity Federation](https://docs.gitlab.com/ee/ci/cloud\\_services/google\\_cloud/) for managing IaC.\n\nAdditionally, Provisioner will create a deployment project for each service project. The purpose of creating separate projects for deployments is to ensure the [principle of least privilege](https://about.gitlab.com/blog/the-ultimate-guide-to-least-privilege-access-with-gitlab/) by authenticating as a GCP service account with restricted permissions. Runway leverages the [Projects API](https://docs.gitlab.com/ee/api/projects.html) for creating projects with [Terraform provider](https://registry.terraform.io/providers/gitlabhq/gitlab/latest/docs).\n\nFinally, Provisioner defines variables in the deployment project for the service account, so that deployment CI jobs can authenticate to GCP. 
Runway leverages [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/) and [Job Token allowlist](https://docs.gitlab.com/ee/ci/jobs/ci\\_job\\_token.html\\#add-a-group-or-project-to-the-job-token-allowlist) to handle authentication and authorization.\n\nHere’s a simplified example of provisioning a multi-region service in the service inventory:\n\n```\n{\n  \"inventory\": [\n    {\n      \"name\": \"example-service\",\n      \"project_id\": 46267196,\n      \"regions\": [\n        \"europe-west1\",\n        \"us-east1\",\n        \"us-west1\"\n      ]\n    }\n  ]\n}\n```\n\nOnce provisioned, a deployment project and necessary infrastructure will be created for a service.\n\n### Configure\n\nAfter a service is provisioned, the next challenge is the configuration for a service. In Runway, [Reconciler](https://gitlab.com/gitlab-com/gl-infra/platform/runway/runwayctl) is a component that is responsible for configuring and deploying services by aligning the actual state with the desired state using Golang and Terraform.\n\nHere’s a simplified example of an application developer configuring GitLab CI/CD in their service project:\n\n```\n# .gitlab-ci.yml\nstages:\n  - validate\n  - runway_staging\n  - runway_production\n\ninclude:\n  - project: 'gitlab-com/gl-infra/platform/runway/runwayctl'\n    file: 'ci-tasks/service-project/runway.yml'\n    inputs:\n      runway_service_id: example-service\n      image: \"$CI_REGISTRY_IMAGE/${CI_PROJECT_NAME}:${CI_COMMIT_SHORT_SHA}\"\n      runway_version: v3.22.0\n\n# omitted for brevity\n```\n\nRunway provides sane default values for configuration that are based on our experience in delivering stable and reliable features to customers. Additionally, service owners can configure infrastructure using a service manifest file hosted in a service project. The service manifest uses JSON Schema for validation. 
When building this functionality with GitLab, Runway leverages [Pages](https://docs.gitlab.com/ee/user/project/pages/) for schema documentation.\n\nTo deliver this part of the platform, Runway leverages [CI/CD templates](https://docs.gitlab.com/ee/development/cicd/templates.html), [Releases](https://docs.gitlab.com/ee/user/project/releases/), and [Container Registry](https://docs.gitlab.com/ee/user/packages/container\\_registry/) for integrating with service projects.\n\nHere’s a simplified example of a service manifest:\n\n```\n# .runway/runway-production.yml\napiVersion: runway/v1\nkind: RunwayService\nspec:\n container_port: 8181\n regions:\n   - us-east1\n   - us-west1\n   - europe-west1\n\n# omitted for brevity\n```\n\nFor multi-region services, Runway injects an environment variable into the container instance runtime, e.g. RUNWAY\\_REGION, so application developers have the context to make any downstream dependencies regionally-aware, e.g. Vertex AI API.\n\nOnce configured, a service project will be integrated with a deployment project.\n\n### Deploy\n\nAfter a service project is configured, the next challenge is deploying a service. In Runway, Reconciler handles this by triggering a deployment job in the deployment project when an MR is merged to the main branch. When building this functionality with GitLab, Runway leverages [Trigger Pipelines](https://docs.gitlab.com/ee/ci/triggers/) and [Multi-Project Pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream\\_pipelines.html\\#multi-project-pipelines) to trigger jobs from service project to deployment project.\n\n![trigger jobs from service project to deployment project](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098671612.png)\n\nOnce a pipeline is running in a deployment project, it will be deployed to an environment. By default, Runway will provision staging and production environments for all services. 
At this point, Reconciler will apply any Terraform resource changes for infrastructure. When building this functionality with GitLab, Runway leverages [Environments/Deployments](https://docs.gitlab.com/ee/ci/environments/) and [GitLab-managed Terraform state](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform\\_state.html) for each service.\n\n![Reconciler applies any Terraform resource changes for infrastructure](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098671614.png)\n\nRunway provides default application metrics for services. Additionally, custom metrics can be used by enabling a sidecar container with OpenTelemetry Collector configured to scrape Prometheus and remote write to Mimir. By providing observability out of the box, Runway is able to bake monitoring into CI/CD pipelines.\n\nExample scenarios include gradual rollouts for blue/green deployments, preventing promotions to production when staging is broken, or automatically rolling back to previous revision when elevated error rates occur in production.\n\n![Runway bakes monitoring into CI/CD pipelines](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098671615.png)\n\nOnce deployed, environments will serve the latest revision of a service. At this point, you should have a good understanding of some of the challenges that will be encountered, and how to solve them with GitLab features.\n\n## Migrating to multi-region in production\n\nAfter extending Runway components to support multi-region in Cloud Run, the final challenge was migrating from AI Gateway’s single-region deployment in production with zero downtime. Today, teams using Runway to deploy their services can self-serve on regions making a multi-region deployment just as simple as a single-region deployment. 
\n\nWe were able to iterate on building multi-region functionality without impacting existing infrastructure by using semantic versioning for Runway. Next, we’ll share some learnings from the migration that may inform how to operate services for an internal multi-region platform.\n\n### Dry run deployments\n\nIn Runway, Reconciler will apply Terraform changes in CI/CD. The trade-off is that plans cannot be verified in advance, which could risk inadvertently destroying or misconfiguring production infrastructure. To solve this problem, Runway will perform a “dry run” deployment for MRs.\n\n![\"Dry run\" deployment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098671616.png)\n\nFor migrating AI Gateway, dry run deployments increased confidence and helped mitigate risk of downtime during rollout. When building an internal platform with GitLab, we recommend supporting dry run deployments from the start.\n\n### Regional observability\n\nIn Runway, existing observability was aggregated by assuming a single-region deployment. To solve this problem, Runway observability was retrofitted to include a new region label for Prometheus metrics.\n\nOnce metrics were retrofitted, we were able to introduce service level indicators (SLIs) for both regional Cloud Run services and global load balancing. Here’s an example dashboard screenshot for a general Runway service:\n\n![dashboard screenshot for a general Runway service](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098671617.png)\n\n***Note:** Data is not actual production data and is only for illustration purposes.*\n\nAdditionally, we were able to update our service level objectives (SLOs) to support regions. 
As a result, service owners could be alerted when a specific region experiences an elevated error rate, or increase in response times.\n\n![screenshot of alerts](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098672/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098671617.png)\n\n***Note:** Data is not actual production data and is only for illustration purposes.*\n\nFor migrating AI Gateway, regional observability increased confidence and helped provide more visibility into new infrastructure. When building an internal platform with GitLab, we recommend supporting regional observability from the start.\n\n### Self-service regions\n\nThe Infrastructure department successfully performed the initial migration of multi-region support for AI Gateway in production with zero downtime. Given the risk associated with rolling out a large infrastructure migration, it was important to ensure the service continued working as expected.\n\nShortly afterwards, service owners began self-serving additional regions to meet the growth of customers. At the time of writing, [GitLab Duo](https://about.gitlab.com/gitlab-duo/) is available in six regions around the globe and counting. Service owners are able to configure the desired regions, and Runway will provide guardrails along the way in a scalable solution.\n\nAdditionally, three other internal services have already started using multi-region functionality on Runway. Application developers have entirely self-served functionality, which validates that we’ve provided a good platform experience for service owners. For a platform play, a scalable solution like Runway is considered a good outcome since the Infrastructure department is no longer a blocker.\n\n## What’s next for Runway\n\nBased on how quickly we could iterate to provide results for customers, the SaaS Platforms department has continued to invest in Runway. 
We’ve grown the Runway team with additional contributors, started evolving the platform runtime (e.g. Google Kubernetes Engine), and continue dogfooding with tighter integration in the product.\n\nIf you’re interested in learning more, feel free to check out [https://gitlab.com/gitlab-com/gl-infra/platform/runway](https://gitlab.com/gitlab-com/gl-infra/platform/runway).\n\n## More Building GitLab with GitLab\n- [Why there is no MLOps without DevSecOps](https://about.gitlab.com/blog/there-is-no-mlops-without-devsecops/)\n- [Stress-testing Product Analytics](https://about.gitlab.com/blog/building-gitlab-with-gitlab-stress-testing-product-analytics/)\n- [Web API Fuzz Testing](https://about.gitlab.com/blog/building-gitlab-with-gitlab-api-fuzzing-workflow/)\n- [How GitLab.com inspired Dedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/)\n- [Expanding our security certification portfolio](https://about.gitlab.com/blog/building-gitlab-with-gitlab-expanding-our-security-certification-portfolio/)\n",[108,772,771,1247,9,728,1248,726,835,704],"inside GitLab","google",{"slug":1250,"featured":90,"template":684},"building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features","content:en-us:blog:building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features.yml","Building Gitlab With Gitlab A Multi Region Service To Deliver Ai 
Features","en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features.yml","en-us/blog/building-gitlab-with-gitlab-a-multi-region-service-to-deliver-ai-features",{"_path":1256,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1257,"content":1263,"config":1270,"_id":1272,"_type":13,"title":1273,"_source":15,"_file":1274,"_stem":1275,"_extension":18},"/en-us/blog/building-gitlab-with-gitlab-api-fuzzing-workflow",{"title":1258,"description":1259,"ogTitle":1258,"ogDescription":1259,"noIndex":6,"ogImage":1260,"ogUrl":1261,"ogSiteName":669,"ogType":670,"canonicalUrls":1261,"schema":1262},"Building GitLab with GitLab: Web API Fuzz Testing","Our new series shows how we dogfood new DevSecOps platform features to ready them for you. First up, security testing.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659740/Blog/Hero%20Images/building-gitlab-with-gitlab-no-type.png","https://about.gitlab.com/blog/building-gitlab-with-gitlab-api-fuzzing-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building GitLab with GitLab: Web API Fuzz Testing\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mike Eddington\"},{\"@type\":\"Person\",\"name\":\"Eugene Lim\"}],\n        \"datePublished\": \"2023-05-09\"\n      }",{"title":1258,"description":1259,"authors":1264,"heroImage":1260,"date":1267,"body":1268,"category":769,"tags":1269},[1265,1266],"Mike Eddington","Eugene Lim","2023-05-09","\n\nAt GitLab, we try to [dogfood everything](/handbook/product/product-processes/#dogfood-everything) to help us better understand the product, pain points, and configuration issues. We use what we learn to build a more efficient, feature-rich platform and user experience. In this first installment of our “Building GitLab with GitLab” series, we will focus on security testing. 
We constantly strive to improve our security testing coverage and integrate it into our DevSecOps lifecycle. These considerations formed the motivation for the API fuzzing dogfooding project at GitLab. By sharing our lessons from building this workflow, we hope other teams can also learn how to integrate GitLab’s Web API Fuzz Testing and solve some common challenges.\n\n## What is Web API Fuzz Testing?\n\nWeb API Fuzz Testing involves generating and sending various unexpected input parameters to a web API in an attempt to trigger unexpected behavior and errors in the API backend. By analyzing these errors, you can discover bugs and potential security issues missed by other scanners that focus on specific vulnerabilities. GitLab's Web API Fuzz Testing complements and should be run in addition to GitLab Secure’s other security scanners such as static application security testing ([SAST](https://docs.gitlab.com/ee/user/application_security/sast/)) and dynamic application security testing ([DAST](https://docs.gitlab.com/ee/user/application_security/dast/)) APIs.\n\n## Auto-generating an OpenAPI specification\nTo run the Web API Fuzzing Analyzer, you need one of the following:\n* OpenAPI Specification - Version 2 or 3\n* GraphQL Schema\n* HTTP Archive (HAR)\n* Postman Collection - Version 2.0 or 2.1\n\nAt the start of the API fuzzing project, the [API Vision working group](/company/team/structure/working-groups/api-vision/) was also working on an issue to automatically document [GitLab’s REST API endpoints in an OpenAPI specification](https://gitlab.com/groups/gitlab-org/-/epics/8636), so we worked with our colleague Andy Soiron on implementing it. 
Because GitLab uses the [grape](https://github.com/ruby-grape/grape) API framework, Andy had already identified and [tested](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/95877) the [grape-swagger](https://github.com/ruby-grape/grape-swagger) gem that auto-generates an OpenAPI v2 specification based on existing grape annotations. For example, the following API endpoint code:\n\n```\n     Class.new(Grape::API) do\n       format :json\n       desc 'This gets something.'\n       get '/something' do\n         { bla: 'something' }\n       end\n       add_swagger_documentation\n     end\n``` \nWill be parsed by grape-swagger into:\n\n```\n{\n  // rest of OpenAPI v2 specification\n  …\n  \"paths\": {\n    \"/something\": {\n      \"get\": {\n        \"description\": \"This gets something.\",\n        \"produces\": [\n          \"application/json\"\n        ],\n        \"operationId\": \"getSomething\",\n        \"responses\": {\n          \"200\": {\n            \"description\": \"This gets something.\"\n          }\n        }\n      }\n    }\n  }\n}\n```\n\n\nHowever, with almost 2,000 API operations with different requirements and formats, a lot of additional work needed to be done to resolve edge cases that did not meet the requirements of grape-swagger or the OpenAPI format. For example, one simple case was API endpoints that accept file parameters, such as the [upload metric image endpoint](https://docs.gitlab.com/ee/api/issues.html#upload-metric-image). GitLab uses the [Workhorse](https://gitlab.com/gitlab-org/gitlab/tree/master/workhorse) smart reverse proxy to handle \"large\" HTTP requests such as file uploads. 
As such, file parameters must be of the type WorkhorseFile:\n\n\n```\nnamespace ':id/issues/:issue_iid/metric_images' do\n            …\n            desc 'Upload a metric image for an issue' do\n              success Entities::IssuableMetricImage\n            end\n            params do\n              requires :file, type: ::API::Validations::Types::WorkhorseFile, desc: 'The image file to be uploaded'\n              optional :url, type: String, desc: 'The url to view more metric info'\n              optional :url_text, type: String, desc: 'A description of the image or URL'\n            end\n            post do\n              require_gitlab_workhorse!\n```\n\nBecause grape-swagger does not recognize what OpenAPI type WorkhorseFile corresponds to, it excludes the parameter from its output. We fixed this by adding a grape-swagger-specific documentation to override the type during generation:\n\n```\n             requires :file, type: ::API::Validations::Types::WorkhorseFile, desc: 'The image file to be uploaded', documentation: { type: 'file' }\n```\n\nHowever, not all edge cases could be resolved with a simple match-and-replace in the grape annotations. For example, Ruby on Rails supports wildcard segment parameters. A route like `get 'books/*section/:title'` would match `books/some/section/last-words-a-memoir`. In addition, the URI would be parsed such that the `section` path parameter would have the value `some/section` and the `title` path parameter would have the value `last-words-a-memoir`.\n\nCurrently, grape-swagger does not recognize these wildcard segments as path parameters. 
For example, the route would generate:\n\n```\n\"paths\": {\n  \"/api/v2/books/*section/{title}\": {\n    \"get\": {\n    ...\n      \"parameters\": [\n         {\n           \"in\": \"query\", \"name\": \"*section\"\n           ...\n  }\n}\n```\n\nInstead of the expected:\n\n```\n\"paths\": {\n  \"/api/v2/books/{section}/{title}\": {\n    \"get\": {\n    ...\n      \"parameters\": [\n         {\n           \"in\": \"path\", \"name\": \"section\"\n           ...\n  }\n}\n```\n\nAs such, we also needed to make several patches to grape-swagger, which we forked while waiting for the changes to be accepted upstream. Nevertheless, with lots of careful checking and cooperation across teams, we managed to get the OpenAPI specification generated for most of the endpoints.\n\n## Performance tuning\n\nWith the OpenAPI specification, we could now begin with the API fuzzing. GitLab already uses the [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) feature to generate testing environments for some feature changes, providing a readily available fuzzing target. However, given the large number of endpoints, it would be impossible to expect a standard shared runner to complete fuzzing in a single job. The Web API Fuzz Testing documentation includes a [performance tuning section](https://docs.gitlab.com/ee/user/application_security/api_fuzzing/#performance-tuning-and-testing-speed) that recommends the following:\n\n* using a multi-CPU Runner\n* excluding slow operations\n* splitting a test into multiple jobs\n* excluding operations in feature branches, but not default branch\n\nThe first recommendation was easy to implement with a dedicated fuzzing runner. We recommend doing this for large scheduled fuzzing workflows, especially if you select the Long-100 fuzzing profile. We also began excluding slow operations by checking the job logs for the time taken to complete each operation. 
Along the way, we identified other endpoints that needed to be excluded, such as the [revoke token endpoint](https://docs.gitlab.com/ee/api/personal_access_tokens.html#revoke-a-personal-access-token) that prematurely ended the fuzzing session.\n\nSplitting the test into multiple jobs took the most effort due to the requirements of the OpenAPI format. Each OpenAPI document includes a required set of objects and fields, so it is not simply a matter of splitting after a fixed number of lines. Additionally, each operation relies on entities defined in the definitions object, so we needed to ensure that when splitting the OpenAPI specification, the entities required by the endpoints were included. We also wrote a quick script to fill the example parameter data with actual data from the testing environment, such as project IDs.\n\nWhile it was possible to run these scripts locally, then push the split jobs and OpenAPI specifications to the repository, this created a large number of changes every time we updated the original OpenAPI specification. Instead, we adapted the workflow to use dynamically generated child pipelines that would split the OpenAPI document in a CI job, then generate a child pipeline with jobs for each split document. This made iterating a lot easier and more agile. We have uploaded [the scripts and pipeline configuration](https://gitlab.com/eugene_lim/api-fuzzing-dogfooding) for reference.\n\nBy tweaking the number of parallel jobs and fuzzing profile, we were eventually able to achieve a reasonably comprehensive fuzzing session in an acceptable time frame. When tuning your own fuzzing workflow, balancing these trade-offs is essential.\n\n## Triaging the API fuzzing findings\n\nWith the fuzzing done, we were now confronted with hundreds of findings. Unlike DAST analyzers that try to detect specific vulnerabilities, Web API Fuzz Testing looks for unexpected behavior and errors that may not necessarily be vulnerabilities. 
This is why fuzzing faults discovered by the API Fuzzing Analyzer show up as vulnerabilities with a severity of “Unknown.” This requires more involved triaging.\n\nFortunately, the Web API fuzzer also outputs Postman collections as artifacts in the Vulnerability Report page. These collections allow you to quickly repeat requests that triggered a fault during fuzzing. For this stage of the fuzzing workflow, we recommend that you set up a local instance of the application so that you can easily check logs and debug specific faults. In this case, we ran the [GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit).\n\nMany of the faults occurred due to a lack of error handling for unexpected inputs. We created issues from the Vulnerability Report page, and if we found that a particular fault had the same root cause as a previously triaged fault, we linked the vulnerability to the original issue instead.\n\n## Lessons learned\n\nThe API fuzzing dogfooding project turned out to be a fruitful exercise that benefited other workstreams at GitLab, such as the API documentation project. In addition, tuning and triaging helped us identify key pain points in the process for improvement. Automated API documentation generation is difficult even with OpenAPI, particularly on a long-lived codebase. GitLab’s existing annotations and tests helped speed up documentation via a distributed, asynchronous workflow across multiple teams. In addition, many GitLab features such as Review Apps, Vulnerability Reports, and dynamically generated child pipelines helped us build a robust fuzzing workflow.\n\nThere are still many improvements that can be made to the workflow. Moving to OpenAPI v3 could improve endpoint coverage. The Secure team also wrote a [HAR Recorder](https://gitlab.com/gitlab-org/security-products/har-recorder) tool that could help generate HAR files on the fly instead of relying on static documentation. 
For now, due to the high compute cost of fuzzing thousands of operations in GitLab’s API, the workflow is better suited to a scheduled pipeline instead of GitLab’s core pipeline.\n\nFor teams that have already implemented several layers of static and dynamic checks and want to take further steps to increase coverage, we recommend trying a Web API fuzzing exercise as a way to validate assumptions and discover “unknown unknowns” in your code.\n\nWe encourage you to get familiar with API fuzzing and let us know how it works for you. If you face any issues or have any feedback, please file an issue at the [issue tracker on GitLab.com](https://gitlab.com/gitlab-org/gitlab/-/issues/). Use the `~\"Category:API Security\"` label when opening a new issue regarding API fuzzing to ensure it is quickly reviewed by the appropriate team members.\n",[1247,814,1247,1041,9],{"slug":1271,"featured":6,"template":684},"building-gitlab-with-gitlab-api-fuzzing-workflow","content:en-us:blog:building-gitlab-with-gitlab-api-fuzzing-workflow.yml","Building Gitlab With Gitlab Api Fuzzing Workflow","en-us/blog/building-gitlab-with-gitlab-api-fuzzing-workflow.yml","en-us/blog/building-gitlab-with-gitlab-api-fuzzing-workflow",{"_path":1277,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1278,"content":1284,"config":1291,"_id":1293,"_type":13,"title":1294,"_source":15,"_file":1295,"_stem":1296,"_extension":18},"/en-us/blog/can-chatgpt-resolve-gitlab-issues",{"title":1279,"description":1280,"ogTitle":1279,"ogDescription":1280,"noIndex":6,"ogImage":1281,"ogUrl":1282,"ogSiteName":669,"ogType":670,"canonicalUrls":1282,"schema":1283},"Testing ChatGPT: Can it solve a GitLab issue?","We put ChatGPT to the test to see if it could contribute to GitLab. 
Here's what we learned.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670171/Blog/Hero%20Images/akshay-nanavati-Zq6HerrBPEs-unsplash.jpg","https://about.gitlab.com/blog/can-chatgpt-resolve-gitlab-issues","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Testing ChatGPT: Can it solve a GitLab issue?\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Coghlan\"},{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"}],\n        \"datePublished\": \"2022-12-15\",\n      }",{"title":1279,"description":1280,"authors":1285,"heroImage":1281,"date":1287,"body":1288,"category":769,"tags":1289},[1286,1201],"John Coghlan","2022-12-15","\nChatGPT has taken the tech world by storm since its [launch on November 30](https://openai.com/blog/chatgpt/). Media coverage, front page posts on Hacker News, Twitter threads, and videos - everywhere you look, there is another story.\n\nThe [GitLab Slack](/handbook/communication/#slack) was no different. In threads across Slack channels, including those for developer evangelism, UX, the CEO, random news, and every space in between, our team was chatting about this exciting new tool.\n\nAs we got more familiar with the tool, we started to learn about numerous things it can do. Here are a few that we found:\n\n- It can write poetry about GitLab features. \n- It can write blog posts.\n- It can write unit tests.\n- It gives advice on how to use certain features of GitLab.\n- It conducts competitive analysis.\n\nThere’s quite a bit more out there, including [inventing a new language](https://maximumeffort.substack.com/p/i-taught-chatgpt-to-invent-a-language) and [building a virtual machine](https://www.engraved.blog/building-a-virtual-machine-inside/). We can’t recall any technology that has generated more excitement in such a short time.\n\nWe acknowledge there are ethical and licensing concerns around using AI-generated code. 
For the purpose of this blog post, we will focus strictly on the capabilities of ChatGPT.\n\n## Testing ChatGPT\n\nAs members of GitLab’s [Developer Relations team](/handbook/marketing/developer-relations/), where we’re focused on growing our community of contributors and evangelists, our first reaction was to think of how this tool can help our contributors. The responses to questions like “How can I get started contributing to GitLab?” were cool but didn’t move the needle. So then we asked ourselves: Can we use ChatGPT to make a contribution to GitLab?\n\nHaving already been testing the tool, we knew we’d need to look for a very specific type of issue. We started to fine-tune our search. Here are the steps we took to find a potential issue:\n\n- Visited [https://gitlab.com/gitlab-org](https://gitlab.com/gitlab-org) and pulled up all the open issues by clicking on `Issues` in the left side nav.\n- Searched for all issues with the [“good for new contributors” label](https://gitlab.com/gitlab-org/gitlab/-/issues/?sort=updated_desc&state=opened&label_name%5B%5D=good%20for%20new%20contributors&first_page_size=20). This returned 482 issues at the time of writing.\n- Set [`“Assignee = None”`](https://gitlab.com/gitlab-org/gitlab/-/issues/?sort=updated_desc&state=opened&label_name%5B%5D=good%20for%20new%20contributors&assignee_id=None&first_page_size=20) to filter out issues that have already been assigned to someone. This trimmed things down even more.\n\nAfter poking through a few issues, this one caught our attention: an issue that is a request to [de-depulicate identical dropdown functionality from two Vue components into a shared component](https://gitlab.com/gitlab-org/gitlab/-/issues/221293).\n\n### Getting started\n\nWe opened up a ChatGPT thread via [https://chat.openai.com/chat](https://chat.openai.com/chat). 
If you don’t already have an account, you’ll need to register with OpenAI.\n\nThen we started a thread with Assistant (that’s what is returned when you ask ChatGPT what is its name). We asked Assistant to create a shared Vue component called `SharedDropdownFilter` from the duplicate code in `GroupsDropdownFilter` and `ProjectsDropdownFilter` and passed the scripts of both components.\n\nChatGPT replied with:\n\n>You can create a new `SharedDropdownFilter` component that contains the duplicate code and use it in both `GroupsDropdownFilter` and `ProjectsDropdownFilter`:\n\nand provided a template to describe the use. The template provided wasn’t quite what we were looking for.\n\n```vue\n\u003Ctemplate>\n  \u003CGlDropdown\n    :aria-label=\"label\"\n    :class=\"{ disabled: loading }\"\n    :disabled=\"loading\"\n    :selected=\"selectedProjectsLabel\"\n    :variant=\"multiSelect ? 'inline' : 'single-line'\"\n  >\n    \u003Ctemplate v-slot:menu>\n      \u003Cdiv class=\"py-4\">\n        \u003CGlSearchBoxByType\n          autofocus\n          class=\"search-box\"\n          :placeholder=\"searchPlaceholder\"\n          :type=\"searchType\"\n          v-model=\"searchTerm\"\n        />\n\n        \u003CGlDropdownHeader\n          v-if=\"selectedProjects.length && multiSelect\"\n          :class=\"{ 'd-none': !searchTerm }\"\n          class=\"js-dropdown-header\"\n        >\n          \u003Cspan>\n            {{ selectedProjectsLabel }}\n          \u003C/span>\n\n          \u003Cbutton\n```\n\nSo we tried again, this time being more specific and asking ChatGPT to “Create a shared Vue component that combines the two components `ProjectsDropdownFilter` and  `GroupsDropdownFilter`”. 
We passed in the full component files, including the templates.\n\nChatGPT responded with instructions, a sample Vue component, and an explanation of the functionality:\n\n> Here is a sample implementation of a shared Vue component that combines the two components `ProjectsDropdownFilter` and `GroupsDropdownFilter`:\n\n```vue\n\u003Ctemplate>\n  \u003Cdiv>\n    \u003CProjectsDropdownFilter\n      :groupId=\"groupId\"\n      :multiSelect=\"multiSelect\"\n      :label=\"label\"\n      :queryParams=\"queryParams\"\n      :defaultProjects=\"defaultProjects\"\n      @selected=\"onProjectsSelected\"\n    />\n    \u003CGroupsDropdownFilter\n      :multiSelect=\"multiSelect\"\n      :label=\"label\"\n      :queryParams=\"queryParams\"\n      :defaultGroups=\"defaultGroups\"\n      @selected=\"onGroupsSelected\"\n    />\n  \u003C/div>\n\u003C/template>\n\n\u003Cscript>\nimport { ProjectsDropdownFilter } from './ProjectsDropdownFilter.vue';\nimport { GroupsDropdownFilter } from './GroupsDropdownFilter.vue';\n\nexport default {\n  name: 'SharedDropdownFilter',\n  components: {\n    ProjectsDropdownFilter,\n    GroupsDropdownFilter,\n  },\n  props: {\n    groupId: {\n      type: Number,\n      required: true,\n    },\n    multiSelect: {\n      type: Boolean,\n      required: false,\n      default: false,\n    },\n    label: {\n      type: String,\n      required: false,\n      default: 'Dropdown filter',\n    },\n    queryParams: {\n      type: Object,\n      required: false,\n      default: () => ({}),\n    },\n    defaultProjects: {\n      type: Array,\n      required: false,\n      default: () => [],\n    },\n    defaultGroups: {\n      type: Array,\n      required: false,\n      default: () => [],\n    },\n  },\n  data() {\n    return {\n      selectedProjects: this.defaultProjects || [],\n      selectedGroups: this.defaultGroups || [],\n    };\n  },\n  methods: {\n    onProjectsSelected(selectedProjects) {\n      this.selectedProjects = selectedProjects;\n      
this.$emit('selected', this.selectedProjects, this.selectedGroups);\n    },\n    onGroupsSelected(selectedGroups) {\n      this.selectedGroups = selectedGroups;\n      this.$emit('selected', this.selectedProjects, this.selectedGroups);\n    },\n  },\n};\n\u003C/script>\n```\n\n> In this implementation, the `SharedDropdownFilter` component is rendered as a container for the `ProjectsDropdownFilter` and` GroupsDropdownFilter` components. It passes down the `groupId`, `multiSelect`, `label`, `queryParams`, and `defaultProjects` props to the `ProjectsDropdownFilter` component, and the `multiSelect`, `label`, `queryParams`, and `defaultGroups` props to the `GroupsDropdownFilter` component. It also listens to the selected event emitted by the two components and updates its own `selectedProjects` and `selectedGroups` data accordingly. The `SharedDropdownFilter` component emits its own `selected` event with the updated `selectedProjects` and `selectedGroups` data when it receives the selected event from the `ProjectsDropdownFilter` and `GroupsDropdownFilter` components. This allows the parent component of the `SharedDropdownFilter` to be notified whenever the selected projects and groups are updated, so that it can take appropriate action, such as updating the data it displays.\n\nWith ChatGPT’s shared Vue component, we could start an MR for the issue, but would need to manually review the code and run tests in order to make sure the deduplication was done correctly.\n\n### Adjusting the filters\n\nIn hopes of finding an issue that ChatGPT could fully solve, this time we filtered using two labels (`good for new contributors` and `quick win`), Assignee = None, and added the keyword “refactor” to look for issues that required less net new code. 
At the time of writing, this [query](https://gitlab.com/gitlab-org/gitlab/-/issues/?search=refactor&sort=updated_asc&state=opened&label_name%5B%5D=quick%20win&label_name%5B%5D=good%20for%20new%20contributors&assignee_id=None&first_page_size=20) returned one issue: [a refactor for widget state tests](https://gitlab.com/gitlab-org/gitlab/-/issues/228611).\n\nAfter reviewing the issue, we opened the `/spec/frontend/vue_merge_request_widget/stores/get_state_key_spec.js` file mentioned in the issue description by navigating to that file on GitLab.com SaaS and opening the dropdown on the blue button at the top of the file frame, selecting “Open in Web IDE”, and then clicking the button to open the file in GitLab’s built-in Web IDE.\n\nWe copied the code from the file and asked ChatGPT to refactor these tests into a table of `it.each`. It was interesting to watch the output as ChatGPT provided an example of how we could refactor the tests using `it.each`:\n\n```javascript\nimport getStateKey from '~/vue_merge_request_widget/stores/get_state_key';\n\ndescribe('getStateKey', () => {\n  const context = {\n    mergeStatus: 'checked',\n    autoMergeEnabled: false,\n    canMerge: true,\n    onlyAllowMergeIfPipelineSucceeds: false,\n    isPipelineFailed: false,\n    hasMergeableDiscussionsState: false,\n    isPipelineBlocked: false,\n    canBeMerged: false,\n    projectArchived: false,\n    branchMissing: false,\n    commitsCount: 2,\n    hasConflicts: false,\n    draft: false,\n    detailedMergeStatus: null,\n  };\n\n  it.each([\n    [{}, null],\n    [{detailedMergeStatus: 'MERGEABLE'}, 'readyToMerge'],\n    [{detailedMergeStatus: 'DISCUSSIONS_NOT_RESOLVED'}, 'unresolvedDiscussions'],\n    [{detailedMergeStatus: 'DRAFT_STATUS'}, 'draft'],\n    [{detailedMergeStatus: 'CI_MUST_PASS'}, 'pipelineFailed'],\n    [{shouldBeRebased: true}, 'rebase'],\n    [{hasConflicts: true}, 'conflicts'],\n    [{detailedMergeStatus: 'CHECKING'}, 'checking'],\n    [{commitsCount: 0}, 
'nothingToMerge'],\n    [{commitsCount: 1, branchMissing: true}, 'missingBranch'],\n    [{projectArchived: true}, 'archived'],\n  ])('should return \"%s\" when called with %p', (expected, input) => {\n    const bound = getStateKey.bind(Object.assign({}, context, input));\n    expect(bound()).toEqual(expected);\n  });\n\n  it('returns rebased state key', () => {\n    const context = {\n      mergeStatus: 'checked',\n      autoMergeEnabled: false,\n      canMerge: true,\n      onlyAllowMergeIfPipelineSucceeds: true,\n      isPipelineFailed: true,\n      hasMergeableDiscussionsState: false,\n      isPipelineBlocked: false,\n      canBeMerged: false,\n      shouldBeRebased: true,\n      projectArchived: false,\n      branchMissing: false,\n      commitsCount: 2,\n      hasConflicts: false,\n      draft: false,\n    };\n    const bound = getStateKey.bind(context);\n\n    expect(bound()).toEqual('rebase');\n  });\n});\n\n\n```\n\nAlong with an explanation of `it.each`:\n\n> Note that `it.each` is a utility provided by Jest that allows you to write the same test with different inputs and expected results. This can make your tests more concise and easier to read.\n\nOur next steps were to:\n- Paste the output from ChatGPT into the Web IDE.\n- Click the blue “Commit” button to commit the code.\n- Name the branch.\n- Open a merge request.\n\nAll of this can be done inside [GitLab’s Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/) without leaving the browser.\n\n### Examining the results\n\nOnce we opened the merge request, a CI/CD pipeline was created to test the change. Unfortunately, the pipeline jobs for the Jest tests failed because the tests were returning `null` for each case instead of the expected results. We went back to ChatGPT to see if it could help us understand why the test was returning null for all cases. 
ChatGPT suggested a few different implementations to check to make sure things are defined currently:\n\n> There are a few reasons why the tests in the `it.each` function might be returning null for every case. Here are a few possibilities:\n>\n> If the `getStateKey` function is not implemented correctly, it might always return null, regardless of the input.\n> If the `getStateKey` function relies on certain variables that are not defined in the test cases, it might return null because those variables are undefined.\n> If the context object passed to the `getStateKey` function is not being merged correctly with the input object in the test cases, the function might not have access to all of the necessary information, and it might return null.\n\nWhile ChatGPT couldn’t solve the issue with the returned value directly, it provided a lot of possibilities to look into and offered suggestions of what variables or functions to debug. The suggestions prompted us to add code changes that logged the variable values onto the [JavaScript debug console](https://www.w3schools.com/js/js_debugging.asp), find the implementation of the `getStateKey` function, and figure out why the return value was null. Whenever we came across something unfamiliar in the code, like syntax in the `it.each` that wasn’t familiar, we asked ChatGPT for clarification or a helpful example. Many times throughout this experiment, working with ChatGPT felt like “rubber duck debugging,” but with an AI with which you have to be very specific about your ask.\n\n## What we learned from ChatGPT\n\nIn the end, we weren’t able to figure out why our tests were returning null, so we asked the front-end team if someone could review the code. Senior Frontend Engineer [Angelo Gulina](https://gitlab.com/agulina) reviewed the MR. He found that the solution was actually quite trivial: The order of parameters was inverted, resulting in a comparison that led to null! 
In his assessment, ChatGPT wasn’t able to provide a working solution, but would be able to provide solutions and ideas to an engineer with some experience with the codebase. It delivered a clean, organized solution and answered the task of combining the tests into an it.each table. It could not, however, catch the actual error (the inversion of parameters) or correctly guess why the tests were returning null.\n\nLet's circle back to the question that started this experiment: Can we use ChatGPT to contribute to GitLab? At this time, we’d say, \"yes,\" and you will need some understanding of the code to complete your solution. Since ChatGPT is a language model trained by OpenAI, it can only answer questions and provide information addressed in the model, which means answers requiring contextual specificity may fall short of what is needed to resolve an issue. However, it’s a tool that can help you if you’re stuck, need more clarification on a code snippet, or are trying to refactor some code. It was fascinating for us to experiment with ChatGPT and we were excited to see what it was capable of. The code provided, however, lacked some of the valuable insight and industry experience that a community of contributors can provide.\n\nAt GitLab, our [community and our open source stewardship](https://about.gitlab.com/company/strategy/#dual-flywheels) are part of our company strategy. Thousands of open source contributors worldwide have helped make GitLab what it is today. 
We see potential for ChatGPT and similar AI tools, not as a replacement for our community, but a way to make our community more efficient and enable more people to contribute GitLab.\n\n\n\n\n",[9,266,1290,773],"contributors",{"slug":1292,"featured":6,"template":684},"can-chatgpt-resolve-gitlab-issues","content:en-us:blog:can-chatgpt-resolve-gitlab-issues.yml","Can Chatgpt Resolve Gitlab Issues","en-us/blog/can-chatgpt-resolve-gitlab-issues.yml","en-us/blog/can-chatgpt-resolve-gitlab-issues",{"_path":1298,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1299,"content":1305,"config":1311,"_id":1313,"_type":13,"title":1314,"_source":15,"_file":1315,"_stem":1316,"_extension":18},"/en-us/blog/cascading-merge-requests-with-gitlab-flow",{"title":1300,"description":1301,"ogTitle":1300,"ogDescription":1301,"noIndex":6,"ogImage":1302,"ogUrl":1303,"ogSiteName":669,"ogType":670,"canonicalUrls":1303,"schema":1304},"How to adopt a cascading merge request strategy with GitLab Flow","This tutorial explains how to consolidate updates in a single branch and propagate them to other branches using ucascade bot.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679851/Blog/Hero%20Images/cascade.jpg","https://about.gitlab.com/blog/cascading-merge-requests-with-gitlab-flow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to adopt a cascading merge request strategy with GitLab Flow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Madou Coulibaly\"}],\n        \"datePublished\": \"2023-08-31\",\n      }",{"title":1300,"description":1301,"authors":1306,"heroImage":1302,"date":1308,"body":1309,"category":769,"tags":1310},[1307],"Madou Coulibaly","2023-08-31","\nGit offers a range of branching strategies and workflows that can be utilized to enhance organization, efficiency, and code quality. 
Employing a well-defined workflow helps foster a successful and streamlined development process. By implementing the [release branches using GitLab Flow](https://docs.gitlab.com/ee/topics/gitlab_flow.html#release-branches-with-gitlab-flow), you can effectively handle multiple product releases. However, when it comes to fixing bugs, it often becomes necessary to apply the fix across various stable branches such as `main`,  `stable-1.0`, `stable-1.1`, and `stable-2.0`. The process of applying the fix to multiple locations can be time-consuming, as it involves the manual creation of multiple merge requests.\n\nBy consolidating updates in a single branch and propagating them to other branches, the cascading merge approach establishes a central source of truth, reducing confusion and maintaining consistency. In this blogpost, we will guide you through setting up this approach for your GitLab project using [ucascade bot](https://github.com/unblu/ucascade).\n\n## Getting started\nTo get started, you'll need the following prerequisites:\n\n### Environment\n  - a GitLab project that implemented [Release Branches Strategy](https://docs.gitlab.com/ee/topics/gitlab_flow.html#release-branches-with-gitlab-flow)\n  - a Kubernetes cluster\n\n### CLI\n  - git\n  - kubectl\n  - docker\n\n### Project access tokens\nFollow the instructions on the [Project access tokens page](https://docs.gitlab.com/ee/user/project/settings/project_access_tokens.html#create-a-project-access-token) to create two project access tokens –`ucascade` and `ucascade-approver` – with the API scope in your GitLab project.\n\n![project access tokens](https://about.gitlab.com/images/blogimages/2023-06-22-cascading-merge-requests-with-gitlab-flow/pat.png){: .shadow.medium}\n\n## Deploy ucascade bot on Kubernetes\nFirst, create the `bots-fleet` namespace on Kubernetes.\n\n```\nkubectl create namespace bots-fleet\n```\n\nThen, create the `cascading-merge-secret` secret that contains the GitLab project access tokens 
created previously.\n\n```\nkubectl create secret generic cascading-merge-secret -n bots-fleet \\\n--from-literal=gitlab-host=https://gitlab.com \\\n--from-literal=gitlab-api-token=\u003CUCASCADE_PROJECT_ACCESS_TOKEN> \\\n--from-literal=gitlab-api-token-approver=\u003CAPPROVER_BOT_PROJECT_ACCESS_TOKEN>\n```\n\nOnce done, (fork and) clone the [Cascading Merge repository](https://gitlab.com/madou-stories/bots-fleet/cascading-merge) that contains the Kubernetes manifests for the bot and replace the `host` field in the `kube/ingress.yaml` file according to your Kubernetes domain.\n\n```yaml\napiVersion: networking.k8s.io/v1\nkind: Ingress\nmetadata:\n  annotations:\n    kubernetes.io/ingress.class: nginx\n  name: ucascade\n  namespace: bots-fleet\nspec:\n  rules:\n  - host: ucascade.\u003CKUBERNETES_BASED_DOMAIN>\n    http:\n      paths:\n      - backend:\n          service:\n            name: ucascade\n            port:\n              number: 80\n        path: /\n        pathType: Prefix\n\n``` \n\nNow, you are ready to deploy the `ucascade` bot.\n\n```\nkubectl apply -f kube/\n```\n\nYou should see the following resources deployed on Kubernetes:\n\n![ucascade-k8s](https://about.gitlab.com/images/blogimages/2023-06-22-cascading-merge-requests-with-gitlab-flow/ucascade-k8s.png){: .shadow.medium}\n\n**Note:** The `ucascade` image is based on the [ucascade-bot](https://github.com/unblu/ucascade-bot) and is located in the [Container Registry](https://gitlab.com/madou-stories/bots-fleet/cascading-merge/container_registry) of the Cascading Merge repository.\n{: .note}\n\n## Create a GitLab webhook\nFollow the instructions on [the Webhooks page](https://docs.gitlab.com/ee/user/project/integrations/webhooks.html#configure-a-webhook-in-gitlab) to create a webhook with the following variables: \n  - **URL**: `\u003CUCASCADE_INGRESS_URL>/ucascade/merge-request`\n  - **Trigger**: `Merge request 
events`\n\n![webhook](https://about.gitlab.com/images/blogimages/2023-06-22-cascading-merge-requests-with-gitlab-flow/webhook.png){: .shadow.medium}\n\n## Configure your Cascading Merge rule\nCreate a file called ucascade.json at the root level of your GitLab project as defined in [configuration file](https://unblu.github.io/ucascade/tech-docs/11_ucascade-configuration-file.html#_configuration_file) and matched with your release definition.\n\n![configuration](https://about.gitlab.com/images/blogimages/2023-06-22-cascading-merge-requests-with-gitlab-flow/configuration.png){: .shadow.medium}\n\n## Testing the Cascading Merge\nNow create a branch and an MR from your default branch, make a change, and merge it. The ucascade bot will propagate the change to all other release branches by automatically creating cascading MRs. The following video demonstrates the process:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Ej7xf8axWMs\" title=\"Cascading Merge Approach\"\n  frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n# Additional resources\nFind more information about the `ucascade` bot in the [ucascade documentation](https://unblu.github.io/ucascade/index.html).\n\n_Special thank you to Jérémie Bresson for authoring and open sourcing this amazing bot!_\n",[108,793,726,9],{"slug":1312,"featured":90,"template":684},"cascading-merge-requests-with-gitlab-flow","content:en-us:blog:cascading-merge-requests-with-gitlab-flow.yml","Cascading Merge Requests With Gitlab 
Flow","en-us/blog/cascading-merge-requests-with-gitlab-flow.yml","en-us/blog/cascading-merge-requests-with-gitlab-flow",{"_path":1318,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1319,"content":1325,"config":1331,"_id":1333,"_type":13,"title":1334,"_source":15,"_file":1335,"_stem":1336,"_extension":18},"/en-us/blog/chat-about-your-merge-request-with-gitlab-duo",{"title":1320,"description":1321,"ogTitle":1320,"ogDescription":1321,"noIndex":6,"ogImage":1322,"ogUrl":1323,"ogSiteName":669,"ogType":670,"canonicalUrls":1323,"schema":1324},"Chat about your merge request with GitLab Duo","Learn how to use AI-powered Chat to quickly understand complex merge requests by asking about implementation choices, potential risks, and architectural decisions.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675536/Blog/Hero%20Images/blog-image-template-1800x945__2_.png","https://about.gitlab.com/blog/chat-about-your-merge-request-with-gitlab-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Chat about your merge request with GitLab Duo\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Torsten Linz\"}],\n        \"datePublished\": \"2024-11-22\",\n      }",{"title":1320,"description":1321,"authors":1326,"heroImage":1322,"date":1328,"body":1329,"category":702,"tags":1330},[1327],"Torsten Linz","2024-11-22","Managing a merge request (MR) is an integral part of collaborative development, involving navigating through code changes, discussions, and dependencies to ensure high-quality outcomes. Whether you’re reviewing someone else’s code or trying to make your own changes clearer, the new [GitLab Duo Chat](https://about.gitlab.com/gitlab-duo/) capability, available in GitLab Duo Enterprise, can help simplify your workflow. 
Now, you can have a conversation with GitLab Duo Chat about an MR, directly inside GitLab.\n\n## What GitLab Duo Chat brings to an MR workflow\n\nImagine jumping into a merge request titled \"Add logging to order processing.\" Your goal is to onboard yourself to the MR as quickly as possible and to review it. You can use GitLab Duo Chat to onboard yourself faster and understand critical questions to accelerate your review:\n\n* \"Do the logs cover all failure scenarios, or are there any gaps where an issue might not be traceable?\"  \n* “Are there any potential privacy concerns with the logged data?\"  \n* \"Why was logging added at these specific points in the order processing workflow, and how does it help with debugging or monitoring?\"\n\n![MR context example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675670/Blog/Content%20Images/MR_Context_example.png)\n\nThese are the kinds of questions that GitLab Duo Chat is ready to answer – questions that let you quickly understand the intentions behind the changes and uncover any potential risks before diving into the details. Instead of spending a lot of time trying to follow code paths or waiting on the author to reply to your questions, you can start getting answers right away, saving valuable time.\n\n## In-depth conversations about MRs\n\nThe magic of this new chat capability isn’t just in summarizing code – it’s in its ability to support in-depth conversations about the MR at hand. Let's assume the logging MR also includes notifications and refactoring. You can ask specific, insightful questions, such as:\n\n* “What are the potential network failure points introduced by refactoring the payment service into a microservice?”  \n* \"Were there any trade-offs made in terms of consistency or accuracy for better performance?\"  \n* \"How are failures in sending notifications handled? 
Are retries implemented?\"\n\nInstead of simply telling you what changes have been made, GitLab Duo Chat helps you understand *why* those changes were made, what risks are involved, and how to mitigate them. It lets you dig deep and explore the context behind every line of code, every architectural decision, and every change in behavior within the specific MR you are working on.\n\nAnd it doesn't end with that one answer. You can engage in a follow-up conversation to dig deeper or to explore. \n\n## An evolving conversation tool\n\nWe’re really excited about how GitLab Duo Chat is evolving to become a true conversational partner for MR authors and reviewers alike. GitLab Duo Chat is [aware of the MR description, discussions, the code diff, and metadata of a single MR](https://docs.gitlab.com/ee/user/gitlab_duo_chat/index.html#the-context-chat-is-aware-of). It’s like having an assistant who is well-versed in your MR and ready to explain any part of it – or even rewrite parts, if that’s what you need.\n\nWith GitLab Duo Chat, onboarding yourself to a complex MR or understanding a change in-depth is faster and more intuitive than ever before.\n\n## We need your feedback\n\nWe’re eager to hear how GitLab Duo Chat works for you. All feedback helps us refine this feature and make it even more useful. Please share your experiences by commenting on our [issue tracker](https://gitlab.com/gitlab-org/gitlab/-/issues/464587). Please include the questions you asked, the response you got, and whether it helped you move forward. Together, we can make GitLab Duo Chat an indispensable tool for every merge request!\n\nFor a deeper dive into how to use GitLab Duo Chat, check out our [documentation](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples#ask-about-a-specific-merge-request) or watch our introductory video below. 
Start your first conversation today and let us know what you think!\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/4muvSFuWWL4?si=7W4mHWw2iUOzoTUz\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->  \n\n> Sample this new capability with [a free 60-day trial of GitLab Ultimate and GitLab Duo Enterprise](https://gitlab.com/-/trials/new).\n\n## Learn more about GitLab Duo Chat\n\n- [GitLab Duo Chat: Get to know productivity-boosting AI enhancements](https://about.gitlab.com/blog/gitlab-duo-chat-get-to-know-productivity-boosting-ai-enhancements/)\n- [GitLab Duo Chat, your at-the-ready AI assistant, is now generally available](https://about.gitlab.com/blog/gitlab-duo-chat-now-generally-available/)\n- [GitLab Duo Chat 101: Get more done on GitLab with our AI assistant](https://about.gitlab.com/blog/gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant/)",[704,478,680,9,678,940],{"slug":1332,"featured":6,"template":684},"chat-about-your-merge-request-with-gitlab-duo","content:en-us:blog:chat-about-your-merge-request-with-gitlab-duo.yml","Chat About Your Merge Request With Gitlab Duo","en-us/blog/chat-about-your-merge-request-with-gitlab-duo.yml","en-us/blog/chat-about-your-merge-request-with-gitlab-duo",{"_path":1338,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1339,"content":1345,"config":1352,"_id":1354,"_type":13,"title":1355,"_source":15,"_file":1356,"_stem":1357,"_extension":18},"/en-us/blog/ci-cd-automation-maximize-deploy-freeze-impact-across-gitlab-groups",{"title":1340,"description":1341,"ogTitle":1340,"ogDescription":1341,"noIndex":6,"ogImage":1342,"ogUrl":1343,"ogSiteName":669,"ogType":670,"canonicalUrls":1343,"schema":1344},"CI/CD automation: Maximize 'deploy freeze' impact across GitLab groups","Learn the benefits of managing deploy freezes at the group level and follow step-by-step guidance on 
implementation.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667913/Blog/Hero%20Images/clocks.jpg","https://about.gitlab.com/blog/ci-cd-automation-maximize-deploy-freeze-impact-across-gitlab-groups","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"CI/CD automation: Maximize 'deploy freeze' impact across GitLab groups\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christian Nnachi\"}],\n        \"datePublished\": \"2024-02-08\",\n      }",{"title":1340,"description":1341,"authors":1346,"heroImage":1342,"date":1348,"body":1349,"category":1103,"tags":1350},[1347],"Christian Nnachi","2024-02-08","In the dynamic landscape of continuous integration and continuous deployment ([CI/CD](https://about.gitlab.com/topics/ci-cd/)), maintaining system stability during critical periods such as holidays, product launches, or maintenance windows can be challenging. Introducing new code during peak activity times raises the risk of issues affecting user experience. To strike a balance between innovation and stability, organizations may require a group-level deploy freeze — a strategic pause in deploying new code changes across groups to certain branches or environments.\n\n**Given that GitLab can be used for both continuous integration and continuous deployment efforts, GitLab's [Deploy Freeze](https://docs.gitlab.com/ee/user/project/releases/index.html#prevent-unintentional-releases-by-setting-a-deploy-freeze)** capability aims to address this exact need.\n\nScoped at the project level, deploy freezes can prevent unintended production releases during a period of time you specify by setting a deploy freeze period. Deploy freezes help reduce uncertainty and risk when continuously deploying changes for a single project.\n\nMost teams, however, do not have a single project that represents all of their production environment. 
Given that deploy freezes are set at the project level, managing and enforcing deploy freezes across many projects can be an arduous and error-prone task, leading to unpredictability and disruption. The need for an automated cross-project solution to ensure stability is obvious.\n\n## What is a group deploy freeze?\n\nThe [Group Deploy Freeze project](https://gitlab.com/cnnachi-demo/freezeperiods) takes the concept of individual project deploy freezes to the next level. It enables you to enforce the same deployment restrictions across one or many projects within a GitLab group from the GitLab UI.\n\nWhether you're managing a large suite of microservices or a collection of related projects, a group-managed deploy freeze solution provides a centralized mechanism to maintain stability.\n\n### Benefits of group deploy freeze\n\n**1. Centralized control**\n\nAdherence to your deployment strategy by allowing you to manage deploy freezes for multiple projects from a single location. This simplifies the process and reduces human errors.\n\n**2. Group-wide synchronization**\n\nEnforcing deploy freezes across an entire GitLab group ensures that all projects receive the same schedule at the same time. This maintains uniformity across your projects.\n\n**3. Streamlined collaboration**\n\nVisibility of changes to your development and operations teams can align their efforts effectively.\n\n## How to use GitLab Group Deploy Freeze\n\nWith [Group Deploy Freeze](https://gitlab.com/demos/solutions/group-deploy-freeze), GitLab CI becomes a general-purpose automation tool for ops-related changes, like setting deploy freezes on many projects.\n\nIn the following steps, you will successfully set up the Group Deploy Freeze feature. 
Remember to test thoroughly and consider any specific nuances of your team's deployment process.\n\n### Prerequisites\n\n- **GitLab account -** You need an active GitLab account with the necessary permissions to access and manage the projects within the target GitLab group.\n- **GitLab Personal Access Token (PAT) -** Generate a GitLab PAT with the permissions to read and write to the projects within the target GitLab group via the GitLab API. This token will be used by the Python script to authenticate API requests.\n- **Python environment -** Ensure that you have a Python environment set up on your machine or the environment where you plan to run the Python script. The script is written in Python, so you need a compatible Python interpreter.\n- **Python libraries -** Install the required Python libraries used by the script. These include requests, envparse, and python-gitlab. You can use pip to install these libraries.\n- **GitLab Group details -** Identify the GitLab group for which you want to manage deploy freezes. You'll need the group's slug (path) to specify which group the script will operate on.\n- **Time zone selection -** Decide on the time zone in which you want to schedule the deploy freezes. The time zone selection ensures that freeze periods are accurately timed based on your organization's preferred time zone.\n\n### Getting started\n\nTo use GitLab CI to author and automate the process of batch updating deploy freezes for all projects, fork the [Deploy Freeze project](https://gitlab.com/cnnachi-demo/freezeperiods), which will then create a CI/CD pipeline that iterates through your projects and applies the desired deploy freeze schedule. You can customize this project to fit your organization's workflow.\n\nThe provided project contains a `.gitlab-ci.yml` file and a Python script designed to automate the management of deploy freezes for multiple projects within a GitLab group. 
It uses the GitLab API and various Python libraries to create and delete deploy freeze periods, and is designed to be run as part of a CI/CD pipeline to ensure code stability during deployments within a GitLab group.\n\n### Commit and push changes\n\nCommit and push the changes to your repository to trigger the CI/CD pipeline.\n\n### Pipeline execution\n\n- In the [Group Deploy Freeze project](https://gitlab.com/demos/solutions/group-deploy-freeze) on the GitLab UI, go to Pipelines.\n- Select the \"Run Pipeline\" option on the top right corner of the page.\n- You should see the variables defined in the `.gitlab-ci.yml` file like:\n![Set variables](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676891/Blog/Content%20Images/Screenshot-2023-09-06-at-12-08-48-PM.png)\n- Define the values of the variables `FREEZE_START`, `FREEZE_END`, `CRON_TIME_ZONE` and `GROUP_SLUG`, then run the pipeline. You can define multiple freeze periods by skipping to the next line within the `FREEZE_START` and `FREEZE_END` variables.\n- Once the pipeline is successful, the freeze period should be populated in all projects within the defined groups.\n\n## Monitor and verify\n\n- Verify that these deploy freeze periods are being created and managed as intended.\n- Check your GitLab group's projects for deploy freezes during the specified periods.\n![Monitor and verify](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676891/Blog/Content%20Images/Screenshot-2023-09-12-at-2-08-24-PM.png)\n\n## Customization and iteration\n\n- If needed, iterate on the configuration, script, or pipeline based on your organization's requirements.\n- Make adjustments to freeze periods, time zones, project details, or other settings as needed.\n\nYou can optimize the group deploy feature by following the [Deploy freeze](https://docs.gitlab.com/ee/user/project/releases/index.html#prevent-unintentional-releases-by-setting-a-deploy-freeze) documentation, which outlines the steps to set up a 
`.freezedeployment` job that can conditionally block deployment jobs upon the presence of the `CI_DEPLOY_FREEZE` variable. By including the `.freezedeployment` template and extending it in your project's `.gitlab-ci.yml file`, you can prevent deployments during freeze periods, ensuring code stability. Manual deployment intervention is possible once the freeze period ends, allowing for controlled and predictable deployment processes across the group's projects.\n\n## Results\n\nBy extending deploy freezes to the group level, teams can easily streamline and enhance their deployment strategies to ensure consistency in preventing unintended production release during a period of time specified by you, whether it is a large company event or holiday. With the power of GitLab's API, CI/CD pipelines, and the flexibility of Python scripting, Group Deploy Freeze is your ally in maintaining code stability and predictability across diverse projects.\n\n> Get started with group deploy freezes today by visiting the [Group Deploy Freeze project](https://gitlab.com/cnnachi-demo/freezeperiods).",[108,678,1351,9],"production",{"slug":1353,"featured":6,"template":684},"ci-cd-automation-maximize-deploy-freeze-impact-across-gitlab-groups","content:en-us:blog:ci-cd-automation-maximize-deploy-freeze-impact-across-gitlab-groups.yml","Ci Cd Automation Maximize Deploy Freeze Impact Across Gitlab Groups","en-us/blog/ci-cd-automation-maximize-deploy-freeze-impact-across-gitlab-groups.yml","en-us/blog/ci-cd-automation-maximize-deploy-freeze-impact-across-gitlab-groups",{"_path":1359,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1360,"content":1366,"config":1373,"_id":1375,"_type":13,"title":1376,"_source":15,"_file":1377,"_stem":1378,"_extension":18},"/en-us/blog/ci-deployment-and-environments",{"title":1361,"description":1362,"ogTitle":1361,"ogDescription":1362,"noIndex":6,"ogImage":1363,"ogUrl":1364,"ogSiteName":669,"ogType":670,"canonicalUrls":1364,"schema":1365},"How to use GitLab CI 
to deploy to multiple environments","We walk you through different scenarios to demonstrate the versatility and power of GitLab CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662033/Blog/Hero%20Images/intro.jpg","https://about.gitlab.com/blog/ci-deployment-and-environments","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab CI to deploy to multiple environments\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ivan Nemytchenko\"},{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2021-02-05\",\n      }",{"title":1361,"description":1362,"authors":1367,"heroImage":1363,"date":1369,"body":1370,"category":769,"tags":1371,"updatedDate":1372},[1368,699],"Ivan Nemytchenko","2021-02-05","This post is a success story of one imaginary news portal, and you're the happy\nowner, the editor, and the only developer. Luckily, you already host your project\ncode on GitLab.com and know that you can\n[run tests with GitLab CI/CD](https://docs.gitlab.com/ee/ci/testing/).\nNow you’re curious if it can be [used for deployment](/blog/how-to-keep-up-with-ci-cd-best-practices/), and how far can you go with it.\n\nTo keep our story technology stack-agnostic, let's assume that the app is just a\nset of HTML files. No server-side code, no fancy JS assets compilation.\n\nDestination platform is also simplistic – we will use [Amazon S3](https://aws.amazon.com/s3/).\n\nThe goal of the article is not to give you a bunch of copy-pasteable snippets.\nThe goal is to show the principles and features of [GitLab CI](/solutions/continuous-integration/) so that you can easily apply them to your technology stack.\n{: .alert .alert-warning}\n\nLet’s start from the beginning. 
There's no continuous integration (CI) in our story yet.\n\n## At the starting line\n\n**Deployment**: In your case, it means that a bunch of HTML files should appear on your\nS3 bucket (which is already configured for\n[static website hosting](http://docs.aws.amazon.com/AmazonS3/latest/dev/HowDoIWebsiteConfiguration.html?shortFooter=true)).\n\nThere are a million ways to do it. We’ll use the\n[awscli](http://docs.aws.amazon.com/cli/latest/reference/s3/cp.html#examples) library,\nprovided by Amazon.\n\nThe full command looks like this:\n\n```shell\naws s3 cp ./ s3://yourbucket/ --recursive --exclude \"*\" --include \"*.html\"\n```\n\n![Manual deployment](https://about.gitlab.com/images/blogimages/ci-deployment-and-environments/13.jpg){: .center}\nPushing code to repository and deploying are separate processes.\n{: .note .text-center}\n\nImportant detail: The command\n[expects you](http://docs.aws.amazon.com/cli/latest/userguide/cli-chap-getting-started.html#config-settings-and-precedence)\nto provide `AWS_ACCESS_KEY_ID` and  `AWS_SECRET_ACCESS_KEY` environment\nvariables. Also you might need to specify `AWS_DEFAULT_REGION`.\n{: .alert .alert-info}\n\nLet’s try to automate it using [GitLab CI](/solutions/continuous-integration/).\n\n## The first automated deployment\n\nWith GitLab, there's no difference on what commands to run.\nYou can set up GitLab CI in a way that tailors to your specific needs, as if it was your local terminal on your computer. 
As long as you execute commands there, you can tell CI to do the same for you in GitLab.\nPut your script to `.gitlab-ci.yml` and push your code – that’s it: CI triggers\na _job_ and your commands are executed.\n\nNow, let's add some context to our story: Our website is small, there is 20-30 daily\nvisitors and the code repository has only one default branch: `main`.\n\nLet's start by specifying a _job_ with the command from above in the `.gitlab-ci.yml` file:\n\n```yaml\ndeploy:\n  script: aws s3 cp ./ s3://yourbucket/ --recursive --exclude \"*\" --include \"*.html\"\n```\n\nNo luck:\n![Failed command](https://about.gitlab.com/images/blogimages/ci-deployment-and-environments/fail1.png){: .shadow}\n\nIt is our _job_ to ensure that there is an `aws` executable.\nTo install `awscli` we need `pip`, which is a tool for Python packages installation.\nLet's specify Docker image with preinstalled Python, which should contain `pip` as well:\n\n```yaml\ndeploy:\n  image: python:latest\n  script:\n  - pip install awscli\n  - aws s3 cp ./ s3://yourbucket/ --recursive --exclude \"*\" --include \"*.html\"\n```\n\n![Automated deployment](https://about.gitlab.com/images/blogimages/ci-deployment-and-environments/14.jpg){: .center}\nYou push your code to GitLab, and it is automatically deployed by CI.\n  {: .note .text-center}\n\nThe installation of `awscli` extends the job execution time, but that is not a big\ndeal for now. 
If you need to speed up the process, you can always [look for\na Docker image](https://hub.docker.com/explore/) with preinstalled `awscli`,\nor create an image by yourself.\n{: .alert .alert-warning}\n\nAlso, let’s not forget about these environment variables, which you've just grabbed\nfrom [AWS Console](https://console.aws.amazon.com/):\n\n```yaml\nvariables:\n  AWS_ACCESS_KEY_ID: \"AKIAIOSFODNN7EXAMPLE\"\n  AWS_SECRET_ACCESS_KEY: \"wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY\"\ndeploy:\n  image: python:latest\n  script:\n  - pip install awscli\n  - aws s3 cp ./ s3://yourbucket/ --recursive --exclude \"*\" --include \"*.html\"\n```\nIt should work, but keeping secret keys open, even in a private repository,\nis not a good idea. Let's see how to deal with this situation.\n\n### Keeping secret things secret\n\nGitLab has a special place for secret variables: **Settings > CI/CD > Variables**\n\n![Picture of Variables page](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674076/Blog/Content%20Images/add-variable-updated.png)\n\nWhatever you put there will be turned into **environment variables**.\nChecking the \"Mask variable\" checkbox will obfuscate the variable in job logs. Also, checking the \"Protect variable\" checkbox will export the variable to only pipelines running on protected branches and tags. Users with Owner or Maintainer permissions to a project will have access to this section.\n\nWe could remove `variables` section from our CI configuration. However, let’s use it for another purpose.\n\n### How to specify and use variables that are not secret\n\nWhen your configuration gets bigger, it is convenient to keep some of the\nparameters as variables at the beginning of your configuration. Especially if you\nuse them in more than one place. 
Although it is not the case in our situation yet,\nlet's set the S3 bucket name as a [**variable**](https://docs.gitlab.com/ee/ci/variables/) for the purpose of this demonstration:\n\n```yaml\nvariables:\n  S3_BUCKET_NAME: \"yourbucket\"\ndeploy:\n  image: python:latest\n  script:\n  - pip install awscli\n  - aws s3 cp ./ s3://$S3_BUCKET_NAME/ --recursive --exclude \"*\" --include \"*.html\"\n```\n\nSo far so good:\n\n![Successful build](https://about.gitlab.com/images/blogimages/ci-deployment-and-environments/build.png){: .shadow.medium.center}\n\nIn our hypothetical scenario, the audience of your website has grown, so you've hired a developer to help you.\nNow you have a team. Let's see how teamwork changes the GitLab CI workflow.\n\n## How to use GitLab CI with a team\n\nNow, that there are two users working in the same repository, it is no longer convenient\nto use the `main` branch for development. You decide to use separate branches\nfor both new features and new articles and merge them into `main` when they are ready.\n\nThe problem is that your current CI config doesn’t care about branches at all.\nWhenever you push anything to GitLab, it will be deployed to S3.\n\nPreventing this problem is straightforward. Just add `only: main` to your `deploy` job.\n\n![Automated deployment of main branch](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674076/Blog/Content%20Images/15-updated.png){: .center}\nYou don't want to deploy every branch to the production website but it would also be nice to preview your changes from feature-branches somehow.\n{: .note .text-center}\n\n### How to set up a separate place for testing code\n\nThe person you recently hired, let's call him Patrick, reminds you that there is a featured called\n[GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/). 
It looks like a perfect candidate for\na place to preview your work in progress.\n\nTo [host websites on GitLab Pages](/blog/gitlab-pages-setup/) your CI configuration file should satisfy three simple rules:\n\n- The _job_ should be named `pages`\n- There should be an `artifacts` section with folder `public` in it\n- Everything you want to host should be in this `public` folder\n\nThe contents of the public folder will be hosted at `http://\u003Cusername>.gitlab.io/\u003Cprojectname>/`\n{: .alert .alert-info}\n\nAfter applying the [example config for plain-html websites](https://gitlab.com/pages/plain-html/blob/master/.gitlab-ci.yml),\nthe full CI configuration looks like this:\n\n```yaml\nvariables:\n  S3_BUCKET_NAME: \"yourbucket\"\n\ndeploy:\n  image: python:latest\n  script:\n  - pip install awscli\n  - aws s3 cp ./ s3://$S3_BUCKET_NAME/ --recursive --exclude \"*\" --include \"*.html\"\n  only:\n  - main\n\npages:\n  image: alpine:latest\n  script:\n  - mkdir -p ./public\n  - cp ./*.html ./public/\n  artifacts:\n    paths:\n    - public\n  except:\n  - main\n```\n\nWe specified two jobs. 
One job deploys the website for your customers to S3 (`deploy`).\nThe other one (`pages`) deploys the website to GitLab Pages.\nWe can name them \"Production environment\" and \"Staging environment\", respectively.\n\n![Deployment to two places](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674076/Blog/Content%20Images/16-updated.png){: .center}\nAll branches, except main, will be deployed to GitLab Pages.\n{: .note .text-center}\n\n## Introducing environments\n\nGitLab offers\n [support for environments](https://docs.gitlab.com/ee/ci/environments/) (including dynamic environments and static environments),\n and all you need to do it to specify the corresponding environment for each deployment *job*:\n\n```yaml\nvariables:\n  S3_BUCKET_NAME: \"yourbucket\"\n\ndeploy to production:\n  environment: production\n  image: python:latest\n  script:\n  - pip install awscli\n  - aws s3 cp ./ s3://$S3_BUCKET_NAME/ --recursive --exclude \"*\" --include \"*.html\"\n  only:\n  - main\n\npages:\n  image: alpine:latest\n  environment: staging\n  script:\n  - mkdir -p ./public\n  - cp ./*.html ./public/\n  artifacts:\n    paths:\n    - public\n  except:\n  - main\n```\n\nGitLab keeps track of your deployments, so you always know what is currently being deployed on your servers:\n\n![List of environments](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674076/Blog/Content%20Images/envs-updated.png){: .shadow.center}\n\nGitLab provides full history of your deployments for each of your current environments:\n\n![List of deployments to staging environment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674077/Blog/Content%20Images/staging-env-detail-updated.png){: .shadow.center}\n\n![Environments](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674077/Blog/Content%20Images/17-updated.png){: .center}\n\nNow, with everything automated and set up, we’re ready for the new challenges that are just around the corner.\n\n## How to 
troubleshoot deployments\n\nIt has just happened again.\nYou've pushed your feature-branch to preview it on staging and a minute later Patrick pushed\nhis branch, so the staging environment was rewritten with his work. Aargh!! It was the third time today!\n\nIdea! \u003Ci class=\"far fa-lightbulb\" style=\"color:#FFD900; font-size:.85em\" aria-hidden=\"true\">\u003C/i> Let's use Slack to notify us of deployments, so that people will not push their stuff if another one has been just deployed!\n\n> Learn how to [integrate GitLab with Slack](https://docs.gitlab.com/ee/user/project/integrations/gitlab_slack_application.html).\n\n## Teamwork at scale\n\nAs the time passed, your website became really popular, and your team has grown from two people to eight people.\nPeople develop in parallel, so the situation when people wait for each other to\npreview something on Staging has become pretty common. \"Deploy every branch to staging\" stopped working.\n\n![Queue of branches for review on Staging](https://about.gitlab.com/images/blogimages/ci-deployment-and-environments/queue.jpg){: .center}\n\nIt's time to modify the process one more time. You and your team agreed that if\nsomeone wants to see their changes on the staging\nserver, they should first merge the changes to the \"staging\" branch.\n\nThe change of `.gitlab-ci.yml` is minimal:\n\n```yaml\nexcept:\n- main\n```\n\nis now changed to\n\n```yaml\nonly:\n- staging\n```\n\n![Staging branch](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674077/Blog/Content%20Images/18-updated.png){: .center}\nPeople have to merge their feature branches before preview on the staging server.\n{: .note .text-center}\n\nOf course, it requires additional time and effort for merging, but everybody agreed that it is better than waiting.\n\n### How to handle emergencies\n\nYou can't control everything, so sometimes things go wrong. 
Someone merged branches incorrectly and\npushed the result straight to production exactly when your site was on top of HackerNews.\nThousands of people saw your completely broken layout instead of your shiny main page.\n\nLuckily, someone found the **Rollback** button, so the\nwebsite was fixed a minute after the problem was discovered.\n\n![List of environments](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674077/Blog/Content%20Images/prod-env-rollback-arrow-updated.png){: .shadow.center}\nRollback relaunches the previous job with the previous commit\n{: .note .text-center}\n\nAnyway, you felt that you needed to react to the problem and decided to turn off\nauto-deployment to Production and switch to manual deployment.\nTo do that, you needed to add `when: manual` to your _job_.\n\nAs you expected, there will be no automatic deployment to Production after that.\nTo deploy manually go to **CI/CD > Pipelines**, and click the button:\n\n![Skipped job is available for manual launch](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674076/Blog/Content%20Images/manual-pipeline-arrow-updated.png){: .shadow.center}\n\nFast forward in time. Finally, your company has turned into a corporation. Now, you have hundreds of people working on the website,\nso all the previous compromises no longer work.\n\n### Time to start using Review Apps\n\nThe next logical step is to boot up a temporary instance of the application per feature branch for review.\n\nIn our case, we set up another bucket on S3 for that. 
The only difference is that\nwe copy the contents of our website to a \"folder\" with the name of the\nthe development branch, so that the URL looks like this:\n\n`http://\u003CREVIEW_S3_BUCKET_NAME>.s3-website-us-east-1.amazonaws.com/\u003Cbranchname>/`\n\nHere's the replacement for the `pages` _job_ we used before:\n\n```yaml\nreview apps:\n  variables:\n    S3_BUCKET_NAME: \"reviewbucket\"\n  image: python:latest\n  environment: review\n  script:\n  - pip install awscli\n  - mkdir -p ./$CI_BUILD_REF_NAME\n  - cp ./*.html ./$CI_BUILD_REF_NAME/\n  - aws s3 cp ./ s3://$S3_BUCKET_NAME/ --recursive --exclude \"*\" --include \"*.html\"\n```\n\nThe interesting thing is where we got this `$CI_BUILD_REF_NAME` variable from.\nGitLab predefines [many environment variables](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html) so that you can use them in your jobs.\n\nNote that we defined the `S3_BUCKET_NAME` variable inside the *job*. You can do this to rewrite top-level definitions.\n{: .alert .alert-info}\n\nVisual representation of this configuration:\n![Review apps]![How to use GitLab CI - update - 19 - updated](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674077/Blog/Content%20Images/19-updated.png){: .illustration}\n\nThe details of the Review Apps implementation varies widely, depending upon your real technology\nstack and on your deployment process, which is outside the scope of this blog post.\n\nIt will not be that straightforward, as it is with our static HTML website.\nFor example, you had to make these instances temporary, and booting up these instances\nwith all required software and services automatically on the fly is not a trivial task.\nHowever, it is doable, especially if you use Docker containers, or at least Chef or Ansible.\n\nWe'll cover deployment with Docker in a future blog post.\nI feel a bit guilty for simplifying the deployment process to a simple HTML files copying, and not\nadding some hardcore scenarios. 
If you need some right now, I recommend you read the article [\"Building an Elixir Release into a Docker image using GitLab CI.\"](/blog/building-an-elixir-release-into-docker-image-using-gitlab-ci-part-1/)\n\nFor now, let's talk about one final thing.\n\n### Deploying to different platforms\n\nIn real life, we are not limited to S3 and GitLab Pages. We host, and therefore,\ndeploy our apps and packages to various services.\n\nMoreover, at some point, you could decide to move to a new platform and will need to rewrite all your deployment scripts.\nYou can use a gem called `dpl` to minimize the damage.\n\nIn the examples above we used `awscli` as a tool to deliver code to an example\nservice (Amazon S3).\nHowever, no matter what tool and what destination system you use, the principle is the same:\nYou run a command with some parameters and somehow pass a secret key for authentication purposes.\n\nThe `dpl` deployment tool utilizes this principle and provides a\nunified interface for [this list of providers](https://github.com/travis-ci/dpl#supported-providers).\n\nHere's how a production deployment _job_ would look if we use `dpl`:\n\n```yaml\nvariables:\n  S3_BUCKET_NAME: \"yourbucket\"\n\ndeploy to production:\n  environment: production\n  image: ruby:latest\n  script:\n  - gem install dpl\n  - dpl --provider=s3 --bucket=$S3_BUCKET_NAME\n  only:\n  - main\n```\n\nIf you deploy to different systems or change destination platform frequently, consider\nusing `dpl` to make your deployment scripts look uniform.\n\n## Five key takeaways\n\n1. Deployment is just a command (or a set of commands) that is regularly executed. Therefore it can run inside GitLab CI.\n2. Most times you'll need to provide some secret key(s) to the command you execute. Store these secret keys in **Settings > CI/CD > Variables**.\n3. With GitLab CI, you can flexibly specify which branches to deploy to.\n4. 
If you deploy to multiple environments, GitLab will conserve the history of deployments,\nwhich allows you to rollback to any previous version.\n5. For critical parts of your infrastructure, you can enable manual deployment from GitLab interface, instead of automated deployment.\n\n\u003Cstyle>\nimg.illustration {\n  padding-left: 12%;\n  padding-right: 12%;\n\n}\n@media (max-width: 760px) {\n  img.illustration {\n    padding-left: 0px;\n    padding-right: 0px;\n  }\n}\n\u003C/style>\n",[771,772,9],"2024-07-22",{"slug":1374,"featured":6,"template":684},"ci-deployment-and-environments","content:en-us:blog:ci-deployment-and-environments.yml","Ci Deployment And Environments","en-us/blog/ci-deployment-and-environments.yml","en-us/blog/ci-deployment-and-environments",{"_path":1380,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1381,"content":1387,"config":1393,"_id":1395,"_type":13,"title":1396,"_source":15,"_file":1397,"_stem":1398,"_extension":18},"/en-us/blog/combine-gitlab-webhooks-and-twilio-for-sms-alerts-on-devsecops-platform",{"title":1382,"description":1383,"ogTitle":1382,"ogDescription":1383,"noIndex":6,"ogImage":1384,"ogUrl":1385,"ogSiteName":669,"ogType":670,"canonicalUrls":1385,"schema":1386},"Combine GitLab webhooks and Twilio for SMS alerts on DevSecOps platform","Configure GitLab webhooks with SMS alerts to instantly get feedback on new and existing issues within a project and enable teams to react quickly to project- and group-level changes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099013/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2814%29_6VTUA8mUhOZNDaRVNPeKwl_1750099012960.png","https://about.gitlab.com/blog/combine-gitlab-webhooks-and-twilio-for-sms-alerts-on-devsecops-platform","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Combine GitLab webhooks and Twilio for SMS alerts on DevSecOps platform\",\n        
\"author\": [{\"@type\":\"Person\",\"name\":\"Ted Gieschen\"}],\n        \"datePublished\": \"2024-06-10\",\n      }",{"title":1382,"description":1383,"authors":1388,"heroImage":1384,"date":1390,"body":1391,"category":1103,"tags":1392},[1389],"Ted Gieschen","2024-06-10","We all strive to create the most robust and secure DevSecOps environments where everyone can collaborate to deliver amazing products for our customers. But no matter how robust and secure we design our environments we cannot exclude the possibility that something might go wrong. When an issue does occur we want to make sure we can remediate it quickly. To do that it's not only important to document the details of the issue but also get the right people notified immediately. In this article, we will set up GitLab [webhooks](https://docs.gitlab.com/ee/user/project/integrations/webhooks.html) together with [Twilio's functionality](https://www.twilio.com/en-us) to [send SMS alerts](https://www.twilio.com/docs/messaging) to the right people, getting them up to date so they can mitigate problems quickly.\n\n## Prerequisites\n\n1. A GitLab account: Webhooks aren't restricted by tier, which means this feature can be used with a [Free, Premium or Ultimate license](https://about.gitlab.com/pricing/) for either [GitLab's SaaS or self-managed offering](https://docs.gitlab.com/ee/subscriptions/choosing_subscription.html). If you don't have an account yet, you can create one on [our sign-up page]( https://gitlab.com/users/sign_up).\n\n2. A Twilio account: To handle the incoming webhook and send an SMS, you will need a Twilio account. If you don't already have one, you can create one on [Twilio's sign-up page](https://www.twilio.com/try-twilio).\n\n3. (Optional) An SMS-capable phone to test the functionality: We will be testing the functionality at the end of this article. If you want to follow along, you will need access to a phone that can receive SMS texts.\n\n4. 
(Optional) A basic understanding of Node.js: We will be handling the webhooks using a serverless function provided by Twilio Functions. This will be written in [Node.js](https://nodejs.org/en/about). Although you can simply copy-paste the functionality, it would be beneficial to understand the basics of Node.js so you can expand functionality in the future.\n\n## Building automated SMS notifications\n\nNow, let's get hands-on with building real-time SMS notifications.\n\nAt a high level, the workflow looks as follows:\n\n![SMS workflow](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750099023261.png)\n\n1. An event is triggered within GitLab. This event is then picked up by GitLab's webhook functionality.\n2. The information of the event is then sent as a webhook to a [Twilio Function](https://www.twilio.com/docs/serverless/functions-assets/functions).\n3. Twilio Functions processes the event data sent by GitLab and creates the SMS body with relevant information.\n4. When complete, Twilio Functions triggers [Twilio Programmable Messaging](https://www.twilio.com/docs/messaging) with the SMS body and recipient information.\n5. Twilio Programmable Messaging then sends the SMS with the generated body to the recipient.\n\n### Set up Twilio SMS\n\nWe need to set up our Twilio environment to be able to send SMS. To do this, log in to your Twilio account. 
If you don't have one just follow the link provided in the prerequisites section above.\n\nOnce logged in you will see the Twilio Console, which will look something like this:\n\n![Twilio console](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750099023261.png)\n\nFrom here, we will head to the left sidebar menu and select __United States (US1) > Phone Numbers > Manage > Active numbers__ and then click the \"Buy a number\" button.\n\n![Buy a number screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750099023263.png)\n\nYou can select a phone number, which will be the number that notifications are sent from. There are some [guidelines](https://www.twilio.com/docs/messaging/guides/sending-international-sms-guide) specific to which countries you can send SMS based on the Twilio phone number you purchase, so please keep that in mind. In this example, I will be using my personal U.S. phone number for this article as the recipient phone number, so, in this case, I will purchase a U.S. Twilio number. Just make sure your phone number has the SMS capability. Once selected, simply click the \"Buy \u003Cphone number>\"  button.\n\n![twilio webhooks - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099023265.png)\n\nNext, we just need to make sure Twilio can send SMS to our recipient phone number by allowing Twilio Programmable Messaging to send SMS to the country our recipient phone number is associated with. To do so, head to __[United States (US1) > Messaging > Settings > Geo permissions__ and make sure that the country associated with the recipient's phone number is selected (for example, as I am using my U.S. 
phone number as the recipient phone number in this blog, I will select United States).\n\n![twilio webhooks - image 5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image11_aHR0cHM6_1750099023267.png)\n\nClick \"Save geo permissions.\" With that we're all set up to send SMS.\n\nNext, let's handle the processing of the webhook and the creation of our SMS alerts with Twilio Functions.\n\n### Set up Twilio Functions\n\nTo process the webhook we will be sending to Twilio, we need to define a Twilio Function. To do this, select **United States (US1) > Functions and Assets > Functions (Classic) > List** and click \"Create a Function.\" Select the \"Hello SMS\" option in the pop-up and click \"Create.\"\n\n![Create a Twilio function](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099023269.png)\n\nNow, let's go ahead and configure our Twilio Function.\n\n1. Extend the path for example `/handle-event-webhook`. In my case this would result in the following path: `https://daff-mac-7354.twil.io/handle-event-webhook`.\n\n2. Disable the option `Check for valid Twilio signature`.\n\n3. Adjust the code to the following, making sure to update the values for `\u003Cyour personal phone number>` and `\u003Cyour Twilio Phone number>`:\n\n``` javascript\nexports.handler = function (context, event, callback) {\n  const twilioClient = context.getTwilioClient();\n\n  twilioClient.messages\n    .create({\n      body: `Hi there! There was an update to issue (${event[\"object_attributes\"][\"id\"]}) with title \"${event[\"object_attributes\"][\"title\"]}\" in project ${event[\"repository\"][\"name\"]}. 
It was just ${event[\"object_attributes\"][\"action\"]}.`,\n      to: \"\u003Cyour personal phone number>\",\n      from: \"\u003Cyour Twilio Phone number>\",\n    })\n    .then((message) => {\n      console.log(\"SMS successfully sent\");\n      console.log(message.sid);\n      return callback(null, `Success! Message SID: ${message.sid}`);\n    })\n    .catch((error) => {\n      console.error(error);\n      return callback(error);\n    });\n};\n\n```\n\nIt should end up looking like the following:\n\n  ![Configuration for Twilio function](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099023271.jpg)\n\nNow, whenever our endpoint is hit, it should trigger an SMS with a custom message indicating a change to an existing issue which will represent an example of the various [webhook events](https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html) we can configure.\n\nNext, let's set our webhooks within GitLab to trigger this endpoint whenever a change to an issue is made.\n\n### Set up GitLab webhooks\n\nLog in to your GitLab instance and go to the project you would like to configure event webhooks in.\n\nOnce in the Project, go to **Settings > Webhooks** and click on \"Add new webhook.\"\n\n![Screen to add a new webhook](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099023273.png)\n\nYou will only need to configure the following fields:\n\n1. URL: This should be the endpoint we defined in the previous section. In the previous example that would be `https://daff-mac-7354.twil.io/handle-event-webhook`.\n\n2. 
Trigger: In our case, we will be reacting to [issues events](https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#issue-events), so check \"Issues events.\"\n\n![Configuring URL and trigger fields](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750099023274.png)\n\nWe're all set to test our setup!\n\n### Testing\n\nWhile in the project that was just configured to react to issues events, head to \"Plan > Issues\" and click on \"New issue.\"\n\n![New issue screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750099023276.png)\n\nAdd a title and click on \"Create Issue.\"\n\n  ![Create issue screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099023/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750099023278.png)\n\nIf everything is configured correctly, you should get an SMS looking something like:\n\n`Sent from your Twilio trial account - Hi there! There was an update to issue (146735617) with title \"GitLab webhook example\" in project Webhooks Example. It was just opened.`\n\n## Expanding the use case\n\nWe've leveraged Twilio's SMS functionality in combination with GitLab webhooks to instantly get feedback on new and existing issues within our project, allowing us to react quickly to any changes that might occur. This simple use case showed how one person could instantly get informed about a single type of event. However, often we want to inform more people about various events or be able to react to more than just one type of event (like issue creation and updates).\n\nThis functionality can be expanded by:\n\n1. Sending SMS alerts to multiple people: This can be achieved by extending the Twilio Function to loop through a given array of phone numbers. 
[Twilio's Messaging Service](https://www.twilio.com/docs/messaging/services) can be leveraged to potentially simplify the process of sending SMS to various phone numbers.\n\n2. Handling different event types: Select more types of webhook events in the Project settings to react to other things like [comments](https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#comment-events), [deployments](https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#deployment-events), or [releases](https://docs.gitlab.com/ee/user/project/integrations/webhook_events.html#release-events).\n\n3. Configure on a group level: In this example, we’ve only configured webhooks on a project level. However, if it is relevant to react to events across projects on a group level, this can also be configured, removing the need to change webhook settings for each project.\n\n4. Self-host message generation functionality: Leverage [Twilio Server Side SDKs](https://www.twilio.com/docs/libraries) instead of Twilio Functions to host the code yourself. 
This could benefit you if you have restrictions on where you can host code as well as allow you to more easily connect with the rest of your code base likecfetching information from your database to get phone numbers for relevant people.\n\n> Start [a free 30-day trial of GitLab Ultimate](https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/blog&glm_content=default-saas-trial) today to test-drive more DevSecOps features.",[680,9,678,814,478],{"slug":1394,"featured":90,"template":684},"combine-gitlab-webhooks-and-twilio-for-sms-alerts-on-devsecops-platform","content:en-us:blog:combine-gitlab-webhooks-and-twilio-for-sms-alerts-on-devsecops-platform.yml","Combine Gitlab Webhooks And Twilio For Sms Alerts On Devsecops Platform","en-us/blog/combine-gitlab-webhooks-and-twilio-for-sms-alerts-on-devsecops-platform.yml","en-us/blog/combine-gitlab-webhooks-and-twilio-for-sms-alerts-on-devsecops-platform",{"_path":1400,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1401,"content":1407,"config":1413,"_id":1415,"_type":13,"title":1416,"_source":15,"_file":1417,"_stem":1418,"_extension":18},"/en-us/blog/compose-readers-and-writers-in-golang-applications",{"title":1402,"description":1403,"ogTitle":1402,"ogDescription":1403,"noIndex":6,"ogImage":1404,"ogUrl":1405,"ogSiteName":669,"ogType":670,"canonicalUrls":1405,"schema":1406},"Compose Readers and Writers in Golang applications","GitLab streams terabytes of Git data every hour using Golang abstractions of I/O implementations. 
Learn how to compose Readers and Writers in Golang apps.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099464/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_639935439_3oqldo5Yt5wPonEJYZOLTM_1750099464124.jpg","https://about.gitlab.com/blog/compose-readers-and-writers-in-golang-applications","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Compose Readers and Writers in Golang applications\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Igor Drozdov\"}],\n        \"datePublished\": \"2024-02-15\",\n      }",{"title":1402,"description":1403,"authors":1408,"heroImage":1404,"date":1410,"body":1411,"category":769,"tags":1412},[1409],"Igor Drozdov","2024-02-15","Every hour, GitLab transfers terabytes of Git data between a server and a client. It is hard or even impossible to handle this amount of traffic unless it is done efficiently in a streaming fashion. Git data is served by Gitaly (Git server), GitLab Shell (Git via SSH), and Workhorse (Git via HTTP(S)). These services are implemented using Go - the language that conveniently provides abstractions to efficiently deal with I/O operations.\n\nGolang's [`io`](https://pkg.go.dev/io) package provides [`Reader`](https://pkg.go.dev/io#Reader) and [`Writer`](https://pkg.go.dev/io#Writer) interfaces to abstract the functionality of I/O implementations into public interfaces.\n\n`Reader` is the interface that wraps the basic `Read` method:\n\n```go\ntype Reader interface {\n\tRead(p []byte) (n int, err error)\n}\n```\n\n`Writer` is the interface that wraps the basic `Write` method.\n\n```go\ntype Writer interface {\n\tWrite(p []byte) (n int, err error)\n}\n```\n\nFor example, [`os`](https://pkg.go.dev/os) package provides an implementation of reading a file. 
`File` type implements `Reader` and `Writer` interfaces by defining basic [`Read`](https://pkg.go.dev/os#File.Read) and [`Write`](https://pkg.go.dev/os#File.Write) functions.\n\nIn this blog post, you'll learn how to compose Readers and Writers in Golang applications.\n\nFirst, let's read from a file and write its content to [`os.Stdout`](https://cs.opensource.google/go/go/+/master:src/os/file.go;l=66?q=Stdout&ss=go%2Fgo).\n\n```go\nfunc main() {\n\tfile, err := os.Open(\"data.txt\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer file.Close()\n\n\tp := make([]byte, 32 * 1024)\n\tfor {\n\t\tn, err := file.Read(p)\n\n\t\t_, errW := os.Stdout.Write(p[:n])\n\t\tif errW != nil {\n\t\t\tlog.Fatal(errW)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tif errors.Is(err, io.EOF) {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}\n```\n\nEach call of the `Read` function fills the buffer `p` with the content from the file, i.e. the file is being consumed in chunks (up to `32KB`) instead of being fully loaded into the memory.\n\nTo simplify this widely used pattern, `io` package conveniently provides [`Copy`](https://pkg.go.dev/io#Copy) function that allows passing content from any `Reader` to any `Writer` and also [handles](https://cs.opensource.google/go/go/+/refs/tags/go1.21.0:src/io/io.go;l=433) additional edge cases.\n\n```go\nfunc main() {\n\tfile, err := os.Open(\"data.txt\")\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer file.Close()\n\n\tif _, err := io.Copy(os.Stdout, file); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n```\n\n`Reader` and `Writer` interfaces are used across the whole Golang ecosystem because they facilitate reading and writing content in a streaming fashion. Therefore, gluing together the Readers and Writers with the functions that expect these interfaces as arguments is a frequent problem to solve. 
Sometimes it's as straightforward as passing content from a Reader into a Writer, but sometimes the content written into a Writer must be represented as a Reader or the content from a reader must be sent into multiple Writers. Let's have a closer look into different use cases and the examples of solving these types of problems in the `GitLab` codebase.\n\n## Reader -> Writer\n\n**Problem**\n\nWe need to pass content from a Reader into a Writer.\n\n![readers and writers - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099495/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099494917.png)\n\n**Solution**\n\nThe problem can be solved by using [`io.Copy`](https://pkg.go.dev/io#Copy).\n\n```go\nfunc Copy(dst Writer, src Reader) (written int64, err error)\n```\n\n**Example**\n\n[`InfoRefs*`](https://gitlab.com/gitlab-org/gitlab/blob/57aafb6a886d05c15dd0fa372fb4f008bec014ea/workhorse/internal/gitaly/smarthttp.go#L18-35) Gitaly RPCs return a `Reader` and we want to [stream](https://gitlab.com/gitlab-org/gitlab/blob/57aafb6a886d05c15dd0fa372fb4f008bec014ea/workhorse/internal/git/info-refs.go#L78-80) its content to a user via HTTP response:\n\n```go\nfunc handleGetInfoRefsWithGitaly(ctx context.Context, responseWriter *HttpResponseWriter, a *api.Response, rpc, gitProtocol, encoding string) error {\n        ...\n        infoRefsResponseReader, err := smarthttp.InfoRefsResponseReader(ctx, &a.Repository, rpc, gitConfigOptions(a), gitProtocol)\n        ...\n        if _, err = io.Copy(w, infoRefsResponseReader); err != nil {\n            return err\n        }\n        ...\n}\n```\n\n## Reader -> Multiple Writers\n\n**Problem**\n\nWe need to pass content from a Reader into multiple Writers.\n\n![readers and writers - image 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099495/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099494917.png)\n\n**Solution**\n\nThe `io` package provides 
[`io.MultiWriter`](https://pkg.go.dev/io#MultiWriter) function that _converts_ multiple Writers into a single one. When its `Write` function is called, the content is copied to all the Writers ([implementation](https://cs.opensource.google/go/go/+/refs/tags/go1.21.0:src/io/multi.go;l=127)).\n\n```go\nfunc MultiWriter(writers ...Writer) Writer\n```\n\n**Example**\n\nGiven we want to [build](https://gitlab.com/gitlab-org/gitlab/blob/57aafb6a886d05c15dd0fa372fb4f008bec014ea/workhorse/internal/upload/destination/multi_hash.go#L13-18) `md5`, `sha1`, `sha256` and `sha512` hashes from the same content. [`Hash`](https://pkg.go.dev/hash#Hash) type is a `Writer`. Using `io.MultiWriter`, we define [`multiHash`](https://gitlab.com/gitlab-org/gitlab/blob/57aafb6a886d05c15dd0fa372fb4f008bec014ea/workhorse/internal/upload/destination/multi_hash.go#L43-61) Writer. After the content is [written](https://gitlab.com/gitlab-org/gitlab/blob/57aafb6a886d05c15dd0fa372fb4f008bec014ea/workhorse/internal/upload/destination/destination.go#L124-125) to the `multiHash`, we [calculate](https://gitlab.com/gitlab-org/gitlab/blob/57aafb6a886d05c15dd0fa372fb4f008bec014ea/workhorse/internal/upload/destination/multi_hash.go#L63-70) the hashes of all these functions in a single run.\n\nThe simplified version of the example is:\n\n```go\npackage main\n\nimport (\n\t\"crypto/sha1\"\n\t\"crypto/sha256\"\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n)\n\nfunc main() {\n\ts1 := sha1.New()\n\ts256 := sha256.New()\n\n\tw := io.MultiWriter(s1, s256)\n\tif _, err := w.Write([]byte(\"content\")); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Println(s1.Sum(nil))\n\tfmt.Println(s256.Sum(nil))\n}\n```\n\nFor simplicity, we just call `Write` function on a Writer, but when content comes from a Reader, then `io.Copy` can be used as well:\n\n```go\n_, err := io.Copy(io.MultiWriter(s1, s256), reader)\n```\n\n## Multiple Readers -> Reader\n\n**Problem**\n\nWe have multiple Readers and need to sequentially read from 
them.\n\n![readers and writers - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099495/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099494919.png)\n\n**Solution**\n\nThe `io` package provides [`io.MultiReader`](https://pkg.go.dev/io#MultiReader) function that _converts_ multiple Readers into a single one. The Readers are read in the passed order.\n\n```go\nfunc MultiReader(readers ...Reader) Reader\n```\n\nThen this Reader can be used in any function that accepts `Reader` as an argument.\n\n**Example**\n\nWorkhorse [reads](https://gitlab.com/gitlab-org/gitlab/blob/d97ce3baab7fbf459728ce18766fefd3abb8892f/workhorse/cmd/gitlab-resize-image/png/reader.go#L26-38) the first `N` bytes of an image to detect whether it's a PNG file and _puts them back_ by building a Reader from multiple Readers:\n\n```go\nfunc NewReader(r io.Reader) (io.Reader, error) {\n\tmagicBytes, err := readMagic(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif string(magicBytes) != pngMagic {\n\t\tdebug(\"Not a PNG - read file unchanged\")\n\t\treturn io.MultiReader(bytes.NewReader(magicBytes), r), nil\n\t}\n\n\treturn io.MultiReader(bytes.NewReader(magicBytes), &Reader{underlying: r}), nil\n}\n```\n\n## Multiple Readers -> Multiple Writers\n\n**Problem**\n\nWe need to pass content from multiple Readers into multiple Writers.\n\n![readers and writers - image 6](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099495/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750099494921.png)\n\n**Solution**\n\nThe solutions above can be generalized on the many-to-many use case.\n\n```go\n_, err := io.Copy(io.MultiWriter(w1, w2, w3), io.MultiReader(r1, r2, r3))\n```\n\n## Reader -> Reader + Writer\n\n**Problem**\n\nWe need to read content from a Reader or pass the Reader to a function and simultaneously write the content into a Writer.\n\n![readers and writers - image 
2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099495/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099494923.png)\n\n**Solution**\n\nThe `io` package provides [io.TeeReader](https://pkg.go.dev/io#TeeReader) function that accepts a Reader to read from, a Writer to write to, and returns a Reader that can be processed further.\n\n```go\nfunc TeeReader(r Reader, w Writer) Reader\n```\n\nThe [implementation](https://cs.opensource.google/go/go/+/refs/tags/go1.21.4:src/io/io.go;l=610) of the functionality is straightforward. The passed `Reader` and `Writer` are stored in a structure that is a `Reader` itself:\n\n```go\nfunc TeeReader(r Reader, w Writer) Reader {\n\treturn &teeReader{r, w}\n}\n\ntype teeReader struct {\n\tr Reader\n\tw Writer\n}\n```\n\nThe `Read` function implemented for the structure delegates the `Read` to the passed `Reader` and also performs a `Write` to the passed `Writer`:\n\n```\nfunc (t *teeReader) Read(p []byte) (n int, err error) {\n\tn, err = t.r.Read(p)\n\tif n > 0 {\n\t\tif n, err := t.w.Write(p[:n]); err != nil {\n\t\t\treturn n, err\n\t\t}\n\t}\n\treturn\n}\n```\n\n**Example 1**\n\nWe already touched hashing topic in the `Multiple Writers -> Writer` section and `io.TeeReader` is [used](https://gitlab.com/gitlab-org/gitlab/blob/d97ce3baab7fbf459728ce18766fefd3abb8892f/workhorse/internal/upload/destination/destination.go#L124-125) to provide a Writer to create a hash from content. The returned Reader can be further used to upload content to object storage.\n\n**Example 2**\n\nWorkhorse uses `io.TeeReader` to [implement](https://gitlab.com/gitlab-org/gitlab/blob/d97ce3baab7fbf459728ce18766fefd3abb8892f/workhorse/internal/dependencyproxy/dependencyproxy.go#L57-101) Dependency Proxy [functionality](https://docs.gitlab.com/ee/user/packages/dependency_proxy/). Dependency Proxy caches requested upstream images in the object storage. 
The not-yet-cached use case has the following behavior:\n\n- A user performs an HTTP request.\n- The upstream image is fetched using [`net/http`](https://pkg.go.dev/net/http) and [`http.Response`](https://pkg.go.dev/net/http#Response) provides its content via `Body` field, which is [`io.ReadCloser`](https://pkg.go.dev/io#ReadCloser) (basically an `io.Reader`).\n- We need to send this content back to the user by writing it into [`http.ResponseWriter`](https://pkg.go.dev/net/http#ResponseWriter) (basically an `io.Writer`).\n- We need to simultaniously upload the content to object storage by performing an [`http.Request`](https://pkg.go.dev/net/http#NewRequest) (a function that accepts an `io.Reader`).\n\nAs a result, `io.TeeReader` can be used to glue these primitives together:\n\n```go\nfunc (p *Injector) Inject(w http.ResponseWriter, r *http.Request, sendData string) {\n\t// Fetch upstream data via HTTP\n\tdependencyResponse, err := p.fetchUrl(r.Context(), sendData)\n\t...\n\t// Create a tee reader. Each Read will read from dependencyResponse.Body and simultaneously\n        // perform a Write to w writer\n\tteeReader := io.TeeReader(dependencyResponse.Body, w)\n\t// Pass the tee reader as the body of an HTTP request to upload it to object storage\n\tsaveFileRequest, err := http.NewRequestWithContext(r.Context(), \"POST\", r.URL.String()+\"/upload\", teeReader)\n\t...\n\tnrw := &nullResponseWriter{header: make(http.Header)}\n\tp.uploadHandler.ServeHTTP(nrw, saveFileRequest)\n\t...\n```\n\n## Writer -> Reader\n\n**Problem**\n\nWe have a function that accepts a Writer, and we are interested in the content that the function would write into the Writer. 
We want to intercept the content and represent it as a Reader to further process it in a streaming fashion.\n\n![readers and writers - image 5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099495/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750099494924.png)\n\n**Solution**\n\nThe `io` package provides [`io.Pipe`](https://pkg.go.dev/io#Pipe) function that returns a Reader and a Writer:\n\n```go\nfunc Pipe() (*PipeReader, *PipeWriter)\n```\n\nThe Writer can be used to be passed to the function that accepts a Writer. All the content that has been written into it will be accessible via the reader, i.e. a synchronous in-memory pipe is created that can be used to connect code expecting an `io.Reader` with code expecting an `io.Writer`.\n\n**Example 1**\n\nFor [LSIF](https://lsif.dev/) file [transformation](https://gitlab.com/gitlab-org/gitlab/blob/d97ce3baab7fbf459728ce18766fefd3abb8892f/workhorse/internal/lsif_transformer/parser/parser.go#L68-72) for code navigation we need to:\n\n- [Read](https://gitlab.com/gitlab-org/gitlab/blob/d97ce3baab7fbf459728ce18766fefd3abb8892f/workhorse/internal/lsif_transformer/parser/parser.go#L48-51) content of a zip file.\n- Transform the content and [serialize](https://gitlab.com/gitlab-org/gitlab/blob/d97ce3baab7fbf459728ce18766fefd3abb8892f/workhorse/internal/lsif_transformer/parser/docs.go#L97-112) it into [`zip.Writer`](https://pkg.go.dev/archive/zip#Writer).\n- [Represent](https://gitlab.com/gitlab-org/gitlab/blob/d97ce3baab7fbf459728ce18766fefd3abb8892f/workhorse/internal/lsif_transformer/parser/parser.go#L68-72) the new compressed content as a Reader to be further processed in a streaming fashion.\n\nThe [`zip.NewWriter`](https://pkg.go.dev/archive/zip#NewWriter) function accepts a Writer to which it will write the compressed content. It is handy when we need to pass an open file descriptor to the function to save the content to the file. 
However, when we need to pass the compressed content via an HTTP request, we need to represent the data as a Reader.\n\n```go\n// The `io.Pipe()` creates a reader and a writer.\npr, pw := io.Pipe()\n\n// The writer is passed to `parser.transform` function which will write\n// the transformed compressed content into it\n// The writing should happen asynchronously in a goroutine because each `Write` to\n// the `PipeWriter` blocks until it has satisfied one or more `Read`s from the `PipeReader`.\ngo parser.transform(pw)\n\n// Everything that has been written into it is now accessible via the reader.\nparser := &Parser{\n\tDocs: docs,\n\tpr:   pr,\n}\n\n// pr is a reader that can be used to read all the data written to the pw writer\nreturn parser, nil\n```\n\n**Example 2**\n\nFor Geo setups [GitLab Shell](https://gitlab.com/gitlab-org/gitlab-shell) proxies all `git push` operations to secondary and redirects them to primary.\n\n- GitLab Shell establishes an SSH connection and defines [`ReadWriter`](https://gitlab.com/gitlab-org/gitlab-shell/blob/7898d8e69daf51a7b6e01052c4516ca70893a2d4/internal/command/readwriter/readwriter.go#L6-7) struct that has `In` field of `io.Reader` type to read data from a user and `Out` field of `io.Writer` type to send response to the user.\n- GitLab Shell performs an HTTP request to `/info/refs` and sends `response.Body` of type `io.Reader` to the user using [`io.Copy`](https://gitlab.com/gitlab-org/gitlab-shell/blob/7898d8e69daf51a7b6e01052c4516ca70893a2d4/internal/command/githttp/push.go#L60)\n- The user reacts to this response by sending data to `In` and GitLab Shell needs to read this data, convert it to a request expected by Git HTTP, and send it as an HTTP request to `/git-receive-pack`. 
This is where `io.Pipe` becomes useful.\n\n```go\nfunc (c *PushCommand) requestReceivePack(ctx context.Context, client *git.Client) error {\n\t// Define pipeReader and pipeWriter and use pipeWriter to collect all the data\n\t//sent by the user converted to a format expected by Git HTTP.\n\tpipeReader, pipeWriter := io.Pipe()\n\t// The writing happens asynchronously because it's a blocking operation\n\tgo c.readFromStdin(pipeWriter)\n\n\t// pipeReader can be passed as io.Reader and used to read all the data written to pipeWriter\n\tresponse, err := client.ReceivePack(ctx, pipeReader)\n\t...\n\t_, err = io.Copy(c.ReadWriter.Out, response.Body)\n\t...\n}\n\nfunc (c *PushCommand) readFromStdin(pw *io.PipeWriter) {\n\tvar needsPackData bool\n\n\t// Scanner reads the user input line by line\n\tscanner := pktline.NewScanner(c.ReadWriter.In)\n\tfor scanner.Scan() {\n\t\tline := scanner.Bytes()\n\t\t// And writes it to the pipe writer\n\t\tpw.Write(line)\n\t\t...\n\t}\n\n\t// The data that hasn't been processed by a scanner is copied if necessary\n\tif needsPackData {\n\t\tio.Copy(pw, c.ReadWriter.In)\n\t}\n\n\t// Close the pipe writer to signify EOF for the pipe reader\n\tpw.Close()\n}\n```\n\n## Try Golang\n\nGolang provides elegant patterns designed to efficiently process data in a streaming fashion. 
The patterns can be used to address new challenges or refactor the existing performance issues associated with high memory consumption.\n\n> Learn more about [GitLab and Golang](https://docs.gitlab.com/ee/development/go_guide/).\n",[9,726,1247,728],{"slug":1414,"featured":6,"template":684},"compose-readers-and-writers-in-golang-applications","content:en-us:blog:compose-readers-and-writers-in-golang-applications.yml","Compose Readers And Writers In Golang Applications","en-us/blog/compose-readers-and-writers-in-golang-applications.yml","en-us/blog/compose-readers-and-writers-in-golang-applications",{"_path":1420,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1421,"content":1427,"config":1433,"_id":1435,"_type":13,"title":1436,"_source":15,"_file":1437,"_stem":1438,"_extension":18},"/en-us/blog/create-a-workspace-quickly-with-the-gitlab-default-devfile",{"title":1422,"description":1423,"ogTitle":1422,"ogDescription":1423,"noIndex":6,"ogImage":1424,"ogUrl":1425,"ogSiteName":669,"ogType":670,"canonicalUrls":1425,"schema":1426},"Create a workspace quickly with the GitLab default devfile","The GitLab default devfile makes it easier than ever to try out workspaces for new projects. 
Learn how to share developer environment configurations effortlessly with this tutorial.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097860/Blog/Hero%20Images/Blog/Hero%20Images/REFERENCE%20-%20display%20preview%20for%20blog%20images%20%281%29_2XDPsbkjQ3o6tcdom6IGxI_1750097859914.png","https://about.gitlab.com/blog/create-a-workspace-quickly-with-the-gitlab-default-devfile","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Create a workspace quickly with the GitLab default devfile\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Zhaochen Li\"}],\n        \"datePublished\": \"2025-02-27\",\n      }",{"title":1422,"description":1423,"authors":1428,"heroImage":1424,"date":1430,"body":1431,"category":678,"tags":1432},[1429],"Zhaochen Li","2025-02-27","Software development environments can be complex to set up and maintain. Developers often spend a significant amount of time configuring their local environments with the right dependencies, tools, and settings. GitLab aims to solve this by providing a default devfile that enables you to create workspaces and to start developing quickly.\n\n## GitLab Workspaces\n\nGitLab Workspaces provide isolated development environments for making changes to your GitLab projects without the complexity of setting up local dependencies. Workspaces ensure reproducible development setups, allowing developers to share their environment configurations effortlessly.\n\nBy default, GitLab Workspaces are configured to use the GitLab VS Code fork and include the GitLab Workflow extension. To learn more, visit [the GitLab Workspaces documentation](https://docs.gitlab.com/ee/user/workspace/).\n\n## Understand devfiles\n\nA [**devfile**](https://devfile.io/docs/2.2.0/devfile-ecosystem) is a YAML-based declarative configuration file that defines a project's development environment. 
It specifies the necessary tools, languages, runtimes, and other components required for development.\n\nPreviously, [setting up a workspace](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/) required a custom devfile at the root of the repository. For example, a `.devfile.yaml` file. A typical devfile looked like this:\n\n![typical default devfile](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097868/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2025-02-26_at_8.15.58_AM_aHR0cHM6_1750097868229.png)\n\n## GitLab default devfile\n\nStarting in GitLab 17.9, a GitLab default devfile is available for all projects when creating a workspace. This eliminates the need to manually create a devfile before starting a workspace.\nHere is the content of the default devfile:\n\n![GitLab default devfile content](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097868/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2025-02-26_at_8.16.20_AM_aHR0cHM6_1750097868230.png)\n\nWhen creating a workspace with the GitLab UI, the option **Use GitLab default devfile** is always available – regardless of whether custom devfiles exist in the repository. Simply select this option to start exploring GitLab Workspaces with one less setup step.\n\n![Use GitLab default devfile screenshot](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097868/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097868232.png)\n\n## Create your own custom devfiles\nWhile the GitLab default devfile provides a quick way to start a workspace, you may want to customize your development environment to better fit your project's needs. 
By creating a custom devfile, you can tailor your development environment with the exact tools, dependencies, and configurations needed for your workflow.\n\nConsider creating a custom devfile if you need to:\n\n- Add project-specific dependencies beyond the base development image.\n- Adjust CPU and memory resource limits.\n- Configure multiple containers for additional services like databases.\n- Define custom, project-specific, environment variables.\n- Set up specific port mappings.\n- Integrate specialized development tools like debuggers or language servers.\n\nFor more details, see the [Workspaces devfile documentation](https://docs.gitlab.com/ee/user/workspace/#devfile).\n\n## Read more\n\n- [Build and run containers in Remote Development workspaces](https://about.gitlab.com/blog/build-and-run-containers-in-remote-development-workspaces/)\n- [Use GitLab AI features out-of-the-box in a GitLab Workspace](https://about.gitlab.com/blog/use-gitlab-ai-features-out-of-the-box-in-a-gitlab-workspace/)\n- [Quickstart guide for GitLab Remote Development workspaces](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/)\n- [Enable secure sudo access for GitLab Remote Development workspaces](https://about.gitlab.com/blog/enable-secure-sudo-access-for-gitlab-remote-development-workspaces/)\n",[749,478,680,9,678],{"slug":1434,"featured":6,"template":684},"create-a-workspace-quickly-with-the-gitlab-default-devfile","content:en-us:blog:create-a-workspace-quickly-with-the-gitlab-default-devfile.yml","Create A Workspace Quickly With The Gitlab Default 
Devfile","en-us/blog/create-a-workspace-quickly-with-the-gitlab-default-devfile.yml","en-us/blog/create-a-workspace-quickly-with-the-gitlab-default-devfile",{"_path":1440,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1441,"content":1447,"config":1453,"_id":1455,"_type":13,"title":1456,"_source":15,"_file":1457,"_stem":1458,"_extension":18},"/en-us/blog/data-driven-devsecops-exploring-gitlab-insights-dashboards",{"title":1442,"description":1443,"ogTitle":1442,"ogDescription":1443,"noIndex":6,"ogImage":1444,"ogUrl":1445,"ogSiteName":669,"ogType":670,"canonicalUrls":1445,"schema":1446},"Data-driven DevSecOps: Exploring GitLab Insights Dashboards","Learn how to leverage GitLab Insights Dashboards to visualize key metrics, track project progress, and boost team productivity with customizable, data-driven views.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097210/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2811%29_78Dav6FR9EGjhebHWuBVan_1750097210214.png","https://about.gitlab.com/blog/data-driven-devsecops-exploring-gitlab-insights-dashboards","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Data-driven DevSecOps: Exploring GitLab Insights Dashboards\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ricardo Amarilla Villalba\"}],\n        \"datePublished\": \"2024-11-20\",\n      }",{"title":1442,"description":1443,"authors":1448,"heroImage":1444,"date":1450,"body":1451,"category":678,"tags":1452},[1449],"Ricardo Amarilla Villalba","2024-11-20","Metrics and analytics play a crucial role in driving productivity, quality, and success. GitLab, as a comprehensive DevSecOps platform, offers powerful tools for tracking and visualizing these vital metrics through its Insights Dashboards. 
In this article, you'll learn how to use the Insights Dashboards in your environment.\n\n## Introduction to GitLab metrics and analytics \n\nGitLab provides an array of metrics and analytics tools that cover various aspects of the DevSecOps lifecycle:\n\n1. [Productivity Analytics](https://docs.gitlab.com/ee/user/analytics/productivity_analytics.html): Track team velocity, cycle time, and lead time.  \n2. [Code Review Analytics](https://docs.gitlab.com/ee/user/analytics/code_review_analytics.html): Measure code quality, test coverage, and review efficiency.  \n3. [CI/CD Analytics](https://docs.gitlab.com/ee/user/analytics/ci_cd_analytics.html): Monitor pipeline performance and deployment frequency.  \n4. [Value Stream Analytics](https://docs.gitlab.com/ee/user/group/value_stream_analytics/): Visualize the flow of work from idea to production.  \n5. [Insights](https://docs.gitlab.com/ee/user/project/insights/): Explore and visualize data about your projects and groups.\n\nThese metrics offer invaluable insights into your development process, helping teams identify bottlenecks, optimize workflows, and make data-driven decisions.\n\n## Leveraging labels for specific metrics\n\nOne of GitLab's most powerful, yet understated features, is Labels, which allows you to filter and focus on specific metrics with pinpoint accuracy. By strategically applying labels to issues, merge requests, and epics, you can create custom views that provide targeted insights into your project's performance and progress.\n\nLabels in GitLab act as versatile identifiers, allowing you to categorize and organize your work items with great flexibility. Whether you're tracking feature development, bug fixes, or team-specific tasks, labels enable you to slice and dice your project data in ways that reveal meaningful patterns and trends. 
This concept parallels the use of tags in cloud deployments, where resources are labeled for easier management, cost allocation, and operational insights.\n\nBy thoughtfully labeling your work items, you're essentially creating a sophisticated labeling system that can be leveraged to generate custom dashboards and reports. This approach empowers you to zoom in on the metrics that matter most to your team or stakeholders, providing a clear and focused view of your project's health and momentum.\n\n## How to configure GitLab Insights\n\nGitLab Insights allow you to explore and visualize data about your projects and groups. They provide valuable analytics on various aspects such as issues created and closed during a specified period, average time for merge requests to be merged, and triage hygiene. Insights can be configured for both projects and groups.\n\nTo configure Insights:\n\n1. For project insights:  \n   * Create a file named `.gitlab/insights.yml` in the root directory of your project.  \n2. For group insights:  \n   * Create a `.gitlab/insights.yml` file in a project that belongs to your group.  \n   * Go to your group's **Settings > General**.  \n   * Expand the **Analytics section** and find the **Insights section**.  \n   * Select the project containing the configuration file and save changes.\n\nThe `.gitlab/insights.yml` file is a YAML file where you define the structure and order of charts in a report, as well as the style of charts to be displayed. 
Each chart definition includes parameters such as title, description, type, and query to specify the data source and filtering conditions.\n\nTo view insights, navigate to **Analyze > Insights** in your project or group.\n\n![View default Insights Dashboard](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097218/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097217972.png)\n\n## Customize merge request insights\n\nWhile the default view provides valuable raw information, we can customize the Insights Dashboard to uncover additional layers of information, such as which team was responsible for each merge request and what type of problem each one solved.\n\n## Merge request insights for each squad and requirement type\n\nMeasuring squad productivity in GitLab can be challenging, especially when the GitLab group and subgroup structure doesn't align perfectly with your squad organization. Here's how to overcome these challenges and effectively track squad productivity:\n\n### **Setting up squad-based metrics**\n\n1. **Label creation:** Create unique scope labels for each squad (e.g., `squad::alpha`, `squad::beta`) and each requirement type (e.g., `type::bug`, `type::feature`, `type::maintenance`).\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ZUOzORIUJeU?si=T8eHeGizS3blYFHB\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n2. **Label application:** Consistently apply these squad labels to all issues and merge requests handled by each squad, regardless of the project or group they're in.  
\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/fJ9entEBZG8?si=MlM6mKirEdkmwDDJ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n**Hints:**  \n   * Use GitLab API to apply labels massively to existing open, merged, and closed MRs.  \n   * Add/remove/update labels as part of your GitLab CI pipeline.  \n   * Leverage the GitLab Triage Bot to automate the labeling process.  \n\n3. Dashboard setup: Create a `.gitlab/insights.yml` file in your project repository with custom charts for team-specific and type-specific merge request insights.\n\n```\n\n## Default Merge Requests insights.yml \nmergeRequests:\n  title: Merge requests dashboard\n  charts:\n    - title: Merge requests merged per week \n      type: bar\n      query:\n        data_source: issuables\n        params:\n          issuable_type: merge_request\n          issuable_state: merged\n          group_by: week\n          period_limit: 12\n    - title: Merge requests merged per month\n      type: bar\n      query:\n        data_source: issuables\n        params:\n          issuable_type: merge_request\n          issuable_state: merged\n          group_by: month\n          period_limit: 3\n\n## Per-teams Merge Requests insights.yml\nmergeRequestsTeams:\n  title: Merge requests dashboard per teams\n  charts:\n    - title: Merge requests merged per week \n      type: stacked-bar\n      query:\n        data_source: issuables\n        params:\n          issuable_type: merge_request\n          issuable_state: merged\n          group_by: week\n          period_limit: 12\n          collection_labels:\n            - squad::alpha\n            - squad::beta\n    - title: Merge requests merged per month\n      type: stacked-bar\n      query:\n        data_source: issuables\n        params:\n          issuable_type: merge_request\n          issuable_state: merged\n          group_by: month\n    
      period_limit: 3\n          collection_labels:\n            - squad::alpha\n            - squad::beta\n\n## Per-teams and Type Merge Requests insights.yml\nmergeRequestsTeamsAndType:\n  title: Per Teams and Type - Merge requests dashboard\n  charts:\n    - title: Merge requests merged per week - Squad Alpha\n      type: stacked-bar\n      query:\n        data_source: issuables\n        params:\n          issuable_type: merge_request\n          issuable_state: merged\n          filter_labels: squad::alpha\n          collection_labels:\n            - type::feature\n            - type::bug\n            - type::maintenance\n          group_by: week\n          period_limit: 12\n    - title: Merge requests merged per month - Squad Alpha\n      type: stacked-bar\n      query:\n        data_source: issuables\n        params:\n          issuable_type: merge_request\n          issuable_state: merged\n          filter_labels: squad::alpha\n          collection_labels:\n            - type::feature\n            - type::bug\n            - type::maintenance\n          group_by: month\n          period_limit: 3\n    - title: Merge requests merged per week - Squad Beta\n      type: stacked-bar\n      query:\n        data_source: issuables\n        params:\n          issuable_type: merge_request\n          issuable_state: merged\n          filter_labels: squad::beta\n          collection_labels:\n            - type::feature\n            - type::bug\n            - type::maintenance\n          group_by: week\n          period_limit: 12\n    - title: Merge requests merged per month - Squad Beta\n      type: stacked-bar\n      query:\n        data_source: issuables\n        params:\n          issuable_type: merge_request\n          issuable_state: merged\n          filter_labels: squad::beta\n          collection_labels:\n            - type::feature\n            - type::bug\n            - type::maintenance\n          group_by: month\n          period_limit: 3\n\n```\n\nBy 
implementing these customizations, you can create insightful dashboards that provide a clear view of merge request activity per team and requirement type, allowing you to visualize trends over time, compare performance between squads, and analyze the distribution of different types of work for each squad. \n\n![dashboards with view of MR activity per team and requirement type](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097218/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097217972.png)\n\n![dashboard comparing performance between squads](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097218/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097217974.png)\n\n## Get started today\n\nGitLab Insights is just the tip of the iceberg when it comes to metrics and analytics. To explore the full range of GitLab's powerful analytics features, including Value Stream Analytics, CI/CD Analytics, and Code Review metrics, check out our Value Stream Management product tour:\n\n[![Value Stream Management product tour](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097218/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2024-11-20_at_12.28.08_PM_aHR0cHM6_1750097217976.png)](https://gitlab.navattic.com/vsm)\n\n> Ready to start your own metrics journey? 
Sign up for a [free 60-day trial of GitLab Ultimate today](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com%2F) and unlock the full potential of data-driven DevSecOps.\n\n## Read more\n- [Scheduled Reports Generation tool simplifies value stream management](https://about.gitlab.com/blog/new-scheduled-reports-generation-tool-simplifies-value-stream-management/)\n- [Getting started with the new GitLab Value Streams Dashboard](https://about.gitlab.com/blog/getting-started-with-value-streams-dashboard/)\n- [AI Impact analytics dashboard measures the ROI of AI](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)\n",[108,478,678,680,9,1000],{"slug":1454,"featured":90,"template":684},"data-driven-devsecops-exploring-gitlab-insights-dashboards","content:en-us:blog:data-driven-devsecops-exploring-gitlab-insights-dashboards.yml","Data Driven Devsecops Exploring Gitlab Insights Dashboards","en-us/blog/data-driven-devsecops-exploring-gitlab-insights-dashboards.yml","en-us/blog/data-driven-devsecops-exploring-gitlab-insights-dashboards",{"_path":1460,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1461,"content":1467,"config":1474,"_id":1476,"_type":13,"title":1477,"_source":15,"_file":1478,"_stem":1479,"_extension":18},"/en-us/blog/demystifying-ci-cd-variables",{"title":1462,"description":1463,"ogTitle":1462,"ogDescription":1463,"noIndex":6,"ogImage":1464,"ogUrl":1465,"ogSiteName":669,"ogType":670,"canonicalUrls":1465,"schema":1466},"GitLab environment variables demystified","CI/CD variables are useful (and flexible) tools to control jobs and pipelines. 
We unpack everything you need to know about GitLab environment variables.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664679/Blog/Hero%20Images/blog-image-template-1800x945__24_.png","https://about.gitlab.com/blog/demystifying-ci-cd-variables","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab environment variables demystified\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Veethika Mishra\"}],\n        \"datePublished\": \"2021-04-09\",\n      }",{"title":1462,"description":1463,"authors":1468,"heroImage":1464,"date":1470,"body":1471,"category":769,"tags":1472,"updatedDate":1473},[1469],"Veethika Mishra","2021-04-09","There is a lot of flexibility when it comes to defining and using variables for [CI/CD](https://about.gitlab.com/topics/ci-cd/). Variables are extremely useful for controlling jobs and pipelines, and they help you avoid hard-coding values in your `.gitlab-ci.yml` configuration file. The information in this post should weave a larger picture by bringing together all (or most) of the information around defining and handling variables, making it easier to understand the scope and capabilities. Relevant documentation is linked throughout the post.\n\nIn [GitLab CI/CD](https://docs.gitlab.com/ee/ci/), variables can be used to customize jobs by defining and storing values. When using variables there is no need to hard code values. In GitLab, CI/CD variables can be defined by going to **Settings >> CI/CD >> Variables**, or by simply defining them in the `.gitlab-ci.yml` file.\n\nVariables are useful for configuring third-party services for different deployment environments, such as `testing`, `staging`, `production`, etc. Modify the services attached to those environments by simply changing the variable that points to the API endpoint the services need to use. 
Also use variables to configure jobs and then make them available as environment variables within the jobs when they run.\n\n![GitLab reads the .gitlab-ci.yml file to scan the referenced variable and sends the information to the GitLab Runner. The variables are exposed on and output by the runner.](https://about.gitlab.com/images/blogimages/demystifying-ci-cd-variables/variables_processing.jpeg)\n\n## The relationship between variables and environments\n\nSoftware development as a process includes stages to test a product before rolling it out to users. [Environments](https://docs.gitlab.com/ee/ci/environments/) are used to define what those stages look like and it may differ between teams and organizations.\n\nOn the other hand, variables are data values that are likely to change as a result of user interaction with a product. For example, their age, preference, or any input you could possibly think of that might determine their next step in the product task-flow.\n\nWe often hear the term [environment variable](https://docs.gitlab.com/ee/administration/environment_variables.html). These are variables that are defined in a given environment, but outside the application. GitLab CI/CD variables provide developers with the ability to configure values in their code. Using variables is helpful because it ensures that the code is flexible. GitLab CI/CD variables allow users to modify an application deployed to a certain environment without making any change to code. 
It is simple to run tests or even integrate third-party services by changing a configuration environment variable outside the application.\n\n## The scope of variables for CI/CD\n\n![Order of precedence for CI/CD variables: 1) Manual pipeline run, trigger and schedule pipeline variables, 2) Project level, group level, instance level protected variables, 3) Inherited CI/CD variables, 4) Job level, global yml defined variables, 5) Deployment variables, 6) Pre-defined CI/CD variables](https://about.gitlab.com/images/blogimages/demystifying-ci-cd-variables/variables_precedence.jpeg)\n\n### `.gitlab-ci.yml` defined variables\n\nVariables that need to be available in the job environment can be added to GitLab. These CI/CD variables are meant to store non-sensitive project configuration, like the database URL in the `.gitlab-ci.yml` file. Reuse this variable in multiple jobs or scripts, wherever the value is needed. If the value changes, you only need to update the variable once, and the change is reflected everywhere the variable is used.\n\n### Project CI/CD variables\n\nMoving a step above the repository-specific requirements, you can define CI/CD variables in [project settings](https://docs.gitlab.com/ee/ci/variables/#for-a-project), which makes them available to CI/CD pipelines. These are stored out of the repository (not in the `.gitlab-ci.yml` file), but are still available to use in the CI/CD configuration and scripts. Storing the variables outside the `.gitlab-ci.yml` file keeps these values limited to a project-only scope, and not saved in plain text in the project.\n\n### Group and instance CI/CD variables\n\nSome variables are relevant at the group level, or even instance level, and could be useful to all projects in a group or instance. 
Define the variables in the [group or instance settings](https://docs.gitlab.com/ee/ci/variables/#for-a-group) so all projects within those scopes can use the variables without actually needing to know the value  or having to create the variables for the lower scope. For example, a common value that needs to be updated in multiple projects can be easily managed if it stays up-to-date in a single place. Alternatively, multiple projects could use a specific password without actually needing to know the value of the password itself.\n\n## Jobs and pipelines as environments\n\nGitLab CI/CD variables, besides being used as environment variables, also work in the scope of the `.gitlab-ci.yml` configuration file to configure pipeline behavior, unrelated to any environment. The variables can be stored in the project/group/instance settings and be made available to jobs in pipelines.\n\nFor example:\n\n```  \njob:  \n  rules:  \n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH  \n  script:  \n  - echo \"This job ran on the $CI_COMMIT_BRANCH branch.\"  \n```\n\nThe variable `($CI_COMMIT_BRANCH)` in the script section runs in the scope of the job in which it was defined. This scope is the \"job environment\" – meaning, when the job starts, the GitLab runner starts up a Docker container and runs the job in that environment. The runner will make that variable (and all other predefined or custom variables) available to the job, and it can display their value in the log output if needed.\n\nBut the variable is **also** used in the `if:` section to determine when the job should run. That in itself is not an environment, which is why we call these CI/CD variables. They can be used to dynamically configure your CI/CD jobs, **as well** as be used as environment variables when the job is running.\n\n## Predefined variables\n\nA number of variables are [predefined](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html) when a GitLab CI/CD pipeline starts. 
A user can immediately access values for things like commit, project, or pipeline details without needing to define the variables themselves.\n\n## Custom CI/CD variables\n\n![Runners can create two kinds of custom CI/CD variables: Type and File.](https://about.gitlab.com/images/blogimages/demystifying-ci-cd-variables/variable_types.jpeg)\n\nWhen creating a CI/CD variable in the settings, GitLab gives the user more configuration options for the variable. Use these extra configuration options for stricter control over more sensitive variables:\n\n**Environment scope:** If a variable only ever needs to be used in one specific environment, set it to only ever be available in that environment. For example, you can set a deploy token to only be available in the `production` environment.\n\n**Protected variables:** Similar to the environment scope, you can set a variable to be available only when the pipeline runs on a protected branch, like your default branch.\n\n**Variable type:** A few applications require configuration to be passed to it in the form of a file. If a user has an application that requires this configuration, just set the type of variable as a \"File\". Configuring the CI/CD variable this way means that when the runner makes the variable available in the environment, it actually writes it out to a temporary file, and stores the path to the file as the value. Next, a user can pass the path to the file to any applications that need it.\n\nAlong with the listed ways of defining and using variables, GitLab introduced a feature that generates pre-filled variables when there's a need to run a pipeline manually. Prefilled variables reduce the chances of running into an error and makes running the pipeline easier.\n\n**Masked variables:** [Masked variables](https://docs.gitlab.com/ee/ci/variables/#mask-a-cicd-variable) are CI variables that have been **hidden in job logs** to prevent the variable’s value from being displayed. 
\n\n**Masked and hidden variables:** Introduced in [GitLab 17.4](https://about.gitlab.com/releases/2024/09/19/gitlab-17-4-released/#hide-cicd-variable-values-in-the-ui), [Masked and hidden](https://docs.gitlab.com/ee/ci/variables/#hide-a-cicd-variable) variables provide the same masking feature from job logs and **keep the value hidden** **in the Settings UI**. We do not recommend using either of these variables for sensitive data (e.g. secrets) as they can be inadvertently exposed. \n\n## Secrets\n\nA secret is a sensitive credential that should be kept confidential. Examples of a secret include:\n\n* Passwords  \n* SSH keys  \n* Access tokens  \n* Any other types of credentials where exposure would be harmful to an organization\n\nGitLab currently enables its users to [use external secrets in CI](https://docs.gitlab.com/ee/ci/secrets/), by leveraging HashiCorp Vault, Google Cloud Secret Manager, and Azure Key Vault to securely manage keys, tokens, and other secrets at the project level. This allows users to separate these secrets from other CI/CD variables for security reasons.\n\n### GitLab Secrets Manager\n\nBesides providing support for external secrets in CI, GitLab is also working on introducing a [native solution to secrets management](https://gitlab.com/groups/gitlab-org/-/epics/10108) to securely and conveniently store secrets within GitLab. This solution will also help customers use the stored secrets in GitLab specific components and environments, and easily manage access at namespace groups and projects level. \n\n## Read more\n* [GitLab native secrets manager to give software supply chain security a boost](https://about.gitlab.com/blog/gitlab-native-secrets-manager-to-give-software-supply-chain-security-a-boost/)\n\n***Disclaimer:** This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. 
Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.*\n",[772,680,1247,771,108,9],"2025-01-13",{"slug":1475,"featured":6,"template":684},"demystifying-ci-cd-variables","content:en-us:blog:demystifying-ci-cd-variables.yml","Demystifying Ci Cd Variables","en-us/blog/demystifying-ci-cd-variables.yml","en-us/blog/demystifying-ci-cd-variables",{"_path":1481,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1482,"content":1488,"config":1493,"_id":1495,"_type":13,"title":1496,"_source":15,"_file":1497,"_stem":1498,"_extension":18},"/en-us/blog/deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration",{"title":1483,"description":1484,"ogTitle":1483,"ogDescription":1484,"noIndex":6,"ogImage":1485,"ogUrl":1486,"ogSiteName":669,"ogType":670,"canonicalUrls":1486,"schema":1487},"Deploy a NodeJS Express app with GitLab's Cloud Run integration","This tutorial will show you how to use NodeJS and Express to deploy an application to Google Cloud. 
This step-by-step guide will have you up and running in less than 10 minutes with the Cloud Run integration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097892/Blog/Hero%20Images/Blog/Hero%20Images/speedlights_speedlights.png_1750097891963.png","https://about.gitlab.com/blog/deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Deploy a NodeJS Express app with GitLab's Cloud Run integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah Matthies\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2025-01-13\",\n      }",{"title":1483,"description":1484,"authors":1489,"heroImage":1485,"date":1473,"body":1491,"category":769,"tags":1492},[1490,831],"Sarah Matthies","Are you looking to deploy your NodeJS app to Google Cloud with the least maintenance possible? This tutorial will show you how to utilize GitLab’s Google Cloud integration to deploy your NodeJS app in less than 10 minutes.\n\nTraditionally, deploying an application often requires assistance from production or DevOps engineers. This integration now empowers developers to handle deployments independently. Whether you’re a solo developer or part of a large team, this setup gives everyone the ability to deploy their applications efficiently.\n\n## Overview\n\n- Create a new project in GitLab\n- Set up your NodeJS application\n- Use the Google Cloud integration to create a Service account\n- Use the Google Cloud integration to configure Cloud Run via Merge Request\n- Enjoy your newly deployed NodeJS app\n- Follow the cleanup guide\n\n## Prerequisites\n- Owner access on a Google Cloud Platform project\n- Working knowledge of JavaScript/TypeScript (not playing favorites here!)\n- Working knowledge of GitLab CI\n- 10 minutes \n\n## Step-by-step guide\n\n### 1. 
Create a new project in GitLab\n\nWe decided to call our project `nodejs–express-cloud-run` for simplicity.\n\n![Create a new project](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097905106.png)\n\n### 2. Upload your NodeJS app or use this example to get started.\n\n[Demo](https://gitlab.com/demos/templates/nodejs-cloud-run)\n\n**Note:** Make sure to include the `cloud-run` [CI template](https://gitlab.com/gitlab-org/incubation-engineering/five-minute-production/library/-/raw/main/gcp/cloud-run.gitlab-ci.yml) within your project.\n\n![cloud-run CI template include](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097905107.png)\n\n### 3. Use the Google Cloud integration to create a Service account.\n\nNavigate to __Operate > Google Cloud > Create Service account__.\n\n![Create Service account screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750097905109.png)\n\nAlso configure the region you would like the Cloud Run instance deployed to.\n\n![Cloud Run instance deployment region selection](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097905113.png)\n\n### 4. 
Go to the Deployments tab and use the Google Cloud integration to configure __Cloud Run via Merge Request__.\n\n![Deployments - Configuration of Cloud Run via Merge Request](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097905115.png)\n\nThis will open a merge request – immediately merge it.\n\n![Merge request for deployment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750097905117.png)\n\n__Note:__ `GCP_PROJECT_ID`, `GCP_REGION`, `GCP_SERVICE_ACCOUNT`, and `GCP_SERVICE_ACCOUNT_KEY` will all be automatically populated from the previous steps.\n\n![Variables listing](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097905118.png)\n\n### 5. Voila! Check your pipeline and you will see you have successfully deployed to Google Cloud Run using GitLab CI.\n\n![Successful deployment to Google Cloud Run](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097905119.png)\n\nClick the Service URL to view your newly deployed Node server.\n\n![View newly deployed Node server](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750097905120.png)\n\nIn addition, you can navigate to __Operate > Environments__ to see a list of deployments for your environments.\n\n![Environments view of deployment list](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image12_aHR0cHM6_1750097905121.png)\n\nBy clicking on the environment called `main`, you’ll be able to view a complete list of deployments specific to that environment.\n\n![Main view of deployments to specific 
environment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750097905122.png)\n\n### 6. Next steps\n\nTo get started with developing your Node application, try adding another endpoint. For instance, in your `index.js` file, you can add a **/bye** endpoint as shown below:\n\n```\napp.get('/bye', (req, res) => {\n  res.send(`Have a great day! See you!`);\n});\n\n```\n\nPush the changes to the repo, and watch the `deploy-to-cloud-run` job deploy the updates. Once it’s complete, go back to the Service URL and navigate to the **/bye** endpoint to see the new functionality in action.\n\n![Bye message](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097905/Blog/Content%20Images/Blog/Content%20Images/image11_aHR0cHM6_1750097905123.png)\n\n## Follow the cleanup guide\n\nTo prevent incurring charges on your Google Cloud account for the resources used in this tutorial, you can either delete the specific resources or delete the entire Google Cloud project. 
For detailed instructions, refer to the [cleanup guide here](https://docs.gitlab.com/ee/tutorials/create_and_deploy_web_service_with_google_cloud_run_component/#clean-up).\n\n> Read more of these helpful [tutorials from GitLab solutions architects](https://about.gitlab.com/blog/tags/solutions-architecture/).\n",[108,1248,230,1000,9],{"slug":1494,"featured":90,"template":684},"deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration","content:en-us:blog:deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration.yml","Deploy A Nodejs Express App With Gitlabs Cloud Run Integration","en-us/blog/deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration.yml","en-us/blog/deploy-a-nodejs-express-app-with-gitlabs-cloud-run-integration",{"_path":1500,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1501,"content":1507,"config":1513,"_id":1515,"_type":13,"title":1516,"_source":15,"_file":1517,"_stem":1518,"_extension":18},"/en-us/blog/deploy-a-server-using-go-with-gitlab-google-cloud",{"title":1502,"description":1503,"ogTitle":1502,"ogDescription":1503,"noIndex":6,"ogImage":1504,"ogUrl":1505,"ogSiteName":669,"ogType":670,"canonicalUrls":1505,"schema":1506},"Deploy a server using Go with GitLab + Google Cloud","This tutorial shows how to use GitLab’s Google Cloud integration to deploy a Golang server in less than 10 minutes, helping developers become more independent and efficient.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098028/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945_fJKX41PJHKCfSOWw4xQxm_1750098028126.png","https://about.gitlab.com/blog/deploy-a-server-using-go-with-gitlab-google-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Deploy a server using Go with GitLab + Google Cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Claire Champernowne\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        
\"datePublished\": \"2025-01-28\"\n      }",{"title":1502,"description":1503,"authors":1508,"heroImage":1504,"date":1510,"body":1511,"category":678,"tags":1512},[1509,831],"Claire Champernowne","2025-01-28","Deploying an application to the cloud often requires assistance from production or DevOps engineers. GitLab's Google Cloud integration empowers developers to handle deployments independently. In this tutorial, you'll learn how to deploy a server to Google Cloud in less than 10 minutes using Go. Whether you’re a solo developer or part of a large team, this setup allows you to deploy applications efficiently.\n\n## You'll learn how to:\n\n1. Create a new project in GitLab\n2. Create a Go server utilizing `main.go`\n3. Use the Google Cloud integration to create a Service account\n4. Use the Google Cloud integration to create Cloud Run via a merge request\n5. Access your newly deployed Go server\n6. Clean up your environment\n\n## Prerequisites\n\n- Owner access on a Google Cloud Platform project\n- Working knowledge of Golang\n- Working knowledge of GitLab CI\n- 10 minutes\n\n## Step-by-step Golang server deployment to Google Cloud\n\n### 1. Create a new blank project in GitLab.\n\nWe decided to call our project `golang-cloud-run` for simplicity.\n\n![Create a new blank project in GitLab](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098035/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750098035249.png)\n\n### 2. 
Create a server utilizing this `main.go` demo.\n\nFind the `main.go` demo [here](https://gitlab.com/demos/applications/golang-cloud-run).\n\n```\n// Sample run-helloworld is a minimal Cloud Run service.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net/http\"\n\t\"os\"\n)\n\nfunc main() {\n\tlog.Print(\"starting server...\")\n\thttp.HandleFunc(\"/\", handler)\n\n\t// Determine port for HTTP service.\n\tport := os.Getenv(\"PORT\")\n\tif port == \"\" {\n\t\tport = \"8080\"\n\t\tlog.Printf(\"defaulting to port %s\", port)\n\t}\n\n\t// Start HTTP server.\n\tlog.Printf(\"listening on port %s\", port)\n\tif err := http.ListenAndServe(\":\"+port, nil); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc handler(w http.ResponseWriter, r *http.Request) {\n\tname := os.Getenv(\"NAME\")\n\tif name == \"\" {\n\t\tname = \"World\"\n\t}\n\tfmt.Fprintf(w, \"Hello %s!\\n\", name)\n}\n```\n\n### 3. Use the Google Cloud integration to create a Service account.\n\nNavigate to **Operate \\> Google Cloud \\> Create Service account**.\n\n![Golang tutorial - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098036/Blog/Content%20Images/Blog/Content%20Images/image11_aHR0cHM6_1750098035250.png)\n\n### 4. Configure the region you would like the Cloud Run instance deployed to.\n\n![Golang tutorial - image10](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098035/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750098035252.png)\n\n### 5. Use the Google Cloud integration to configure Cloud Run via Merge Request.\n\n![Golang tutorial - image4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098035/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098035254.png)\n\n### 6. This will open a merge request. 
Immediately merge the MR.\n\n![Golang tutorial - image6](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098036/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098035257.png)\n\nThis merge request adds a CI/CD deployment job to your pipeline definition. In our case, this is also creating a pipeline definition, as we didn’t have one before.\n\n**Note:** The CI/CD variables `GCP_PROJECT_ID`, `GCP_REGION`, `GCP_SERVICE_ACCOUNT`, `GCP_SERVICE_ACCOUNT_KEY` will all be automatically populated from the previous steps. \n\n![Golang tutorial - image7](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098035/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098035259.png)\n\n### 7. Voila! Check your pipeline and you will see you have successfully deployed to Google Cloud Run utilizing GitLab CI.\n\n![Golang tutorial - image2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098035/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098035261.png)\n\n\u003Cbr>\n\n![Golang tutorial - image3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098035/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098035262.png)\n\n### 8. 
Click the Service URL to view your newly deployed server.\n\nAlternatively, you can navigate to **Operate \\> Environments** to see a list of deployments for your environments.\n\n![Golang tutorial - image5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098035/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098035264.png)\n\nBy clicking on the environment called **main**, you’ll be able to view a complete list of deployments specific to that environment.\n\n![Golang tutorial - image8](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098035/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098035265.png)\n\n## Next steps\n\nTo get started with developing your Go application, try adding another endpoint. For instance, in your `main.go` file, you can add a `/bye` endpoint as shown below (don’t forget to register the new handler function in main!):\n\n```\nfunc main() {\n\tlog.Print(\"starting server...\")\n\n\thttp.HandleFunc(\"/\", handler)\n\thttp.HandleFunc(\"/bye\", byeHandler)\n```\n\n```\nfunc byeHandler(w http.ResponseWriter, r *http.Request) {\n\tname := os.Getenv(\"NAME\")\n\tif name == \"\" {\n\t\tname = \"World\"\n\t}\n\tfmt.Fprintf(w, \"Bye %s!\\n\", name)\n}\n```\n\nYour `main.go` file should now look something like this:\n\n```\n// Sample run-helloworld is a minimal Cloud Run service.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"log\"\n\t\"net/http\"\n\t\"os\"\n)\n\nfunc main() {\n\tlog.Print(\"starting server...\")\n\n\thttp.HandleFunc(\"/\", handler)\n\n\thttp.HandleFunc(\"/bye\", byeHandler)\n\n\t// Determine port for HTTP service.\n\tport := os.Getenv(\"PORT\")\n\tif port == \"\" {\n\t\tport = \"8080\"\n\t\tlog.Printf(\"defaulting to port %s\", port)\n\t}\n\n\t// Start HTTP server.\n\tlog.Printf(\"listening on port %s\", port)\n\tif err := http.ListenAndServe(\":\"+port, nil); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}\n\nfunc handler(w http.ResponseWriter, r *http.Request) {\n\tname := 
os.Getenv(\"NAME\")\n\tif name == \"\" {\n\t\tname = \"World\"\n\t}\n\tfmt.Fprintf(w, \"Hello %s!\\n\", name)\n}\n\nfunc byeHandler(w http.ResponseWriter, r *http.Request) {\n\tname := os.Getenv(\"NAME\")\n\tif name == \"\" {\n\t\tname = \"World\"\n\t}\n\tfmt.Fprintf(w, \"Bye %s!\\n\", name)\n}\n```\n\nPush the changes to the repo, and watch the `deploy-to-cloud-run` job deploy the updates. Once it’s complete, go back to the Service URL and navigate to the `/bye` endpoint to see the new functionality in action.\n\n## Clean up the environment\n\nTo prevent incurring charges on your Google Cloud account for the resources used in this tutorial, you can either delete the specific resources or delete the entire Google Cloud project. For detailed instructions, refer to the [cleanup guide](https://docs.gitlab.com/ee/tutorials/create_and_deploy_web_service_with_google_cloud_run_component/#clean-up).\n\n> Discover more tutorials like this in our [Solutions Architecture](https://about.gitlab.com/blog/tags/solutions-architecture/) area.\n",[835,478,9,1000,678,680],{"slug":1514,"featured":6,"template":684},"deploy-a-server-using-go-with-gitlab-google-cloud","content:en-us:blog:deploy-a-server-using-go-with-gitlab-google-cloud.yml","Deploy A Server Using Go With Gitlab Google Cloud","en-us/blog/deploy-a-server-using-go-with-gitlab-google-cloud.yml","en-us/blog/deploy-a-server-using-go-with-gitlab-google-cloud",{"_path":1520,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1521,"content":1527,"config":1533,"_id":1535,"_type":13,"title":1536,"_source":15,"_file":1537,"_stem":1538,"_extension":18},"/en-us/blog/deploy-remix-with-gitlab-and-cloudflare",{"title":1522,"description":1523,"ogTitle":1522,"ogDescription":1523,"noIndex":6,"ogImage":1524,"ogUrl":1525,"ogSiteName":669,"ogType":670,"canonicalUrls":1525,"schema":1526},"How to publish a Remix app to the edge with GitLab and Cloudflare","Learn how to deploy a Remix app with GitLab and Cloudflare 
Workers.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682517/Blog/Hero%20Images/ryoji-hayasaka-0UZj73PQVew-unsplash.jpg","https://about.gitlab.com/blog/deploy-remix-with-gitlab-and-cloudflare","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to publish a Remix app to the edge with GitLab and Cloudflare\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Janis Altherr\"}],\n        \"datePublished\": \"2022-11-21\"\n      }",{"title":1522,"description":1523,"authors":1528,"heroImage":1524,"date":1530,"body":1531,"category":769,"tags":1532},[1529],"Janis Altherr","2022-11-21","\n\n[Remix](https://remix.run) has had a significant impact in the frontend space. \nAfter eons of backend frameworks providing some sort of frontend options that are more or \nless hated by frontend engineers, followed by frontend frameworks that \nrequired a separate API for the most simple tasks that were a pain to \nmaintain, now suddenly there are frontend frameworks that you can write\nbackend code with.\n\nThis is ideal as an application scales: Remix offers the comfort of writing \nserver-side code, but should the business logic start to exceed the \ncapabilities of Remix, it's easy to move code to an API on a per-request basis. \nThis comes without the need to rewrite the entire application logic, while \nstill retaining server-side-rendering or even pre-rendering capabilities!\n\nThe most performant way to deploy a Remix app is to the edge. This means \nthat small instances of your Remix app are run on a server close to the requesting\nuser. An edge network consists of hundreds of \nservers all over the world, so you can be sure the network latency for the \nuser stays low.\n\nCurrently the most popular edge service capable of running Remix apps are \nCloudflare Workers. 
Not only does Cloudflare offer a generous free tier, \nWorkers are also extremely easy to deploy using GitLab CI/CD. \nHere's how to create a Remix app and then deploy it to Cloudflare Workers.\n\n## Create your Remix app\n\nCreate your Remix app locally using:\n\n```bash\nnpx create-remix@latest \u003Cmy-app-name>\n```\n\nThe CLI will now guide you through a series of questions. Some of those you \nmay answer as you prefer, but answer the following questions as indicated \nbelow:\n\n```text\n? What type of app do you want to create? \n> choose \"Just the Basics\"\n\n? Where do you want to deploy? [...]\n> choose \"Cloudflare Workers\"\n\n? Do you want me to run `npm install`?\n> answer \"Yes\"\n```\n\nInitialize the repository and add the first commit:\n\n```shell\ngit init\ngit add .\ngit commit -m \"initial commit\"\n```\n\n## Create the project in GitLab\n\nYou can't push the code as we have yet to set up the remote repository.\nVisit GitLab and create a new project. When asked, select \"Create blank \nproject.\"\n\nIn the project setup dialog, select `Edge Computing` as the `Deployment \ntarget`. Choose the visibility level however you like as this affects your \nsource code visibility.\n\nMake sure you unset the checkbox next to **Initialize repository with a \nREADME**, otherwise GitLab will begin a new Git history that you will have to reconcile\nwith your existing local one.\n\nOnce the project is set up, follow the instructions on how to add an \nexisting repository – if you've followed the above instructions to the letter \nyou don't have an existing remote yet, so you can run this simplified set of \ncommands:\n\n```shell\ngit remote add origin \u003Cgit-project-url>\ngit push -u origin main\n```\n\n## Configure Cloudflare\n\nNow set up your Cloudflare account to enable deployments from GitLab. 
\n[Login](https://dash.cloudflare.com/login) or [create an account](https://dash.cloudflare.com/sign-up).\n\n### Subscribe to a Workers plan\n\nIf you are creating a Worker for the first time, you will have to sign up for a Workers plan in Cloudflare.\n\nIn the Cloudflare dashboard's left sidebar click the entry **Workers**. Let \nCloudflare guide you through the setup.\n\n![Screenshot: Signing up for Workers in Cloudflare](https://about.gitlab.com/images/blogimages/remix-cloudflare/workers_onboarding.png)\n\nOnce you're back to the Workers overview page, continue below.\n\n### Obtain an API token\n\nTo be able to deploy your Cloudflare Worker from a GitLab pipeline you will need\nan API token. To do so, log in to the Cloudflare dashboard, then open the [API \ntokens page](https://dash.cloudflare.com/profile/api-tokens) (or find it \nmanually via the **user icon** > **My Profile** > **Api Tokens**).\n\nClick **Create Token**. Find **Edit Cloudflare Workers**, click **use \ntemplate**.\n\n![Screenshot: Select API Token template \"Edit Cloudflare Workers\"](https://about.gitlab.com/images/blogimages/remix-cloudflare/api_token_template_selection.png)\n\nUnder **Account Resources** choose *Include* and your account name.\n\nUnder **Zone Resources** choose *Include*, *Specific Zone* and your site's \ndomain. If you haven't set up a domain, you can use a less specific rule \nsuch as *All zones from an account*, although we don't recommend doing this; the API token could potentially be used beyond its scope if you add more zones to your Cloudflare account later.\n\n![Screenshot: API Token Account and Zone Settings](https://about.gitlab.com/images/blogimages/remix-cloudflare/api_token_rules.png)\n\n**Note:** If you have more than one account associated with the API token used \nduring deployment, you will have to update your project's `wrangler.toml` file\nto use the correct account. 
[Read more in the Cloudflare documentation](https://developers.cloudflare.com/workers/wrangler/ci-cd/#account-id).\n\nOnce you're done setting up the API token, click **Continue to summary**, \nand verify your selections. It should look like this:\n\n![Screenshot: API Token Summary View](https://about.gitlab.com/images/blogimages/remix-cloudflare/api_token_summary.png)\n\nIf you're happy, click **Create Token**. Cloudflare will then show you the new \ntoken. \n\nCopy the token and save it in GitLab: Open your project in GitLab, then \nvisit **Settings** > **CI/CD**. Find **Variables** and click **Expand**. Click \n**Add Variable**.\n\nIn the **Key** field, enter `CLOUDFLARE_API_TOKEN`.\nIn the **Value** field, paste the API token from Cloudflare.\n\nNow make sure your token isn't leaked in any logs: Check both **Protect** \nand **Mask**. When done, click **Add Variable**.\n\n![Adding a Variable in GitLab](https://about.gitlab.com/images/blogimages/remix-cloudflare/adding_cf_api_token_as_variable.gif)\n\n## Create the deployment pipeline\n\nThe last step is to create a GitLab pipeline. In your local repository root \nfolder, create a file named `.gitlab-ci.yml` and add the following content:\n\n```yaml\nstages:\n- deploy\n\ndeploy-worker:\n    image: node:lts\n    stage: deploy\n    environment: production\n    before_script:\n      # install dependencies\n      - npm ci\n    script:\n      - npm run deploy\n    rules:\n      # This rule triggers this job after any push to the default branch\n      - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH\n```\n\nTo learn more about how to configure your GitLab pipeline, read our \n[documentation](https://docs.gitlab.com/ee/ci/).\n\nNow add the file to the repository:\n\n```shell\ngit add .gitlab-ci.yml\ngit commit -m \"Add Deployment Pipeline\"\ngit push\n```\n\nThis last push will immediately run this pipeline. To monitor the pipeline \nprogress, open GitLab. In the left sidebar, find **CI/CD** > **Pipelines**. 
\nOnce the pipeline is marked as _passed_, your Remix site is live!\n\nIf you've used the create-app instructions from this blogpost, your app should \nhave been configured to use the app name as the Worker's name. Check the \n`name` setting in your project's `wrangler.toml`.\n\nGo to `https://\u003Cworker-name>.\u003Ccloudflare-account-name>.workers.dev` to see \nyour Remix site in action. Congratulations!\n\nIn your Cloudflare dashboard, you can monitor your new app by selecting \n**Workers** from the left sidebar and then clicking on the Worker with the \nname of your app.\n\nFrom now on, any push to your repository's default branch will automatically \nbe built and deployed to Cloudflare. \n\n### Use a custom Domain for your app\n\nIf you want to use your own domain, set up your website as a resource now.\n\nIn the left sidebar, click on **Websites**. In the main window, find and click\nthe **Add Site** button.\n\n![Screenshot: Add a new site in Cloudflare](https://about.gitlab.com/images/blogimages/remix-cloudflare/add_site.png)\n\nNow enter your site's domain. Select a plan that suits your needs.\nFollow the DNS setup instructions provided on the following pages.\n\nOnce you have set up your domain as a website in Cloudflare, go to the \nwebsite settings. (In the left sidebar click **Websites**, then select your \nsite).\n\n![Screenshot: Find your website on the Cloudflare Dashboard](https://about.gitlab.com/images/blogimages/remix-cloudflare/add_route_step_1.png)\n\nThe left sidebar now shows the detail navigation for the selected website. \nClick **Workers Routes**, then click **Add Route**. \n\n![Screenshot: Add a new route to your Site](https://about.gitlab.com/images/blogimages/remix-cloudflare/add_route_step_2.png)\n\nIn the Add Route Modal you can add a dynamic pattern to let Cloudflare know which requests to route to your Worker. \nFor Remix apps that's usually all of them, so if your site's domain is \n`my-site.com`, use `my-site.com/*`. 
You can also redirect all subdomain \nrequests to the worker by using `*.my-site.com/*` (this is useful if you \nwould like to also serve your site at `www.my-site.com`).\n\nUnder **Service**, select your newly created Worker.\nUnder **Environment**, select **production**.\n\nClick \"Save\".\n\n![Screenshot: Add route modal](https://about.gitlab.com/images/blogimages/remix-cloudflare/add_route_step_3.png)\n\nOnce the DNS servers are updated, your Remix site should be accessible with \nyour custom domain.\n\n\n## Read More\n\n- [Learn more about Cloudflare Workers](https://developers.cloudflare.com/workers/wrangler/configuration/)\n- [Check out the Remix docs](https://remix.run/docs/en/v1)\n- [Learn about GitLab pipelines](https://docs.gitlab.com/ee/ci/)\n",[9,230,678],{"slug":1534,"featured":6,"template":684},"deploy-remix-with-gitlab-and-cloudflare","content:en-us:blog:deploy-remix-with-gitlab-and-cloudflare.yml","Deploy Remix With Gitlab And Cloudflare","en-us/blog/deploy-remix-with-gitlab-and-cloudflare.yml","en-us/blog/deploy-remix-with-gitlab-and-cloudflare",{"_path":1540,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1541,"content":1547,"config":1553,"_id":1555,"_type":13,"title":1556,"_source":15,"_file":1557,"_stem":1558,"_extension":18},"/en-us/blog/deploy-shopify-themes-with-gitlab",{"title":1542,"description":1543,"ogTitle":1542,"ogDescription":1543,"noIndex":6,"ogImage":1544,"ogUrl":1545,"ogSiteName":669,"ogType":670,"canonicalUrls":1545,"schema":1546},"How to deploy Shopify themes with GitLab","Streamline your development workflow by configuring auto deployments for Shopify themes with GitLab pipelines.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683288/Blog/Hero%20Images/storefront.jpg","https://about.gitlab.com/blog/deploy-shopify-themes-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to deploy Shopify themes with 
GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Austin Regnery\"}],\n        \"datePublished\": \"2022-04-14\"\n      }",{"title":1542,"description":1543,"authors":1548,"heroImage":1544,"date":1550,"body":1551,"category":769,"tags":1552},[1549],"Austin Regnery","2022-04-14","\n[1.75 million sellers are using Shopify's eCommerce platform](https://backlinko.com/shopify-stores), and every one of these online stores has a codebase that lives somewhere. You may have encountered some challenges while scaling your development efforts at your organization while working within Shopify. Setting up a process for repeatable deployments with GitLab can keep everything streamlined and safe. No one wants something going live in production before it's ready.\n\nHere's a simple development flow you are going to be able to replicate using GitLab CI/CD pipelines for Shopify theme deployments.\n\n1. Develop locally on a feature branch until you are happy with your local changes\n2. Merge your `feature` branch into your `main` branch → This will update the staging theme in Shopify\n3. When everything is ready to go live, create a new tag and push it to GitLab → The live theme will be updated automatically 🎉\n\nThis tutorial assumes you have set up a repository in a GitLab project.\n\n## 1. Add your variables\n\nFor security purposes, you don't want to store your credentials for your Shopify site in your configuration file. 
You can use variables in GitLab to handle that.\n\nUse the [ThemeKit CLI](https://shopify.github.io/themekit/configuration/) to retrieve all the available theme IDs from your Shopify store by entering this into your command line:\n\n```curl\ntheme get --list -p=[shopify-api-access-token] -s=[your-store.myshopify.com]\n```\n\n> **Help:** [Generate API credentials in Shopify](https://shopify.dev/apps/auth/basic-http#step-2-generate-api-credentials)\n\nOpen your project in GitLab, navigate to `/settings/ci_cd`, and open the variables section.\n\nAdd four unique variables with their corresponding keys and values\n\n| Key | Value |\n| --- | ----- |\n| `STAGING_THEME_ID`     | [staging-theme-id-number]    |\n| `PRODUCTION_THEME_ID`  | [production-theme-id-number] |\n| `SHOP_WEB_ADDRESS`     | [your-store.myshopify.com]   |\n| `SHOPIFY_API_ACCESS_TOKEN` | [shopify-api-access-token]  |\n\n> **Note:** A protected variable will not show in the CI logs, which adds an extra layer of security. If you choose to protect your variables, you need to make sure that your `main` branch and the tag `v*` wildcard are protected as well.\n\n## 2. Add a `config.yml` to your project repository\n\nThis file may already exist, but `config.yml` needs to have the following to properly map the variables from step 1 with your Shopify theme for deployments.\n\n```yml\nstaging:\n  password: ${SHOPIFY_API_ACCESS_TOKEN}\n  theme_id: ${STAGING_THEME_ID}\n  store: ${SHOP_WEB_ADDRESS}\n\nproduction:\n  password: ${SHOPIFY_API_ACCESS_TOKEN}\n  theme_id: ${PRODUCTION_THEME_ID}\n  store: ${SHOP_WEB_ADDRESS}\n```\n\n## 3. Add a `.gitlab-ci.yml` file to your project\n\nNow set up your pipeline to run on specific triggers. Go to your local theme folder, create a `.gitlab-ci.yml` file at the project root, and add the snippet below. 
This snippet is the configuration for the CI pipeline.\n\n```yml\nimage: python:2\n\nstages:\n  - staging\n  - production\n\nstaging:\n  image: python:2\n  stage: staging\n  script:\n    - curl -s https://shopify.github.io/themekit/scripts/install.py | python\n    - theme deploy -e=staging\n  only:\n    variables:\n      - $CI_DEFAULT_BRANCH == $CI_COMMIT_BRANCH\n\nproduction:\n  image: python:2\n  stage: production\n  script:\n    - curl -s https://shopify.github.io/themekit/scripts/install.py | python\n    - theme deploy -e=production --allow-live\n  only:\n    - tags\n\n```\n\nIt has two stages: **staging** and **production**. Each will install the ThemeKit CLI first and then deploy the repository to the corresponding theme.\n\n## 4. Now push some changes to deploy\n\nAny code pushed to the `main` branch will set up a deployment to the staging theme in Shopify\n\n```\ngit commit -am \"commit message\"\ngit push\n```\n\nWhen you are ready to push changes to production, add a tag and push it.\n\n```\ngit tag -a \"v1.0.0\" -m \"First release to production from GitLab\"\ngit push --tags\n```\n\n> **Alternative option:** [Create a tag from GitLab](https://docs.gitlab.com/ee/user/project/releases/#create-a-release-in-the-tags-page)\n\nThat's it! You're now using CI to automate deployments from GitLab to your Shopify themes.\n\nFurther refine this workflow by [incorporating merge requests approvals](/blog/feature-highlight-merge-request-approvals/), [setting up merge trains](/blog/merge-trains-explained/), or learning more about [GitLab CI/CD pipelines](/blog/guide-to-ci-cd-pipelines/).\n\nBig thanks to Alex Gogl for their [blog](https://medium.com/@gogl.alex/how-to-deploy-shopify-themes-automatically-1ac17ee1229c). 
This `.gitlab-ci.yml` will appear as an available template when [merge request !52279](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/52279) is merged.\n\nCover image by [Artem Gavrysh](https://unsplash.com/@tmwd?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/)\n",[9],{"slug":1554,"featured":6,"template":684},"deploy-shopify-themes-with-gitlab","content:en-us:blog:deploy-shopify-themes-with-gitlab.yml","Deploy Shopify Themes With Gitlab","en-us/blog/deploy-shopify-themes-with-gitlab.yml","en-us/blog/deploy-shopify-themes-with-gitlab",{"_path":1560,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1561,"content":1567,"config":1575,"_id":1577,"_type":13,"title":1578,"_source":15,"_file":1579,"_stem":1580,"_extension":18},"/en-us/blog/deploying-application-eks",{"title":1562,"description":1563,"ogTitle":1562,"ogDescription":1563,"noIndex":6,"ogImage":1564,"ogUrl":1565,"ogSiteName":669,"ogType":670,"canonicalUrls":1565,"schema":1566},"Deploying apps to GitLab-managed Amazon EKS with Auto DevOps","A Kubernetes tutorial: Use GitLab AutoDevOps to deploy your applications to Amazon EKS.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666959/Blog/Hero%20Images/gitlab-aws-cover.png","https://about.gitlab.com/blog/deploying-application-eks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to deploy your application to a GitLab-managed Amazon EKS cluster with Auto DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2020-05-05\",\n      }",{"title":1568,"description":1563,"authors":1569,"heroImage":1564,"date":1571,"body":1572,"category":769,"tags":1573},"How to deploy your application to a GitLab-managed Amazon EKS cluster with Auto DevOps",[1570],"Abubakar Siddiq Ango","2020-05-05","\n\nDeploying an application onto Amazon EKS doesn't have 
to be painful. In fact, GitLab's [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) function makes it easy for developers to deploy applications from GitLab onto any cloud. In this tutorial, I break down how to deploy a simple ruby Hello, World application onto our GitLab-managed Amazon EKS cluster, which we created earlier ([read part one to learn how](/blog/gitlab-eks-integration-how-to/)). For the tutorial, I integrated GitLab with Amazon EKS in a GitLab group I created purposely for this, so all the projects created in the group can use the integration without any extra configuration. \n\nIn the previous blog post, we saw how seamless it is to create a Kubernetes cluster on Amazon EKS in GitLab with the right permissions. Developer productivity is greatly improved because there is no more need to manually set-up clusters and the same cluster can be used for multiple projects when Amazon EKS is integrated with GitLab at the group and instance levels, thus making onboarding new projects a breeze.\n\nIn this tutorial, we will be deploying a simple ruby Hello World application to our GitLab-managed Amazon EKS cluster. For the purpose of this tutorial, I have integrated GitLab with Amazon EKS at the group level on a group I own on GitLab.com, this way all projects created in the group can make use of the integration with no extra configuration.\n\n## A few things to note about AutoDevOps\n\nAuto DevOps provides pre-defined [CI/CD configuration](/topics/ci-cd/) which allows you to automatically detect, build, test, deploy, and monitor your applications. 
All you need to do is push your code and GitLab does the rest, saving you a lot of effort to set up the workflow and processes required to build, deploy, and monitor your project.\n\nYou'll need to execute the following steps for GitLab AutoDevOps to work seamlessly:\n\n* A [base domain](https://docs.gitlab.com/ee/user/project/clusters/#base-domain) name needs to be provided on GitLab’s integration page for Amazon EKS.\n\n ![AutoDevOps Base Domain](https://about.gitlab.com/images/blogimages/deploying-application-eks/base-domain.png){: .shadow.medium.center}\n Setting the base domain for Auto DevOps\n{: .note.text-center}\n\n* GitLab creates subdomains for every project that is deployed using the project slug, project ID and the base domain name. For example, the link `https://abubakar-te-demos-minimal-ruby-app-2.eksdemo-project.gitlabtechevangelism.net/` is automatically created where `abubakar-te-demos-minimal-ruby-app` is the project slug and the project ID of two, both prepended to the base domain name, `eksdemo-project.gitlabtechevangelism.net`.\n\n* Create a wildcard A-record for the base domain and point it to the Ingress endpoint created during the integration in the public-hosted zone of your domain name on Route53. Selecting the ALIAS option in Route 53 will present a list of resources you have already created. You will see your Ingress endpoint in the list of elastic load balancers. 
Alternatively, you can copy and paste from GitLab’s integration page.\n\n ![Route53 Alias for base Domain](https://about.gitlab.com/images/blogimages/deploying-application-eks/route53.png){: .shadow.small.center}\n Set-up alias for base domain using the generated Ingress endpoint.\n{: .note.text-center}\n\n* Install the pre-defined Kubernetes certificate management controller, certmanager on the GitLab - EKS integration, to ensure every URL created for your application has a Let’s Encrypt certificate.\n\n## Now, lets deploy our application\n\n### How to set-up the project\n\nIt takes five simple steps to set-up the project for your application.\n\nFirst, create a GitLab project from an existing sample, in this case, GitLab’s Auto DevOps example called Minimal Ruby App. There is nothing special about this application, it's just a ruby application you can use to try out the integration. If you integrated Amazon EKS at the group level on GitLab, you can just go ahead to create the project in the group. At the project level, you will have to perform the integration after creating the project.\n\nNext, copy the URL from the “Clone with HTTPS” field of the sample project, Minimal Ruby App:\n\n  ![Cloning over HTTPS](https://about.gitlab.com/images/blogimages/deploying-application-eks/https-clone.png){: .shadow.small.center}\n  The clone sample project.\n{: .note.text-center}\n\nThird, click the \"import project\" tab on the new project page, then click on the \"repo by URL\" button. 
Paste the URL you copied earlier in the text box for \"Git repository URL\" and click on \"create project\"\n\n  ![Importing Project](https://about.gitlab.com/images/blogimages/deploying-application-eks/import-project.png){: .shadow.medium.center}\n  The progress of the sample project import.\n  {: .note.text-center}\n\nNext, the project will be imported and all the files from the sample will be available in your new project.\n\n  ![Project import progress](https://about.gitlab.com/images/blogimages/deploying-application-eks/import-progress.png){: .shadow.medium.center}\n  The project import is completed.\n  {: .note.text-center}\n\nFinally, go to project settings > CI/CD > Auto DevOps and enable “Default to Auto DevOps pipeline”\n\n  ![Project Settings](https://about.gitlab.com/images/blogimages/deploying-application-eks/project-settings.png){: .shadow.medium.center}\n  Enable the Auto DevOps pipeline.\n  {: .note.text-center}\n\n### How to deploy your application\n\n* Now a pipeline is created and the project built, tested and deployed to production using the [default AutoDevOps CI files](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml).\n\n  ![Project Pipeline](https://about.gitlab.com/images/blogimages/deploying-application-eks/pipeline.png)\n  The first Auto DevOps pipeline.\n  {: .note.text-center}\n\n* Look inside the pipeline output to see the \"deployment to production\" line. 
This is where the URL is to access your application.\n\n  ![Deployment to production](https://about.gitlab.com/images/blogimages/deploying-application-eks/production-deploy.png)\n  Next, link to the deployed application.\n  {: .note.text-center}\n\n* In the image above, you can see the application has been deployed and can be accessed at `https://abubakar-te-demos-minimal-ruby-app-1.eksdemo-project.gitlabtechevangelism.net/`\n\nAnd it should show a “Hello World” message:\n\n  ![Deployed Application](https://about.gitlab.com/images/blogimages/deploying-application-eks/hello-world.png){: .shadow.medium.center}\n  The deployed application with \"Hello World\" message.\n  {: .note.text-center}\n\n## How to make changes to the deployed application\n\nIf any new changes are pushed, a different set of jobs is run to build, test, and review the changes before they can be merged to the master branch. I changed the \"Hello World\" text in the previous deployment to an HTML text in a new Git branch called `amazon-eks-html` using the GitLab WebIDE tool, and committed the changes.\n\n  ![Make changes to application](https://about.gitlab.com/images/blogimages/deploying-application-eks/new-commit.png)\n  Making new changes to application.\n  {: .note.text-center}\n\nWhile committing the changes, I selected \"start a new merge request (MR),\" which took me to the MR page where I added more information about the changes in a new MR.\n\n  ![New Merge request](https://about.gitlab.com/images/blogimages/deploying-application-eks/new-mr.png)\n  The MR to deploy the new application.\n  {: .note.text-center}\n\nIn the image above, you can see a pipeline is created to build, test and deploy using [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) to allow you review the changes before deploying to production.\n\n  ![New MR pipeline test](https://about.gitlab.com/images/blogimages/deploying-application-eks/new-mr-test.png)\n  MR with Review Apps\n  {: .note.text-center}\n\nOnce the 
review is finished, the application is deployed to a dedicated namespace in the Amazon EKS cluster for you to review before deploying to production. A URL for the [Review App](https://docs.gitlab.com/ee/ci/review_apps/) is provided, as shown in the image below.\n\n  ![Review Applications](https://about.gitlab.com/images/blogimages/deploying-application-eks/review-apps.png){: .shadow.medium.center}\n  The application in the Review App.\n  {: .note.text-center}\n\nThe `stop_review` job cleans up the Review App once the review is done. If MR approvals are required, the MR must be approved before being merged into the master branch. Once merged to master, the project is built, tested, and deployed to production.\n\n  ![Merged Change MR](https://about.gitlab.com/images/blogimages/deploying-application-eks/merged-mr.png){: .shadow.medium.center}\n  Deploying changes to production.\n  {: .note.text-center}\n\nThe image above shows that a second pipeline ran after the MR was merged. Once completed, a button is provided to `view app` and also see memory consumption as the app runs. The `view app`\"` button will open the application on the project's subdomain.\n\n  ![Updated application](https://about.gitlab.com/images/blogimages/deploying-application-eks/updated-site.png)\n  Changes deployed to production.\n  {: .note.text-center}\n\n## Deploy to Amazon EKS with Auto DevOps\n\nThe Auto DevOps function at GitLab makes deploying an application to the Amazon EKS cluster quite simple. Really, all you need to do is push code, and Auto DevOps automatically detects the programming language and uses the necessary [buildpack](https://buildpacks.io/) to test, build, and deploy your application. 
GitLab also takes making changes to your application a step further using Review Apps, which deploys your app to a temporary environment for you to review the app before deploying to production.\n\nIf you have questions about how to integrate GitLab with Amazon EKS to create a Kubernetes cluster, revisit the [first blog post](/blog/gitlab-eks-integration-how-to/).\n",[1225,680,1574,9],"demo",{"slug":1576,"featured":6,"template":684},"deploying-application-eks","content:en-us:blog:deploying-application-eks.yml","Deploying Application Eks","en-us/blog/deploying-application-eks.yml","en-us/blog/deploying-application-eks",{"_path":1582,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1583,"content":1589,"config":1594,"_id":1596,"_type":13,"title":1597,"_source":15,"_file":1598,"_stem":1599,"_extension":18},"/en-us/blog/develop-c-unit-testing-with-catch2-junit-and-gitlab-ci",{"title":1584,"description":1585,"ogTitle":1584,"ogDescription":1585,"noIndex":6,"ogImage":1586,"ogUrl":1587,"ogSiteName":669,"ogType":670,"canonicalUrls":1587,"schema":1588},"Develop C++ unit testing with Catch2, JUnit, and GitLab CI","Learn how to set up, write, and automate C++ unit tests using Catch2 with GitLab CI/CD. 
See examples from a working air quality app project and AI-powered help from GitLab Duo.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659684/Blog/Hero%20Images/AdobeStock_479904468__1_.jpg","https://about.gitlab.com/blog/develop-c-unit-testing-with-catch2-junit-and-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Develop C++ unit testing with Catch2, JUnit, and GitLab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"}],\n        \"datePublished\": \"2024-07-02\",\n      }",{"title":1584,"description":1585,"authors":1590,"heroImage":1586,"date":1591,"body":1592,"category":1103,"tags":1593},[1201],"2024-07-02","Continuous integration (CI) and automated testing are important DevSecOps workflows for software developers to detect bugs early, improve code quality, and streamline their development processes. \n\nIn this tutorial, you'll learn how to set up unit testing on a `C++` project with [Catch2](https://github.com/catchorg/Catch2) and GitLab CI for continuous integration. You'll also see how the AI-powered features of [GitLab Duo](https://about.gitlab.com/gitlab-duo/) can help. We’ll use [an air quality monitoring application](https://gitlab.com/gitlab-da/use-cases/ai/ai-applications/air-quality-app) as our reference project.\n\n## Prerequisites\n\n- Ensure you have [CMake](https://cmake.org/ \"CMake\") installed on your machine. \n- A modern `C++` compiler such as GCC or Clang is required. \n- An API key from [OpenWeatherMap](https://openweathermap.org/api) - requires signing up for a free account (1,000/calls per day are included for free). 
\n\n## Set up the application for testing\n\nThe reference project we’ll be using for demonstrating testing in this blog post is an air quality monitoring application that fetches air quality data from the OpenWeatherMap API based on the U.S zip codes only provided by the user.\n\nHere are the steps to set up the application for testing:\n\n1. Fork the [the reference project](https://gitlab.com/gitlab-da/use-cases/ai/ai-applications/air-quality-app) and clone the fork to your local environment.\n\n2. Generate an API key from  [OpenWeatherMap](https://openweathermap.org/) and export it into the environment. \n\n```shell\nexport API_KEY=\"YOURAPIKEY_HERE\"\n```\n\n3. Alternatively, you can add the key into your `.env` configuration, and source it with `source ~/.env`, or use a different mechanism to populate the environment.\n\n4. Compile and build the project code with the following instructions:\n\n```cpp\ncmake -S . -B build\ncmake --build build\n```\n\n5. Run the application using the executable and passing in a U.S zip code (90210 as an example): \n\n```cpp\n./build/air_quality_app 90210\n```\n\nHere’s an example of what running the program will look like in your terminal:  \n\n```bash\n❯ ./build/air_quality_app 90210\nAir Quality Index (AQI) for Zip Code 90210: 2 (Fair)\n```\n\n## Install Catch2\n\nNow that the application is set up and working, let's start working on adding testing using Catch2. Catch2 is a modern, `C++-native` testing framework for unit tests. \n\nYou can also ask GitLab Duo Chat within your IDE for an introduction to getting started with Catch2 as a `C++` testing framework. GitLab Duo Chat will provide getting started steps as well as an example test: \n\n![GitLab Duo Chat starting steps and example test](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676997/Blog/Content%20Images/1.duo-chat-installing-catch2.png)\n\n1. 
First navigate to your project’s root directory and create an externals folder using the `mkdir` command.\n\n```shell\nmkdir externals\n```\n\n2. There are several ways to install Catch2 via [its CMake integration](https://github.com/catchorg/Catch2/blob/devel/docs/cmake-integration.md#top). We will use the option of installing it as a submodule and including it as part of the source code to simplify dependency management. To add Catch2 to your project in the `externals` folder: \n\n```shell\ngit submodule add https://github.com/catchorg/Catch2.git externals/Catch2\ngit submodule update --init --recursive\n```\n\n3. Update `CMakeLists.txt` to include Catch2’s directory as a subdirectory. This allows CMake to find and build Catch2 as a part of our project. \n\n```cpp\n# Assuming Catch2 in externals/Catch2\nadd_subdirectory(externals/Catch2)\n```\n\n4. Create a `tests.cpp` file in your project root to write our tests to: \n\n```shell\ntouch tests.cpp\n```\n\n5. Update `CMakeLists.txt` Link against Catch2. When defining your test executable in CMake, link it against Catch2:\n\n```cpp\n# Add tests executable and link it to Catch2\nadd_executable(tests test.cpp)\ntarget_link_libraries(tests PRIVATE Catch2::Catch2WithMain)\n```\n\n## Structure the project for testing\n\nBefore we start writing our tests, we should separate our application logic into separate files in order to maintain and test our code more efficiently. At the end of this section we should have:\n\n```\nmain.cpp containing only the main() function and application setup\nincludes/functions.cpp containing all functional code such as API calls and data processing: \nincludes/functions.h containing the declarations for the functions defined in functions.cpp.  It needs to define the preprocessor macro guards, and include all necessary headers. \n```\n\nApply the following changes to the files: \n\n1. 
`main.cpp`\n\n```cpp\n#include \u003Ciostream>\n#include \"functions.h\"\n\nint main(int argc, char* argv[]) {\n   if (argc \u003C 2) {\n       std::cerr \u003C\u003C \"Usage: \" \u003C\u003C argv[0] \u003C\u003C \" \u003CZip Code>\" \u003C\u003C std::endl;\n       return 1;\n   }\n\n   std::string zipCode = argv[1];\n   std::string apiKey = getApiKey();\n   if (apiKey.empty()) {\n       std::cerr \u003C\u003C \"API key not found.\" \u003C\u003C std::endl;\n       return 1;\n   }\n\n   auto [lat, lon] = geocodeZipcode(zipCode, apiKey);\n   if (lat == 0 && lon == 0) {\n       std::cerr \u003C\u003C \"Failed to geocode zipcode.\" \u003C\u003C std::endl;\n       return 1;\n   }\n\n   std::string response = fetchAirQuality(lat, lon, apiKey);\n   std::string airQualityInfo = parseAirQualityResponse(response);\n\n   std::cout \u003C\u003C \"Air Quality Index for Zip Code \" \u003C\u003C zipCode \u003C\u003C \": \" \u003C\u003C airQualityInfo \u003C\u003C std::endl;\n\n   return 0;\n}\n```\n\n2. Create a `functions.h:` in the `includes` folder: \n\n```cpp\n#ifndef FUNCTIONS_H\n#define FUNCTIONS_H\n\n#include \u003Cstring>\n#include \u003Cutility>\n#include \u003Cvector>\n\n// Declare the function prototype\nstd::string httpRequest(const std::string& url);\nbool loadEnvFile(const std::string& filename);\nstd::string getApiKey();\nstd::pair\u003Cdouble, double> geocodeZipcode(const std::string& zipCode, const std::string& apiKey);\nstd::string fetchAirQuality(double lat, double lon, const std::string& apiKey);\nstd::string parseAirQualityResponse(const std::string& response);\n\n#endif\n```\n\n3. 
Create a `functions.cpp` in the `includes` folder: \n\n```cpp\n#include \"functions.h\"\n#include \u003Cfstream>\n#include \u003Celnormous/HTTPRequest.hpp>\n#include \u003Cnlohmann/json.hpp>\n#include \u003Ciostream>\n#include \u003Ccstdlib> // For getenv\n\nstd::string httpRequest(const std::string& url) {\n   try {\n       http::Request request{url};\n       const auto response = request.send(\"GET\");\n       return std::string{response.body.begin(), response.body.end()};\n   } catch (const std::exception& e) {\n       std::cerr \u003C\u003C \"Request failed, error: \" \u003C\u003C e.what() \u003C\u003C std::endl;\n       return \"\";\n   }\n}\nstd::string getApiKey() {\n   const char* envApiKey = std::getenv(\"API_KEY\");\n   if (envApiKey) {\n       return std::string(envApiKey);\n   }\n   // If the environment variable is not set, fallback to the config file\n   std::ifstream configFile(\"config.txt\");\n   std::string line;\n   if (getline(configFile, line)) {\n       return line.substr(line.find('=') + 1);\n   }\n   return \"\";\n}\n\nstd::pair\u003Cdouble, double> geocodeZipcode(const std::string& zipCode, const std::string& apiKey) {\n   std::string url = \"http://api.openweathermap.org/geo/1.0/zip?zip=\" + zipCode + \",US&appid=\" + apiKey;\n   std::string response = httpRequest(url);\n   try {\n       auto json = nlohmann::json::parse(response);\n       if (json.contains(\"lat\") && json.contains(\"lon\")) {\n           double lat = json[\"lat\"];\n           double lon = json[\"lon\"];\n           return {lat, lon};\n       } else {\n           std::cerr \u003C\u003C \"Geocode response missing 'lat' or 'lon' fields: \" \u003C\u003C response \u003C\u003C std::endl;\n       }\n   } catch (const nlohmann::json::parse_error& e) {\n       std::cerr \u003C\u003C \"Failed to parse geocode response: \" \u003C\u003C e.what() \u003C\u003C \" - Response: \" \u003C\u003C response \u003C\u003C std::endl;\n   }\n   return {0, 0};\n}\n\nstd::string 
fetchAirQuality(double lat, double lon, const std::string& apiKey) {\n   std::string url = \"http://api.openweathermap.org/data/2.5/air_pollution?lat=\" + std::to_string(lat) + \"&lon=\" + std::to_string(lon) + \"&appid=\" + apiKey;\n   std::string response = httpRequest(url);\n   return response;\n}\n\nstd::string parseAirQualityResponse(const std::string& response) {\n   try {\n       auto json = nlohmann::json::parse(response);\n       if (json.contains(\"list\") && !json[\"list\"].empty() && json[\"list\"][0].contains(\"main\")) {\n           int aqi = json[\"list\"][0][\"main\"][\"aqi\"];\n           std::string aqiCategory;\n           switch (aqi) {\n               case 1:\n                   aqiCategory = \"Good\";\n                   break;\n               case 2:\n                   aqiCategory = \"Fair\";\n                   break;\n               case 3:\n                   aqiCategory = \"Moderate\";\n                   break;\n               case 4:\n                   aqiCategory = \"Poor\";\n                   break;\n               case 5:\n                   aqiCategory = \"Very Poor\";\n                   break;\n               default:\n                   aqiCategory = \"Unknown\";\n                   break;\n           }\n           return std::to_string(aqi) + \" (\" + aqiCategory + \")\";\n       } else {\n           return \"No AQI data available\";\n       }\n   } catch (const std::exception& e) {\n       std::cerr \u003C\u003C \"Failed to parse JSON response: \" \u003C\u003C e.what() \u003C\u003C std::endl;\n       return \"Error parsing AQI data\";\n   }\n}\n\n```\n\n4. 
Now that we have separated the source files, we also need to update our `CMakeLists.txt` to include `functions.cpp` in the `add_executable()` calls:\n\n```cpp\ncmake_minimum_required(VERSION 3.14)\nproject(air-quality-app)\n\n# Set the C++ standard for the project\nset(CMAKE_CXX_STANDARD 17)\nset(CMAKE_CXX_STANDARD_REQUIRED ON)\nset(CMAKE_CXX_EXTENSIONS OFF)\n\ninclude_directories(${CMAKE_SOURCE_DIR}/includes)\n\n# Define the main program executable\nadd_executable(air_quality_app main.cpp includes/functions.cpp)\n\n# Assuming Catch2 in externals/Catch2\nadd_subdirectory(externals/Catch2)\n\n# Add tests executable and link it to Catch2\nadd_executable(tests tests.cpp includes/functions.cpp)\ntarget_link_libraries(tests PRIVATE Catch2::Catch2WithMain)\n```\n\nTo verify that the changes are working, regenerate the CMake configuration and rebuild the source code with the following commands. The build will take longer now that we're compiling Catch2 files. \n\n```shell\nrm -rf build # delete existing build files\ncmake -S . -B build \ncmake --build build  \n```\n\nYou should be able to run the application without any errors.\n\n```shell\n./build/air_quality_app 90210\n```\n\n## Write tests in Catch2  \n\nCatch2 tests are made up of [macros and assertions](https://github.com/catchorg/Catch2/blob/devel/docs/assertions.md). Macros in Catch2 are used to define test cases and sections within those test cases. They help in organizing and structuring the tests. Assertions are used to verify that the code behaves as expected. If an assertion fails, the test case will fail, and Catch2 will report the failure.\n\nLet’s review a basic test scenario for an addition function to understand. Note: This test is read-only, as an example. 
\n\n```cpp\nint add(int a, int b) {\n   return a + b;\n}\n\nTEST_CASE(\"Addition works correctly\", \"[math]\") {\n   REQUIRE(add(1, 1) == 2);  // Test passes if 1+1 equals 2\n   REQUIRE(add(2, 2) != 5);  // Test passes if 2+2 does not equal 5\n}\n```\n\n- Each test begins with the `TEST_CASE` macro, which defines a test case container. The macro accepts two parameters: a string describing the test case and optionally a second string for tagging the test for easy filtering.\n- Tests are also composed of assertions, which are statements that check if conditions are true. Catch2 provides macros for assertion that include `REQUIRE`, which aborts the current test if the assertion fails, and `CHECK`, which logs the failure but continues with the current test.\n\n### Prepare to write tests with Catch2\n\nTo test the API retrieval functions in our air quality application, we’ll be using mock API requests. Mock API testing is a technique used to test how your application will interact with an external API without making any real API calls. Instead of sending requests to a live API server, we can simulate the responses using predefined data. Mock requests allow us to control the input data and specify exactly what the API would return for different requests, making sure that our tests aren't affected by changes in the real API responses or unexpected data. This also makes it easier for us to simulate and catch different failures.\n\nIn our `tests.cpp` file, let’s define the following function to run mock API requests.   
\n\n```cpp\n#include \"includes/functions.h\"\n#include \u003Ccatch2/catch_test_macros.hpp>\n#include \u003Cstring>\n\n// Mock HTTP request function that simulates API responses\nstd::string mockHttpRequest(const std::string& url) {\n   if (url.find(\"geo\") != std::string::npos) {\n       // Mock response for geocoding\n       return R\"({\"lat\": 40.7128, \"lon\": -74.0060})\"; \n   } else if (url.find(\"air_pollution\") != std::string::npos) {\n       // Mock response for air quality\n       return R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\";\n   }\n   // Default mock response for unmatched endpoints\n   return \"{}\";\n}\n// Overriding the actual httpRequest function with the mockHttpRequest for testing\nstd::string httpRequest(const std::string& url) {\n   return mockHttpRequest(url);\n}\n```\n\n- This function simulates HTTP requests and returns predefined JSON responses based on the URL given as input. \n- It also checks the URL to determine which type of data is being requested based on the functionality of the application (geocoding, air pollution, or forecast data). If the URL doesn’t match the expected endpoint, it returns an empty JSON object. \n\nDon't compile the code just yet, as you'll see a linker error. Since we're overriding the original `httpRequest` function with our mock function for testing, we'll need a preprocessor macro to enable conditional compilation - indicating which `httpRequest` function should run when we're compiling tests. \n\n#### Define a preprocessor macro for testing  \n\nBecause we’ve overridden `httpRequest` in our `tests.cpp`, we need to exclude that code from `functions.cpp` when we’re testing. When building tests, we may need to ensure that certain parts of our code behave differently or are excluded. 
We can do this by defining a preprocessor macro `TESTING` which enables conditional compilation, allowing us to selectively include or exclude code when compiling the test target:  \n\nWe define the `TESTING` macro in our `CMakeLists.txt` at the end:  \n\n```cpp\n# Define TESTING macro for this target\ntarget_compile_definitions(tests PRIVATE TESTING)\n```\n\nAnd add the macro wrapper in  `functions.cpp` around the original `httpRequest` function:  \n\n```cpp\n#ifndef TESTING  // Exclude this part when TESTING is defined\nstd::string httpRequest(const std::string& url) {\n   try {\n       http::Request request{url};\n       const auto response = request.send(\"GET\");\n       return std::string{response.body.begin(), response.body.end()};\n   } catch (const std::exception& e) {\n       std::cerr \u003C\u003C \"Request failed, error: \" \u003C\u003C e.what() \u003C\u003C std::endl;\n       return \"\";\n   }\n}\n#endif\n```\n\nRegenerate the CMake configuration and rebuild the source code to verify it works.\n\n```shell\ncmake --build build  \n```\n\n### Write the first tests \n\nNow, let’s write some tests for our air quality application.\n\n#### Test 1: Verify API key retrieval \n\nThis test ensures that the `getApiKey` function retrieves the API key correctly from the environment variable or the configuration file. Add the test case to our `tests.cpp`:\n\n```cpp\n\nTEST_CASE(\"API Key Retrieval\", \"[api]\") {\n   // Set the API_KEY environment variable for testing\n   setenv(\"API_KEY\", \"test_key\", 1);\n   // Test if the key is retrieved correctly\n   REQUIRE(getApiKey() == \"test_key\");\n}\n```\n\nYou can verify that this tests passes by rebuilding the code and running the tests:\n\n```shell\ncmake --build build\n./build/tests\n```\n\n#### Test 2: Geocode the zip code\n\nThis test ensures that the `geocodeZipcode` function returns the correct latitude and longitude for a given zip code using the mock API response function we set up earlier. 
The  `geocodeZipcode` function is supposed to hit an API that returns geographic coordinates based on a zip code. \n\nIn `tests.cpp`, add this test case for the zip code 90210: \n\n```cpp\nTEST_CASE(\"Geocode Zip code\", \"[geocode]\") {\n   std::string apiKey = \"test_key\";\n   std::pair\u003Cdouble, double> coordinates = geocodeZipcode(\"90210\", apiKey);\n   // Check latitude\n   REQUIRE(coordinates.first == 40.7128);\n   // Check longitude \n   REQUIRE(coordinates.second == -74.0060);\n}\n```\n\nThe purpose of this test is to verify that the function `geocodeZipcode` can correctly parse the latitude and longitude from the API response. By hardcoding the expected response, we ensure that the test environment is controlled and predictable.\n\n #### Test 3: Air quality API test\n\nThis test ensures that the `fetchAirQuality` function correctly fetches air quality data using the mock API response function we set up earlier. It verifies that the function constructs the API request properly, sends it, and accurately parses the air quality index (AQI) from the mock JSON response. This validation helps ensure that the overall process of fetching and interpreting air quality data works as intended.\n\n```cpp\nTEST_CASE(\"Fetch Air Quality\", \"[airquality]\") {\n   std::string apiKey = \"test_key\";\n   double lat = 40.7128;\n   double lon = -74.0060;\n   std::string response = fetchAirQuality(lat, lon, apiKey);\n   // Check the response\n   REQUIRE(response == R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\");\n}\n```\n\n## Build and run the tests\n\nTo  build and compile our application, we'll use the same CMake commands as before:\n\n```cpp\ncmake -S . 
-B build\ncmake --build build\n\n```\n\nAfter building, we can run our tests by executing the test binary:  \n\n```cpp\n./build/tests\n\n```\n\nRunning this command will execute all defined tests, and you will see output indicating whether each test has passed or failed.\n\n![Output showing pass/fail of tests](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676998/Blog/Content%20Images/2.running-catch2-tests.png)\n\n## Set up GitLab CI/CD\n\nTo automate the testing process each time we push some new code to our repository, let’s set up [GitLab CI/CD](https://about.gitlab.com/topics/ci-cd/). Create a new `.gitlab-ci.yml` configuration file in the root directory. \n\n```yaml\nimage: gcc:latest\n\nvariables:\n GIT_SUBMODULE_STRATEGY: recursive\n\nstages:\n - build\n - test\n\nbefore_script:\n - apt-get update && apt-get install -y cmake\n\ncompile:\n stage: build\n script:\n   - cmake -S . -B build\n   - cmake --build build\n artifacts:\n   paths:\n     - build/\n\ntest:\n stage: test\n script:\n   - ./build/tests --reporter junit -o test-results.xml\n artifacts:\n   reports:\n     junit: test-results.xml\n```\n\nThis CI/CD configuration will compile both the main application and the test suite, then run the tests, generating a JUnit XML report which GitLab uses to display the test results.  \n\n- In `before_script`, we added an installation for `cmake`, and `git submodule sync --recursive` which initializes and updates our submodules (catch2). \n- In the `test` stage, `--reporter junit -o test-results.xml` specifies that the test results should be treated as a JUnit report which allows GitLab CI to display results in the UI. This is super helpful when you have several tests in your application.  
\n\nWe also need to [add an environmental variable](https://docs.gitlab.com/ee/ci/variables/#define-a-cicd-variable-in-the-ui) with the `API_KEY` in project settings on GitLab.\n\nDon’t forget to add all new files to Git, and commit and push the changes in a new MR:\n\n```shell\ngit checkout -b tests-catch2-cicd\n\ngit add includes/functions.{h,cpp} tests.cpp .gitlab-ci.yml \ngit add CMakeLists.txt main.cpp \n\ngit commit -vm “Add Catch2 tests and CI/CD configuration”\ngit push \n```\n\n## View the test report\n\nAfter pushing our code changes, we can review the results of our tests in the GitLab UI in the Pipeline view in the `Tests` tab:\n\n![GitLab pipeline view shows test results](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676998/Blog/Content%20Images/2.0-passed-tests-UI.png)\n\n## Simulate a test failure\n\nTo demonstrate how our UI will handle test failures, we can intentionally introduce a bug into our code and observe the resulting behavior. \n\nLet's modify our `parseAirQualityResponse` function to introduce an error. We can change the AQI category for an AQI value of 2 from \"Fair\" to \"Poor.\" This change will cause the related test to fail, allowing us to see the test failure in the GitLab UI.\n\nIn `functions.cpp`, find the `parseAirQualityResponse` function and modify the switch statement for case `2` to set the `Poor` value instead of `Fair`:\n\n```cpp\n               // Intentional bug:\n               case 2:\n                   aqiCategory = \"Poor\";\n                   break;\n```\n\nIn tests.cpp, add a new test case that directly checks the output of the `parseAirQualityResponse` function. This test ensures that the `parseAirQualityResponse` function correctly parses and categorizes the air quality data from the mock API response. 
This function takes a JSON response, extracts the AQI value, and translates it into a human-readable category.\n\n```cpp\n\nTEST_CASE(\"Parse Air Quality Response\", \"[airquality]\") {\n   std::string mockResponse = R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\";\n   std::string result = parseAirQualityResponse(mockResponse);\n   // This should fail due to the intentional bug\n   REQUIRE(result == \"2 (Fair)\");\n}\n\n```\n\nCommit the changes, and push them into the MR. Open the MR in your browser. \n\nBy introducing an intentional bug in this function, we can see how a test failure is reported in GitLab's pipelines UI. We must add, commit, and push the changes to our repository to view the test failure in the pipeline. \n\n![Simulated test failure](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676998/Blog/Content%20Images/2.1-failed-test-simulation.png)\n\n![Details of the simulated failed test](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676998/Blog/Content%20Images/2.2-failed-test-simulation-details.png)\n\nOnce we've verified this simulated test failure, we can use `git revert` to roll back that commit. \n\n```shell\ngit revert\n```\n\n## Add and test a new feature\n\nLet’s put what you've learned together by creating a new feature in the air quality application and then writing a test for that feature using Catch2. The new feature will fetch the current weather forecast for the provided zip code.\n\nFirst, we'll define a `Weather` struct and add the function prototype in our `functions.h` file (inside the `#endif`):\n\n```cpp\n\nstruct Weather {\n   std::string main;\n   std::string description;\n   double temperature;\n};\n\nWeather getCurrentWeather(const std::string& apiKey, double lat, double lon);\n```\n\nThen, we implement the `getCurrentWeather` function in `functions.cpp`. This function calls the OpenWeatherMap API to retrieve the current weather and parses the JSON response. 
This code was generated using [GitLab Duo](https://about.gitlab.com/gitlab-duo/). If you start typing `Weather getCurrentWeather(const std::string& apiKey, double lat, double lon) {` to complete the function, GitLab Duo will provide the function contents for you, line by line. \n\n![GitLab Duo completing the function contents](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676998/Blog/Content%20Images/3.get-current-weather-function-completion.png)\n\nHere's what your `getCurrentWeather()` function can look like: \n\n```cpp\n\nWeather getCurrentWeather(const std::string& apiKey, double lat, double lon) {\n   std::string url = \"http://api.openweathermap.org/data/2.5/weather?lat=\" + std::to_string(lat) + \"&lon=\" + std::to_string(lon) + \"&appid=\" + apiKey;\n   std::string response = httpRequest(url);\n   auto json = nlohmann::json::parse(response);\n   Weather weather;\n   if (!json.is_null()) {\n       weather.main = json[\"weather\"][0][\"main\"];\n       weather.description = json[\"weather\"][0][\"description\"];\n       weather.temperature = json[\"main\"][\"temp\"];\n   }\n   return weather;\n}\n```\n\nAnd, finally, we update our `main.cpp` file in the main function to output the current forecast (and converting Kelvin to Celsius for the output):  \n\n```cpp\n   Weather currentWeather = getCurrentWeather(apiKey, lat, lon);\n   if (currentWeather.main.empty()) {\n       std::cerr \u003C\u003C \"Failed to fetch current weather.\" \u003C\u003C std::endl;\n       return 1;\n   }\n\n   std::cout \u003C\u003C \"Current Weather: \" \u003C\u003C currentWeather.main \u003C\u003C \", \" \u003C\u003C currentWeather.description\n       \u003C\u003C \", temperature \" \u003C\u003C currentWeather.temperature - 273.15 \u003C\u003C \" °C\" \u003C\u003C std::endl;\n```\n\nWe can confirm that our new feature is working by building and running the application:  \n\n```shell\ncmake --build build\n./build/air_quality_app \n```\n\nAnd we should see the following 
output or similar in case the weather is different on the day the code is run :)\n\n```\nAir Quality Index for Zip Code 90210: 2 (Poor)\nCurrent Weather: Clouds, broken clouds, temperature 23.2 °C\n```\n\nWith all new functionality, there should be testing! We can also write a test to check whether the application is fetching and parsing a weather forecast correctly. This test checks that the function returns a list containing the correct number of forecast entries and that each entry has accurate data regarding time and temperature.\n\n```cpp\nTEST_CASE(\"Current Weather functionality\", \"[api]\") {\n   auto weather = getCurrentWeather(\"dummyApiKey\", 40.7128, -74.0060);\n   // Ensure main weather description is not empty\n   REQUIRE_FALSE(weather.main.empty());\n   // Validate that temperature is a reasonable value\n   REQUIRE(weather.temperature > 0); \n}\n```\n\nWe’ll also have to update our `mockHTTPRequest` function in `tests.cpp` to account for this new test. Modify the if-condition with a new else-if branch checking for the `weather` string in the URL:  \n\n```cpp\n// Mock HTTP request function that simulates API responses\nstd::string mockHttpRequest(const std::string &url)\n{\n   if (url.find(\"geo\") != std::string::npos)\n   {\n       // Mock response for geocoding\n       return R\"({\"lat\": 40.7128, \"lon\": -74.0060})\";\n   }\n   else if (url.find(\"air_pollution\") != std::string::npos)\n   {\n       // Mock response for air quality\n       return R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\";\n   }\n   else if (url.find(\"weather\") != std::string::npos)\n   {\n       // Mock response for current weather\n       return R\"({\n          \"weather\": [{\"main\": \"Clear\", \"description\": \"clear sky\"}],\n          \"main\": {\"temp\": 298.55}\n      })\";\n   }\n   return \"{}\";\n}\n```\n\nAnd verify that our tests are working by rebuilding and running our tests:  \n\n```shell\ncmake --build build \n./build/tests\n```\n\nAll tests should 
pass, including the new one for Current Weather Functionality. \n\n## Optimize tests.cpp with sections\n\nTo better organize our tests as the project grows and categorize each functionality, we can use Catch2’s `SECTION` macro. The `SECTION` macro allows you to define logically separate test scenarios within a single test case, providing a clean way to test different behaviors or conditions without requiring multiple separate test cases or multiple files. This approach keeps related tests bundled together and also improves test maintainability by allowing shared setup code to be executed repeatedly for each section.\n\nSince some of our functionality is preprocessing data to retrieve information, let’s section our tests as such:\n- preprocessing steps: \n\t- API key validation\n\t- geocoding validation\n-  API data retrieval:\n\t- air pollution retrieval \n\t- forecast retrieval\n\nHere’s what our `tests.cpp` will look like if organized by sections: \n\n```cpp\n#include \"functions.h\"\n#include \u003Ccatch2/catch_test_macros.hpp>\n#include \u003Cstring>\n\n// Mock HTTP request function that simulates API responses\nstd::string mockHttpRequest(const std::string &url)\n{\n   if (url.find(\"geo\") != std::string::npos)\n   {\n       // Mock response for geocoding\n       return R\"({\"lat\": 40.7128, \"lon\": -74.0060})\";\n   }\n   else if (url.find(\"air_pollution\") != std::string::npos)\n   {\n       // Mock response for air quality\n       return R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\";\n   }\n   else if (url.find(\"weather\") != std::string::npos)\n   {\n       // Mock response for current weather\n       return R\"({\n          \"weather\": [{\"main\": \"Clear\", \"description\": \"clear sky\"}],\n          \"main\": {\"temp\": 298.55}\n      })\";\n   }\n   return \"{}\";\n}\n\n// Overriding the actual httpRequest function with the mockHttpRequest for testing\nstd::string httpRequest(const std::string &url)\n{\n   return mockHttpRequest(url);\n}\n\n// 
Preprocessing Steps\nTEST_CASE(\"Preprocessing Steps\", \"[preprocessing]\") {\n   SECTION(\"API Key Retrieval\") {\n       // Set the API_KEY environment variable for testing\n       setenv(\"API_KEY\", \"test_key\", 1);\n       // Test if the key is retrieved correctly\n       REQUIRE_FALSE(getApiKey().empty());\n   }\n\n   SECTION(\"Geocode Functionality\") {\n       std::string apiKey = \"test_key\";\n       std::pair\u003Cdouble, double> coordinates = geocodeZipcode(\"90210\", apiKey);\n       // Check latitude\n       REQUIRE(coordinates.first == 40.7128);\n       // Check longitude \n       REQUIRE(coordinates.second == -74.0060);\n   }\n}\n\n// API Data Retrieval\nTEST_CASE(\"API Data Retrieval\", \"[data_retrieval]\") {\n   SECTION(\"Air Quality Functionality\") {\n       std::string apiKey = \"test_key\";\n       double lat = 40.7128;\n       double lon = -74.0060;\n       std::string response = fetchAirQuality(lat, lon, apiKey);\n       // Check the response\n       REQUIRE(response == R\"({\"list\": [{\"main\": {\"aqi\": 2}}]})\");\n   }\n\n   SECTION(\"Current Weather Functionality\") {\n       auto weather = getCurrentWeather(\"dummyApiKey\", 40.7128, -74.0060);\n       // Ensure main weather description is not empty\n       REQUIRE_FALSE(weather.main.empty());\n       // Validate that temperature is a reasonable value\n       REQUIRE(weather.temperature > 0);\n   }\n}\n```\n\nRebuild the code and run the tests again to verify.\n\n```shell\ncmake --build build \n./build/tests\n```\n\n## Next steps\n\nIn this post, we covered how to integrate unit testing into a `C++` project using Catch2 testing framework and GitLab CI/CD and set up basic tests for our reference air quality application project.\n\nTo explore these concepts further, you can check out the [Catch2 documentation](https://github.com/catchorg/Catch2) and [GitLab's Unit test report examples documentation](https://docs.gitlab.com/ee/ci/testing/unit_test_report_examples.html). 
\n\nFor an advanced async exercise, you could build upon this project by using GitLab Duo to implement a feature that retrieves and analyzes historical air quality data and add code quality checks into the CI/CD pipeline. Happy coding! \n",[9,1041,771,704,835],{"slug":1595,"featured":90,"template":684},"develop-c-unit-testing-with-catch2-junit-and-gitlab-ci","content:en-us:blog:develop-c-unit-testing-with-catch2-junit-and-gitlab-ci.yml","Develop C Unit Testing With Catch2 Junit And Gitlab Ci","en-us/blog/develop-c-unit-testing-with-catch2-junit-and-gitlab-ci.yml","en-us/blog/develop-c-unit-testing-with-catch2-junit-and-gitlab-ci",{"_path":1601,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1602,"content":1608,"config":1616,"_id":1618,"_type":13,"title":1619,"_source":15,"_file":1620,"_stem":1621,"_extension":18},"/en-us/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd",{"title":1603,"description":1604,"ogTitle":1603,"ogDescription":1604,"noIndex":6,"ogImage":1605,"ogUrl":1606,"ogSiteName":669,"ogType":670,"canonicalUrls":1606,"schema":1607},"GitLab Duo: AI-powered CI/CD pipeline root cause analysis","Discover how we've infused Root Cause Analysis with AI to help remedy broken CI/CD pipelines, including example scenarios and take-away exercises.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097321/Blog/Hero%20Images/Blog/Hero%20Images/blog-hero-banner-1-0178-820x470-fy25_7JlF3WlEkswGQbcTe8DOTB_1750097321081.png","https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing GitLab Duo: Blending AI and Root Cause Analysis to fix CI/CD pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rutvik Shah\"},{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2024-06-06\",\n      
}",{"title":1609,"description":1604,"authors":1610,"heroImage":1605,"date":1613,"body":1614,"category":702,"tags":1615},"Developing GitLab Duo: Blending AI and Root Cause Analysis to fix CI/CD pipelines",[1611,1612],"Rutvik Shah","Michael Friedrich","2024-06-06","___Generative AI marks a monumental shift in the software development industry, making it easier to develop, secure, and operate software. Our new blog series, written by our product and engineering teams, gives you an inside look at how we create, test, and deploy the AI features you need integrated throughout the enterprise. Get to know new capabilities within GitLab Duo and how they will help DevSecOps teams deliver better results for customers.___\n\nHave you ever encountered a broken [CI/CD](https://about.gitlab.com/topics/ci-cd/benefits-continuous-integration/) pipeline and had to halt your DevSecOps workflow, or even delay software deployment, as you try to figure out the root cause? Traditionally, when something goes wrong in the process of creating software, developers have to troubleshoot, dig through log files, and often do a lot of trial and error development. [GitLab Duo Root Cause Analysis](https://about.gitlab.com/gitlab-duo/), part of our suite of AI-powered features, removes the guesswork by determining the root cause for a failed CI/CD pipeline. In this article, you'll learn what Root Cause Analysis is and how to apply the AI-powered GitLab Duo feature to your DevSecOps workflow.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today!](https://about.gitlab.com/seventeen/)\n\n### What is Root Cause Analysis?\n\nGitLab Duo Root Cause Analysis is an AI-powered feature that assists you in determining a root cause and suggesting a fix for a CI/CD job log failure by analyzing the logs.\n\nWhile Root Cause Analysis is often seen in product incident management, its workflows and debugging practices can be found in any DevSecOps workflow. 
Ops teams, administrators, and platform engineers are challenged by infrastructure-as-code (IaC) deployment errors, Kubernetes and GitOps problems, and long stack traces while investigating pipeline failures.\n\nGitLab Duo Root Cause Analysis keeps everyone in the same interface and uses AI-powered help to summarize, analyze, and propose fixes so that organizations can release secure software faster.\n\nA pipeline can encounter failures for a variety of reasons, including syntax errors in the code, missing dependencies that the pipeline relies on, test failures during the build process, Kubernetes and IaC deployment timeouts, and numerous other potential issues. When such failures occur, it becomes the responsibility of everyone to meticulously review the logs generated by the pipeline. This job log review process involves scrutinizing the detailed output to identify the specific errors and pinpoint the root cause of the pipeline failure. For example, the following pipeline has multiple job failures that need to be investigated and fixed.\n\n![Image depicting multiple job failures](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097332/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097332601.png)\n\nThe duration required to fix these failures can vary significantly and is largely influenced by several factors such as:\n- the developer's familiarity with the project\n- their level of experience in dealing with similar issues\n- their overall skill level in troubleshooting and problem-solving within the context of the pipeline.\n\nManual analysis can be exceedingly challenging and time-consuming, given that log data consists of application logs and system messages with a wide variety of potential sources of failures. A typical pipeline fix can consist of several iterations and context switching. The complexity and the unstructured nature of the logs is a perfect fit for speeding up the task using generative AI.  
Using AI can reduce the time to identify and fix a pipeline error significantly and also lower the barrier of expertise that would be needed to fix a pipeline such as the above.\n\nWatch GitLab Duo Root Cause Analysis in action:\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n\n \u003Ciframe src=\"https://www.youtube.com/embed/sTpSLwX5DIs?si=J6-0Bf6PtYjrHX1K\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\n\u003C/figure>\n\n\u003C!-- blank line -->\n\n### How does Root Cause Analysis work?\n\n[Root Cause Analysis](https://docs.gitlab.com/ee/user/ai_experiments.html#root-cause-analysis) works by forwarding a portion of the CI/CD job log to the [GitLab AI Gateway](https://docs.gitlab.com/ee/architecture/blueprints/ai_gateway/). GitLab ensures that the portion sent will fit inside the large language model (LLM) token limits alongside a prompt that has been pre-crafted to provide insights into why the job might have failed. The prompt also instructs the LLM to provide an example of how a user might fix a broken job.\n\nHere are two example scenarios where Root Cause Analysis can provide assistance.\n\n#### 1. Analyze a Python dependency error\n\nA Python application can import package modules with functionality that is not provided in the standard library. The project [Challenge - Root Cause Analysis - Python Config](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/root-cause-analysis/challenge-root-cause-analysis-python-config) implements an application that parses configuration and initializes an SQLite database, which both work well without any dependencies. It uses best practices in CI/CD with a Python environment and caching. The latest feature implementation adds a Redis caching client, and now the CI/CD build is failing for some reason. 
\n\nBy using Root Cause Analysis, you can immediately learn that the `ModuleNotFoundError` text means that the module is actually not installed in the Python environment. GitLab Duo also suggests an example fix: Installing the Redis module through the PIP package manager. \n\n![Image depicting 'modulenotfounderror' and GL Duo suggested resolution](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097332/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097332602.png)\n\nThe failing pipeline can be viewed [here](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/root-cause-analysis/challenge-root-cause-analysis-python-config/-/jobs/6992716398). \n\nThe Root Cause Analysis prompt provides a summary of the problem, which seems to be a problem with a missing `redis` module. Let's try to fix the problem by installing the `redis` module. You can either call `pip install redis` in the CI/CD job `script` section, or use a more sophisticated approach with the `requirements.txt` file. The latter is useful for a single source of truth for dependencies installed in the development environment and CI/CD pipelines.\n\n```yaml\ntest:\n  extends: [.python-req]\n  stage: test \n  before_script:\n    # [🦊] hint: Root cause analysis.\n    # Solution 1: Install redis using pip\n    - pip install redis\n    # Solution 2: Add redis to requirements.txt, use pip\n    - pip install -r requirements.txt \n\n  script:\n    - python src/main.py\n```\n\nAfter fixing the missing Python dependency, the CI/CD job fails again. Use Root Cause Analysis again to learn that no Redis service is running in the job. 
Switch to using GitLab Duo Chat and use the prompt `How to start a Redis service in CI/CD` to learn how to configure the `services` attribute in the CI/CD job.\n\n![Depicts the prompt for how to start a Redis service](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097333/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097332602.png)\n\nModify the `.gitlab-ci.yml` with the `test` job, and specify the `redis` service.\n\n```yaml\ntest:\n  extends: [.python-req]\n  stage: test \n  before_script:\n    # [🦊] hint: Root cause analysis.\n    # Solution 1: Install redis using pip\n    - pip install redis\n    # Solution 2: Add redis to requirements.txt, use pip\n    - pip install -r requirements.txt \n\n  script:\n    - python src/main.py\n\n  # Solution 3 - Running Redis\n  services:\n    - redis\n```\n\nRunning the Redis server allows you to successfully execute the Python application, and print its output into the CI/CD job log.\n\n![output of Python application](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097332/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097332603.png)\n\nThe solution is provided in the [solution/ directory](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/root-cause-analysis/challenge-root-cause-analysis-python-config/-/tree/main/solution?ref_type=heads).\n\n**Tip:** You can also ask [GitLab Duo Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html) to follow up on potential future problems:\n\n```markdown\nHow to lint Python code? Which tools are recommended for CI/CD.\nHow to pin a package version in Python requirements file?\t\nWhat are possible ways that this exception stacktrace is triggered in the future?\nAre there ways to prevent the application from failing?\n``` \n\nThe next example is more advanced and includes multiple failures. \n\n#### 2. 
Analyze missing Go runtime\n\nCI/CD jobs can be executed in containers, spawned from the contributed `image` attribute. If the container does not provide a programming language runtime, the executed `script` sections referencing the `go` binary fail. For example, the error message `/bin/sh: eval: line 149: go: not found` needs to be understood and fixed. \n\nIf the `go` command is not found in the container's runtime context, this can have multiple reasons:\n\n1. The job uses a minimal container image, for example `alpine`, and the Go language runtime was not installed.\n1. The job uses the wrong default container image, for example, specified on top of the CI/CD configuration, or using the `default` keyword.\n1. The job does not use a container image but the shell executor. The host operating system does not have the Go language runtime installed, or it is otherwise broken/not configured.\n\nThe project [Challenge - Root Cause Analysis - Go GitLab Release Fetcher](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/root-cause-analysis/challenge-root-cause-analysis-go-gitlab-release-fetcher) provides an exercise challenge to analyze and fix CI/CD problems with a GitLab release fetcher application, written in Go. The `build` and `docker-build` CI/CD jobs are failing. Fixing the problem requires different scopes: Understanding why the Go runtime is not installed, and learning about the `Dockerfile` syntax. \n\n![Screenshot showing Change Docker Label job failed](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097332/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097332603.png)\n\nThe [`solution/` directory](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/root-cause-analysis/challenge-root-cause-analysis-go-gitlab-release-fetcher) provides two possible solutions after Root Cause Analysis. 
\n\n## Practice using Root Cause Analysis\n\nHere are some scenarios to use to practice using Root Cause Analysis.\n\n- When you are running into Kubernetes deployment errors or timeouts. \n\n- With OpenTofu or Terraform IaC pipelines failing to provision your cloud resources.\n\n- When the Ansible playbook fails with a cryptic permission error in CI/CD.\n\n- When the Java stack trace is 10 pages long.\n\n- With a shell script highlighting an execution error.\n\n- When a Perl script fails in a single line, which is the only line in the script.\n\n- When the CI/CD job times out and it is unclear which section would cause this.\n\n- When a network connection timeout is reached, and you think it cannot be DNS.\n\n### What is next for GitLab Duo Root Cause Analysis?\n\nWe want to help our users to get their pipelines back to passing in fewer iterations. The Root Cause Analysis will open and show the response in GitLab Duo Chat, our AI assistant. Users can build on the recommendation to generate a more precise fix by asking specific questions (e.g., programming language-specific fixes) or asking for alternative fixes based on the root cause.\n\nFor example, here is the Root Cause Analysis for a failing job:\n\n![Root Cause Analysis response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097332/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097332603.png)\n\nUsers can ask follow-up questions that build upon the AI-generated response.\n\n- I do not want to create my own Docker image. Please explain different ways to fix the problem.\n\n- I don't have access to the Docker image creation. It seems that the Go binary is missing. Are there alternative images you can suggest?\n\nGitLab also will be running quality benchmarks for the generated responses and shipping usability improvements.\n\nPlease see our [Root Cause Analysis GA epic](https://gitlab.com/groups/gitlab-org/-/epics/13080) for more details. 
We would also love your feedback on the feature. Please leave a comment on our [Root Cause Analysis feedback issue](https://gitlab.com/groups/gitlab-org/-/epics/13872).\n\n## Get started with Root Cause Analysis\n\nPlease see our [documentation](https://docs.gitlab.com/ee/user/ai_experiments.html#root-cause-analysis) on how to enable the feature available to our GitLab Ultimate customers. Also, GitLab Duo Root Cause Analysis will soon be coming to GitLab self-managed and GitLab Dedicated.\n\nNot a GitLab Ultimate customer? Start [a 30-day free trial](https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/blog&glm_content=default-saas-trial) today. \n\n## Read more of our \"Developing GitLab Duo\" series\n\n- [Developing GitLab Duo: How we validate and test AI models at scale](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)\n\n- [Developing GitLab Duo: AI Impact analytics dashboard measures the ROI of AI](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)\n\n- [Developing GitLab Duo: How we are dogfooding our AI features](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)\n\n- [Developing GitLab Duo: Secure and thoroughly test AI-generated code](https://about.gitlab.com/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code/)",[704,9,835,478,680],{"slug":1617,"featured":90,"template":684},"developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd","content:en-us:blog:developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd.yml","Developing Gitlab Duo Blending Ai And Root Cause Analysis To Fix Ci 
Cd","en-us/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd.yml","en-us/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd",{"_path":1623,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1624,"content":1630,"config":1636,"_id":1638,"_type":13,"title":1639,"_source":15,"_file":1640,"_stem":1641,"_extension":18},"/en-us/blog/developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities",{"title":1625,"description":1626,"ogTitle":1625,"ogDescription":1626,"noIndex":6,"ogImage":1627,"ogUrl":1628,"ogSiteName":669,"ogType":670,"canonicalUrls":1628,"schema":1629},"Developing GitLab Duo: Use AI to remediate security vulnerabilities ","This tutorial shows how GitLab Duo Vulnerability Explanation and GitLab Duo Vulnerability Resolution, along with our other AI-powered features, can help to address vulnerabilities quickly.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098106/Blog/Hero%20Images/Blog/Hero%20Images/blog-hero-banner-1-0178-820x470-fy25_7JlF3WlEkswGQbcTe8DOTB_1750098106040.png","https://about.gitlab.com/blog/developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing GitLab Duo: Use AI to remediate security vulnerabilities \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"},{\"@type\":\"Person\",\"name\":\"Alana Bellucci\"}],\n        \"datePublished\": \"2024-07-15\",\n      }",{"title":1625,"description":1626,"authors":1631,"heroImage":1627,"date":1633,"body":1634,"category":702,"tags":1635},[1612,1632],"Alana Bellucci","2024-07-15","You’ve just started into a new job, and on your first day, a large-scale production incident requires all hands on deck. There are a number of critical new vulnerabilities that require immediate attention, analysis, mitigation and remediation. 
Where do you start your investigation? \n\nLearn how GitLab Duo Vulnerability Explanation and GitLab Duo Vulnerability Resolution, along with our other AI-powered features, can help you begin addressing vulnerabilities in minutes. You will learn how to benefit from AI-powered assistance to analyze and explain vulnerabilities in a practical example. Additional remediation is highlighted with AI-generated code fixes in MRs to aid faster vulnerability resolution.\n\n> Start [a free trial of GitLab Duo](https://about.gitlab.com/gitlab-duo/#free-trial) to bring these powerful vulnerability remediation benefits to your own organization!\n\n## How to get started: Analyze\n\nThe first step is to analyze the impact and severity of the vulnerability. Open the GitLab UI and navigate into the [vulnerability report](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/) in the `Secure > Vulnerability Report` menu. Filter the vulnerability list by `SAST`, and identify the most critical vulnerabilities to work on.\n\n![Vulnerability reports overview](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/vulnerability_reports_overview_aHR0cHM6_1750098116056.png)\n\nThe SAST scanner results are summarized in the detail view, linking to the source code. They provide details from publicly available security advisories. As a developer, it is often hard to start the analysis from the security report, unless you are fully aware of the attack scope, technical details, and vulnerable environments.\n\n## Understand and mitigate with Vulnerability Explanation \n\nUnderstanding the vulnerability and how to fix it in the best and most efficient way is crucial. Fixes must not break existing functionality. If they do, a discussion with maintainers and product owners will be necessary, and, as such, will require a high-level summary and potential mitigation alternatives. 
Code that someone who left the company wrote or code that has no tests can make the planning for a fix even more difficult. \n\nAI-powered Vulnerability Explanation helps with a summary of how an attacker can exploit the vulnerability, and provides more explanations about the impact and potential fixes. \n\nThe following example shows an OS Command Injection vulnerability, using this code snippet:\n\n```php\n\u003C?php \n\n// Read variable name from GET request\n$name = $_GET['name'];\n\n// Use the variable name to call eval and print its value \neval('echo $' . $name . ';');\n```\n\nThe vulnerability report does not go into much detail, and requires understanding of the full context and impact. Select `Explain vulnerability` from the upper right corner, which will open GitLab Duo Chat with a pre-defined prompt action. This will give an additional summary of the vulnerability, describe how the vulnerability can be exploited, and provide a suggested fix. \n\n![Improper Neutralization of\nSpecial Elements used in an OS Command\n('OS Command Injection') ](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750098116057.png)\n\n### Make Vulnerability Explanation a conversation with context \n\nYou’ll also recognize a change in UX: The previous vulnerability explanation overlay was replaced with a GitLab Duo Chat workflow. Sometimes, a complex vulnerability unfolds into multiple mitigation steps, or unclear source code paths.\n\nYou can navigate into the source code tree, and continue with the same Chat context to explain, fix, refactor, and test the code. \n\nLet’s try the full workflow with an example in C, where security scanning detected a buffer overflow.\n\n1. Open the security vulnerability detail view, and select \"Explain vulnerability\" on the button in the upper right. 
This will open up the Chat prompt, providing a summary of the problem, potential attack vectors, and a proposed fix.\n\n![AI for vulnerabilities - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image11_aHR0cHM6_1750098116059.png)\n\n2. Review the proposed fix, and ask Chat in a follow-up prompt to share alternative paths, using `Can you show an alternative fix using a different function`. The idea is to learn about alternative functions to `strcpy()` that can be more safe to use. \n\n![AI for vulnerabilities - image 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098116060.png)\n\n3. Chat comes up with an alternative fix using `strlcpy()` in the following example. The function only copies as many characters as allowed in the target string, and always terminates the string with null. It also returns the length of the source string to determine whether the string was truncated. \n\n![AI for vulnerabilities - image 5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750098116062.png)\n\n4. Next, click on the `Location` file URL to jump into the source code view. Open Chat again, and verify that the previous vulnerability explanation context is still there. As a next step, we want to add tests before continuing with a proposed fix. This helps to avoid breaking functionality or introduce regressions. For example, use this Chat prompt: `Based on the vulnerability context and opened source code, how would you add tests for it?`.\n\n![AI for vulnerabilities - image 7 ](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098116063.png)\n\n5. 
After generating tests (and assuming they were added now), you can also ask Chat to refactor the source code, using the prompt `Can you refactor the source code too?` in the same session.\n\n![AI for vulnerabilities - image 6](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098116063.png)\n\nThe workflow shows how to analyze, understand, mitigate, get alternative approaches, add tests, and even refactor fixes for vulnerabilities. \n\nYou can continue this path using Chat, and then switch into the Web IDE to modify the source code after learning how to do it. Additional continued workflows include committing changes and triggering CI/CD and security scans for the full DevSecOps lifecycle loop. \n\n## Remediate with AI-assisted Vulnerability Resolution \n\nUnderstanding and mitigating a security vulnerability still requires engineering work to create a fix for the problem, run pipelines and security scanning in a new merge request again. It can also be necessary to deploy the fixes into a staging environment and test them for a longer period of time.\n\nAI can help here with generating a proposed fix based on the provided context of the vulnerability and source code.\n\nTip: Think of the most annoying vulnerability you had to fix in your career, and re-create the use case example for your GitLab Duo adoption. The [MITRE CWE Top 25 of the most dangerous software weaknesses](https://cwe.mitre.org/top25/archive/2023/2023_top25_list.html) also provides a good starting point.  \n\nThe following example implements [CWE-328: Use of a weak hash function](https://cwe.mitre.org/data/definitions/328.html) by using `md5`. It is correctly identified by [SAST scanning](https://docs.gitlab.com/ee/user/application_security/sast/). 
\n\n```python\nimport hashlib\n\nclass User:\n    def __init__(self, username, password):\n        self.username = username\n        self.password = password\n\n    def set_password(self, password):\n        self.password = hashlib.md5(password.encode()).hexdigest()\n```\n\n![AI for vulnerabilities - image 8](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098116064.png)\n\nClick on the button in the upper right `Resolve with merge request`.  This will open an MR that uses AI to propose the fix. For this vulnerability, one possible fix could be using a different hash function. \n\n![AI for vulnerabilities - image 9](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098116065.png)\n\nAnother common vulnerability example is not checking function error codes or potential exceptions. The following C code snippets implement an example for timing attacks against file operations with [CWE-362](https://cwe.mitre.org/data/definitions/362.html) for the `fopen()` and `chmod()` calls. 
\n\n```c\n#include \u003Cstdio.h>\n#include \u003Cstring.h>\n#include \u003Csys/mman.h>\n#include \u003Csys/stat.h>\n#include \u003Cunistd.h>\n\nint main(int argc, char **argv) {\n\n    // File operations\n    char *fname = \"gitlab.keksi\";\n\n    FILE *fp;\n    fp = fopen(fname, \"r\");\n    fprintf(fp, \"Hello from GitLab Duo Vulnerability Resolution Challenge\");\n    fclose(fp);\n\n    // Potential chmod() timing attacks    \n\n    // Make the file world readable\n    chmod(fname, S_IRWXU|S_IRWXG|S_IRWXO);\n\n    return 0;\n}\n```\n\nThe SAST report for `chmod()` can look like the following: \n\n![AI for vulnerabilities - image 10](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098116065.png)\n\nThe proposed `chmod()` merge request includes error handling, and fixes another potential issue with world writable files, changing the permissions from `777` to `600`.\n\n![AI for vulnerabilities - image 11](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098116/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098116066.png)\n\n> Try this async exercise: Find, analyze, and fix the vulnerability for the `fopen()` function.\n\n## More AI assistance required from GitLab Duo \n\nOften, a security problem can be resolved with a quick fix or a workaround that grants the development teams time to discuss and plan a more long-term solution. In other cases, the problem becomes more complex and requires feature APIs disabled, or firewall mitigation, until a proper fix can be rolled into production.\n\nGitLab Duo offers additional AI-powered features that can help resolve these issues. \n\n**Code Explanation:** As a developer or security engineer, it's crucial to feel confident in the changes you've made. 
Within the IDE, you can use the [Code Explanation feature](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#explain-code-in-the-ide) to gain a deeper understanding of the AI-suggested fix for the vulnerability. This ensures you know exactly what adjustments have been made and why.\n\n**Root Cause Analysis:** If the fix breaks your pipeline, you can utilize the [Root Cause Analysis feature](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/). This tool helps identify and explain the underlying problem, allowing you to address it effectively. After applying the necessary corrections, you can rerun the tests to ensure a successful resolution.\n\n**Refactor:** Even if the vulnerability has been fixed, it's worth considering if the code can be written in a safer manner. In the IDE, you can open GitLab Duo Chat and use the [refactor action](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#refactor-code-in-the-ide) to explore alternative, more secure ways to write your code. This proactive approach helps maintain a robust and secure codebase.\n\nBy leveraging these GitLab Duo features, you can confidently navigate and resolve vulnerabilities, ensuring your code remains secure and efficient.\n\n## What’s next?\n\nWe plan to bring both Vulnerability Explanation and Vulnerability Resolution \"left\" by incorporating them directly into the MR process. This integration ensures that you can address and resolve vulnerabilities earlier in the development cycle, streamlining your workflow and enhancing code security from the outset.\n\n## Get started with GitLab Duo\n\nPlease see our [documentation](https://docs.gitlab.com/ee/user/gitlab_duo/turn_on_off.html) on how to enable the feature available to our GitLab Ultimate customers. 
Also, GitLab Duo [Vulnerability Explanation](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#explaining-a-vulnerability) and [Vulnerability Resolution](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#vulnerability-resolution) will soon be coming to GitLab self-managed and GitLab Dedicated.\n\nYou can keep up with what's new in GitLab Duo by [following the \"Developing GitLab Duo\" blog series](https://about.gitlab.com/blog/developing-gitlab-duo-series/).\n\n> Start [a free trial of GitLab Duo](https://about.gitlab.com/gitlab-duo/#free-trial) to bring these powerful vulnerability remediation benefits to your own organization!\n",[704,814,678,680,9],{"slug":1637,"featured":90,"template":684},"developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities","content:en-us:blog:developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities.yml","Developing Gitlab Duo Use Ai To Remediate Security Vulnerabilities","en-us/blog/developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities.yml","en-us/blog/developing-gitlab-duo-use-ai-to-remediate-security-vulnerabilities",{"_path":1643,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1644,"content":1650,"config":1656,"_id":1658,"_type":13,"title":1659,"_source":15,"_file":1660,"_stem":1661,"_extension":18},"/en-us/blog/devops-adoption",{"title":1645,"description":1646,"ogTitle":1645,"ogDescription":1646,"noIndex":6,"ogImage":1647,"ogUrl":1648,"ogSiteName":669,"ogType":670,"canonicalUrls":1648,"schema":1649},"Understand how your teams adopt DevOps with DevOps reports","Learn about analytics, DevOps reports, DevOps scores, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668473/Blog/Hero%20Images/john-schnobrich-FlPc9_VocJ4-unsplash.jpg","https://about.gitlab.com/blog/devops-adoption","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Understand how your teams 
adopt DevOps with DevOps reports\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Orit Golowinski\"}],\n        \"datePublished\": \"2021-12-15\",\n      }",{"title":1645,"description":1646,"authors":1651,"heroImage":1647,"date":1653,"body":1654,"category":1103,"tags":1655},[1652],"Orit Golowinski","2021-12-15","\n\nGitLab has an extraordinary range of features for a single application, providing an [entire DevOps platform](/stages-devops-lifecycle/) from [portfolio planning](/stages-devops-lifecycle/plan/) all the way through to [monitoring](/stages-devops-lifecycle/monitor/) and [service desk](https://docs.gitlab.com/ee/user/project/service_desk/). As such, GitLab is uniquely positioned to deliver a complete picture of your organization's DevOps journey and your return on investment in automation and DevOps practices.\n\nSome of the most interesting and difficult questions that organizations ask themselves are:\n\n* What do we gain from different development practices used by our teams?\n* What makes one team more efficient than another?\n* What practices have been successful in one team that we can introduce to others?\n\n## Analytics\n\nGitLab has several metrics to give you insight into the development lifecycle:\n\n* [Application Security](https://docs.gitlab.com/ee/user/application_security/security_dashboard/#project-security-dashboard) -  provides a comprehensive set of features for viewing and managing vulnerabilities.\n* [CI/CD](https://docs.gitlab.com/ee/user/analytics/ci_cd_analytics.html) - tracks the history of your pipeline successes and failures, as well as how long each pipeline ran.\n* [Code Review](https://docs.gitlab.com/ee/user/analytics/code_review_analytics.html) - displays open merge requests and their review time.\n* [Insights](https://docs.gitlab.com/ee/user/project/insights/index.html)- allows you to configure custom analytics that will be displayed.\n* [Issue](https://docs.gitlab.com/ee/user/group/issues_analytics/index.html) - 
illustrates the number of issues created each month.\n* [Merge Request](https://docs.gitlab.com/ee/user/analytics/merge_request_analytics.html) - displays information that will help you evaluate the efficiency and productivity of your merge request process.\n* [Repository](https://docs.gitlab.com/ee/user/analytics/repository_analytics.html) - displays information such as commit statistics, code coverage, and programming languages used in the repository.\n* [Value Stream Analytics](https://docs.gitlab.com/ee/user/analytics/value_stream_analytics.html) - measures the time spent to go from an idea to production.\n\nSome analytics are only available for instance-level (self-managed), group level, or project level. Read [more](https://docs.gitlab.com/ee/user/analytics/) about analytics.\n\nThese analytics are a great way to see contributions from different projects and groups. On their own, however, they don't give insights into which processes your teams are using. For that, we offer DevOps Reports.\n\n## DevOps adoption reports\n\nDevOps Adoption is a DevOps Report located in group-level analytics. 
It shows you data for how teams in your organization use the most essential GitLab features.\n\nYou can use DevOps Adoption to:\n\n- Identify specific subgroups that are lagging in their adoption of GitLab features, so you can guide them on their DevOps journey.\n- Find subgroups that have successfully adopted certain features, and could provide guidance to other subgroups on how to use those features.\n- Verify if you are getting the return on investment that you expected from GitLab.\n\n![DevOps Adoption](https://about.gitlab.com/images/blogimages/devops_reports.png){: .shadow}\n\nIn this example, we can see some interesting data on how a team uses features in development, security, and operations categories:\n\n* **Development**\n  * Approvals: At least one merge request approval on a merge request.\n  * Code owners: At least 1 defined code owner that owns a specific file or repository in the group.\n  * Issues: At least 1 issue opened in this group.\n  * Merge requests: At least 1 merge request opened in this group.\n* **Security**\n  * DAST:  At least 1 DAST scan run in a pipeline in the group.\n  * Dependency Scanning: At least 1 dependency scan ran in a pipeline in the group.\n  * Fuzz Testing: At least 1 fuzz testing scan ran in a pipeline in the group.\n  * SAST: At least 1 SAST scan ran in a pipeline in the group.\n* **Operations**\n  * Deployments: At least 1 deployment.\n  * Pipelines: At least 1 pipeline ran successfully.\n  * Runners: At least 1 runner configured for the project or group.\n\nIn the future we plan to add even more feature categories to DevOps Reports, such as:\n* [Environments](https://docs.gitlab.com/ee/ci/environments/#environments-and-deployments)\n* [Pages](https://docs.gitlab.com/ee/user/project/pages/)\n* [Compliance Pipelines](https://docs.gitlab.com/ee/user/project/settings/index.html#compliance-pipeline-configuration)\n* [Incidents](https://docs.gitlab.com/ee/operations/incident_management/incidents.html)\n* [Review 
Apps](https://docs.gitlab.com/ee/ci/review_apps/#review-apps)\n\n...and much more. You can follow our future plans in the following [epic](https://gitlab.com/groups/gitlab-org/-/epics/5019).\n\n_DevOps Reports are available for the Ultimate tier for self-managed and SaaS users. To find DevOps Reports, go to your group and in the left sidebar, select Analytics > DevOps adoption_\n\n## DevOps Score\n\nYou can use the DevOps score to compare your DevOps status to other organizations.\n\nThe DevOps Score tab shows usage of major GitLab features on your instance over the last 30 days. GitLab calculates the averages feature usage based on the number of billable users in that time period. You can also see the Leader usage score, calculated from top-performing instances based on Service Ping data that GitLab collects. GitLab compares your score to the lead score of each feature and shows it as a percentage underneath the feature. Your overall DevOps Score is an average of your feature scores.\n\nTo analyze your DevOps Score, GitLab aggregates Service Ping (sometimes referred to as Usage Ping) data on GitLab servers for analysis. Your usage information is not sent to any other GitLab instances. If you have just started using GitLab, it may take a few weeks for GitLab to collect enough data to calculate your DevOps Score.\n\n![DevOps Score](https://about.gitlab.com/images/blogimages/dev_ops_score_v12_6.png){: .shadow}\n\n_DevOps score is available at the admin panel for all tiers under Analytics > DevOps Reports._\n\nTo see the DevOps score, you must activate your GitLab instance’s [Service Ping](https://docs.gitlab.com/ee/administration/settings/usage_statistics.html#service-ping). 
This is because DevOps Score is a comparative tool, so your score data must first be centrally processed by GitLab, Inc.\n\nThere are several benefits of enabling Service Ping, such as DevOps Score and cohorts:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ZhLrhZlb_zI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Cohorts\n\nCohorts shows your teams' GitLab activities over time, and is a useful tool for administrators to view user retention and manage seats in their GitLab instance.\n\n![Cohorts](https://about.gitlab.com/images/blogimages/cohorts_v13_9_a.png){: .shadow}\n\nUsers are considered active if they have performed at least one of the following activities:\n\n* Sign in to GitLab.\n* Perform a Git activity such as push or pull.\n* Visit pages related to dashboards, projects, issues, or merge requests.\n* Use the API.\n* Use the GraphQL API.\n\nCover image credit:\n\nCover image by [John Schnobrich](https://unsplash.com/photos/FlPc9_VocJ4) on [Unsplash](https://unsplash.com)\n{: .note}\n",[773,680,9],{"slug":1657,"featured":6,"template":684},"devops-adoption","content:en-us:blog:devops-adoption.yml","Devops Adoption","en-us/blog/devops-adoption.yml","en-us/blog/devops-adoption",{"_path":1663,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1664,"content":1670,"config":1676,"_id":1678,"_type":13,"title":1679,"_source":15,"_file":1680,"_stem":1681,"_extension":18},"/en-us/blog/devops-workflows-json-format-jq-ci-cd-lint",{"title":1665,"description":1666,"ogTitle":1665,"ogDescription":1666,"noIndex":6,"ogImage":1667,"ogUrl":1668,"ogSiteName":669,"ogType":670,"canonicalUrls":1668,"schema":1669},"JSON formatting and CI/CD linting tips for DevOps workflows","Learn how to filter in JSON data structures and interact with the REST API. 
Use the GitLab API to lint your CI/CD configuration and dive into Git hooks speeding up your workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681979/Blog/Hero%20Images/gert-boers-unsplash.jpg","https://about.gitlab.com/blog/devops-workflows-json-format-jq-ci-cd-lint","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tips for productive DevOps workflows: JSON formatting with jq and CI/CD linting automation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2021-04-21\",\n      }",{"title":1671,"description":1666,"authors":1672,"heroImage":1667,"date":1673,"body":1674,"category":769,"tags":1675},"Tips for productive DevOps workflows: JSON formatting with jq and CI/CD linting automation",[1612],"2021-04-21","\n\n## What is JSON linting?\n\nTo understand JSON linting, let’s quickly break down the two concepts of JSON and linting. \n\n***JSON*** is an acronym for JavaScript Object Notation, which is a lightweight, text-based, open standard format designed specifically for representing structured data based on the JavaScript object syntax. It is most commonly used for transmitting data in web applications. It parses data faster than XML and is easy for humans to read and write.\n\n***Linting*** is a process that automatically checks and analyzes static source code for programming and stylistic errors, bugs and suspicious constructs. \n\nJSON has become popular because it is human-readable and doesn’t require a complete markup structure like XML. It is easy to analyze into logical syntactic components, especially in JavaScript. It also has many JSON libraries for most programming languages.\n\n### Benefits of JSON linting\n\nFinding an error in JSON code can be challenging and time-consuming. The best way to find and correct errors while simultaneously saving time is to use a linting tool. 
When Json code is copied and pasted into the linting editor, it validates and reformats Json. It is easy to use and supports a wide range of browsers, so applications development with Json coding don’t require a lot of effort to make them browser-compatible.\n\nJSON linting is an efficient way to reduce errors and it improves the overall quality of the JSON code. This can help accelerate development and reduce costs because errors are discovered earlier.\n\n### Some common JSON linting errors\n\nIn instances where a JSON transaction fails, the error information is conveyed to the user by the API gateway. By default, the API gateway returns a very basic fault to the client when a message filter has failed.\n\nOne common JSON linting error is parsing. A “parse: unexpected character\" error occurs when passing a value that is not a valid JSON string to the JSON. parse method, for example, a native JavaScript object. To solve the error, make sure to only pass valid JSON strings to the JSON.\n\nAnother common error is NULL or inaccurate data errors, not using the right data type per column or extension for JSON files, and not ensuring every row in the JSON table is in the JSON format.\n\n### How to fix JSON linting errors\n\nIf you encounter a NULL or inaccurate data error in parsing, the first step is to make sure you use the right data type per column. For example, in the case of “age,” use 12 instead of twelve.\n\nAlso make sure you are using the right extension for JSON files. When using a compressed JSON file, it must end with “json” followed by the extension of the format, such as “.gz.”\n\nNext, make sure the JSON format is used for every row in the JSON table. Create a table with a delimiter that is not in the input files. 
Then, run a query equivalent to the return name of the file, row points and the file path for the null NSON rows.\n\nSometimes you may find files that are not your source code files, but ones generated by the system when compiling your project. In that instance, when the file has a .js extension, the ESLint needs to exclude that file when searching for errors. One method of doing this is by using ‘IgnorePatterns:’ in .eslintrc.json file either after or before the “rules” tag.\n\n“ignorePatterns”: [“temp.js”, “**/vendor/*.js”],\n\n“rules”: {\n\nAlternatively, you can create a separate file named‘.eslintignore’ and incorporate the files to be excluded as shown below :\n**/*.js\nIf you opt to correct instead of ignore, look for the error code in the last column. Correct all the errors in one fule and rerun ‘npx eslint . >errfile’ and ensure all the errors of that type are cleared. Then look for the next error code and repeat the procedure until all errors are cleared.\n\nOf course, there will be instances when you won’t understand an error, so in that case, open [https://eslint.org/docs/user-guide/getting-started](https://eslint.org/docs/user-guide/getting-started) and type the error code in the ‘Search’ field on the top of the document. There you will find very detailed instructions as to why that error is raised and how to fix it.\n\nFinally, you can forcibly fix errors automatically while generating the error list using:\n\nNpx eslintrc . — fix \n\nThis is not recommended until you become more well-versed with lint errors and how to fix them. Also, you should keep a backup of the files you are linting because while fixing errors, certain code may get overwritten, which could cause your program to fail.\n\n## JSON linting best practices\n\nHere are some tips for helping your consumers use your output:\n\nFirst, always enclose the **Key** **:** **Value** pair within **double quotes**. 
It may be convenient (not sure how) to generate with Single quotes, but JSON parser don’t like to parse JSON objects with single quotes.\n\nFor numerical values, quotes are optional but it is a good idea to enclose them in double quotes.\n\nNext, don’t ever use hyphens in your key fields because it breaks python and scala parser. Instead use underscores (_). \n\nIt’s a good idea to always create a root element, especially when you’re creating a complicated JSON.\n\n\nModern web applications come with a REST API which returns JSON. The format needs to be parsed, and often feeds into scripts and service daemons polling the API for automation.\n\nStarting with a new REST API and its endpoints can often be overwhelming. Documentation may suggest looking into a set of SDKs and libraries for various languages, or instruct you to use `curl` or `wget` on the CLI to send a request. Both CLI tools come with a variety of parameters which help to download and print the response string, for example in JSON format.\n\nThe response string retrieved from `curl` may get long and confusing. It can require parsing the JSON format and filtering for a smaller subset of results. This helps with viewing the results on the CLI, and minimizes the data to process in scripts. The following example retrieves all projects from GitLab and returns a paginated result set with the first 20 projects:\n\n```shell\n$ curl \"https://gitlab.com/api/v4/projects\"\n```\n\n![Raw JSON as API response](https://about.gitlab.com/images/blogimages/devops-workflows-json-format-jq-ci-cd-lint/gitlab_api_response_raw_json.png){: .shadow}\n\nThe [GitLab REST API documentation](https://docs.gitlab.com/ee/api/#how-to-use-the-api) guides you through the first steps with error handling and authentication. In this blog post, we will be using the [Personal Access Token](https://docs.gitlab.com/ee/api/#personalproject-access-tokens) as the authentication method. 
Alternatively, you can use [project access tokens](https://docs.gitlab.com/ee/user/project/settings/project_access_tokens.html) for [automated authentication](https://docs.gitlab.com/ee/api/#authentication) that avoids the use of personal credentials.\n\n### REST API authentication\n\nSince not all endpoints are accessible with anonymous access they might require authentication. Try fetching user profile data with this request:\n\n```shell\n$ curl \"https://gitlab.com/api/v4/user\"\n{\"message\":\"401 Unauthorized\"}\n```\n\nThe API request against the `/user` endpoint requires to pass the personal access token into the request, for example, as a request header. To avoid exposing credentials on the terminal, you can export the token and its value into the user's environment. You can automate the variable export with ZSH and the [.env plugin](https://github.com/ohmyzsh/ohmyzsh/tree/master/plugins/dotenv) in your shell environment. You can also source the `.env` once in the existing shell environment.\n\n```shell\n$ vim ~/.env\n\nexport GITLAB_TOKEN=”...”\n\n$ source ~/.env\n```\n\nScripts and commands being run in your shell environment can reference the `$GITLAB_TOKEN` variable. Try querying the user API endpoint again, with adding the authorization header into the request:\n\n```shell\n$ curl -H \"Authorization: Bearer $GITLAB_TOKEN\" \"https://gitlab.com/api/v4/user\"\n```\n\nA reminder that only administrators can see the attributes of all users, and the individual can only see their user profile – for example, `email` is hidden from the public domain.\n\n### How to request responses in JSON\n\nThe [GitLab API provides many resources](https://docs.gitlab.com/ee/api/api_resources.html) and URL endpoints. 
You can manage almost anything with the API that you’d otherwise configure using the graphic user interface.\n\nAfter sending the [API request](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Request_message), the [response message](https://en.wikipedia.org/wiki/Hypertext_Transfer_Protocol#Response_message) contains the body as string, for example as a [JSON content type](https://docs.gitlab.com/ee/api/#content-type). `curl` can provide more information about the response headers which is helpful for debugging. Multiple verbose levels enable the full debug output with `-vvv`:\n\n```shell\n$ curl -vvv \"https://gitlab.com/api/v4/projects\"\n[...]\n* SSL connection using TLSv1.2 / ECDHE-RSA-CHACHA20-POLY1305\n* ALPN, server accepted to use h2\n* Server certificate:\n*  subject: CN=gitlab.com\n*  start date: Jan 21 00:00:00 2021 GMT\n*  expire date: May 11 23:59:59 2021 GMT\n*  subjectAltName: host \"gitlab.com\" matched cert's \"gitlab.com\"\n*  issuer: C=GB; ST=Greater Manchester; L=Salford; O=Sectigo Limited; CN=Sectigo RSA Domain Validation Secure Server CA\n*  SSL certificate verify ok.\n[...]\n> GET /api/v4/projects HTTP/2\n> Host: gitlab.com\n> User-Agent: curl/7.64.1\n> Accept: */*\n[...]\n\u003C HTTP/2 200\n\u003C date: Mon, 19 Apr 2021 11:25:31 GMT\n\u003C content-type: application/json\n[...]\n[{\"id\":25993690,\"description\":\"project for adding issues\",\"name\":\"project-for-issues-1e1b6d5f938fb240\",\"name_with_namespace\":\"gitlab-qa-sandbox-group / qa-test-2021-04-19-11-13-01-d7d873fd43cd34b6 / project-for-issues-1e1b6d5f938fb240\",\"path\":\"project-for-issues-1e1b6d5f938fb240\",\"path_with_namespace\":\"gitlab-qa-sandbox-group/qa-test-2021-04-19-11-13-01-d7d873fd43cd34b6/project-for-issues-1e1b6d5f938fb240\"\n\n[... 
JSON content ...]\n\n\"avatar_url\":null,\"web_url\":\"https://gitlab.com/groups/gitlab-qa-sandbox-group/qa-test-2021-04-19-11-12-56-7f3128bd0e41b92f\"}}]\n* Closing connection 0\n```\n\nThe `curl` command output provides helpful insights into TLS ciphers and versions, the request lines starting with `>` and response lines starting with `\u003C`. The response body string is encoded as JSON.\n\n### How to see the structure of the returned JSON\n\nTo get a quick look at the structure of the returned JSON file, try these tips:\n\n* Square brackets enclose an array `[ … ]`.\n* Curly brackets enclose a [dictionary](https://en.wikipedia.org/wiki/Associative_array) `{ … }`. Dictionaries are also called associative arrays, maps, etc.\n* `\"key\": value` indicates a key-value pair in a dictionary, which is identified by curly brackets enclosing the key-value pairs.\n\nThe values in [JSON](https://en.wikipedia.org/wiki/JSON) consist of specific types - a string value is put in double-quotes. Boolean true/false, numbers, and floating-point numbers are also present as types. If a key exists but its value is not set, REST APIs often return `null`.\n\nVerify the data structure by running \"linters\". Python's JSON module can parse and lint JSON strings. 
The example below misses a closing square bracket to showcase the error:\n\n```shell\n$ echo '[{\"key\": \"broken\"}' | python -m json.tool\nExpecting object: line 1 column 19 (char 18)\n```\n\n[jq](https://stedolan.github.io/jq/) – a lightweight and flexible CLI processor – can be used as a standalone tool to parse and validate JSON data.\n\n```shell\n$ echo '[{\"key\": \"broken\"}' | jq\nparse error: Unfinished JSON term at EOF at line 2, column 0\n```\n\n[`jq` is available](https://stedolan.github.io/jq/download/) in the package managers of most operating systems.\n\n```shell\n$ brew install jq\n$ apt install jq\n$ dnf install jq\n$ zypper in jq\n$ pacman -S jq\n$ apk add jq\n```\n\n### Dive deep into JSON data structures\n\nThe true power of `jq` lies in how it can be used to parse JSON data:\n\n> `jq` is like `sed` for JSON data. It can be used to slice, filter, map, and transform structured data with the same ease that `sed`, `awk`, `grep` etc., let you manipulate text.\n\nThe output below shows how it looks to run the request against the project API again, but this time, the output is piped to `jq`.\n\n```shell\n$ curl \"https://gitlab.com/api/v4/projects\" | jq\n[\n  {\n    \"id\": 25994891,\n    \"description\": \"...\",\n    \"name\": \"...\",\n\n[...]\n\n    \"forks_count\": 0,\n    \"star_count\": 0,\n    \"last_activity_at\": \"2021-04-19T11:50:24.292Z\",\n    \"namespace\": {\n      \"id\": 11528141,\n      \"name\": \"...\",\n\n[...]\n\n    }\n  }\n]\n```\n\nThe first difference is the format of the JSON data structure, so-called [pretty-printed](https://en.wikipedia.org/wiki/Prettyprint). New lines and indents in data structure scopes help your eyes and allow you to identify the inner and outer data structures involved. This format is needed to determine which `jq` filters and methods you want to apply next.\n\n#### About arrays and dictionaries\n\nThe set of results from an API often is returned as a list (or \"array\") of items. 
An item itself can be a single value or a JSON object. The following example mimics the response from the GitLab API and creates an array of dictionaries as a nested result set.\n\n```shell\n$ vim result.json\n[\n  {\n    \"id\": 1,\n    \"name\": \"project1\"\n  },\n  {\n    \"id\": 2,\n    \"name\": \"project2\"\n  },\n  {\n    \"id\": 3,\n    \"name\": \"project-internal-dev\",\n    \"namespace\": {\n      \"name\": \"🦊\"\n    }\n  }\n]\n```\n\nUse `cat` to print the file content on stdout and pipe it into `jq`. The outer data structure is an array – use `-c .[]` to access and print all items.\n\n```shell\n$ cat result.json | jq -c '.[]'\n{\"id\":1,\"name\":\"project1\"}\n{\"id\":2,\"name\":\"project2\"}\n{\"id\":3,\"name\":\"project-internal-dev\",\"namespace\":{\"name\":\"🦊\"}}\n```\n\n### How to filter data structures with `jq`\n\nFilter items by passing `| select (...)` to `jq`. The filter takes a lambda callback function as a comparator condition. When the item matches the condition, it is returned to the caller.\n\nUse the dot indexer `.` to access dictionary keys and their values. Try to filter for all items where the name is `project2`:\n\n```shell\n$ cat result.json | jq -c '.[] | select (.name == \"project2\")'\n{\"id\":2,\"name\":\"project2\"}\n```\n\nPractice this example by selecting the `id` with the value `2` instead of the `name`.\n\n#### Filter with matching a string\n\nDuring tests, you may need to match different patterns instead of knowing the full name. Think of projects that match a specific path or are located in a group where you only know the prefix. Simple string matches can be achieved with the `| contains (...)` function. 
It allows you to check whether the given string is inside the target string – which requires the selected attribute to be of the string type.\n\nFor a filter with the select chain, the comparison condition needs to be changed from the equal operator `==` to checking the attribute `.name` with `| contains (\"dev\")`.\n\n```shell\n$ cat result.json | jq -c '.[] | select (.name | contains (\"dev\") )'\n{\"id\":3,\"name\":\"project-internal-dev\",\"namespace\":{\"name\":\"🦊\"}}\n```\n\nSimple matches can be achieved with the `contains` function.\n\n#### Filter with matching regular expressions\n\nFor advanced string pattern matching, it is recommended to use regular expressions. `jq` provides the [test function for this use case](https://stedolan.github.io/jq/manual/#RegularexpressionsPCRE). Try to filter for all projects which end with a number, represented by `\\d+`. Note that the backslash `\\` needs to be escaped as `\\\\` for shell execution. `^` tests for beginning of the string, `$` is the ending check.\n\n```shell\n$ cat result.json | jq -c '.[] | select (.name | test (\"^project\\\\d+$\") )'\n{\"id\":1,\"name\":\"project1\"}\n{\"id\":2,\"name\":\"project2\"}\n```\n\nTip: You can [test and build the regular expression with regex101](https://regex101.com/) before test-driving it with `jq`.\n\n#### Access nested values\n\nKey value pairs in a dictionary may have a dictionary or array as a value. `jq` filters need to take this factor into account when filtering or transforming the result. The example data structure provides `project-internal-dev` which has the key `namespace` and a value of a dictionary type.\n\n```shell\n  {\n    \"id\": 3,\n    \"name\": \"project-internal-dev\",\n    \"namespace\": {\n      \"name\": \"🦊\"\n    }\n  }\n```\n\n`jq` allows the user to specify the [array and dictionary types](https://stedolan.github.io/jq/manual/#TypesandValues) as `[]` and `{}` to be used in select chains with greater and less than comparisons. 
The `>={}` comparison selects items with a non-empty dictionary for the `namespace` attribute, while the `\u003C={}` comparison selects items where it is `null` (raw JSON) values.\n\n```shell\n$ cat result.json | jq -c '.[] | select (.namespace >={} )'\n{\"id\":3,\"name\":\"project-internal-dev\",\"namespace\":{\"name\":\"🦊\"}}\n\n$ cat result.json | jq -c '.[] | select (.namespace \u003C={} )'\n{\"id\":1,\"name\":\"project1\"}\n{\"id\":2,\"name\":\"project2\"}\n```\n\nThese methods can be used to access the name attribute of the namespace, but only if the namespace contains values. Tip: You can chain multiple `jq` calls by piping the result into another `jq` call. `.name` is a subkey of the primary `.namespace` key.\n\n```shell\n$ cat result.json | jq -c '.[] | select (.namespace >={} )' | jq -c '.namespace.name'\n\"🦊\"\n```\n\nThe additional select command with non-empty namespaces ensures that only initialized values for `.namespace.name` are returned. This is a safety check, and avoids receiving `null` values in the result you would need to filter again.\n\n```shell\n$ cat result.json| jq -c '.[]' | jq -c '.namespace.name'\nnull\nnull\n\"🦊\"\n```\n\nBy using the additional check with `| select (.namespace >={} )`, you only get the expected results and do not have to filter empty `null` values.\n\n### How to expand the GitLab endpoint response\n\nSave the result from the API projects call and retry the examples above with `jq`.\n\n```shell\n$ curl \"https://gitlab.com/api/v4/projects\" -o result.json >/dev/null 2>&1\n```\n\n### Validate CI/CD YAML with `jq` for Git hooks\n\nWhile writing this blog post, I learned that you can [escape and encode YAML into JSON with `jq`](https://docs.gitlab.com/ee/api/lint.html#escape-yaml-for-json-encoding). 
This trick comes in handy when automating YAML linting on the CLI, for example as a Git pre-commit hook.\n\nLet’s take a look at the simplest way to test GitLab CI/CD from our [community meetup workshops](https://gitlab.com/gitlab-de/swiss-meetup-2021-jan#resources). A common mistake with the first steps of the process can be missing the two spaces indent or missing whitespace between the dash and following command. The following examples use `.gitlab-ci.error.yml` as a filename to showcase errors and `.gitlab-ci.main.yml` for working examples.\n\n```shell\n$ vim .gitlab-ci.error.yml\n\nimage: alpine:latest\n\ntest:\nscript:\n  -exit 1\n```\n\nCommitting the change and waiting for the CI/CD pipeline to validate at runtime can be time-consuming. The [GitLab API provides a resource endpoint /ci/lint](https://docs.gitlab.com/ee/api/lint.html#validate-the-ci-yaml-configuration). A POST request with JSON-encoded YAML content will return a linting result faster.\n\n#### Parse CI/CD YAML into JSON with jq\n\nYou can use jq to parse the raw YAML string into JSON:\n\n```shell\n$ jq --raw-input --slurp \u003C .gitlab-ci.error.yml\n\"image: alpine:latest\\n\\ntest:\\nscript:\\n  -exit 1\\n\"\n```\n\nThe `/ci/lint` API endpoint requires a JSON dictionary with `content` as key, and the raw YAML string as a value. You can use `jq` to format the input by using the arg parser:\n\n```shell\n$ jq --null-input --arg yaml \"$(\u003C.gitlab-ci.error.yml)\" '.content=$yaml'\n{\n  \"content\": \"image: alpine:latest\\n\\ntest:\\nscript:\\n  -exit 1\"\n}\n```\n\n#### Send POST request to /ci/lint\n\nThe next building block is to [send a POST request to the /ci/lint](https://docs.gitlab.com/ee/api/lint.html#validate-the-ci-yaml-configuration). The request needs to specify the `Content-Type` header for the body. 
With using the pipe `|` character, the JSON-encoded YAML configuration is fed into the curl command call.\n\n```shell\n$ jq --null-input --arg yaml \"$(\u003C.gitlab-ci.error.yml)\" '.content=$yaml' \\\n| curl \"https://gitlab.com/api/v4/ci/lint?include_merged_yaml=true\" \\\n--header 'Content-Type: application/json' --data @-\n{\"status\":\"invalid\",\"errors\":[\"jobs test config should implement a script: or a trigger: keyword\",\"jobs script config should implement a script: or a trigger: keyword\",\"jobs config should contain at least one visible job\"],\"warnings\":[],\"merged_yaml\":\"",[771,773,9],{"slug":1677,"featured":6,"template":684},"devops-workflows-json-format-jq-ci-cd-lint","content:en-us:blog:devops-workflows-json-format-jq-ci-cd-lint.yml","Devops Workflows Json Format Jq Ci Cd Lint","en-us/blog/devops-workflows-json-format-jq-ci-cd-lint.yml","en-us/blog/devops-workflows-json-format-jq-ci-cd-lint",{"_path":1683,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1684,"content":1690,"config":1695,"_id":1697,"_type":13,"title":1698,"_source":15,"_file":1699,"_stem":1700,"_extension":18},"/en-us/blog/efficient-devsecops-workflows-hands-on-python-gitlab-api-automation",{"ogTitle":1685,"schema":1686,"ogImage":1687,"ogDescription":1688,"ogSiteName":669,"noIndex":6,"ogType":670,"ogUrl":1689,"title":1685,"canonicalUrls":1689,"description":1688},"Efficient DevSecOps workflows: Hands-on python-gitlab API automation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Efficient DevSecOps workflows: Hands-on python-gitlab API automation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2023-02-01\"\n      }","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659883/Blog/Hero%20Images/post-cover-image.jpg","The python-gitlab library is a useful abstraction layer for the GitLab API. 
Dive into hands-on examples and best practices in this tutorial.","https://about.gitlab.com/blog/efficient-devsecops-workflows-hands-on-python-gitlab-api-automation",{"title":1685,"description":1688,"authors":1691,"heroImage":1687,"date":1692,"body":1693,"category":769,"tags":1694},[1612],"2023-02-01","A friend once said in a conference presentation, “Manual work is a bug.\" When there are repetitive tasks in workflows, I tend to [come back to this quote](https://twitter.com/dnsmichi/status/1574087419237916672), and try to automate as much as possible. For example, by querying a REST API to do an inventory of settings, or calling API actions to create new comments in GitLab issues/merge requests. The interaction with the GitLab REST API can be done in different ways, using HTTP requests with curl (or [hurl](/blog/how-to-continously-test-web-apps-apis-with-hurl-and-gitlab-ci-cd/)) on the command line, or by writing a script in a programming language. The latter can become reinventing the wheel again with raw HTTP requests code, and parsing the JSON responses.\n\nThanks to the wider GitLab community, many different languages are supported by API abstraction libraries. They provide support for all API attributes, add helper functions to get/create/delete objects, and generally aim to help developers focus. The [python-gitlab library](https://python-gitlab.readthedocs.io/en/stable/) is a feature-rich and easy-to-use library written in Python.\n\nIn this blog post, you will learn about the basic usage of the library by working with API objects, attributes, pagination and resultsets, and dive into more concrete use cases collecting data, printing summaries and writing data to the API to create comments and commits. 
There is a whole lot more to learn, with many of the use cases inspired by wider community questions on the forum, Hacker News, issues, etc.\n\nThis blog post is a long read, so feel free to stick with the beginner's tutorial or skip to the advanced [DevSecOps](https://about.gitlab.com/topics/devsecops/) use cases, development tips and code optimizations by navigating the table of contents:\n\n- [Getting started](#getting-started)\n- [Configuration](#configuration)\n- [Managing objects: The GitLab Object](#managing-objects-the-gitlab-object)\n    - [Objects managers and loading](#objects-managers-and-loading)\n    - [Pagination of results](#pagination-of-results)\n    - [Working with object relationships](#working-with-object-relationships)\n    - [Working with different object collection scopes](#working-with-different-object-collection-scopes)\n- [DevSecOps use cases for API read actions](#devsecops-use-cases-for-api-read-actions)\n    - [List branches by merged state](#list-branches-by-merged-state)\n    - [Print project settings for review: MR approval rules](#print-project-settings-for-review-mr-approval-rules)\n    - [Inventory: Get all CI/CD variables that are protected or masked](#inventory-get-all-cicd-variables-that-are-protected-or-masked)\n    - [Download a file from the repository](#download-a-file-from-the-repository)\n    - [Migration help: List all certificate-based Kubernetes clusters](#migration-help-list-all-certificate-based-kubernetes-clusters)\n    - [Team efficiency: Check if existing merge requests need to be rebased after merging a huge refactoring MR](#team-efficiency-check-if-existing-merge-requests-need-to-be-rebased-after-merging-a-huge-refactoring-mr)\n- [DevSecOps use cases for API write actions](#devsecops-use-cases-for-api-write-actions)\n    - [Move epics between groups](#move-epics-between-groups)\n    - [Compliance: Ensure that project settings are not overridden](#compliance-ensure-that-project-settings-are-not-overridden)\n    
- [Taking notes, generate due date overview](#taking-notes-generate-due-date-overview)\n    - [Create issue index in a Markdown file, grouped by labels](#create-issue-index-in-a-markdown-file-grouped-by-labels)\n- [Advanced DevSecOps workflows](#advanced-devsecops-workflows)\n    - [Container images to run API scripts](#container-images-to-run-api-scripts)\n    - [CI/CD integration: Release and changelog generation](#cicd-integration-release-and-changelog-generation)\n    - [CI/CD integration: Pipeline report summaries](#cicd-integration-pipeline-report-summaries)\n- [Development tips](#development-tips)\n    - [Advanced custom configuration](#advanced-custom-configuration)\n    - [CI/CD code linting for different Python versions](#cicd-code-linting-for-different-python-versions)\n- [Optimize code and performance](#optimize-code-and-performance)\n    - [Lazy objects](#lazy-objects)\n    - [Object-oriented programming](#object-oriented-programming)\n- [More use cases](#more-use-cases)\n- [Conclusion](#conclusion)\n\n## Getting started\n\nThe python-gitlab documentation is a great resource for [getting started guides](https://python-gitlab.readthedocs.io/en/stable/api-usage.html), object types and their available methods, and combined workflow examples. Together with the [GitLab API resources documentation](https://docs.gitlab.com/ee/api/api_resources.html), which provides the object attributes that can be used, these are the best resources to get going.\n\nThe code examples in this blog post require Python 3.8+, and the `python-gitlab` library. Additional requirements are specified in the `requirements.txt` file – one example requires `pyyaml` for YAML config parsing. To follow and practice the use cases code, it is recommended to clone the project, install the requirements and run the scripts. 
Example with Homebrew on macOS:\n\n```shell\ngit clone https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python.git\n\ncd gitlab-api-python\n\nbrew install python\n\npip3 install -r requirements.txt\n\npython3 \u003Cscriptname>.py\n```\n\nThe scripts are intentionally not using a common shared library that provides generic functions for parameter reads, or additional helper functionality, for example. The idea is to show easy-to-follow examples that can be used stand-alone for testing, and only require installing the `python-gitlab` library as a dependency. Improving the code for production use is recommended. This can also help with building a maintained API tooling project that, for example, includes container images and CI/CD templates for developers to consume on a DevSecOps platform.\n\n## Configuration\n\nWithout configuration, python-gitlab will run unauthenticated requests against the default server `https://gitlab.com`. The most common configuration settings relate to the GitLab instance to connect to, and the authentication method by specifying access tokens. Python-gitlab supports different types of configuration: A configuration file or environment variables.\n\nThe [configuration file](https://python-gitlab.readthedocs.io/en/stable/cli-usage.html#cli-configuration) is available for the API library bindings, and the CLI (the CLI is not explained in this blog post). The configuration file supports [credential helpers](https://python-gitlab.readthedocs.io/en/stable/cli-usage.html#credential-helpers) to access tokens directly.\n\nEnvironment variables as an alternative configuration method provide an easy way to run the script on terminal, integrate into container images, and prepare them for running in CI/CD pipelines.\n\nThe configuration needs to be loaded into the Python script context. Start by importing the `os` library to fetch environment variables using the `os.environ.get()` method. 
The first parameter specifies the key, the second parameter sets the default value when the variable is not available in the environment.\n\n```python\nimport os\n\ngl_server = os.environ.get('GL_SERVER', 'https://gitlab.com')\n\nprint(gl_server)\n```\n\nThe parametrization on the terminal can happen directly for the command only, or exported into the shell environment.\n\n```shell\n$ GL_SERVER=’https://gitlab.company.com’ python3 script.py\n\n$ export GL_SERVER=’https://gitlab.company.com’\n$ python3 script.py\n```\n\nIt is recommended to add safety checks to ensure that all variables are set before continuing to run the program. The following snippet imports the required libraries, reads the `GL_SERVER` environment variable and expects the user to set the `GL_TOKEN` variable. If not, the script prints and throws errors, and calls `sys.exit(1)` indicating an error status.\n\n```python\nimport gitlab\nimport os\nimport sys\n\nGITLAB_SERVER = os.environ.get('GL_SERVER', 'https://gitlab.com')\nGITLAB_TOKEN = os.environ.get('GL_TOKEN')\n\nif not GITLAB_TOKEN:\n    print(\"Please set the GL_TOKEN env variable.\")\n    sys.exit(1)\n```\n\nWe will look into a more detailed example now which creates a connection to the API and makes an actual data request.\n\n## Managing objects: The GitLab object\n\nAny interaction with the API requires the GitLab object to be instantiated. This is the entry point to configure the GitLab server to connect, authenticate using access tokens, and more global settings for pagination, object loading and more.\n\nThe following example runs an unauthenticated request against GitLab.com. 
It is possible to access public API endpoints and for example get a specific [.gitignore template for Python](https://python-gitlab.readthedocs.io/en/stable/gl_objects/templates.html#gitignore-templates).\n\n[python_gitlab_object_unauthenticated.py](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/python_gitlab_object_unauthenticated.py)\n\n```python\nimport gitlab\n\ngl = gitlab.Gitlab()\n\n# Get .gitignore templates without authentication\ngitignore_templates = gl.gitignores.get('Python')\n\nprint(gitignore_templates.content)\n```\n\nThe next sections provide more insights into:\n\n- [Objects managers and loading](#objects-managers-and-loading)\n- [Pagination of results](#pagination-of-results)\n- [Working with object relationships](#working-with-object-relationships)\n- [Working with different object collection scopes](#working-with-different-object-collection-scopes)\n\n### Objects managers and loading\n\nThe python-gitlab library provides access to GitLab resources using so-called “[managers](https://python-gitlab.readthedocs.io/en/stable/api-usage.html#managers)\". Each manager type implements methods to work with the datasets (list, get, etc.).\n\nThe script shows how to access subgroups, direct projects, all projects including subgroups, issues, epics and todos. These methods and API endpoint require authentication to access all attributes. 
The code snippet, therefore, uses variables to get the authentication token, and also uses the `GROUP_ID` variable to specify a main group at which to start searching.\n\n```python\n#!/usr/bin/env python\n\nimport gitlab\nimport os\nimport sys\n\nGITLAB_SERVER = os.environ.get('GL_SERVER', 'https://gitlab.com')\n# https://gitlab.com/gitlab-de/use-cases/\nGROUP_ID = os.environ.get('GL_GROUP_ID', 16058698)\nGITLAB_TOKEN = os.environ.get('GL_TOKEN')\n\nif not GITLAB_TOKEN:\n    print(\"Please set the GL_TOKEN env variable.\")\n    sys.exit(1)\n\ngl = gitlab.Gitlab(GITLAB_SERVER, private_token=GITLAB_TOKEN)\n\n# Main\nmain_group = gl.groups.get(GROUP_ID)\n\nprint(\"Sub groups\")\nfor sg in main_group.subgroups.list():\n    print(\"Subgroup name: {sg}\".format(sg=sg.name))\n\nprint(\"Projects (direct)\")\nfor p in main_group.projects.list():\n    print(\"Project name: {p}\".format(p=p.name))\n\nprint(\"Projects (including subgroups)\")\nfor p in main_group.projects.list(include_subgroups=True, all=True):\n    print(\"Project name: {p}\".format(p=p.name))\n\nprint(\"Issues\")\nfor i in main_group.issues.list(state='opened'):\n    print(\"Issue title: {t}\".format(t=i.title))\n\nprint(\"Epics\")\nfor e in main_group.epics.list():\n    print(\"Epic title: {t}\".format(t=e.title))\n\nprint(\"Todos\")\nfor t in gl.todos.list(state='pending'):\n    print(\"Todo: {t} url: {u}\".format(t=t.body, u=t.target_url))\n```\n\nYou can run the script [`python_gitlab_object_manager_methods.py`](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/python_gitlab_object_manager_methods.py) by overriding the `GROUP_ID` variable on GitLab.com SaaS for your own group to analyze. The `GL_SERVER` variable needs to be specified for self-managed instance targets. 
`GL_TOKEN` must provide the personal access token.\n\n```shell\nexport GL_TOKEN=xxx\n\nexport GL_SERVER=”https://gitlab.company.com”\n\nexport GL_SERVER=”https://gitlab.com”\n\nexport GL_GROUP_ID=1234\n\npython3 python_gitlab_object_manager_methods.py\n```\n\nGoing forward, the example snippets won’t show the Python headers and environment variable parsing to focus on the algorithm and functionality. All scripts are open source under the MIT license and available in [this project](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python).\n\n### Pagination of results\n\nBy default, the GitLab API does not return all result sets and requires the clients to use [pagination](https://docs.gitlab.com/ee/api/rest/index.html#pagination) to iterate through all result pages. The python-gitlab library [allows users to specify the settings](https://python-gitlab.readthedocs.io/en/stable/api-usage.html#pagination) globally in the GitLab object, or on each `list()` call. By default, all result sets would fire API requests, which can slow down the script execution. The recommended way is using `iterator=True` which returns a generator object, and API calls are fired on-demand when accessing the object.\n\nThe following example searches for the group name `everyonecancontribute`, and uses keyset pagination with 100 results on each page. The iterator is set to true on `gl.groups.list(iterator=True)` to fetch new result sets on demand. 
If the searched group name is found, the loop breaks and prints a summary, including measuring the duration of the complete search request.\n\n```python\nSEARCH_GROUP_NAME=\"everyonecancontribute\"\n\n# Use keyset pagination\n# https://python-gitlab.readthedocs.io/en/stable/api-usage.html#pagination\ngl = gitlab.Gitlab(GITLAB_SERVER, private_token=GITLAB_TOKEN,\n    pagination=\"keyset\", order_by=\"id\", per_page=100)\n\n# Iterate over the list, and fire new API calls in case the result set does not match yet\ngroups = gl.groups.list(iterator=True)\n\nfound_page = 0\nstart = timer()\n\nfor group in groups:\n    if SEARCH_GROUP_NAME == group.name:\n        # print(group) # debug\n        found_page = groups.current_page\n        break\n\nend = timer()\n\nduration = f'{end-start:.2f}'\n\nif found_page > 0:\n    print(\"Pagination API example for Python with GitLab{desc} - found group {g} on page {p}, duration {d}s\".format(\n        desc=\", the DevSecOps platform\", g=SEARCH_GROUP_NAME, p=found_page, d=duration))\nelse:\n    print(\"Could not find group name '{g}', duration {d}\".format(g=SEARCH_GROUP_NAME, d=duration))\n```\n\nExecuting `python_gitlab_pagination.py` found the [everyonecancontribute group](https://gitlab.com/everyonecancontribute) on page 5.\n\n```shell\n$ python3 python_gitlab_pagination.py\nPagination API example for Python with GitLab, the DevSecOps platform - found group everyonecancontribute on page 5, duration 8.51s\n```\n\n### Working with object relationships\n\nWhen working with object relationships – for example, collecting all projects in a given group – additional steps need to be taken. The returned project objects provide limited attributes by default. Manageable objects require an additional `get()` call which requests the full project object from the API in the background. 
This on-demand workflow helps to avoid waiting times and traffic by reducing the immediately returned attributes.\n\nThe following example illustrates the problem by looping through all projects in a group, and tries to call the `project.branches.list()` function, raising an exception in the try/except flow. The second example gets a manageable project object and tries the function call again.\n\n```python\n# Main\ngroup = gl.groups.get(GROUP_ID)\n\n# Collect all projects in group and subgroups\nprojects = group.projects.list(include_subgroups=True, all=True)\n\nfor project in projects:\n    # Try running a method on a weak object\n    try:\n       print(\"🤔 Project: {pn} 💡 Branches: {b}\\n\".format(\n        pn=project.name,\n        b=\", \".join([x.name for x in project.branches.list()])))\n    except Exception as e:\n        print(\"Got exception: {e} \\n ===================================== \\n\".format(e=e))\n\n    # Retrieve a full manageable project object\n    # https://python-gitlab.readthedocs.io/en/stable/gl_objects/groups.html#examples\n    manageable_project = gl.projects.get(project.id)\n\n    # Print a method available on a manageable object\n    print(\"🤔 Project: {pn} 💡 Branches: {b}\\n\".format(\n        pn=manageable_project.name,\n        b=\", \".join([x.name for x in manageable_project.branches.list()])))\n```\n\nThe exception handler in the python-gitlab library prints the error message, and also links to the documentation. It is helpful to take a debugging note that objects might not be available to manage whenever you cannot access object attributes or function calls.\n\n```shell\n$ python3 python_gitlab_manageable_objects.py\n\n🤔 Project: GitLab API Playground 💡 Branches: cicd-demo-automated-comments, docs-mr-approval-settings, main\n\nGot exception: 'GroupProject' object has no attribute 'branches'\n\n\u003Cclass 'gitlab.v4.objects.projects.GroupProject'> was created via a\nlist() call and only a subset of the data may be present. 
To ensure\nall data is present get the object using a get(object.id) call. For\nmore details, see:\n\nhttps://python-gitlab.readthedocs.io/en/v3.8.1/faq.html#attribute-error-list\n =====================================\n```\n\nThe full script is located [here](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/python_gitlab_manageable_objects.py).\n\n### Working with different object collection scopes\n\nSometimes, the script needs to collect all projects from a self-managed instance, or from a group with subgroups, or from a single project. The latter is helpful for faster testing on the required attributes, and the group fetch helps with testing at scale later. The following snippet collects all project objects into the `projects` list, and appends objects from different incoming configuration. You will also see the manageable object pattern for project in groups again.\n\n```python\n    # Collect all projects, or prefer projects from a group id, or a project id\n    projects = []\n\n    # Direct project ID\n    if PROJECT_ID:\n        projects.append(gl.projects.get(PROJECT_ID))\n\n    # Groups and projects inside\n    elif GROUP_ID:\n        group = gl.groups.get(GROUP_ID)\n\n        for project in group.projects.list(include_subgroups=True, all=True):\n            # https://python-gitlab.readthedocs.io/en/stable/gl_objects/groups.html#examples\n            manageable_project = gl.projects.get(project.id)\n            projects.append(manageable_project)\n\n    # All projects on the instance (may take a while to process)\n    else:\n        projects = gl.projects.list(get_all=True)\n```\n\nThe full example is located in [this script](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/get_mr_approval_rules.py) for listing MR approval rules settings for specified project targets.\n\n## DevSecOps use cases for API read actions\n\nThe authenticated access token needs [`read_api` 
scope](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#personal-access-token-scopes).\n\nThe following use cases are discussed:\n\n- [List branches by merged state](#list-branches-by-merged-state)\n- [Print project settings for review: MR approval rules](#print-project-settings-for-review-mr-approval-rules)\n- [Inventory: Get all CI/CD variables that are protected or masked](#inventory-get-all-cicd-variables-that-are-protected-or-masked)\n- [Download a file from the repository](#download-a-file-from-the-repository)\n- [Migration help: List all certificate-based Kubernetes clusters](#migration-help-list-all-certificate-based-kubernetes-clusters)\n- [Team efficiency: Check if existing merge requests need to be rebased after merging a huge refactoring MR](#team-efficiency-check-if-existing-merge-requests-need-to-be-rebased-after-merging-a-huge-refactoring-mr)\n\n### List branches by merged state\n\nA common ask is to do some Git housekeeping in the project, and see how many merged and unmerged branches are floating around. [A question on the GitLab community forum](https://forum.gitlab.com/t/python-gitlab-project-branch-list-filter/80257) about filtering branch listings inspired me to look into writing a [script](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/get_branches_by_state.py) that helps achieve this goal. The `branches.list()` method returns all branch objects that are stored in a temporary list for later processing for two loops: Collecting merged branch names, and not merged branch names. 
The `merged` attribute on the `branch` object is a boolean value indicating whether the branch has been merged.\n\n```python\nproject = gl.projects.get(PROJECT_ID, lazy=False, pagination=\"keyset\", order_by=\"updated_at\", per_page=100)\n\n# Get all branches\nreal_branches = []\nfor branch in project.branches.list():\n    real_branches.append(branch)\n\nprint(\"All branches\")\nfor rb in real_branches:\n    print(\"Branch: {b}\".format(b=rb.name))\n\n# Get all merged branches\nmerged_branches_names = []\nfor branch in real_branches:\n    if branch.default:\n        continue # ignore the default branch for merge status\n\n    if branch.merged:\n        merged_branches_names.append(branch.name)\n\nprint(\"Branches merged: {b}\".format(b=\", \".join(merged_branches_names)))\n\n# Get un-merged branches\nnot_merged_branches_names = []\nfor branch in real_branches:\n    if branch.default:\n        continue # ignore the default branch for merge status\n\n    if not branch.merged:\n        not_merged_branches_names.append(branch.name)\n\nprint(\"Branches not merged: {b}\".format(b=\", \".join(not_merged_branches_names)))\n```\n\nThe workflow is intentionally a step-by-step read, you can practice optimizing the Python code for the conditional branch name collection.\n\n\n### Print project settings for review: MR approval rules\n\nThe following [script](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/get_mr_approval_rules.py) walks through all collected project objects, and checks whether approval rules are specified. 
If the list length is greater than zero, it loops over the list and prints the settings using a JSON pretty-print method.\n\n```python\n    # Loop over projects and print the settings\n    # https://python-gitlab.readthedocs.io/en/stable/gl_objects/merge_request_approvals.html\n    for project in projects:\n        if len(project.approvalrules.list()) > 0:\n            #print(project) #debug\n            print(\"# Project: {name}, ID: {id}\\n\\n\".format(name=project.name_with_namespace, id=project.id))\n            print(\"[MR Approval settings]({url}/-/settings/merge_requests)\\n\\n\".format(url=project.web_url))\n\n            for ar in project.approvalrules.list():\n                print(\"## Approval rule: {name}, ID: {id}\".format(name=ar.name, id=ar.id))\n                print(\"\\n```json\\n\")\n                print(json.dumps(ar.attributes, indent=2)) # TODO: can be more beautiful, but serves its purpose with pretty print JSON\n                print(\"\\n```\\n\")\n\n```\n\n### Inventory: Get all CI/CD variables that are protected or masked\n\n[CI/CD variables](https://docs.gitlab.com/ee/ci/variables/) are helpful for pipeline parameterization, and can be configured globally on the instance, in groups and in projects. Secrets, passwords and otherwise sensitive information could be stored there, too. Sometimes it can be necessary to get an overview of all CI/CD variables that are either protected or masked to get a sense of how many variables need to be updated when rotating tokens for example.\n\nThe following [script](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/get_all_cicd_variables_masked_or_protected.py) gets all groups and projects and tries to collect the CI/CD variables from the global instance (requires admin permissions), groups and projects (requires maintainer/owner permissions). 
It prints all CI/CD variables that are either protected or masked, adding that a potential secret value is stored.\n\n```python\n#!/usr/bin/env python\n\nimport gitlab\nimport os\nimport sys\n\n# Helper function to evaluate secrets and print the variables\ndef eval_print_var(var):\n    if var.protected or var.masked:\n        print(\"🛡️🛡️🛡️ Potential secret: Variable '{name}', protected {p}, masked: {m}\".format(name=var.key,p=var.protected,m=var.masked))\n\nGITLAB_SERVER = os.environ.get('GL_SERVER', 'https://gitlab.com')\nGITLAB_TOKEN = os.environ.get('GL_TOKEN') # token requires maintainer+ permissions. Instance variables require admin access.\nPROJECT_ID = os.environ.get('GL_PROJECT_ID') #optional\nGROUP_ID = os.environ.get('GL_GROUP_ID', 8034603) # https://gitlab.com/everyonecancontribute\n\nif not GITLAB_TOKEN:\n    print(\"🤔 Please set the GL_TOKEN env variable.\")\n    sys.exit(1)\n\ngl = gitlab.Gitlab(GITLAB_SERVER, private_token=GITLAB_TOKEN)\n\n# Collect all projects, or prefer projects from a group id, or a project id\nprojects = []\n# Collect all groups, or prefer group from a group id\ngroups = []\n\n# Direct project ID\nif PROJECT_ID:\n    projects.append(gl.projects.get(PROJECT_ID))\n\n# Groups and projects inside\nelif GROUP_ID:\n    group = gl.groups.get(GROUP_ID)\n\n    for project in group.projects.list(include_subgroups=True, all=True):\n        # https://python-gitlab.readthedocs.io/en/stable/gl_objects/groups.html#examples\n        manageable_project = gl.projects.get(project.id)\n        projects.append(manageable_project)\n\n    groups.append(group)\n\n# All projects/groups on the instance (may take a while to process, use iterators to fetch on-demand).\nelse:\n    projects = gl.projects.list(iterator=True)\n    groups = gl.groups.list(iterator=True)\n\nprint(\"# List of all CI/CD variables marked as secret (instance, groups, projects)\")\n\n# https://python-gitlab.readthedocs.io/en/stable/gl_objects/variables.html\n\n# Instance variables 
(if the token has permissions)\nprint(\"Instance variables, if accessible\")\ntry:\n    for i_var in gl.variables.list(iterator=True):\n        eval_print_var(i_var)\nexcept:\n    print(\"No permission to fetch global instance variables, continuing without.\")\n    print(\"\\n\")\n\n# group variables (maintainer permissions for groups required)\nfor group in groups:\n    print(\"Group {n}, URL: {u}\".format(n=group.full_path, u=group.web_url))\n    for g_var in group.variables.list(iterator=True):\n        eval_print_var(g_var)\n\n    print(\"\\n\")\n\n# Loop over projects and print the settings\nfor project in projects:\n    # skip archived projects, they throw 403 errors\n    if project.archived:\n        continue\n\n    print(\"Project {n}, URL: {u}\".format(n=project.path_with_namespace, u=project.web_url))\n    for p_var in project.variables.list(iterator=True):\n        eval_print_var(p_var)\n\n    print(\"\\n\")\n```\n\nThe script intentionally does not print the variable values, this is left as an exercise for safe environments. 
The recommended way of storing secrets is to [use external providers](https://docs.gitlab.com/ee/ci/secrets/).\n\n### Download a file from the repository\n\nThe goal of the [script](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/get_raw_file_content.py) is to download a file path from a specified branch name, and store its content in a new file.\n\n```python\n# Goal: Try to download README.md from https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/README.md\nFILE_NAME = 'README.md'\nBRANCH_NAME = 'main'\n\n# Search the file in the repository tree and get the raw blob\nfor f in project.repository_tree():\n    print(\"File path '{name}' with id '{id}'\".format(name=f['name'], id=f['id']))\n\n    if f['name'] == FILE_NAME:\n        f_content = project.repository_raw_blob(f['id'])\n        print(f_content)\n\n# Alternative approach: Get the raw file from the main branch\nraw_content = project.files.raw(file_path=FILE_NAME, ref=BRANCH_NAME)\nprint(raw_content)\n\n# Store the file on disk\nwith open('raw_README.md', 'wb') as f:\n    project.files.raw(file_path=FILE_NAME, ref=BRANCH_NAME, streamed=True, action=f.write)\n```\n\n### Migration help: List all certificate-based Kubernetes clusters\n\nThe certificate-based integration of Kubernetes clusters into GitLab [was deprecated](https://docs.gitlab.com/ee/update/deprecations.html#self-managed-certificate-based-integration-with-kubernetes). 
To help with migration plans, the inventory of existing groups and projects can be automated using the GitLab API.\n\n\n```python\ngroups = [ ]\n\n# get GROUP_ID group\ngroups.append(gl.groups.get(GROUP_ID))\n\nfor group in groups:\n    for sg in group.subgroups.list(include_subgroups=True, all=True):\n        real_group = gl.groups.get(sg.id)\n        groups.append(real_group)\n\ngroup_clusters = {}\nproject_clusters = {}\n\nfor group in groups:\n    #Collect group clusters\n    g_clusters = group.clusters.list()\n\n    if len(g_clusters) > 0:\n        group_clusters[group.id] = g_clusters\n\n    # Collect all projects in group and subgroups and their clusters\n    projects = group.projects.list(include_subgroups=True, all=True)\n\n    for project in projects:\n        # https://python-gitlab.readthedocs.io/en/stable/gl_objects/groups.html#examples\n        manageable_project = gl.projects.get(project.id)\n\n        # skip archived projects\n        if project.archived:\n            continue\n\n        p_clusters = manageable_project.clusters.list()\n\n        if len(p_clusters) > 0:\n            project_clusters[project.id] = p_clusters\n\n# Print summary\nprint(\"## Group clusters\\n\\n\")\nfor g_id, g_clusters in group_clusters.items():\n    url = gl.groups.get(g_id).web_url\n    print(\"Group ID {g_id}: {u}\\n\\n\".format(g_id=g_id, u=url))\n    print_clusters(g_clusters)\n\nprint(\"## Project clusters\\n\\n\")\nfor p_id, p_clusters in project_clusters.items():\n    url = gl.projects.get(p_id).web_url\n    print(\"Project ID {p_id}: {u}\\n\\n\".format(p_id=p_id, u=url))\n    print_clusters(p_clusters)\n```\n\nThe full script is available [here](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/list_cert_based_kubernetes_clusters.py).\n\n### Team efficiency: Check if existing merge requests need to be rebased after merging a huge refactoring MR\n\nThe [GitLab handbook](/handbook/) repository is a large monorepo with many merge 
requests created, reviewed, approved and merged. Some reviews take longer than others, and some merge requests touch multiple pages when renaming a string, or [all handbook pages](/handbook/about/#count-handbook-pages). The marketing handbook needed restructuring (think of code refactoring), and as such, many directories and paths were moved or renamed. [The issue tasks](https://gitlab.com/gitlab-com/www-gitlab-com/-/issues/13991#tasks) grew over time, and I was worried that other merge requests would run into conflicts after merging the huge changes. I remembered that the python-gitlab library can fetch all merge requests in a given project, including details on the Git branch, source paths changed and much more.\n\nThe resulting script configures a list of source paths that are touched by all merge requests, and checks against the merge request diff with `mr.diffs.list()` and comparing if a pattern matches against the value in `old_path`. If a match is found, the script logs it, and saves the merge request in the `seen_mr` dictionary for the summary later. There are additional attributes collected to allow printing a Markdown task list with URLs for easier copy-paste into [issue descriptions](https://gitlab.com/gitlab-com/www-gitlab-com/-/issues/13991#additional-tasks). 
The full script is located [here](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/search_mr_contains_updated_path.py).\n\n\n```python\nPATH_PATTERNS = [\n    'path/to/handbook/source/page.md',\n]\n\n# Only list opened MRs\n# https://python-gitlab.readthedocs.io/en/stable/gl_objects/merge_requests.html#project-merge-requests\nmrs = project.mergerequests.list(state='opened', iterator=True)\n\nseen_mr = {}\n\nfor mr in mrs:\n    # https://docs.gitlab.com/ee/api/merge_requests.html#list-merge-request-diffs\n    real_mr = project.mergerequests.get(mr.get_id())\n    real_mr_id = real_mr.attributes['iid']\n    real_mr_url = real_mr.attributes['web_url']\n\n    for diff in real_mr.diffs.list(iterator=True):\n        real_diff = real_mr.diffs.get(diff.id)\n\n        for d in real_diff.attributes['diffs']:\n            for p in PATH_PATTERNS:\n                if p in d['old_path']:\n                    print(\"MATCH: {p} in MR {mr_id}, status '{s}', title '{t}' - URL: {mr_url}\".format(\n                        p=p,\n                        mr_id=real_mr_id,\n                        s=mr_status,\n                        t=real_mr.attributes['title'],\n                        mr_url=real_mr_url))\n\n                    if not real_mr_id in seen_mr:\n                        seen_mr[real_mr_id] = real_mr\n\nprint(\"\\n# MRs to update\\n\")\n\nfor id, real_mr in seen_mr.items():\n    print(\"- [ ] !{mr_id} - {mr_url}+ Status: {s}, Title: {t}\".format(\n        mr_id=id,\n        mr_url=real_mr.attributes['web_url'],\n        s=real_mr.attributes['detailed_merge_status'],\n        t=real_mr.attributes['title']))\n```\n\n\n## DevSecOps use cases for API write actions\n\nThe authenticated access token needs full [`api` scope](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#personal-access-token-scopes).\n\nThe following use cases are discussed:\n\n- [Move epics between groups](#move-epics-between-groups)\n- [Compliance: Ensure 
that project settings are not overridden](#compliance-ensure-that-project-settings-are-not-overridden)\n- [Taking notes, generate due date overview](#taking-notes-generate-due-date-overview)\n- [Create issue index in a Markdown file, grouped by labels](#create-issue-index-in-a-markdown-file-grouped-by-labels)\n\n### Move epics between groups\n\nSometimes it is necessary to move epics, similar to issues, into a different group. A question in the GitLab marketing Slack channel inspired me to look into a [feature proposal for the UI](https://gitlab.com/gitlab-org/gitlab/-/issues/12689), [quick actions](/blog/improve-your-gitlab-productivity-with-these-10-tips/), and later, thinking about writing an API script to automate the steps. The idea is simple: Move an epic from a source group to a target group, and copy its title, description and labels. Since epics allow grouping issues, they need to be reassigned to the target epic, too. Parent-child epic relationships need to be taken into account, too: All child epics of the source epic need to be reassigned to the target epic.\n\nThe following script looks up all source [epic attributes](https://python-gitlab.readthedocs.io/en/stable/gl_objects/epics.html) first, and then creates a new target epic with minimal attributes: title and description. The labels list is copied and the changes are persisted with the `save()` call. The issues assigned to the epic need to be re-created in the target epic. The `create()` call actually creates the relationship item, not a new issue object itself. The child epics move requires a different approach, since the relationship is vice versa: The `parent_id` on the child epic needs to be compared against the source epic ID, and if matching, updated to the target epic ID. 
After copying everything successfully, the source epic needs to be changed into the `closed` state.\n\n\n```python\n#!/usr/bin/env python\n\n# Description: Show how epics can be moved between groups, including title, description, labels, child epics and issues.\n# Requirements: python-gitlab Python libraries. GitLab API write access, and maintainer access to all configured groups/projects.\n# Author: Michael Friedrich \u003Cmfriedrich@gitlab.com>\n# License: MIT, (c) 2023-present GitLab B.V.\n\nimport gitlab\nimport os\nimport sys\n\nGITLAB_SERVER = os.environ.get('GL_SERVER', 'https://gitlab.com')\n# https://gitlab.com/gitlab-de/use-cases/gitlab-api\nSOURCE_GROUP_ID = os.environ.get('GL_SOURCE_GROUP_ID', 62378643)\n# https://gitlab.com/gitlab-de/use-cases/gitlab-api/epic-move-target\nTARGET_GROUP_ID = os.environ.get('GL_TARGET_GROUP_ID', 62742177)\n# https://gitlab.com/groups/gitlab-de/use-cases/gitlab-api/-/epics/1\nEPIC_ID = os.environ.get('GL_EPIC_ID', 1)\nGITLAB_TOKEN = os.environ.get('GL_TOKEN')\n\nif not GITLAB_TOKEN:\n    print(\"Please set the GL_TOKEN env variable.\")\n    sys.exit(1)\n\ngl = gitlab.Gitlab(GITLAB_SERVER, private_token=GITLAB_TOKEN)\n\n# Main\n# Goal: Move epic to target group, including title, body, labels, and child epics and issues.\nsource_group = gl.groups.get(SOURCE_GROUP_ID)\ntarget_group = gl.groups.get(TARGET_GROUP_ID)\n\n# Create a new target epic and copy all its items, then close the source epic.\nsource_epic = source_group.epics.get(EPIC_ID)\n# print(source_epic) #debug\n\nepic_title = source_epic.title\nepic_description = source_epic.description\nepic_labels = source_epic.labels\nepic_issues = source_epic.issues.list()\n\n# Create the epic with minimal attributes\ntarget_epic = target_group.epics.create({\n    'title': epic_title,\n    'description': epic_description,\n})\n\n# Assign the list\ntarget_epic.labels = epic_labels\n\n# Persist the changes in the new epic\ntarget_epic.save()\n\n# Epic issues need to be re-assigned 
in a loop\nfor epic_issue in epic_issues:\n    ei = target_epic.issues.create({'issue_id': epic_issue.id})\n\n# Child epics need to update their parent_id to the new epic\n# Need to search in all epics, use lazy object loading\nfor sge in source_group.epics.list(lazy=True):\n    # this epic has the source epic as parent epic?\n    if sge.parent_id == source_epic.id:\n        # Update the parent id\n        sge.parent_id = target_epic.id\n        sge.save()\n\nprint(\"Copied source epic {source_id} ({source_url}) to target epic {target_id} ({target_url})\".format(\n    source_id=source_epic.id, source_url=source_epic.web_url,\n    target_id=target_epic.id, target_url=target_epic.web_url))\n\n# Close the old epic\nsource_epic.state_event = 'close'\nsource_epic.save()\nprint(\"Closed source epic {source_id} ({source_url})\".format(\n    source_id=source_epic.id, source_url=source_epic.web_url))\n\n```\n\n\n```shell\n$  python3 move_epic_between_groups.py\nCopied source epic 725341 (https://gitlab.com/groups/gitlab-de/use-cases/gitlab-api/-/epics/1) to target epic 725358 (https://gitlab.com/groups/gitlab-de/use-cases/gitlab-api/epic-move-target/-/epics/6)\nClosed source epic 725341 (https://gitlab.com/groups/gitlab-de/use-cases/gitlab-api/-/epics/1)\n```\n\n\nThe [target epic](https://gitlab.com/groups/gitlab-de/use-cases/gitlab-api/epic-move-target/-/epics/5) was created and shows the expected result: Same title, description, labels, child epic, and issues.\n\n![Target epic which has all attributes copied from the source epic: title, description, labels, child epics, issues](/images/blogimages/efficient-devsecops-workflows-python-gitlab-handson/python_gitlab_moved_epic_with_all_attributes.png){: .shadow}\n\n**Exercise**: The script does not copy [comments](https://python-gitlab.readthedocs.io/en/stable/gl_objects/notes.html) and [discussion threads](https://python-gitlab.readthedocs.io/en/stable/gl_objects/discussions.html) yet. 
Research and help update the script – merge requests welcome!\n\n\n### Compliance: Ensure that project settings are not overridden\n\nProject and group settings may be accidentally changed by team members with maintainer permissions. Compliance requirements need to be met. Another use case is to manage configuration with Infrastructure as Code tools, and ensure that GitLab instance/group/project/etc. configuration is persisted and always the same. Tools like Ansible or Terraform can invoke an API script, or use the python-gitlab library to perform tasks to manage settings.\n\nThe following example only has the `main` branch protected.\n\n![GitLab project settings for repositories and protected branches, main branch](/images/blogimages/efficient-devsecops-workflows-python-gitlab-handson/python_gitlab_protected_branches_settings_main.png){: .shadow}\n\nLet us assume that a new `production` branch has been added and should be protected, too. The following [script](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/enforce_protected_branches.py) defines the dictionary of protected branches and their access levels for push/merge permissions to maintainer level, and builds the comparison logic around the [python-gitlab protected branches documentation](https://python-gitlab.readthedocs.io/en/stable/gl_objects/protected_branches.html).\n\n\n```python\n#!/usr/bin/env python\n\nimport gitlab\nimport os\nimport sys\n\nGITLAB_SERVER = os.environ.get('GL_SERVER', 'https://gitlab.com')\n# https://gitlab.com/gitlab-de/use-cases/\nGROUP_ID = os.environ.get('GL_GROUP_ID', 16058698)\nGITLAB_TOKEN = os.environ.get('GL_TOKEN')\n\nPROTECTED_BRANCHES = {\n    'main': {\n        'merge_access_level': gitlab.const.AccessLevel.MAINTAINER,\n        'push_access_level': gitlab.const.AccessLevel.MAINTAINER\n    },\n    'production': {\n        'merge_access_level': gitlab.const.AccessLevel.MAINTAINER,\n        'push_access_level': 
gitlab.const.AccessLevel.MAINTAINER\n    },\n}\n\nif not GITLAB_TOKEN:\n    print(\"Please set the GL_TOKEN env variable.\")\n    sys.exit(1)\n\ngl = gitlab.Gitlab(GITLAB_SERVER, private_token=GITLAB_TOKEN)\n\n# Main\ngroup = gl.groups.get(GROUP_ID)\n\n# Collect all projects in group and subgroups\nprojects = group.projects.list(include_subgroups=True, all=True)\n\nfor project in projects:\n    # Retrieve a full manageable project object\n    # https://python-gitlab.readthedocs.io/en/stable/gl_objects/groups.html#examples\n    manageable_project = gl.projects.get(project.id)\n\n    # https://python-gitlab.readthedocs.io/en/stable/gl_objects/protected_branches.html\n    protected_branch_names = []\n\n    for pb in manageable_project.protectedbranches.list():\n        manageable_protected_branch = manageable_project.protectedbranches.get(pb.name)\n        print(\"Protected branch name: {n}, merge_access_level: {mal}, push_access_level: {pal}\".format(\n            n=manageable_protected_branch.name,\n            mal=manageable_protected_branch.merge_access_levels,\n            pal=manageable_protected_branch.push_access_levels\n        ))\n\n        protected_branch_names.append(manageable_protected_branch.name)\n\n    for branch_to_protect, levels in PROTECTED_BRANCHES.items():\n        # Fix missing protected branches\n        if branch_to_protect not in protected_branch_names:\n            print(\"Adding branch {n} to protected branches settings\".format(n=branch_to_protect))\n            p_branch = manageable_project.protectedbranches.create({\n                'name': branch_to_protect,\n                'merge_access_level': gitlab.const.AccessLevel.MAINTAINER,\n                'push_access_level': gitlab.const.AccessLevel.MAINTAINER\n            })\n```\n\nRunning the script prints the existing `main` branch, and a note that `production` will be updated. 
The screenshot from the repository settings proves this action.\n\n```\n$ python3 enforce_protected_branches.py                                                ─╯\nProtected branch name: main, merge_access_level: [{'id': 67294702, 'access_level': 40, 'access_level_description': 'Maintainers', 'user_id': None, 'group_id': None}], push_access_level: [{'id': 68546039, 'access_level': 40, 'access_level_description': 'Maintainers', 'user_id': None, 'group_id': None}]\nAdding branch production to protected branches settings\n```\n\n![GitLab project settings for repositories and protected branches, main and production branch](/images/blogimages/efficient-devsecops-workflows-python-gitlab-handson/python_gitlab_protected_branches_settings_main_production.png){: .shadow}\n\n\n### Taking notes, generate due date overview\n\nA [Hacker News discussion about note-taking tools](https://news.ycombinator.com/item?id=32155848) inspired me to take a look into creating a Markdown table overview, fetched from files that take notes, and sorted by the parsed due date. The script is located [here](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/generate_snippets_index_by_due_date.py) and more complex to understand.\n\n```\n# 2022-07-19 Notes\n\nHN topic about taking notes: https://news.ycombinator.com/item?id=32152935\n\n\u003C!--\n---\nTags: DevOps, Learn\nDue: 2022-08-01\n---\n-->\n\n```\n\n### Create issue index in a Markdown file, grouped by labels\n\nA similar Hacker News question inspired me to write a [script](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/generate_issue_index_grouped_by_label.py) that parses all issues in a GitLab project by labels, and creates or updates a Markdown index file in the same repository. 
The issues are grouped by label.\n\nFirst, the issues are fetched from the project, including all labels, and stored in the `index` dictionary.\n\n```python\np = gl.projects.get(PROJECT_ID)\n\nlabels = p.labels.list()\n\nindex={}\n\nfor i in p.issues.list():\n    for l in i.labels:\n        if l not in index:\n            index[l] = []\n\n        index[l].append(\"#{id} - {title}\".format(id=i.id, title=i.title))\n```\n\nThe second step is to create a Markdown formatted listing based on the collected index data, with the label name as key, holding a list of issue strings.\n\n```python\nindex_str = \"\"\"# Issue Overview\n_Grouped by issue labels._\n\"\"\"\n\nfor l_name, i_list in index.items():\n    index_str += \"\\n## {label} \\n\\n\".format(label=l_name)\n\n    for i in i_list:\n        index_str += \"- {title}\\n\".format(title=i)\n```\n\nThe last step is to create a new file in the repository, or update an existing one. This is a little tricky because the API expects you to define the action and will throw an error if you try to update a nonexistent file. The first condition checks whether the file path exists in the repository, and then defines the `action` attribute. 
The `data` dictionary gets built, with the final `commits.create()` method called.\n\n```python\n# Dump index_str to FILE_NAME\n# Create as new commit\n# See https://docs.gitlab.com/ce/api/commits.html#create-a-commit-with-multiple-files-and-actions\n# for actions detail\n\n# Check if file exists, and define commit action\nf = p.files.get(file_path=FILE_NAME, ref=REF_NAME)\nif not f:\n    action='create'\nelse:\n    action='update'\n\ndata = {\n    'branch': REF_NAME,\n    'commit_message': 'Generate new index, {d}'.format(d=date.today()),\n    'actions': [\n        {\n            'action': action,\n            'file_path': FILE_NAME,\n            'content': index_str\n        }\n    ]\n}\n\ncommit = p.commits.create(data)\n```\n\n## Advanced DevSecOps workflows\n\n- [Container images to run API scripts](#container-images-to-run-api-scripts)\n- [CI/CD integration: Release and changelog generation](#cicd-integration-release-and-changelog-generation)\n- [CI/CD integration: Pipeline report summaries](#cicd-integration-pipeline-report-summaries)\n\n### Container images to run API scripts\n\nInstalling the Python interpreter and dependent libraries into the operating system may not always work, or it may be a barrier to using the API scripts. A container image that can be pulled from the GitLab registry is a good first step towards more DevSecOps automation and future CI/CD integrations, and provides a tested environment. The python-gitlab project [provides container images](https://python-gitlab.readthedocs.io/en/stable/index.html#using-the-docker-images) which can be used for testing.\n\nThe cloned script repository can be mounted into the container, and the settings are configured using environment variables. 
Example with Docker CLI:\n\n```shell\n$ docker run -ti -v \"`pwd`:/app\" \\\n  -e \"GL_SERVER=http://gitlab.com\" \\\n  -e \"GL_TOKEN=$GITLAB_TOKEN\" \\\n  -e \"GL_GROUP_ID=16058698\" \\\nregistry.gitlab.com/python-gitlab/python-gitlab:slim-bullseye \\\npython /app/python_gitlab_manageable_objects.py\n```\n\n### CI/CD integration: Release and changelog generation\n\nCreating a Git tag and a release in GitLab often requires a changelog attached. This provides a summary into all Git commits, all merged merge requests, or something similar that is easier to consume for everyone interested in the changes in this new release. Automating the changelog generation in CI/CD pipelines is possible using the GitLab API. The simplest list uses the Git commit history shown in the [`create_simple_changelog_from_git_history.py`](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/create_simple_changelog_from_git_history.py) script below:\n\n\n```python\nproject = gl.projects.get(PROJECT_ID)\ncommits = project.commits.list(ref_name='main', lazy=True, iterator=True)\n\nprint(\"# Changelog\")\n\nfor commit in commits:\n    # Generate a markdown formatted list with URLs\n    print(\"- [{text}]({url}) ({name})\".format(text=commit.title, url=commit.web_url, name=commit.author_name))\n```\n\nExecuting the script on the [o11y.love project](https://gitlab.com/everyonecancontribute/observability/o11y.love) will print a Markdown list with URLs.\n\n```shell\n$ python3 create_changelog_from_git_history.py\n# Changelog\n- [Merge branch 'topics-ebpf-opentelemetry' into 'main'](https://gitlab.com/everyonecancontribute/observability/o11y.love/-/commit/75df97e13e0f429803dc451aac7fee080a51f44c) (Michael Friedrich)\n- [Move eBPF/OpenTelemetry into dedicated topics pages ](https://gitlab.com/everyonecancontribute/observability/o11y.love/-/commit/8fa4233630ff8c1d65aff589bd31c4c2f5df36cb) (Michael Friedrich)\n- [Merge branch 'workshop-add-k8s-o11y-toc' into 
'main'](https://gitlab.com/everyonecancontribute/observability/o11y.love/-/commit/8b7949b19af6aa6bf25f73ca1ffe8616a7dbaa00) (Michael Friedrich)\n- [Add TOC for Kubesimplify Kubernetes Observability workshop ](https://gitlab.com/everyonecancontribute/observability/o11y.love/-/commit/63c8ad587f43e3926e6749a62c33ad0b6f229f47) (Michael Friedrich)\n\n...\n```\n\n**Exercise**: The script is not production ready yet but should get you going to group commits by Git tag/release, filter merge commits, attach the changelog file or content into the [GitLab release details](https://docs.gitlab.com/ee/api/releases/), etc.\n\n### CI/CD integration: Pipeline report summaries\n\nWhen developing a new API script in Python, a CI/CD integration with automated runs can be desired, too. My recommendation is to focus on writing and testing the script stand-alone on the command line first, and once it works reliably, adapt the code to run the script to perform actions in CI/CD, too. After writing a few scripts, and practicing a lot, you will have learned to write code that can be executed on the CLI, in containers and in CI/CD jobs.\n\nA good preparation for CI/CD is to focus on environment variables to configure the script. The environment variables can be defined as CI/CD variables, and there is no extra work with additional configuration files, or command line parameters involved. This keeps the CI/CD configuration footprint small and reusable, too.\n\nAn example integration to automatically create security summaries as a Markdown comment in a merge request was described in the [\"Fantastic Infrastructure-as-Code security attacks and how to find them\" blog post](/blog/fantastic-infrastructure-as-code-security-attacks-and-how-to-find-them/#integrations-into-cicd-and-merge-requests-for-review). This use case required research and testing before actually writing the full API script:\n\n1. 
Read the python-gitlab documentation to learn how [merge request comments (notes)](https://python-gitlab.readthedocs.io/en/stable/gl_objects/notes.html#project-notes) can be created.\n2. Create a test project and a test merge request for testing.\n3. Start writing code which instantiates the GitLab connection object, fetches the project object, and gets the merge request object from a pre-defined ID.\n4. Run `mr.notes.create({‘body’: ‘This is a test by dnsmichi’})`\n5. Iterate on the body content and pre-fill a string with a markdown table.\n6. Fetch pre-defined CI/CD variables to get the `CI_MERGE_REQUEST_ID` value which will be required to update as target.\n6. Verify the API permissions and learn that the CI job token is not sufficient.\n7. Implement the full algorithm, integrated CI/CD testing and add documentation.\n\nThe script runs continuously after security scans have been completed with a report. Another use case can be using [Pipeline schedules](https://docs.gitlab.com/ee/ci/pipelines/schedules.html) which provide synchronization capabilities, and the comments get posted to an issue summary.\n\n## Development tips\n\nCode and abstraction libraries are helpful but sometimes it can be hard to see the problem why an attribute or object does not provide the expected behavior. It is helpful to take a step back, and look into different ways to fetch data from the REST API, for example [using jq and curl](/blog/devops-workflows-json-format-jq-ci-cd-lint/). The [GitLab CLI](/blog/introducing-the-gitlab-cli/) can also be used to query the API and get immediate results.\n\nDeveloping scripts that interact with APIs can become a repetitive task, adding more needed attributes, and the need to learn about object relations, methods and how to store the retrieved data. 
Especially for larger datasets, it can be a good idea to use the JSON library to dump data structures into a file cache on disk, and provide a debug configuration option to read the data from that file, instead of firing the API requests again all the time. This also helps to mitigate potential rate limiting.\n\nAdding timing points to the code can help measure the performance, and efficiency of the algorithm used. The following snippet [measures the duration](https://stackoverflow.com/questions/7370801/how-do-i-measure-elapsed-time-in-python ) of requests to retrieve the merge request status. It is part of a script that was used to analyze a potential problem with the `detailed_merge_status` attribute in [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/386661#note_1237757295).\n\n```\nmrs = project.mergerequests.list(state='opened', iterator=True, with_merge_status_recheck=True)\n\nfor mr in mrs:\n    start = timer()\n    #print(mr.attributes) #debug\n    # https://docs.gitlab.com/ee/api/merge_requests.html#list-merge-request-diffs\n    real_mr = project.mergerequests.get(mr.get_id())\n\n    print(\"- [ ] !{mr_id} - {mr_url}+ Status: {s}, Title: {t}\".format(\n        mr_id=real_mr.attributes['iid'],\n        mr_url=real_mr.attributes['web_url'],\n        s=real_mr.attributes['detailed_merge_status'],\n        t=real_mr.attributes['title']))\n\n    end = timer()\n    duration = end - start\n    if duration > 1.0:\n        print(\"ALERT: > 1s \")\n    print(\"> Execution time took {s}s\".format(s=(duration)))\n```\n\nMore tips are discussed in the following sections:\n\n- [Advanced custom configuration](#advanced-custom-configuration)\n- [CI/CD code linting for different Python versions](#cicd-code-linting-for-different-python-versions)\n\n### Advanced custom configuration\n\nWhen you are developing a script that requires advanced custom configuration, choose a format that fits best into existing infrastructure and development guidelines. 
Python provides libraries for parsing YAML, JSON, etc. The following example configuration file and script showcase a YAML configuration option. It is based on [a script that automatically updates a list of issues/epics](https://gitlab.com/gitlab-de/gitlab-api-automated-commenter) with a comment, reminding responsible team members for a recurring update for a cross-functional initiative at GitLab.\n\n[python_gitlab_custom_yaml_config.yml](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/python_gitlab_custom_yaml_config.yml)\n```yaml\ntasks:\n  - name: \"Backend\"\n    url: \"https://gitlab.com/group1/project2/-/issues/1\"\n  - name: \"Frontend\"\n    url: \"https://gitlab.com/group2/project4/-/issues/2\"\n```\n\n[python_gitlab_custom_script_config_yaml.py](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/python_gitlab_custom_script_config_yaml.py)\n```python\nimport os\nimport yaml\n\nCONFIG_FILE = os.environ.get('GL_CONFIG_FILE', \"python_gitlab_custom_yaml_config.yml\")\n\n# Read config\nwith open(CONFIG_FILE, mode=\"rt\", encoding=\"utf-8\") as file:\n    config = yaml.safe_load(file)\n    #print(config) #debug\n\ntasks = []\nif \"tasks\" in config:\n    tasks = config['tasks']\n\n# Process the tasks\nfor task in tasks:\n    print(\"Task name: '{n}' Issue URL to update: {id}\".format(n=task['name'], id=task['url']))\n    # print(task) #debug\n```\n\n```shell\n$ python3 python_gitlab_custom_script_config_yaml.py                                     ─╯\nTask name: 'Backend' Issue URL to update: https://gitlab.com/group1/project2/-/issues/1\nTask name: 'Frontend' Issue URL to update: https://gitlab.com/group2/project4/-/issues/2\n```\n\n\n### CI/CD code linting for different Python versions\n\nAll code examples in this blog post have been tested with Python 3.8, 3.9, 3.10 and 3.11, using [parallel matrix builds in GitLab 
CI/CD](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/.gitlab-ci.yml) and pyflakes for code linting. Automating the tests helps focus on development, and ensuring that the target platforms support the language features. Some Linux distributions do not provide Python 3.11 yet for example, and Python language features cannot be used or may need an alternative implementation.\n\n```yaml\ninclude:\n  - template: Security/SAST.gitlab-ci.yml\n  - template: Dependency-Scanning.gitlab-ci.yml\n  - template: Secret-Detection.gitlab-ci.yml\n\nstages:\n  - lint\n  - test\n\n.python-req:\n  image: python:$VERSION\n  script:\n    - pip install -r requirements_dev.txt\n  parallel:\n    matrix:\n      - VERSION: ['3.8', '3.9', '3.10', '3.11']   # https://hub.docker.com/_/python\n\nlint-python:\n  extends: .python-req\n  stage: lint\n  script:\n    - !reference [.python-req, script]\n    - pyflakes .\n\nsast:\n  stage: test\n\n```\n\n## Optimize code and performance\n\n- [Lazy objects](#lazy-objects)\n- [Object-oriented programming](#object-oriented-programming)\n\n### Lazy objects\n\nWhen working with objects that do not immediately need all attributes loaded, you can specify the [`lazy=True`](https://python-gitlab.readthedocs.io/en/stable/api-usage.html#lazy-objects) attribute to not invoke an API call immediately. A follow-up method call will then invoke the required API calls.\n\n\n```python\n# Lazy object, no API call\nproject = gl.projects.get(PROJECT_ID, lazy=True)\n\ntry:\n    print(\"Trying to access 'snippets_enabled' on a lazy loaded project object. 
This will throw an exception that we capture.\")\n    print(\"Project settings: snippets_enabled={b}\".format(b=project.snippets_enabled))\nexcept Exception as e:\n    print(\"Accessing lazy loaded object failed: {e}\".format(e=e))\n\nproject.snippets_enabled = True\n\nproject.save() # This creates an API call\n\nprint(\"\\nLazy object was loaded after save() call.\")\nprint(\"Project settings: snippets_enabled={b}\".format(b=project.snippets_enabled))\n\n```\n\nExecuting the [`python_gitlab_lazy_objects.py`](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/python_gitlab_lazy_objects.py) script shows that the lazy object did not fire an API call, thus throwing an exception when accessing the project setting `snippets_enabled`. To show that the object still can be managed, the code catches the exception to proceed with updating the setting locally, and calling `project.save()` to persist the change and call the API update.\n\n```shell\n$ python3 python_gitlab_lazy_objects.py                                                ─╯\nTrying to access 'snippets_enabled' on a lazy loaded project object. This will throw an exception that we capture.\nAccessing lazy loaded object failed: 'Project' object has no attribute 'snippets_enabled'\n\nIf you tried to access object attributes returned from the server,\nnote that \u003Cclass 'gitlab.v4.objects.projects.Project'> was created as\na `lazy` object and was not initialized with any data.\n\nLazy object was loaded after save() call.\nProject settings: snippets_enabled=True\n```\n\n### Object-oriented programming\n\nFor better code quality, it makes sense to follow object-oriented programming and create classes that store attributes, provide methods, and enable better unit testing. The [storage analyzer tool](https://gitlab.com/gitlab-de/gitlab-storage-analyzer) was developed to create a summary of projects that consume lots storage, for example CI/CD job artifacts. 
By inspecting the [Git history](https://gitlab.com/gitlab-de/gitlab-storage-analyzer/-/commits/main), you can learn from the different iterations to a first working version.\n\nThe following example is a trimmed version which shows how to initialize the class `GitLabUseCase`, add helper functions for logging and JSON pretty-printing, and print all project attributes.\n\n```python\n#!/usr/bin/env python\n\nimport gitlab\nimport os\nimport sys\nimport json\n\n# Print an error message with prefix, and exit immediately with an error code.\ndef error(text):\n    logger(\"ERROR\", text)\n    sys.exit(1)\n\n# Log a line with a given prefix (e.g. INFO)\ndef logger(prefix, text):\n    print(\"{prefix}: {text}\".format(prefix=prefix, text=text))\n\n# Return a pretty-printed JSON string with indent of 4 spaces\ndef render_json_output(data):\n    return json.dumps(data, indent=4, sort_keys=True)\n\n# Class definition\nclass GitLabUseCase(object):\n    # Initializer to set all required parameters\n    def __init__(self, verbose, gl_server, gl_token, gl_project_id):\n        self.verbose = verbose\n        self.gl_server = gl_server\n        self.gl_token = gl_token\n        self.gl_project_id = gl_project_id\n\n    # Debug logger, controlled via verbose parameter\n    def log_debug(self, text):\n        if self.verbose:\n            print(\"DEBUG: {d}\".format(d=text))\n\n    # Connect to the GitLab server and store the connection handle\n    def connect(self):\n        self.log_debug(\"Connecting to GitLab API at {s}\".format(s=self.gl_server))\n        # Supports personal/project/group access token\n        # https://docs.gitlab.com/ee/api/index.html#personalprojectgroup-access-tokens\n        self.gl = gitlab.Gitlab(self.gl_server, private_token=self.gl_token)\n\n    # Use the stored connection handle to fetch a project object by id,\n    # and print its attribute with JSON pretty-print.\n    def print_project_attributes(self):\n        project = 
self.gl.projects.get(self.gl_project_id)\n        print(render_json_output(project.attributes))\n\n\n## main\nif __name__ == '__main__':\n    # Fetch configuration from environment variables.\n    # The second parameter specifies the default value when not provided.\n    gl_verbose = os.environ.get('GL_VERBOSE', False)\n    gl_server = os.environ.get('GL_SERVER', 'https://gitlab.com')\n\n    gl_token = os.environ.get('GL_TOKEN')\n\n    if not gl_token:\n        error(\"Please specifiy the GL_TOKEN env variable\")\n\n    gl_project_id = os.environ.get('GL_PROJECT_ID', 42491852) # https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python\n\n    # Instantiate new object and run methods\n    gl_use_case = GitLabUseCase(gl_verbose, gl_server, gl_token, gl_project_id)\n    gl_use_case.connect()\n    gl_use_case.print_project_attributes()\n```\n\nRunning the [script](https://gitlab.com/gitlab-de/use-cases/gitlab-api/gitlab-api-python/-/blob/main/python_gitlab_oop_helpers.py) with the `GL_PROJECT_ID` environment variable pretty-prints the project attributes as JSON on the terminal.\n\n![Example script that pretty-prints the project object attributes as JSON](/images/blogimages/efficient-devsecops-workflows-python-gitlab-handson/python_gitlab_oop_example_terminal_output_project_attributes.png){: .shadow}\n\n## More use cases\n\nBetter performance with API requests can be achieved by looking into parallelization and threading in Python. Users have been testing the storage analyzer script, and provided feedback to optimize the performance for the single-threaded script by using tasks and [Python threading](https://realpython.com/intro-to-python-threading/), similar to [this community project](https://gitlab.com/thelabnyc/gitlab-storage-cleanup). 
I might follow up on this topic in a future blog post, there are many more great use cases to cover using python-gitlab.\n\nThere is so much more to learn, here are a few examples from the GitLab community forum that could not make it into this blog post:\n\n* [Fetch review app environment URL from Merge Request](https://forum.gitlab.com/t/fetch-review-app-environment-url-from-merge-request/71335/2)\n* [Project visibility, project features, permissions](https://forum.gitlab.com/t/project-visibility-project-features-permissions-settings-api/32242)\n* [Download GitLab CI/CD job artifacts using Python](https://forum.gitlab.com/t/download-gitlab-ci-jobs-artifacts-using-python/25436/$)\n\n## Conclusion\n\nThe python-gitlab library helps to abstract raw REST API calls, and to keep access to attributes, functions and objects short and relatively easy. There are many use cases that can be solved efficiently. Alternative programming language libraries for the GitLab REST API are available [in the API clients section here](/partners/technology-partners/#api-clients).\n\nThe [GitLab Community Forum](https://forum.gitlab.com/) is a great place to collaborate on use cases and questions about possible solutions or code snippets. We'd love to hear from you about your use cases and challenges using the python-gitlab library.\n\nShoutout to the python-gitlab maintainers and contributors, developing this fantastic API library for many years now! If this blog post and the python-gitlab library helped you get more efficient, please consider [contributing to python-gitlab](https://python-gitlab.readthedocs.io/en/stable/#contributing). When there is a GitLab API feature missing, look into [contributing to GitLab](https://about.gitlab.com/community/contribute/), too. 
Thank you!\n\n\nCover image by [David Clode](https://unsplash.com/@davidclode) on [Unsplash](https://unsplash.com/photos/cxMJYcuCLEA)\n{: .note}",[230,9,835,478],{"slug":1696,"featured":6,"template":684},"efficient-devsecops-workflows-hands-on-python-gitlab-api-automation","content:en-us:blog:efficient-devsecops-workflows-hands-on-python-gitlab-api-automation.yml","Efficient Devsecops Workflows Hands On Python Gitlab Api Automation","en-us/blog/efficient-devsecops-workflows-hands-on-python-gitlab-api-automation.yml","en-us/blog/efficient-devsecops-workflows-hands-on-python-gitlab-api-automation",{"_path":1702,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1703,"content":1709,"config":1715,"_id":1717,"_type":13,"title":1718,"_source":15,"_file":1719,"_stem":1720,"_extension":18},"/en-us/blog/efficient-devsecops-workflows-with-rules-for-conditional-pipelines",{"title":1704,"description":1705,"ogTitle":1704,"ogDescription":1705,"noIndex":6,"ogImage":1706,"ogUrl":1707,"ogSiteName":669,"ogType":670,"canonicalUrls":1707,"schema":1708},"DevSecOps workflows with conditional CI/CD pipeline rules","CI/CD pipelines can be simple or complex, what makes them efficient are CI rules that define when and how they run.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669673/Blog/Hero%20Images/engineering.png","https://about.gitlab.com/blog/efficient-devsecops-workflows-with-rules-for-conditional-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to create efficient DevSecOps workflows with rules for conditional CI/CD pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2023-06-27\",\n      }",{"title":1710,"description":1705,"authors":1711,"heroImage":1706,"date":1712,"body":1713,"category":769,"tags":1714},"How to create efficient DevSecOps workflows with rules for conditional CI/CD 
pipelines",[1570],"2023-06-27","\nCI/CD pipelines can be simple or complex – what makes them efficient are rules that define when and how they run. By using rules, you create smarter CI/CD pipelines, which increase teams' productivity and allow organizations to iterate faster. In this tutorial, you will learn about the different types of CI/CD pipelines and rules and their use cases.\n\n## What is a pipeline?\nA pipeline is a top-level component of [continuous integration](https://docs.gitlab.com/ee/ci/introduction/index.html#continuous-integration) and [continuous delivery](https://docs.gitlab.com/ee/ci/introduction/index.html#continuous-delivery)/[continuous deployment](https://docs.gitlab.com/ee/ci/introduction/index.html#continuous-deployment), and it comprises [jobs](https://docs.gitlab.com/ee/ci/jobs/index.html), which are lists of tasks to be executed. Jobs are organized in [stages](https://docs.gitlab.com/ee/ci/yaml/index.html#stages), which define when the jobs run.\n\nA pipeline can be a [basic one](https://docs.gitlab.com/ee/ci/pipelines/pipeline_architectures.html#basic-pipelines) in which jobs run concurrently in each stage. 
Pipelines can also be complex, like [parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html#parent-child-pipelines), [merge trains](https://docs.gitlab.com/ee/ci/pipelines/merge_trains.html), [multi-project pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html#multi-project-pipelines), or the more advanced [Directed Acyclic Graph pipelines](https://docs.gitlab.com/ee/ci/directed_acyclic_graph/index.html) (DAG).\n\n![Complex pipeline showing dependencies](https://about.gitlab.com/images/blogimages/2023-06-15-efficient-devsecops-workflows-with-rules-for-conditional-pipelines/complex-pipelines.png)\n\nA [gitlab-runner pipeline](https://gitlab.com/gitlab-org/gitlab-runner/-/pipelines/798871212/) showing job dependencies.\n{: .note.text-center}\n\n![Directed Acyclic Graph](https://about.gitlab.com/images/blogimages/2023-06-15-efficient-devsecops-workflows-with-rules-for-conditional-pipelines/dag-pipelines.png)\n\nDirected Acyclic Graph pipeline\n{: .note.text-center}\n\nUse cases determine how complicated a pipeline can get. A use case might require testing an application and packaging it into a container; the pipeline can even further deploy the container to an orchestrator like Kubernetes or a container registry. Another use case might involve building applications that target different platforms with varying dependencies, which is where DAG pipelines shine.\n\n## What are CI/CD rules?\nCI/CD rules are the key to managing the flow of jobs in a pipeline. One of the powerful features of GitLab CI/CD is the ability to control when a CI/CD job runs, which can depend on context, changes made, [workflow](https://docs.gitlab.com/ee/ci/yaml/workflow.html) rules, values of CI/CD variables, or custom conditions. 
Aside from using `rules`, you can also control the flow of CI/CD pipelines using:\n\n* [`needs`](https://docs.gitlab.com/ee/ci/yaml/index.html#needs): establishes relationships between jobs and used in DAG pipelines\n* [`only`](https://docs.gitlab.com/ee/ci/yaml/index.html#only--except): defines when a job should run\n* [`except`](https://docs.gitlab.com/ee/ci/yaml/index.html#only--except): defines when a job should not run\n* [`workflow`](https://docs.gitlab.com/ee/ci/yaml/workflow.html): controls when pipelines are created\n\n`only` and `except` should not be used with `rules` as this can lead to unexpected behavior. It is recommended to use `rules`, learn more in the following sections.\n\n## What is the `rules` feature?\n`rules` determine when and if a job runs in a pipeline. If you have multiple rules defined, they are all evaluated in order until a matching rule is found and the job is executed according to the specified configuration.\n\n[Rules](https://docs.gitlab.com/ee/ci/yaml/#rules) can be defined using the keywords: `if`, `changes`, `exists`, `allow_failure`, `variables`, `when` and `needs`.\n\n### `rules:if`\nThe `if` keyword evaluates if a job should be added to a pipeline. The evaluation is done based on the values of [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/index.html) defined in the scope of the job or pipeline and [predefined CI/CD variables](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html).\n\n```yaml\njob:\n  script:\n    - echo $(date)\n  rules:\n    - if: $CI_MERGE_REQUEST_SOURCE_BRANCH_NAME == $CI_DEFAULT_BRANCH\n```\n\nIn the CI/CD script above, the job prints the current date and time with the `echo` command. The job is only executed if the source branch of a merge request (`CI_MERGE_REQUEST_SOURCE_BRANCH_NAME`) is the same as the project's default branch (`CI_DEFAULT_BRANCH`) in a [merge request pipeline](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html). 
You can use the `==` and `!=` operators for comparison, while `=~` and `!~` allow you to compare a variable to a regular expression. You can combine multiple expressions using the `&&` (AND), `||` (OR) operators, and parentheses for grouping expressions.\n\n### `rules:changes`\nWith the `changes` keyword, you can watch for changes to certain files or folders for a job to execute. GitLab uses the output of [Git diffstat](https://git-scm.com/docs/git-diff#Documentation/git-diff.txt\n\n```yaml\njob:\n  script:\n    - terraform plan\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n      changes:\n        - terraform/**/*.tf\n```\n\nIn this example, the `terraform plan` is only executed when files with the `.tf` extension are changed in the `terraform` folder and its subdirectories. An additional rule ensures the job is executed for [merge request pipelines](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html).\n\nThe `changes` rule can look for changes in specific files with `paths`:\n\n```yaml\njob:\n  script:\n    - terraform plan\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n      changes:\n        paths:\n          - terraform/main.tf\n```\n\nChanges to files in a source reference (branch, tag, commit) can also be compared against other references in the Git repository. The CI/CD job will only execute when the source reference differs from the [specified reference value defined in `rules:changes:compare_to`](https://docs.gitlab.com/ee/ci/yaml/#ruleschangescompare_to). This value can be a Git commit SHA, tag, or branch name. 
The following example compares the source reference to the current `production` branch (`refs/head/production`).\n\n```yaml\njob:\n  script:\n    - terraform plan\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n      changes:\n        paths:\n          - terraform/main.tf\n        compare_to: 'refs/head/production'\n```\n\n### `rules:exists`\nLike `changes`, you can execute CI/CD jobs only when specific files exist [using `rules:exists` rules](https://docs.gitlab.com/ee/ci/yaml/#rulesexists). For example, you can run a job that checks whether a `Gemfile.lock` file exists. The following example audits a Ruby project for vulnerable versions of gems or insecure gem sources using the [bundler-audit project](https://github.com/rubysec/bundler-audit).\n\n```yaml\njob:\n  script:\n    - bundle-audit check --format json --output bundle-audit.json\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n      changes:\n        exits:\n          - Gemfile.lock\n```\n\n### `rules:allow_failure`\nThere are scenarios where the failure of a job should not affect the following jobs and stages of the pipeline. This can be useful in use cases where non-blocking tasks are required as part of a project but don't impact the project in any way. The [`rules:allow_failure` rule](https://docs.gitlab.com/ee/ci/yaml/#rulesallow_failure) can be set to `true` or `false`. 
It defaults to `false` implicitly when the rule is not specified.\n\n```yaml\njob:\n  script:\n    - bundle-audit check --format json --output bundle-audit.json\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\" && $CI_MERGE_REQUEST_TARGET_BRANCH_PROTECTED == \"false\"\n      changes:\n        exits:\n          - Gemfile.lock\n      allow_failure: true\n```\n\nIn this example, the job can fail only if a merge request event triggers the pipeline and the target branch is not protected.\n\n### `rules:needs`\nDisabled by fault, [`rules:needs`](https://docs.gitlab.com/ee/ci/yaml/#rulesneeds) was introduced in [GitLab 16](https://about.gitlab.com/releases/2023/05/22/gitlab-16-0-released/) and can be enabled with the `introduce_rules_with_needs` [feature flag](https://docs.gitlab.com/ee/user/feature_flags.html). [`needs`](https://docs.gitlab.com/ee/ci/yaml/index.html#needs) is used to execute jobs out of order without waiting for other jobs in a stage to complete. When used with `rules`, it replaces the job's `needs` specification when the set conditions are met.\n\n```yaml\nstages:\n  - build\n  - qa\n  - deploy\n\nbuild-dev:\n  stage: build\n  rules:\n    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n  script: echo \"Building dev version...\"\n\nbuild-prod:\n  stage: build\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n  script: echo \"Building production version...\"\n\nqa-checks:\n  stage: qa\n  script:\n    - echo \"Running QA checks before publishing to Production....\"\n\ndeploy:\n  stage: deploy\n  needs: ['build-dev']\n  rules:\n    - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH\n      needs: ['build-prod', 'qa-checks']\n    - when: on_success # Run the job in other cases\n  script: echo \"Deploying application.\"\n\n```\n\nIn the example above, the deploy job has the `build-dev` job as a dependency before it runs; however, when the commit branch is the project's default branch, its dependency changes to `build-prod` and 
`qa-checks`. This can allow for extra checks to be implemented based on context.\n\n### `rules:variables`\nIn some situations, you only need certain variables in specific conditions, or their values change based on content; you can use the [`rules:variables`](https://docs.gitlab.com/ee/ci/yaml/#rulesvariables) rule to define variables when specific conditions are met. This also allows to create more dynamic CI/CD execution workflows.\n\n```\njob:\n  variables:\n    DEPLOY_VERSION: \"dev\"\n  rules:\n    - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH\n      variables:\n        DEPLOY_VERSION: \"stable\"\n  script:\n    - echo \"Deploying $DEPLOY_VERSION version\"\n```\n\n### `workflow:rules`\nSo far, we have looked at controlling when jobs run in a pipeline using the `rules` keyword. Sometimes, you want to control how the entire pipeline behaves: That's where [`workflow:rules` provide a powerful option](https://docs.gitlab.com/ee/ci/yaml/#workflowrules). `workflow:rules` are evaluated before jobs and take precedence over the job rules. For example, if a job has rules that allow it to run against a specific branch, but the workflow rules set jobs running against the branch to `when: never`, the jobs will not run.\n\nAll the features of `rules` mentioned in the previous sections work for `workflow:rules`.\n\n```yaml\nworkflow:\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"schedule\"\n      when: never\n    - if: $CI_PIPELINE_SOURCE == \"push\"\n      when: never\n    - when: always\n```\n\nIn the example above, the CI/CD pipeline runs except when a schedule or push event is triggered.\n\n## Use cases for CI/CD rules\nIn the previous section, we looked at different ways of using the `rules` feature of GitLab CI/CD. In this section, we will explore practical use cases.\n\n### Developer experience\nOne of the benefits of a DevSecOps platform is to allow developers to focus on what they do best: writing their code and doing as little operations as possible. 
A company's DevOps or Platform team can create CI/CD templates for different stages of their development lifecycle and use rules to add CI/CD jobs to handle specific tasks based on their technology stack. A developer only needs to include a default CI/CD script and pipelines are automatically created based on files detected, refs used, or defined variables, leading to increased productivity.\n\n### Security and quality assurance\nA major function of CI/CD pipelines is to catch bugs or vulnerabilities before they are deployed into production infrastructure. Using CI/CD rules, security and quality assurance teams can dynamically run extra checks on changes introduced when certain factors are introduced. For example, malware scans can be added when new file extensions not in an approved list are detected, or more advanced performance tests are automatically added when a certain level of change has been introduced to the codebase. With GitLab's built-in security, including security in your pipelines can be done with just a few lines of code.\n\n```yaml\ninclude:\n  # Static\n  - template: Jobs/Container-Scanning.gitlab-ci.yml\n  - template: Jobs/Dependency-Scanning.gitlab-ci.yml\n  - template: Jobs/SAST.gitlab-ci.yml\n  - template: Jobs/Secret-Detection.gitlab-ci.yml\n  - template: Jobs/SAST-IaC.gitlab-ci.yml\n  - template: Jobs/Code-Quality.gitlab-ci.yml\n  - template: Security/Coverage-Fuzzing.gitlab-ci.yml\n  # Dynamic\n  - template: Security/DAST.latest.gitlab-ci.yml\n  - template: Security/BAS.latest.gitlab-ci.yml\n  - template: Security/DAST-API.latest.gitlab-ci.yml\n  - template: API-Fuzzing.latest.gitlab-ci.yml\n```\n\n### Automation\nThe power of CI/CD rules shines through in the (nearly) limitless possibilities of automating your CI/CD pipelines. GitLab [AutoDevOps](https://docs.gitlab.com/ee/topics/autodevops/) is an example. 
It uses an opinionated best-practice collection of [GitLab CI/CD templates](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates) and rules to detect the technology stack used. AutoDevOps creates relevant jobs that take your application all the way to production from a push. You can review the [AutoDevOps template](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Auto-DevOps.gitlab-ci.yml) to learn how it leverages CI/CD rules for greater efficiency.\n\n### Using CI/CD components\nGrowth comes with several iterations of work and creating best practices. While building CI/CD pipelines, your DevOps team would have made several CI/CD scripts that they repurpose across pipelines using the [`include`](https://docs.gitlab.com/ee/ci/yaml/#include) keyword. In [GitLab 16](https://about.gitlab.com/releases/2023/05/22/gitlab-16-0-released/), GitLab [introduced CI/CD Components](https://about.gitlab.com/releases/2023/05/22/gitlab-16-0-released/#cicd-components), an experimental feature that allows your team to create reusable CI/CD components and publish them as a catalog that can be used to build smarter CI/CD pipelines rapidly. 
You can learn more [about using CI/CD components](https://docs.gitlab.com/ee/ci/components/) and the [component catalog direction](https://about.gitlab.com/direction/verify/component_catalog/).\n\nGitLab CI/CD enables you to run smarter pipelines, and it does so together with [GitLab Duo, AI-powered workflows](/gitlab-duo/) to help you build more secure software, faster.\n",[9,771,772,835,478],{"slug":1716,"featured":6,"template":684},"efficient-devsecops-workflows-with-rules-for-conditional-pipelines","content:en-us:blog:efficient-devsecops-workflows-with-rules-for-conditional-pipelines.yml","Efficient Devsecops Workflows With Rules For Conditional Pipelines","en-us/blog/efficient-devsecops-workflows-with-rules-for-conditional-pipelines.yml","en-us/blog/efficient-devsecops-workflows-with-rules-for-conditional-pipelines",{"_path":1722,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1723,"content":1729,"config":1734,"_id":1736,"_type":13,"title":1737,"_source":15,"_file":1738,"_stem":1739,"_extension":18},"/en-us/blog/enable-secure-sudo-access-for-gitlab-remote-development-workspaces",{"title":1724,"description":1725,"ogTitle":1724,"ogDescription":1725,"noIndex":6,"ogImage":1726,"ogUrl":1727,"ogSiteName":669,"ogType":670,"canonicalUrls":1727,"schema":1728},"Enable secure sudo access for GitLab Remote Development workspaces","Learn how to allow support for sudo commands using Sysbox, Kata Containers, and user namespaces in this easy-to-follow tutorial.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675033/Blog/Hero%20Images/blog-image-template-1800x945.png","https://about.gitlab.com/blog/enable-secure-sudo-access-for-gitlab-remote-development-workspaces","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Enable secure sudo access for GitLab Remote Development workspaces\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Vishal Tak\"}],\n        
\"datePublished\": \"2024-11-20\",\n      }",{"title":1724,"description":1725,"authors":1730,"heroImage":1726,"date":1450,"body":1731,"category":814,"tags":1732},[1163],"A development environment often requires sudo permissions to install, configure, and use dependencies during runtime. GitLab now allows secure sudo access for [GitLab Remote Development workspaces](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/). This tutorial shows you how to enable GitLab workspace users to securely use sudo commands to perform common tasks.\n\n## The challenge\n\nFor the sake of this article, say your project is as simple as the below code.\n\n```\npackage main\n\nimport (\n\t\"encoding/json\"\n\t\"log/slog\"\n\t\"net/http\"\n\t\"os\"\n)\n\nfunc main() {\n\t// Set up JSON logger\n\tlogFile, err := os.OpenFile(\"server.log\", os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer logFile.Close()\n\n\tjsonHandler := slog.NewJSONHandler(logFile, nil)\n\tlogger := slog.New(jsonHandler)\n\tslog.SetDefault(logger)\n\n\t// Define handlers\n\thttp.HandleFunc(\"/path1\", handleRequest)\n\thttp.HandleFunc(\"/path2\", handleRequest)\n\n\t// Start server\n\tslog.Info(\"Starting server on :3000\")\n\terr = http.ListenAndServe(\":3000\", nil)\n\tif err != nil {\n\t\tslog.Error(\"Server failed to start\", \"error\", err)\n\t}\n}\n\nfunc handleRequest(w http.ResponseWriter, r *http.Request) {\n\tdata := make(map[string]interface{})\n\tfor k, v := range r.Header {\n\t\tdata[k] = v\n\t}\n\n\tdata[\"method\"] = r.Method\n\tdata[\"url\"] = r.URL.String()\n\tdata[\"remote_addr\"] = r.RemoteAddr\n\n\tresponse, err := json.MarshalIndent(data, \"\", \"  \")\n\tif err != nil {\n\t\tslog.Error(\"Failed to marshal metadata\", \"error\", err)\n\t\thttp.Error(w, \"Internal Server Error\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t// Log the metadata\n\tslog.Info(\"Request received\",\n\t\t\"path\", r.URL.Path,\n\t\t\"response\", 
string(response),\n\t)\n\n\t// Write response\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(response)\n}\n```\n\nThis code starts an HTTP server on port 3000, exposes two paths: `path1` and `path2`. Each HTTP request received is logged to a file `server.log`.\n\nLet's run this code with `go run main.go` and generate some requests.\n\n```\ni=1\nwhile [ \"$i\" -le 100 ]; do\n  echo \"Iteration $i\"\n\n  if [ $((random_number % 2)) -eq 0 ]; then\n    curl \"localhost:3000/path1\"\n  else\n    curl \"localhost:3000/path2\"\n  fi\n\n  i=$((i + 1))\ndone\n```\n\nAs you work on this application, you realize the need to analyze the logs to debug an issue. You look at the log file and it is long to parse with a simple glance. You remember there is a handy tool, [jq](https://jqlang.github.io/jq/), which parses JSON data. But your workspace does not have it installed.\n\nYou want to install `jq` through the package manager for this workspace only.\n\n```\nsudo apt update\nsudo apt install jq\n```\n\nThe output is:\n\n```\nsudo: The \"no new privileges\" flag is set, which prevents sudo from running as root.\nsudo: If sudo is running in a container, you may need to adjust the container configuration to disable the flag.\n```\n\nThis happens because GitLab workspaces explicitly disallows `sudo` access to prevent privilege escalation on the Kubernetes host.\n\nNow, there is a more secure way to run `sudo` commands in a workspace.\n\n## How sudo access works\n\nThat is exactly what we have [unlocked](https://docs.gitlab.com/ee/user/workspace/configuration.html#configure-sudo-access-for-a-workspace) in the 17.4 release of GitLab.\n\nYou can configure secure sudo access for workspaces using any of the following options:\n\n- Sysbox  \n- Kata Containers  \n- User namespaces\n\nWe will set up three GitLab agents for workspaces to demonstrate each option.\n\n### Sysbox\n\n[Sysbox](https://github.com/nestybox/sysbox) is a container runtime that improves container 
isolation and enables containers to run the same workloads as virtual machines.\n\nTo configure sudo access for a workspace with Sysbox:\n\n1. In the Kubernetes cluster, [install Sysbox](https://github.com/nestybox/sysbox#installation).\n2. In the GitLab agent for workspaces, set the following config:\n\n```\nremote_development:\n  enabled: true\n  dns_zone: \"sysbox-update.me.com\"\n  default_runtime_class: \"sysbox-runc\"\n  allow_privilege_escalation: true\n  annotations:\n    \"io.kubernetes.cri-o.userns-mode\": \"auto:size=65536\"\n```\n\n3. Add other settings in the agent config as per your requirements. [GitLab agent for workspaces settings](https://docs.gitlab.com/ee/user/workspace/gitlab_agent_configuration.html#workspace-settings) for more information about individual settings.  \n4. Allow the agent to be used for workspaces in a group. See the [documentation](https://docs.gitlab.com/ee/user/workspace/gitlab_agent_configuration.html#allow-a-cluster-agent-for-workspaces-in-a-group) for more information.  \n5. Update GitLab Workspaces Proxy to serve traffic for the domain used in the above agent configuration. See [Tutorial: Set up the GitLab workspaces proxy](https://docs.gitlab.com/ee/user/workspace/set_up_workspaces_proxy.html) for more information.\n\n### Kata Containers\n\n[Kata Containers](https://github.com/kata-containers/kata-containers) is a standard implementation of lightweight virtual machines that perform like containers but provide the workload isolation and security of virtual machines.\n\nTo configure sudo access for a workspace with Kata Containers:\n\n1. In the Kubernetes cluster, [install Kata Containers](https://github.com/kata-containers/kata-containers/tree/main/docs/install).  \n2. In the GitLab agent for workspaces, set the following config:\n\n```\nremote_development:\n  enabled: true\n  dns_zone: \"kata-update.me.com\"\n  default_runtime_class: \"kata-qemu\"\n  allow_privilege_escalation: true\n```\n\n3. 
Add other settings in the agent config as per your requirements. [GitLab agent for workspaces settings](https://docs.gitlab.com/ee/user/workspace/gitlab_agent_configuration.html#workspace-settings) for more information about individual settings.  \n4. Allow the agent to be used for workspaces in a group. See the [documentation](https://docs.gitlab.com/ee/user/workspace/gitlab_agent_configuration.html#allow-a-cluster-agent-for-workspaces-in-a-group) for more information.  \n5. Update GitLab Workspaces Proxy to serve traffic for the domain used in the above agent configuration. See [Tutorial: Set up the GitLab workspaces proxy](https://docs.gitlab.com/ee/user/workspace/set_up_workspaces_proxy.html) for more information.\n\n### User namespaces\n\n[User namespaces](https://kubernetes.io/docs/concepts/workloads/pods/user-namespaces/) isolate the user running inside the container from the user on the host.\n\nTo configure sudo access for a workspace with user namespaces:\n\n1. In the Kubernetes cluster, [configure user namespaces](https://kubernetes.io/blog/userns-beta/).  \n2. In the GitLab agent for workspaces, set the following config:\n\n```\nremote_development:\n  enabled: true\n  dns_zone: \"userns-update.me.com\"\n  use_kubernetes_user_namespaces: true\n  allow_privilege_escalation: true\n```\n\n3. Add other settings in the agent config as per your requirements. [GitLab agent for workspaces settings](https://docs.gitlab.com/ee/user/workspace/gitlab_agent_configuration.html#workspace-settings) for more information about individual settings.  \n4. Allow the agent to be used for workspaces in a group. See the [documentation](https://docs.gitlab.com/ee/user/workspace/gitlab_agent_configuration.html#allow-a-cluster-agent-for-workspaces-in-a-group) for more information.  \n5. Update GitLab Workspaces Proxy to serve traffic for the domain used in the above agent configuration. 
See [Tutorial: Set up the GitLab workspaces proxy](https://docs.gitlab.com/ee/user/workspace/set_up_workspaces_proxy.html) for more information.\n\nSetting up a Kubernetes cluster with user namespaces configured is challenging since it is behind a beta feature gate in Kubernetes Version 1.31.0. This means it is not yet possible to configure such a cluster on the major cloud providers because they don't provide a mechanism to enable feature gates in their managed Kubernetes offering. Here is an example of [configuring a simple Kuberenetes cluster using `kubeadm`](https://gitlab.com/gitlab-org/gitlab/-/issues/468290#note_1959300036).\n\n### Create a workspace\n\nIf you now create a workspace with these agents and try installing `jq` through a package manager, it should succeed!\n\nYou can analyze the logs using `jq`. Say you wanted to inspect the log entries where the path is `/path1`, you can run:\n\n```\njq 'select(.path == \"/path1\")' server.log\n```\n\nThe output is:\n\n```\n{\n  \"time\": \"2024-10-31T12:04:38.474806+05:30\",\n  \"level\": \"INFO\",\n  \"msg\": \"Request received\",\n  \"path\": \"/path1\",\n  \"response\": \"{\\n  \\\"Accept\\\": [\\n    \\\"*/*\\\"\\n  ],\\n  \\\"User-Agent\\\": [\\n    \\\"curl/8.7.1\\\"\\n  ],\\n  \\\"method\\\": \\\"GET\\\",\\n  \\\"remote_addr\\\": \\\"[::1]:61246\\\",\\n  \\\"url\\\": \\\"/path1\\\"\\n}\"\n}\n{\n  \"time\": \"2024-10-31T12:06:22.397453+05:30\",\n  \"level\": \"INFO\",\n  \"msg\": \"Request received\",\n  \"path\": \"/path1\",\n  \"response\": \"{\\n  \\\"Accept\\\": [\\n    \\\"*/*\\\"\\n  ],\\n  \\\"User-Agent\\\": [\\n    \\\"curl/8.7.1\\\"\\n  ],\\n  \\\"method\\\": \\\"GET\\\",\\n  \\\"remote_addr\\\": \\\"[::1]:61311\\\",\\n  \\\"url\\\": \\\"/path1\\\"\\n}\"\n}\n{\n  \"time\": \"2024-10-31T12:19:34.974354+05:30\",\n  \"level\": \"INFO\",\n  \"msg\": \"Request received\",\n  \"path\": \"/path1\",\n  \"response\": \"{\\n  \\\"Accept\\\": [\\n    \\\"*/*\\\"\\n  ],\\n  \\\"User-Agent\\\": [\\n    
\\\"curl/8.7.1\\\"\\n  ],\\n  \\\"method\\\": \\\"GET\\\",\\n  \\\"remote_addr\\\": \\\"[::1]:61801\\\",\\n  \\\"url\\\": \\\"/path1\\\"\\n}\"\n}\n```\n\n## Get started today\n\nLearn even more with our [Configure sudo access for a workspace documentation](https://docs.gitlab.com/ee/user/workspace/configuration.html#configure-sudo-access-for-a-workspace). See [GitLab agent for workspaces settings](https://docs.gitlab.com/ee/user/workspace/gitlab_agent_configuration.html#workspace-settings) for details on individual settings.\n\n> New to GitLab Remote Development? Here is a [quickstart guide](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/) to get you up to speed.",[814,9,1733,478,749],"remote work",{"slug":1735,"featured":90,"template":684},"enable-secure-sudo-access-for-gitlab-remote-development-workspaces","content:en-us:blog:enable-secure-sudo-access-for-gitlab-remote-development-workspaces.yml","Enable Secure Sudo Access For Gitlab Remote Development Workspaces","en-us/blog/enable-secure-sudo-access-for-gitlab-remote-development-workspaces.yml","en-us/blog/enable-secure-sudo-access-for-gitlab-remote-development-workspaces",{"_path":1741,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1742,"content":1745,"config":1751,"_id":1753,"_type":13,"title":1754,"_source":15,"_file":1755,"_stem":1756,"_extension":18},"/en-us/blog/enhance-application-quality-with-ai-powered-test-generation",{"noIndex":6,"title":1743,"description":1744},"Enhance application quality with AI-powered test generation","Learn how GitLab Duo with Amazon Q improves the QA process by automatically generating comprehensive unit tests.",{"title":1743,"description":1744,"authors":1746,"heroImage":1747,"date":1748,"body":1749,"category":702,"tags":1750},[699],"https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659604/Blog/Hero%20Images/Screenshot_2024-11-27_at_4.55.28_PM.png","2025-07-03","You know how critical application quality is to your customers and 
reputation. However, ensuring that quality through comprehensive testing can feel like an uphill battle. You're dealing with time-consuming manual processes, inconsistent test coverage across your team, and those pesky issues that somehow slip through the cracks. It's frustrating when your rating drops because quality assurance becomes a bottleneck rather than a safeguard.\n\nHere's where [GitLab Duo with Amazon Q ](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/), which delivers agentic AI throughout the software development lifecycle for AWS customers, can help transform your QA process. This AI-powered capability can automatically generate comprehensive unit tests for your code, dramatically accelerating your quality assurance workflow. Instead of spending hours writing tests manually, you can let AI analyze your code and create tests that ensure optimal coverage and consistent quality across your entire application.\n\n## How GitLab Duo with Amazon Q works\n\nSo how does this work? Let's walk through the process together.\nWhen you're working on a new feature, you start by selecting the Java class you've added to your project through a merge request. You simply navigate to your merge request and click on the \"Changes\" tab to see the new code you've added.\n\nNext, you invoke Amazon Q by entering a quick action command. All you need to do is type `/q test` in the issue comment box. It's that simple – just a forward slash, the letter \"q\", and the word \"test\".\n\nOnce you hit enter, Amazon Q springs into action. It analyzes your selected code, understanding its structure, logic, and purpose. The AI examines your class methods, dependencies, and potential edge cases to determine what tests are needed.\n\nWithin moments, Amazon Q generates comprehensive unit test coverage for your new class. It creates tests that cover not just the happy path, but also edge cases and error conditions you might have overlooked. 
The generated tests follow your project's existing patterns and conventions, ensuring they integrate seamlessly with your codebase.\n\n## Why use GitLab Duo with Amazon Q?\n\nHere's the bottom line: You started with a critical challenge – maintaining high-quality applications while dealing with time constraints and inconsistent testing practices. GitLab Duo with Amazon Q addresses this by automating the test generation process, ensuring optimal code coverage and consistent testing standards. The result? Issues are detected before deployment, your applications maintain their quality, and you can develop software faster without sacrificing reliability.\n\nKey benefits of this feature:\n\n* Significantly reduces time spent writing unit tests\n* Ensures comprehensive test coverage across your codebase\n* Maintains consistent testing quality across all team members\n* Catches issues before they reach production\n* Accelerates your overall development velocity\n\nReady to see this game-changing feature in action? Watch how GitLab Duo with Amazon Q can transform your quality assurance process:\n\n\u003C!-- blank line -->\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/pxlYJVcHY28?si=MhIz6lnHxc6kFhlL\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Get started with GitLab Duo with Amazon Q today\n\nWant to learn more about GitLab Duo with Amazon Q? 
Visit the [GitLab and AWS partner page](https://about.gitlab.com/partners/technology-partners/aws/) for detailed information.\n\n## Agentic AI resources\n- [Agentic AI guides and resources](https://about.gitlab.com/blog/agentic-ai-guides-and-resources/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n- [GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/)\n- [GitLab Duo with Amazon Q documentation](https://docs.gitlab.com/user/duo_amazon_q/)",[704,678,1041,835,9,794],{"featured":90,"template":684,"slug":1752},"enhance-application-quality-with-ai-powered-test-generation","content:en-us:blog:enhance-application-quality-with-ai-powered-test-generation.yml","Enhance Application Quality With Ai Powered Test Generation","en-us/blog/enhance-application-quality-with-ai-powered-test-generation.yml","en-us/blog/enhance-application-quality-with-ai-powered-test-generation",{"_path":1758,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1759,"content":1765,"config":1772,"_id":1774,"_type":13,"title":1775,"_source":15,"_file":1776,"_stem":1777,"_extension":18},"/en-us/blog/enhance-application-security-with-gitlab-hackerone",{"title":1760,"description":1761,"ogTitle":1760,"ogDescription":1761,"noIndex":6,"ogImage":1762,"ogUrl":1763,"ogSiteName":669,"ogType":670,"canonicalUrls":1763,"schema":1764},"Enhance application security with GitLab + HackerOne","Learn about the GitLab + HackerOne partnership and how to easily implement an integration that improves your organization’s application security posture.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097503/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2810%29_5ET24Q6i8ihqrAOkge7a1R_1750097503214.png","https://about.gitlab.com/blog/enhance-application-security-with-gitlab-hackerone","\n                        {\n        \"@context\": \"https://schema.org\",\n 
       \"@type\": \"Article\",\n        \"headline\": \"Enhance application security with GitLab + HackerOne\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2025-04-03\",\n      }",{"title":1760,"description":1761,"authors":1766,"heroImage":1762,"date":1768,"body":1769,"category":814,"tags":1770},[1767],"Fernando Diaz","2025-04-03","Security can no longer be an afterthought in the development process. Organizations need robust solutions that integrate security throughout the entire software development lifecycle. This is where the partnership between HackerOne and GitLab creates a compelling combination for modern application development teams.\n\nGitLab, the comprehensive, AI-powered DevSecOps platform, and HackerOne, the leading crowd-sourced security platform, have established a partnership that brings together the best of both worlds: GitLab's streamlined DevSecOps workflow and HackerOne's powerful vulnerability management capabilities.\n\nIn this tutorial, you'll learn how to enhance developer productivity and your security posture by implementing HackerOne's GitLab integration.\n\n## An integration that empowers developers\n\nHackerOne's GitLab integration is remarkably straightforward, yet powerful. When security researchers discover vulnerabilities through HackerOne's platform, these findings are automatically converted into GitLab issues. This creates a seamless workflow where:\n\n* Security researchers identify vulnerabilities via HackerOne's platform  \n* Validated vulnerabilities are automatically converted into GitLab issues  \n* Development teams can address these issues directly within their existing workflow  \n* Resolution status is synchronized between both platforms\n\nYou can start leveraging the benefits of GitLab and HackerOne by using the [integration](https://docs.hackerone.com/en/articles/8571227-gitlab-integration) to track GitLab issues as references on HackerOne. 
This integration provides bi-directional and seamless data syncing between your HackerOne report and GitLab issues, improving alignment between development and security teams while streamlining security vulnerability processing.\n\nTo configure the GitLab integration to sync information between your HackerOne report and your Gitlab issue, follow the instructions provided in [HackerOne's GitLab integration documentation](https://docs.hackerone.com/en/articles/10394699-gitlab-setup), which includes:\n\n1. [Setting up an OAuth 2.0 application](https://docs.gitlab.com/ee/integration/oauth_provider.html) for your GitLab instance with the provided HackerOne settings  \n2. Connecting HackerOne to the newly created OAuth 2.0 on GitLab  \n3. Authorizing HackerOne to access the GitLab API  \n4. Configuring which GitLab project you would like to escalate HackerOne reports to  \n5. Selecting the HackerOne fields to map to corresponding GitLab fields  \n6. GitLab-to-HackerOne and HackerOne-to-GitLab event configuration\n\nOnce the integration is in place, you’ll be able to seamlessly sync data bi-directionally between both GitLab and HackerOne. This helps simplify context-switching and allows vulnerabilities to be tracked with ease throughout both systems. The integration allows for the following features:\n\n* **Creating a GitLab Issue from HackerOne:** You can create new GitLab issues for reports you receive on HackerOne.  \n* **Linking HackerOne reports to existing GitLab tasks.**   \n* **Syncing updates from HackerOne to GitLab:** The following updates on a report are synced as a comment to GitLab.  
\n  * Report comments  \n  * State changes  \n  * Rewards  \n  * Assignee changes  \n  * Public disclosure  \n  * Close GitLab Issue  \n* **Syncing Updates from GitLab to HackerOne:** The following updates on GitLab will be reflected in HackerOne as an internal comment on the associated report:  \n  * Comments  \n  * State changes  \n* **HackerOne severity to GitLab label mapping**: Allows you to set a custom priority when escalating a report to GitLab.  \n* **Due date mapping:** Allows you to automatically set a custom due date based on the severity of a report.\n\n![GitLab + HackerOne adding comments or change the state of the report in GitLab](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097510/Blog/Content%20Images/Blog/Content%20Images/sync_aHR0cHM6_1750097509644.png)\n\nThese features improve alignment between development and security teams and streamlining security vulnerability processing. To learn more on how the integration works, see the [integration documentation](https://docs.hackerone.com/en/articles/8571227-gitlab-integration).\n\n## A look into HackerOne bug bounty programs\n\nHackerOne provides bug bounty programs or cybersecurity initiatives where rewards are offered for discovering and reporting vulnerabilities in customers’ software systems, websites, or applications. Bug bounty programs help enhance the security of an application by:\n\n* Identifying security flaws before malicious actors can exploit them  \n* Leveraging diverse expertise from a global community of security researchers  \n* Providing a cost-effective way to improve cybersecurity  \n* Complementing internal security efforts and traditional penetration testing\n\nGitLab utilizes HackerOne’s bug bounty program, allowing security researchers to report vulnerabilities in GitLab applications or infrastructure. 
This crowdsourced approach helps GitLab identify and address potential security issues more effectively.\n\n![HackerOne GitLab Bug Bounty page](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097510/Blog/Content%20Images/Blog/Content%20Images/hackerone_gitlab_bug_bounty_page_aHR0cHM6_1750097509645.png)\n\nBy leveraging HackerOne's platform and the global hacker community, organizations can significantly enhance their security posture, identify vulnerabilities faster, and stay ahead of potential threats.\n\n## Secure applications and improve efficiency with GitLab \n\nGitLab provides a complete DevSecOps platform, which enables functionality for the complete software development lifecycle, including security and compliance tools. GitLab supports the following security scanner types:\n- Static Application Security Testing (SAST)\n- Dynamic Application Security Testing (DAST)\n- Container Scanning\n- Dependency Scanning\n- Infrastructure as Code Scanning\n- Coverage-guided Fuzzing\n- Web API Fuzzing\n\nWith GitLab, you can add security scanning by simply applying a template to your CI/CD pipeline definition file. For example, enabling SAST just takes a few lines of code in the `.gitlab-ci.yml`:\n\n```yaml\nstages:\n  - test\n\ninclude:\n  - template: Jobs/SAST.gitlab-ci.yml\n```\n\nThis will run SAST on the test stage, and [auto-detect the languages used](https://docs.gitlab.com/ee/user/application_security/sast/#supported-languages-and-frameworks) in your application. 
Then, whenever you create a merge request, SAST will detect the vulnerabilities in the diff between the feature branch and the target branch and provide relevant data on each vulnerability to assist with remediation.\n\n![NoSQL injection vulnerability seen in MR](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097510/Blog/Content%20Images/Blog/Content%20Images/no_sql_injection_vulnerability_mr_view_aHR0cHM6_1750097509647.png)\n\nThe results of the SAST scanner can block code from being merged if security policies are applied. Native GitLab users can be set as approvers, allowing required reviews before merging insecure code. This assures that all vulnerabilities have oversight from the appropriate parties.\n\n![Merge request approval policy](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097510/Blog/Content%20Images/Blog/Content%20Images/merge_request_approval_policy_aHR0cHM6_1750097509649.png)\n\nHackerOne has integrated GitLab into its operations and development processes in several significant ways, which have led to development process improvements and enhanced scalability and collaboration. These improvements include faster deployments and cross-team planning.\n\n## Key benefits of HackerOne's GitLab integration\n\nThe key benefits of using HackerOne and GitLab together include:\n\n* **Enhanced security visibility:** Development teams gain immediate visibility into security vulnerabilities without leaving their primary workflow environment. This real-time awareness helps teams prioritize security issues alongside feature development.  \n* **Streamlined remediation process:** By converting HackerOne reports directly into GitLab issues, the remediation process becomes part of the standard development cycle. This eliminates context switching between platforms and ensures security fixes are tracked alongside other development work.  
\n* **Accelerated time to fix:** The integration significantly reduces the time between vulnerability discovery and resolution. With HackerOne submissions immediately available in GitLab, development teams can begin working on fixes without delay, improving overall security posture.  \n* **Improved collaboration:** Security researchers, security teams, and developers can communicate more effectively through this integration. Comments and updates flow between both platforms, creating a collaborative environment focused on improving security.  \n* **Real-world impact:** Organizations implementing the HackerOne and GitLab integration have reported:  \n  * Up to 70% reduction in time from vulnerability discovery to fix  \n  * Improved developer satisfaction by keeping them in their preferred workflow  \n  * Enhanced security visibility across the organization  \n  * More effective allocation of security resources\n\n> To get started today, visit [the integration setup page](https://docs.hackerone.com/en/articles/10394699-gitlab-setup) today.\n\n## Learn more\n\nTo learn more about GitLab and HackerOne, and how we can help enhance your security posture, check out the following resources:\n* [HackerOne's GitLab Integration Usage](https://docs.hackerone.com/en/articles/8571227-gitlab-integration)  \n* [HackerOne GitLab Bug Bounty Program](https://hackerone.com/gitlab?type=team)\n* [GitLab Security and Compliance Solutions](https://about.gitlab.com/solutions/security-compliance/)  \n* [HackerOne achieves 5x faster deployments with GitLab’s integrated security](https://about.gitlab.com/customers/hackerone/)  \n* [GitLab Application Security Documentation](https://docs.gitlab.com/ee/user/application_security/)\n",[814,9,230,281,478,835,1771],"bug bounty",{"slug":1773,"featured":6,"template":684},"enhance-application-security-with-gitlab-hackerone","content:en-us:blog:enhance-application-security-with-gitlab-hackerone.yml","Enhance Application Security With Gitlab 
Hackerone","en-us/blog/enhance-application-security-with-gitlab-hackerone.yml","en-us/blog/enhance-application-security-with-gitlab-hackerone",{"_path":1779,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1780,"content":1786,"config":1791,"_id":1793,"_type":13,"title":1794,"_source":15,"_file":1795,"_stem":1796,"_extension":18},"/en-us/blog/enhance-data-security-with-custom-pii-detection-rulesets",{"title":1781,"description":1782,"ogTitle":1781,"ogDescription":1782,"noIndex":6,"ogImage":1783,"ogUrl":1784,"ogSiteName":669,"ogType":670,"canonicalUrls":1784,"schema":1785},"Strengthen data security with custom PII detection rulesets","This tutorial explains how GitLab's customizable Secret Detection rulesets enhance data security by identifying PII patterns in code repositories. Learn how AI can help.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097701/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%285%29_1iy516k40hwBDChKcUJ2zb_1750097700983.png","https://about.gitlab.com/blog/enhance-data-security-with-custom-pii-detection-rulesets","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Strengthen data security with custom PII detection rulesets\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2025-04-01\",\n      }",{"title":1781,"description":1782,"authors":1787,"heroImage":1783,"date":1788,"body":1789,"category":814,"tags":1790},[1767],"2025-04-01","Protecting sensitive information is more critical than ever. GitLab's Secret Detection feature provides a powerful solution to identify and prevent the exposure of sensitive data. 
This tutorial explores how GitLab Secret Detection works, how to create custom rulesets for finding personally identifiable information, and how GitLab Duo Chat can streamline the creation of regex patterns for PII detection.\n\n## Understanding GitLab Secret Detection\n\n[GitLab Secret Detection](https://docs.gitlab.com/user/application_security/secret_detection/) is a security scanning feature integrated into the GitLab CI/CD pipeline. It automatically scans your codebase to identify hardcoded secrets, credentials, and other sensitive information that shouldn't be stored in your repository.\n\n### Key benefits\n\n* **Data breach prevention** detects secrets before they're committed to your repository.  \n* **Automated scanning** runs as part of your CI/CD pipeline without manual intervention.  \n* **Customizable rules** extend detection capabilities with custom patterns.  \n* **Compliance support** helps meet regulatory requirements like GDPR, HIPAA, and the California Privacy Protection Act.\n\n## Create custom rulesets for PII detection\n\nWhile GitLab's default secret detection covers common secrets like API keys and passwords, you may need custom rules to identify specific types of PII relevant to your organization. \n\nTo get started, create a new GitLab project and follow the steps below. 
You can follow along and see usage examples in our [PII Demo Application](https://gitlab.com/gitlab-da/tutorials/security-and-governance/devsecops/secret-scanning/pii-data-ruleset).\n\n**Step 1: Set up Secret Detection**\n\nEnsure Secret Detection is enabled in your `.gitlab-ci.yml` file:\n\n```\ninclude:\n  - template: Security/Secret-Detection.gitlab-ci.yml\n\nsecret_detection:\n  variables:\n    SECRET_DETECTION_EXCLUDED_PATHS: \"rules,.gitlab,README.md,LICENSE\"\n    SECRET_DETECTION_HISTORIC_SCAN: \"true\"\n```\n\n**Step 2: Create a custom ruleset file**\n\nCreate the directory and file `rules/pii-data-extension.toml`, which contains the regex patterns for PII data along with an allowlist of patterns to ignore. Below are patterns to detect passport numbers (USA), phone numbers (USA), and email addresses:\n\n```toml\n[extend]\n# Extends default packaged ruleset, NOTE: do not change the path.\npath = \"/gitleaks.toml\"\n\n# Patterns to ignore (used for tests)\n[allowlist]\ndescription = \"allowlist of patterns and paths to ignore in detection\"\nregexTarget = \"match\"\nregexes = ['''555-555-5555''', '''user@example.com''']\npaths = ['''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)''']\n\n# US Passport Number (USA)\n[[rules]]\nid = \"us_passport_detection\"\ntitle = \"US Passport Number\"\ndescription = \"Detects US passport numbers\"\nregex = '''\\b[A-Z]{1,2}[0-9]{6,9}\\b'''\nkeywords = [\"passport\"]\n\n# Phone Number (USA)\n[[rules]]\nid = \"us_phone_number_detection_basic\"\ntitle = \"US Phone Number\"\ndescription = \"Detects US phone numbers in basic format\"\nregex = '''\\b\\d{3}-\\d{3}-\\d{4}\\b'''\nkeywords = [\"phone\", \"mobile\"]\n\n# Email Address\n[[rules]]\nid = \"email_address\"\ntitle = \"Email Address\"\ndescription = \"Detects email addresses\"\nregex = '''[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\\.[a-zA-Z]{2,}'''\nkeywords = [\"email\", \"e-mail\"]\n```\n**Step 3: Extend Secret Detection with the custom ruleset file**\n\nCreate a directory and file 
`.gitlab/secret-detection-ruleset.toml` in the root of your repository. This file allows you to extend the standard configuration with the PII rules file, and overwrite the severity of the detected vulnerabilities (default severity is `Critical`).\n\n```\n# Define the pii rules to add to default configuration\n[[secrets.passthrough]]\ntype = \"file\"\ntarget = \"gitleaks.toml\"\nvalue = \"rules/pii-data-extension.toml\"\n\n# Overwrite Phone Number (USA) PII Severity\n[[secrets.ruleset]]\n[secrets.ruleset.identifier]\ntype = \"gitleaks_rule_id\"\nvalue = \"us_phone_number_detection_basic\"\n[secrets.ruleset.override]\nseverity = \"Medium\"\n\n# Overwrite Email Address PII Severity\n[[secrets.ruleset]]\n[secrets.ruleset.identifier]\ntype = \"gitleaks_rule_id\"\nvalue = \"email_address\"\n[secrets.ruleset.override]\nseverity = \"Low\"\n```\n\n**Step 4: Commit your changes**\n\nNow add the changes in the above steps to your project.\n\n```\ncd /path/to/your/project\ngit add .\ngit commit -m \"Add PII data ruleset and Secret Scanning\"\ngit push\n```\n\nOnce the code is committed, Secret Detection will run within the default branch.\n\n**Step 5: Test detection of PII data**\n\nNow that we have configured the Secret Detection scanner, we should perform a test to see if the scanner is detecting the new custom patterns. 
This can be done by creating a merge request, which adds a new file named `customer-data.yaml` with the following:\n\n```yaml  \ncustomers:  \n  test_user:  \n    phone_number: 555-555-5555  \n    email: user@example.com  \n  justin_case:  \n    phone_number: 512-123-4567  \n    passport_number: A12345678  \n    email: justin_case@example.com  \n  chris_p_bacon: \n    phone_number: 305-123-4567  \n    passport_number: B09876543  \n    email: chris_p_bacon@example.com  \n```\n\nThe scanner should now perform the following:\n\n* Ignore the `phone_number` and `email` of `test_user` due to patterns being in allowlist  \n* Detect six potential vulnerabilities due to the information present for both `justin_case` and `chris_p_bacon`\n  * U.S. passport number severity is set to `Critical` (default)  \n  * U.S. phone number severity is set to `Medium` (override)  \n  * Email address severity is set to `Low` (override)\n  * Data from rules override is added to each vulnerability\n\nOnce the [merge request](https://gitlab.com/gitlab-da/tutorials/security-and-governance/devsecops/secret-scanning/pii-data-ruleset/-/merge_requests/4) is submitted, the Secret Detection scanner runs and provides the following results:\n\n![Secret Detection finding custom PII data MR](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097709/Blog/Content%20Images/Blog/Content%20Images/pii_vulns_aHR0cHM6_1750097709683.png)\n\nWhen clicking on a vulnerability, you are presented with detailed vulnerability data based on what was configured in your newly set up rules:\n\n![Expanded custom PII data vulnerability](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097710/Blog/Content%20Images/Blog/Content%20Images/pii_vulns_expanded_aHR0cHM6_1750097709684.png)\n\nThis data allows you to determine the validity of the data present and address it accordingly.\n\nThere are additional ways to configure custom rulesets. 
For example, rules can be applied remotely to several projects, avoiding the need to duplicate the rules file. See the [Secret Detection Configuration documentation](https://docs.gitlab.com/user/application_security/secret_detection/pipeline/configure/) for more information.\n\n## Common PII types to consider\n\nWhen building your custom ruleset, consider including patterns for:\n\n* Social Security Numbers  \n* Credit card numbers  \n* Driver's license numbers  \n* Passport numbers  \n* Email addresses  \n* Phone numbers  \n* IP addresses  \n* Physical addresses  \n* Medical record numbers\n\n## Leveraging GitLab Duo Chat for pattern creation\n\nCreating effective regex patterns can be challenging. This is where [GitLab Duo Chat](https://docs.gitlab.com/user/gitlab_duo_chat/) comes in handy. GitLab Duo Chat is an AI-powered assistant that accelerates development with contextual, conversational AI. Chat is available to any user with an assigned GitLab Duo seat.\n\n### Using GitLab Duo Chat to generate PII regex patterns\n\n1. **Access GitLab Duo Chat:** In the upper right corner, select __GitLab Duo Chat__. A drawer opens on the right side of your screen.\n\n2. **Prompt the chat:** Enter a prompt in the chat input box and press __Enter__ or select __Send__:\n\n```\nI need a regex pattern to detect Canadian Zip Codes in our codebase for secret detection\n```\n\n3. **Review and refine:** GitLab Duo Chat will suggest a regex pattern that you can review and refine:\n\n```\nCanadian postal codes follow the format \"A1A 1A1\" (where A is a letter and 1 is a digit). 
Here's a regex pattern that will detect Canadian postal codes in your codebase, accounting for both spaced and non-spaced formats:\n[A-Za-z]\\d[A-Za-z][ -]?\\d[A-Za-z]\\d\nThis pattern matches:\n* A letter, followed by a digit, followed by a letter\n* An optional space or hyphen\n* A digit, followed by a letter, followed by a digit\nIf you need a more strict pattern that only matches uppercase letters (as official Canadian postal codes use), you can use:\n[A-Z]\\d[A-Z][ -]?\\d[A-Z]\\d\nFor implementation in a secret detection tool, you might want to add word boundaries to avoid partial matches:\n\\b[A-Z]\\d[A-Z][ -]?\\d[A-Z]\\d\\b\nWould you like more details about implementing this in a specific secret detection system?\n```\n\n4. **Test the pattern:** Test the pattern against sample data to ensure it works correctly. You can use sites like [https://regex101.com/](https://regex101.com/) to validate patterns before you submit them to your ruleset.\n\n5. **Add to your ruleset:** Once satisfied, add the pattern to your `rules/pii-data-extension.toml` file and perform some tests to validate. 
This sample [merge request](https://gitlab.com/gitlab-da/tutorials/security-and-governance/devsecops/secret-scanning/pii-data-ruleset/-/merge_requests/5/diffs) contains a newly added rule for Canadian Zip Codes based on the above.\n\nAdditionally, you can use GitLab Duo Chat in:\n\n* [The GitLab Web IDE (VS Code in the cloud)](https://docs.gitlab.com/user/project/web_ide/)  \n* VS Code, with the [GitLab Workflow extension for VS Code](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow)  \n* JetBrains IDEs, with the [GitLab Duo Plugin for JetBrains](https://plugins.jetbrains.com/plugin/22325-gitlab-duo)  \n* Visual Studio for Windows, with the [GitLab Extension for Visual Studio](https://marketplace.visualstudio.com/items?itemName=GitLab.GitLabExtensionForVisualStudio)\n\nIn the future, you’ll be able to leverage [GitLab Duo Workflow](https://docs.gitlab.com/user/duo_workflow/) (currently in private beta) to automatically generate and add these patterns to your code base directly from your IDE. GitLab Duo Workflow is an AI agent, which transforms AI from reactive assistant to autonomous contributor, optimizing your software development lifecycle. Learn more about [GitLab Duo Workflow](https://about.gitlab.com/blog/meet-gitlab-duo-workflow-the-future-of-ai-driven-development/).\n\n## Best practices for PII detection\n\n1. **Start small:** Begin with a few critical PII types and expand gradually.  \n2. **Test thoroughly:** Test your patterns against sample data to avoid false positives.  \n3. **Update regularly:** Review and update your rulesets as new PII requirements emerge.  \n4. **Document patterns:** Maintain documentation for your custom regex patterns.  \n5. **Balance precision:** Make patterns specific enough to avoid false positives but flexible enough to catch variations.  \n6. 
**Implement [Secret Push Protection](https://about.gitlab.com/blog/prevent-secret-leaks-in-source-code-with-gitlab-secret-push-protection/):** Prevent PII data from making it into your repository.  \n7. **Set up [Merge Request Approval Policies](https://docs.gitlab.com/user/application_security/policies/merge_request_approval_policies/):** Require approval before merging any possible PII data to your repository.\n\nOnce you have set up a PII data ruleset to meet your organization's needs, remote rulesets can scan for PII data across multiple repositories without the need to duplicate the rules file. Watch this video to learn more:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/vjJxQz918WE?si=CRdIEodo3ALxVWXO\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Handling Secret Detection findings\n\nWhen GitLab Secret Detection identifies potential PII in your code:\n\n1. **Review the finding:** Assess whether it's a legitimate finding or a false positive.  \n2. **Remediate:** Remove the sensitive data and replace it with environment variables or secrets management.  \n3. **[Redact text from repository](https://docs.gitlab.com/user/project/merge_requests/revert_changes/#redact-text-from-repository):** Permanently delete sensitive or confidential information that was accidentally committed, ensuring it's no longer accessible in your repository's history. \n4. **Track progress:** Use GitLab's security dashboard to monitor ongoing compliance.\n\n## Get started today\n\nGitLab Secret Detection, combined with custom PII rulesets, provides a powerful defense against inadvertent exposure of sensitive information. 
By leveraging GitLab Duo Chat to create precise regex patterns, teams can efficiently implement comprehensive PII detection across their codebase, ensuring regulatory compliance and protecting user data.\n\nRemember that secret detection is just one component of a comprehensive security strategy. Combine it with other GitLab security features like static application security testing, dynamic application security testing, and dependency scanning for a more robust security posture.\n\nStart implementing these practices today to better protect your users' personal information and maintain the security integrity of your applications.\n\n> Start [a free, 60-day trial of GitLab Ultimate and GitLab Duo ](https://about.gitlab.com/free-trial/)today!\n\n## More resources\n\nTo learn more about GitLab security and compliance and how we can help enhance your AppSec workflows, follow the links below:\n\n* [GitLab Security and Compliance Solutions](https://about.gitlab.com/solutions/security-compliance/)  \n* [GitLab DevSecOps Platform](https://about.gitlab.com/platform/)  \n* [GitLab Duo (AI)](https://about.gitlab.com/gitlab-duo/)  \n* [GitLab Application Security documentation](https://docs.gitlab.com/user/application_security/) \n* [Secret Detection documentation](https://docs.gitlab.com/user/application_security/secret_detection/)\n",[814,678,9,478,704],{"slug":1792,"featured":90,"template":684},"enhance-data-security-with-custom-pii-detection-rulesets","content:en-us:blog:enhance-data-security-with-custom-pii-detection-rulesets.yml","Enhance Data Security With Custom Pii Detection 
Rulesets","en-us/blog/enhance-data-security-with-custom-pii-detection-rulesets.yml","en-us/blog/enhance-data-security-with-custom-pii-detection-rulesets",{"_path":1798,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1799,"content":1802,"config":1809,"_id":1811,"_type":13,"title":1812,"_source":15,"_file":1813,"_stem":1814,"_extension":18},"/en-us/blog/exact-code-search-find-code-faster-across-repositories",{"noIndex":6,"title":1800,"description":1801},"Exact Code Search: Find code faster across repositories","Discover how this new GitLab feature can find exact matches, use regex patterns, and see contextual results across terabytes of codebases.",{"title":1800,"description":1801,"authors":1803,"heroImage":1805,"date":1806,"body":1807,"category":678,"tags":1808},[1804],"Dmitry Gruzd","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675154/Blog/Hero%20Images/blog-image-template-1800x945__8_.png","2025-06-25","**TL;DR:** What if you could find any line of code across 48 TB of repositories in milliseconds? GitLab's new [Exact Code Search](https://docs.gitlab.com/ee/user/search/exact_code_search.html) makes this possible, delivering pinpoint precision, powerful regex support, and contextual multi-line results that transform how teams work with large codebases.\n## Why traditional code search is challenging\n\nAnyone who works with code knows the frustration of searching across repositories. Whether you're a developer debugging an issue, a DevOps engineer examining configurations, a security analyst searching for vulnerabilities, a technical writer updating documentation, or a manager reviewing implementation, you know exactly what you need, but traditional search tools often fail you.\n\nThese conventional tools return dozens of false positives, lack the context needed to understand results, and slow to a crawl as codebases grow. The result? 
Valuable time spent hunting for needles in haystacks instead of building, securing, or improving your software.\n\nGitLab's code search functionality has historically been backed by Elasticsearch or OpenSearch. While these are excellent for searching issues, merge requests, comments, and other data containing natural language, they weren't specifically designed for code. After [evaluating numerous options](https://gitlab.com/groups/gitlab-org/-/epics/7404), we developed a better solution.\n\n## Introducing Exact Code Search: Three game-changing capabilities\n\nEnter GitLab's **[Exact Code Search](https://docs.gitlab.com/ee/user/search/exact_code_search.html)**, currently in beta testing and powered by [Zoekt](https://github.com/sourcegraph/zoekt) (pronounced \"zookt\", Dutch for \"search\"). Zoekt is an open-source code search engine originally created by Google and now maintained by Sourcegraph, specifically designed for fast, accurate code search at scale. We've enhanced it with GitLab-specific integrations, enterprise-scale improvements, and seamless permission system integration.\n\nThis feature revolutionizes how you find and understand code with three key capabilities:\n\n**1. Exact Match mode: Zero false positives**\n\nWhen toggled to **Exact Match mode**, the search engine returns only results that match your query exactly as entered, eliminating false positives. This precision is invaluable when:\n\n* Searching for specific error messages\n* Looking for particular function signatures\n* Finding instances of specific variable names\n\n**2. Regular Expression mode: Powerful pattern matching**\n\nFor complex search needs, Regular Expression mode allows you to craft sophisticated search patterns:\n\n* Find functions following specific naming patterns\n* Locate variables matching certain constraints\n* Identify potential security vulnerabilities using pattern matching\n\n**3. 
Multiple-line matches: See code in context**\n\n![Exact Code Search](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750704179/ttjuilkt3v7gtyywnchx.png)\n\nInstead of seeing just a single line with your matching term, you get the surrounding context that's crucial for understanding the code. This eliminates the need to click through to files for basic comprehension, significantly accelerating your workflow.\n\n## From features to workflows: Real-world use cases and impact\n\nLet's see how these capabilities translate to real productivity gains in everyday development scenarios:\n\n### Debugging: From error message to root cause in seconds\n\nBefore Exact Code Search:\nCopy an error message, search, wade through dozens of partial matches in comments and documentation, click through multiple files, and eventually find the actual code.\n\nWith Exact Code Search:\n\n1. Copy the exact error message\n2. Paste it into Exact Code Search with Exact Match mode\n3. Instantly find the precise location where the error is thrown, with surrounding context\n\n**Impact:** Reduce debugging time from minutes to seconds, eliminating the frustration of false positives.\n\n### Code exploration: Master unfamiliar codebases quickly\n\nBefore Exact Code Search:\nBrowse through directories, make educated guesses about file locations, open dozens of files, and slowly build a mental map of the codebase.\n\nWith Exact Code Search:\n\n* Search for key methods or classes with Exact Match mode\n* Review multiple line matches to understand implementation details\n* Use Regular Expression mode to find similar patterns across the codebase\n\n**Impact:** Build a mental map of code structure in minutes rather than hours, dramatically accelerating onboarding and cross-team collaboration.\n\n### Refactoring with confidence\n\nBefore Exact Code Search:\nAttempt to find all instances of a method, miss some occurrences, and introduce bugs through incomplete refactoring.\n\nWith Exact Code 
Search:\n\n* Use Exact Match mode to find all occurrences of methods or variables\n* Review context to understand usage patterns\n* Plan your refactoring with complete information about impact\n\n**Impact:** Eliminate the \"missed instance\" bugs that often plague refactoring efforts, improving code quality and reducing rework.\n\n### Security auditing: Finding vulnerable patterns\n\nSecurity teams can:\n\n* Create regex patterns matching known vulnerable code\n* Search across all repositories in a namespace\n* Quickly identify potential security issues with context that helps assess risk\n\n**Impact:** Transform security audits from manual, error-prone processes to systematic, comprehensive reviews.\n\n### Cross-repository insights\n\nSearch across your entire namespace or instance to:\n\n* Identify similar implementations across different projects\n* Discover opportunities for shared libraries or standardization\n\n**Impact:** Break down silos between projects and identify opportunities for code reuse and standardization.\n\n## The technical foundation: How Zoekt delivers speed and precision\n\nBefore diving into our scale achievements, let's explore what makes Zoekt fundamentally different from traditional search engines — and why it can find exact matches so incredibly fast.\n\n### Positional trigrams: The secret to lightning-fast exact matches\n\nZoekt's speed comes from its use of **positional trigrams** — a technique that indexes every sequence of three characters along with their exact positions in files. This approach solves one of the biggest pain points developers have had with Elasticsearch-based code search: false positives.\n\nHere's how it works:\n\n**Traditional full-text search engines** like Elasticsearch tokenize code into words and lose positional information. 
When you search for `getUserId()`, they might return results containing **user**, **get**, and **Id** scattered throughout a file — leading to those frustrating false positives for GitLab users.\n\n**Zoekt's positional trigrams** maintain exact character sequences and their positions. When you search for `getUserId()`, Zoekt looks for the exact trigrams like **get**, **etU**, **tUs**, **Use**, **ser**, **erI**, **rId**, **Id(**, **d()**, all in the correct sequence and position. This ensures that only exact matches are returned.\n\nThe result? Search queries that previously returned hundreds of irrelevant results now return only the precise matches you're looking for. This was [one of our most requested features](https://gitlab.com/gitlab-org/gitlab/-/issues/325234) for good reason - developers were losing significant time sifting through false positives.\n\n### Regular expression performance at scale\n\nZoekt excels at exact matches and is optimized for regular expression searches. The engine uses sophisticated algorithms to convert regex patterns into efficient trigram queries when possible, maintaining speed even for complex patterns across terabytes of code.\n\n## Built for enterprise scale\n\nExact Code Search is powerful and built to handle massive scale with impressive performance. This is not just a new UI feature — it's powered by a completely reimagined backend architecture.\n\n### Handling terabytes of code with ease\n\nOn GitLab.com alone, our Exact Code Search infrastructure indexes and searches over **48 TB** of code data while maintaining lightning-fast response times. This scale represents millions of repositories across thousands of namespaces, all searchable within milliseconds. To put this in perspective: This scale represents more code than the entire Linux kernel, Android, and Chromium projects combined. 
Yet Exact Code Search can find a specific line across this massive codebase in milliseconds.\n\n### Self-registering node architecture\n\nOur innovative implementation features:\n\n* **Automatic node registration:** Zoekt nodes register themselves with GitLab\n* **Dynamic shard assignment:** The system automatically assigns namespaces to nodes\n* **Health monitoring:** Nodes that don't check in are automatically marked offline\n\nThis self-configuring architecture dramatically simplifies scaling. When more capacity is needed, administrators can simply add more nodes without complex reconfiguration.\n\n### Distributed system with intelligent load balancing\n\nBehind the scenes, Exact Code Search operates as a distributed system with these key components:\n\n* **Specialized search nodes:** Purpose-built servers that handle indexing and searching\n* **Smart sharding:** Code is distributed across nodes based on namespaces\n* **Automatic load balancing:** The system intelligently distributes work based on capacity\n* **High availability:** Multiple replicas ensure continuous operation even if nodes fail\n\n*Note: High availability is built into the architecture but not yet fully enabled. 
See [Issue 514736](https://gitlab.com/gitlab-org/gitlab/-/issues/514736) for updates.*\n\n### Seamless security integration\n\nExact Code Search automatically integrates with GitLab's permission system:\n\n* Search results are filtered based on the user's access rights\n* Only code from projects the user has access to is displayed\n* Security is built into the core architecture, not added as an afterthought\n\n### Optimized performance\n\n* **Efficient indexing:** Large repositories are indexed in tens of seconds\n* **Fast query execution:** Most searches return results with sub-second response times\n* **Streaming results:** The new gRPC-based federated search streams results as they're found\n* **Early termination:** Once enough results are collected, the system pauses searching\n\n## From library to distributed system: Engineering challenges we solved\n\nWhile Zoekt provided the core search technology, it was originally designed as a minimal library for managing `.zoekt` index files - not a distributed database or enterprise-scale service. 
Here are the key engineering challenges we overcame to make it work at GitLab's scale:\n\n### Challenge 1: Building an orchestration layer\n\n**The problem:** Zoekt was designed to work with local index files, not distributed across multiple nodes serving many concurrent users.\n\n**Our solution:** We built a comprehensive orchestration layer that:\n\n* Creates and manages database models to track nodes, indices, repositories, and tasks\n* Implements a self-registering node architecture (inspired by GitLab Runner)\n* Handles automatic shard assignment and load balancing across nodes\n* Provides bidirectional API communication between GitLab Rails and Zoekt nodes\n\n### Challenge 2: Scaling storage and indexing\n\n**The problem:** How do you efficiently manage terabytes of index data across multiple nodes while ensuring fast updates?\n\n**Our solution:** We implemented:\n\n* Intelligent sharding: Namespaces are distributed across nodes based on capacity and load\n* Independent replication: Each node independently indexes from [Gitaly](https://gitlab.com/gitlab-org/gitaly) (our Git storage service), eliminating complex synchronization\n* Watermark management: Sophisticated storage allocation that prevents nodes from running out of space\n* Unified binary architecture: A single `gitlab-zoekt` binary that can operate in both indexer and webserver modes\n\n### Challenge 3: Permission Integration\n\n**The problem:** Zoekt had no concept of GitLab's complex permission system - users should only see results from projects they can access.\n\n**Our solution:** We built native permission filtering directly into the search flow:\n\n* Search requests include user permission context\n* Results are filtered to include only those the user can access in case permissions change before indexing completes\n\n### Challenge 4: Operational simplicity\n\n**The problem:** Managing a distributed search system shouldn't require a dedicated team.\n\n**Our solution:**\n\n* Auto-scaling: 
Adding capacity is as simple as deploying more nodes - they automatically register and start handling work\n* Self-healing: Nodes that don't check in are automatically marked offline and their work redistributed\n* Zero-configuration sharding: The system automatically determines optimal shard assignments\n\n## Gradual rollout: Minimizing risk at scale\n\nRolling out a completely new search backend to millions of users required careful planning. Here's how we minimized customer impact while ensuring reliability:\n\n### Phase 1: Controlled testing (gitlab-org group)\n\nWe started by enabling Exact Code Search only for the `gitlab-org` group - our own internal repositories. This allowed us to:\n\n* Test the system with real production workloads\n* Identify and fix performance bottlenecks\n* Streamline the deployment process\n* Learn from real users' workflows and feedback\n\n### Phase 2: Performance validation and optimization\n\nBefore expanding, we focused on ensuring the system could handle GitLab.com's scale:\n\n* Implemented comprehensive monitoring and alerting\n* Validated storage management with real production data growth\n\n### Phase 3: Incremental customer expansion\n\nWe gradually expanded to customers interested in testing Exact Code Search:\n\n* Gathered feedback on performance and user experience\n* Refined the search UI based on real user workflows\n* Optimized indexing performance (large repositories like `gitlab-org/gitlab` now index in ~10 seconds)\n* Refined the architecture based on operational learnings\n* Massively increased indexing throughput and improved state transition lifecycle\n\n### Phase 4: Broad rollout\n\nToday, over 99% of Premium and Ultimate licensed groups on GitLab.com have access to Exact Code Search. 
Users can:\n\n* Toggle between regex and exact search modes\n* Experience the benefits without any configuration changes\n* Fall back to the previous search if needed (though few choose to)\n\nRolling this out gradually meant users didn't experience service disruptions, performance degradation, or feature gaps during the transition. We've already received positive feedback from users as they notice their results becoming more relevant and faster.\n\n> **For technical deep dive:** Interested in the detailed architecture and implementation? Check out our comprehensive [design document](https://handbook.gitlab.com/handbook/engineering/architecture/design-documents/code_search_with_zoekt/) for in-depth technical details about how we built this distributed search system.\n\n## Getting started with Exact Code Search\n\nGetting started with Exact Code Search is simple because it's already enabled by default for Premium and Ultimate groups on GitLab.com (over 99% of eligible groups currently have access).\n\n### Quickstart guide\n\n1. Navigate to the Advanced Search in your GitLab project or group\n2. Enter your search term in the code tab\n3. Toggle between Exact Match and Regular Expression modes\n4. Use filters to refine your search\n\n### Basic search syntax\n\nWhether using Exact Match or Regular Expression mode, you can refine your search with modifiers:\n\n| Query Example | What It Does                                             |\n| ------------- | -------------------------------------------------------- |\n| `file:js`     | Searches only in files containing \"js\" in their name     |\n| `foo -bar`    | Finds \"foo\" but excludes results with \"bar\"              |\n| `lang:ruby`   | Searches only in Ruby files                              |\n| `sym:process` | Finds \"process\" in symbols (methods, classes, variables) |\n\n> **Pro Tip:** For the most efficient searches, start specific and then broaden if needed. 
Using `file:` and `lang:` filters dramatically increases relevance.\n\n### Advanced search techniques\n\nStack multiple filters for precision:\n\n```\nis_expected file:rb -file:spec\n```\n\nThis finds \"is_expected\" in Ruby files that don't have \"spec\" in their name.\n\nUse regular expressions for powerful patterns:\n\n```\ntoken.*=.*[\\\"']\n```\n\n[Watch this search performed against the GitLab Zoekt repository.](https://gitlab.com/search?search=token.*%3D.*%5B%5C%22'%5D&nav_source=navbar&project_id=46649240&group_id=9970&search_code=true&repository_ref=main&regex=true)\n\nThe search helps find hardcoded passwords, which, if not found, can be a security issue.\n\nFor more detailed syntax information, check the [Exact Code Search documentation](https://docs.gitlab.com/user/search/exact_code_search/#syntax).\n\n## Availability and deployment\n\n### Current availability\n\nExact Code Search is currently in Beta for GitLab.com users with Premium and Ultimate licenses:\n\n* Available for over 99% of licensed groups\n* Search in the UI automatically uses Zoekt when available, Exact Code Search in Search API is behind a feature flag\n\n### Self-managed deployment options\n\nFor self-managed instances, we offer several deployment methods:\n\n* Kubernetes/Helm: Our most well-supported method, using our [`gitlab-zoekt` Helm chart](https://gitlab.com/gitlab-org/cloud-native/charts/gitlab-zoekt)\n* Other deployment options: We're working on streamlining deployment for Omnibus and other installation methods\n\nSystem requirements depend on your codebase size, but the architecture is designed to scale horizontally and/or vertically as your needs grow.\n\n## What's coming next\n\nWhile Exact Code Search is already powerful, we're continuously improving it:\n\n* **Scale optimizations** to support instances with hundreds of thousands of repositories\n* **Improved self-managed deployment** options, including streamlined Omnibus support\n* **Full high availability support** with 
automatic failover and load balancing\n\nStay tuned for updates as we move from Beta to General Availability.\n\n## Transform how you work with code\n\nGitLab's Exact Code Search represents a fundamental rethinking of code discovery. By delivering exact matches, powerful regex support, and contextual results, it solves the most frustrating aspects of code search:\n\n* No more wasting time with irrelevant results\n* No more missing important matches\n* No more clicking through files just to understand basic context\n* No more performance issues as codebases grow\n\nThe impact extends beyond individual productivity:\n\n* **Teams collaborate better** with easy code referencing\n* **Knowledge sharing accelerates** when patterns are discoverable\n* **Onboarding becomes faster** with quick codebase comprehension\n* **Security improves** with effective pattern auditing\n* **Technical debt reduction** becomes more feasible\n\nExact Code Search isn't just a feature, it's a better way to understand and work with code. Stop searching and start finding.\n\n**We'd love to hear from you!** Share your experiences, questions, or feedback about Exact Code Search in our [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/420920). Your input helps us prioritize improvements and new features.\n\n> #### Ready to experience smarter code search? Learn more in our [documentation](https://docs.gitlab.com/ee/user/search/exact_code_search.html) or try it now by performing a search in your Premium or Ultimate licensed namespaces or projects. Not a GitLab user yet? 
Try [a free, 60-day trial of GitLab Ultimate with Duo](https://about.gitlab.com/free-trial/)!",[678,9,727],{"featured":6,"template":684,"slug":1810},"exact-code-search-find-code-faster-across-repositories","content:en-us:blog:exact-code-search-find-code-faster-across-repositories.yml","Exact Code Search Find Code Faster Across Repositories","en-us/blog/exact-code-search-find-code-faster-across-repositories.yml","en-us/blog/exact-code-search-find-code-faster-across-repositories",{"_path":1816,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1817,"content":1823,"config":1828,"_id":1830,"_type":13,"title":1831,"_source":15,"_file":1832,"_stem":1833,"_extension":18},"/en-us/blog/exporting-vulnerability-reports-to-html-pdf-jira",{"title":1818,"description":1819,"ogTitle":1818,"ogDescription":1819,"noIndex":6,"ogImage":1820,"ogUrl":1821,"ogSiteName":669,"ogType":670,"canonicalUrls":1821,"schema":1822},"How to export vulnerability reports to HTML/PDF and Jira","With GitLab's API, it's easy to query vulnerability info and send the report details elsewhere, such as a PDF file or a Jira project.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662877/Blog/Hero%20Images/security-cover-new.png","https://about.gitlab.com/blog/exporting-vulnerability-reports-to-html-pdf-jira","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to export vulnerability reports to HTML/PDF and Jira\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Siddharth Mathur\"}],\n        \"datePublished\": \"2023-09-14\",\n      }",{"title":1818,"description":1819,"authors":1824,"heroImage":1820,"date":1825,"body":1826,"category":769,"tags":1827},[957],"2023-09-14","\nGitLab's [Vulnerability Report](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/) makes it easy to triage security scan results without ever having to leave the platform. 
You can manage your code, run security scans against it, and fix vulnerabilities all in one place. That being said, some teams prefer to manage their vulnerabilities in a separate tool like Jira. They may also need to present the vulnerability report to leadership in a digestible format.\n\nOut of the box, GitLab's Vulnerability Report can be [exported to CSV](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/#export-vulnerability-details) with a single click, for easy analysis in other tools. In some cases though, a simple PDF of the report is all that's needed. \n\nWith [GitLab's API](https://docs.gitlab.com/ee/api/graphql/reference/index.html#queryvulnerabilities), it's easy to query vulnerability info and send the report details elsewhere, such as a PDF file or a Jira project. In this blog, we'll show you how to export to HTML/PDF and Jira. **Note that the scripts used in this tutorial are provided for educational purposes and they are not supported by GitLab.**\n\n## Exporting to HTML/PDF\nTo export your vulnerability reports to HTML or PDF, head to the [Custom Vulnerability Reporting](https://gitlab.com/jwagner-demo/vandelay-industries/engineering/custom-vulnerability-reporting) project. \n\n![Project overview](https://about.gitlab.com/images/blogimages/2023-07-27-exporting-vulnerability-reports-to-html-pdf-and-jira/project_overview.png)\n\n\nThis project contains a script that queries a project's vulnerability report, and then generates an HTML file from that data. 
The pipeline configured in the project runs this script and converts the HTML file to PDF as well.\n\nTo use the exporter, first [fork the project](https://gitlab.com/jwagner-demo/vandelay-industries/engineering/custom-vulnerability-reporting/-/forks/new) or [import it into a new project](https://gitlab.com/projects/new#import_project) (select “Repository by URL” and paste the git URL of the original project).\n\n![Project import](https://about.gitlab.com/images/blogimages/2023-07-27-exporting-vulnerability-reports-to-html-pdf-and-jira/project_import.png)\n\n\nSet the CI/CD variables as described in the readme. You'll need the following from GitLab:\n- GitLab project/personal access token with permissions to access vulnerability info (read_api scope)\n- GitLab GraphQL API URL (for SaaS this is https://gitlab.com/api/graphql)\n- GitLab project path (e.g. smathur/custom-vulnerability-reporting)\n\nAfter you've set the required CI/CD variables, manually run a pipeline from your project's Pipelines page. Once the pipeline is complete, you'll see your file export by going to the “build_report” (for HTML) or “pdf_conversion” job and selecting “Download” or “Browse” on the sidebar under \"Job artifacts.\" And there you have it! A shareable, easy-to-read export of your project's vulnerabilities.\n\n![PDF export](https://about.gitlab.com/images/blogimages/2023-07-27-exporting-vulnerability-reports-to-html-pdf-and-jira/pdf_export.png)\n\n\n## Exporting vulnerability info to Jira\nGitLab lets you create Jira tickets from vulnerabilities through the UI using our [Jira integration](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#create-a-jira-issue-for-a-vulnerability). While you can do this individually for vulnerabilities that need actioning, sometimes teams need to bulk-create Jira tickets for all their vulnerabilities. 
We can leverage GitLab and Jira's APIs to achieve this.\n\nTo get started, head to the [External Vulnerability Tracking](https://gitlab.com/smathur/external-vulnerability-tracking) project. This script fetches vulnerabilities in the same way as the script above, but it uses the Jira API to create a ticket for each vulnerability. Each ticket's description is also populated with details from GitLab's vulnerability report.\n\nTo use the exporter, simply [fork the project](https://gitlab.com/smathur/external-vulnerability-tracking/-/forks/new) or [import it into a new project](https://gitlab.com/projects/new#import_project) (select “Repository by URL” and paste the git URL of the original project), and set the CI/CD variables as described in the readme. You'll need the following from GitLab:\n- GitLab project/personal access token with permissions to access vulnerability info (read_api scope)\n- GitLab GraphQL API URL (for SaaS this is https://gitlab.com/api/graphql)\n- GitLab project path (e.g. smathur/external-vulnerability-tracking)\n\nYou will also need the following from Jira:\n- Jira [personal access token](https://id.atlassian.com/manage-profile/security/api-tokens)\n- Jira API issue endpoint URL (for SaaS this is https://ORG_NAME.atlassian.net/rest/api/latest/issue/)\n- Jira user email ID\n- Jira project key where you want to create vulnerability tickets (e.g. ABC)\n\nOnce you have set your CI/CD variables as described in the project readme, simply run a pipeline from your project's Pipelines page, and watch as your tickets get created in Jira!\n\nIf you run the pipeline again in the future, the script will run a search query against your Jira project to prevent duplicate tickets from being created. 
It will create tickets for new vulnerabilities that aren't already in Jira.\n\n![Jira export](https://about.gitlab.com/images/blogimages/2023-07-27-exporting-vulnerability-reports-to-html-pdf-and-jira/jira_export.png)\n\n\n## References\n- [GitLab Vulnerability API](https://docs.gitlab.com/ee/api/graphql/reference/index.html#queryvulnerabilities)\n- [Custom Vulnerability Reporting project](https://gitlab.com/jwagner-demo/vandelay-industries/engineering/custom-vulnerability-reporting)\n- [External Vulnerability Tracking project](https://gitlab.com/smathur/external-vulnerability-tracking)\n- [Jira REST API examples](https://developer.atlassian.com/server/jira/platform/jira-rest-api-examples/)\n\n",[9,749,814,794],{"slug":1829,"featured":6,"template":684},"exporting-vulnerability-reports-to-html-pdf-jira","content:en-us:blog:exporting-vulnerability-reports-to-html-pdf-jira.yml","Exporting Vulnerability Reports To Html Pdf Jira","en-us/blog/exporting-vulnerability-reports-to-html-pdf-jira.yml","en-us/blog/exporting-vulnerability-reports-to-html-pdf-jira",{"_path":1835,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1836,"content":1839,"config":1845,"_id":1847,"_type":13,"title":1848,"_source":15,"_file":1849,"_stem":1850,"_extension":18},"/en-us/blog/fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab",{"noIndex":6,"title":1837,"description":1838},"Fast and secure AI agent deployment to Google Cloud with GitLab","Follow this step-by-step guide, complete with a demo application, to learn how to use agentic AI, along with GitLab's native integrations and CI/CD components.",{"title":1837,"description":1838,"authors":1840,"heroImage":1842,"date":812,"body":1843,"category":702,"tags":1844},[1841],"Regnard Raquedan","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670563/Blog/Hero%20Images/cloudcomputing.jpg","[Agentic AI](https://about.gitlab.com/topics/agentic-ai/) is transforming\nhow we build intelligent applications, but deploying AI 
agents securely and\nefficiently can be challenging. In this tutorial, you'll learn how to deploy\nan AI agent built with Google's Agent Development Kit\n([ADK](https://cloud.google.com/vertex-ai/generative-ai/docs/agent-development-kit/quickstart))\nto Cloud Run using [GitLab's native\nintegrations](https://cloud.google.com/blog/topics/partners/understand-the-google-cloud-gitlab-integration)\nand [CI/CD components](https://docs.gitlab.com/ci/components/).\n\n\n## What are AI agents and why do they matter?\n\n\nAgentic AI represents a significant evolution in artificial intelligence. Unlike traditional generative AI tools that require constant human direction, AI agents leverage advanced language models and natural language processing to take independent action. These systems can understand requests, make decisions, and execute multistep plans to achieve goals autonomously.\n\n\nThis tutorial uses Google's ADK, a flexible and modular framework for developing and deploying AI agents. While optimized for Gemini and the Google ecosystem, ADK is model-agnostic, deployment-agnostic, and built for compatibility with other frameworks.\n\n\n## Our demo application: Canada City Advisor\n\n\nTo demonstrate the deployment process, we'll work with a practical example: the Canada City Advisor. This AI agent helps users find their ideal Canadian city based on their preferences and constraints.\n\n\nHere's how it works:\n\n\n* Users input their budget requirements and lifestyle preferences.  \n\n* The root agent coordinates two sub-agents:  \n\n  * A budget analyzer agent that evaluates financial constraints. This draws data obtained from the Canada Mortgage and Housing Corporation.  \n  * A lifestyle preferences agent that matches cities to user needs. This includes a weather service that uses [Open-Meteo](https://open-meteo.com/) to get the proper city information.  
\n* The system generates personalized city recommendations\n\n\nThis multi-agent architecture showcases the power of agentic AI - different specialized agents working together to solve a complex problem. The sub-agents are only invoked when the root agent determines that budget and lifestyle analysis are needed.\n\n\n![Multi-agent architecture to develop demo application with agentic AI](https://res.cloudinary.com/about-gitlab-com/image/upload/v1751576568/obgxpxvlnxtzifddrrz1.png)\n\n\n## Prerequisites\n\n\nBefore we begin, ensure you have:\n\n\n* A Google Cloud project with the following APIs enabled:  \n\n  * Cloud Run API  \n  * Artifact Registry API  \n  * Vertex AI API  \n* A GitLab project for your source code  \n\n* Appropriate permissions in both GitLab and Google Cloud\n\n\n**Step 1: Set up IAM integration with Workload Identity Federation**\n\n\nThe first step establishes secure, keyless authentication between GitLab and Google Cloud using [Workload Identity Federation](https://cloud.google.com/iam/docs/workload-identity-federation). This eliminates the need for service account keys and improves security.\n\n\nIn your GitLab project:\n\n\n1. Navigate to **Settings > Integrations > Google Cloud IAM.**  \n\n2. Provide the following information:  \n\n   * **Project ID**: Your Google Cloud project ID  \n   * **Project Number**: Found in your Google Cloud console  \n   * **Pool ID**: A unique identifier for your workload identity pool  \n   * **Provider ID**: A unique identifier for your identity provider\n\nGitLab will generate a script for you. Copy this script and run it in your Google Cloud Shell to create the Workload Identity Federation.\n\n\n**Step 2: Configure Google Artifact Registry integration**\n\n\nNext, we'll set up the connection to Google Artifact Registry where our container images will be stored.\n\n\n1. In GitLab, go to **Settings > Integrations > Google Artifact Registry.**  \n\n2. 
Enter:  \n\n   * **Google Cloud Project ID**: Same as in Step 1  \n   * **Repository Name**: Name of an existing Artifact Registry repository  \n   * **Location**: The region where your repository is located\n\n**Important**: The repository must already exist in Artifact Registry. GitLab won't create a new one for you in this context.\n\n\nGitLab will generate commands to set up the necessary permissions. Run these in Google Cloud Shell.\n\n\nAdditionally, add these roles to your service principal for Cloud Run deployment:\n\n\n* `roles/run.admin`  \n\n* `roles/iam.serviceAccountUser`  \n\n* `roles/cloudbuild.builds.editor`\n\n\nYou can add these roles using the following gcloud commands:\n\n\n```shell\n\nGCP_PROJECT_ID=\"\u003Cyour-project-id>\" #replace\n\nGCP_PROJECT_NUMBER=\"\u003Cyour-project-number>\" #replace\n\nGCP_WORKLOAD_IDENTITY_POOL=\"\u003Cyour-pool-id>\" #replace\n\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/run.admin'\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/iam.serviceAccountUser'\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/cloudbuild.builds.editor'\n```\n\n\n**Step 3: Create the CI/CD pipeline**\n\n\nNow for the exciting part – let's build our deployment pipeline! 
GitLab's CI/CD components make this remarkably simple.\n\n\nCreate a `.gitlab-ci.yml` file in your project root:\n\n\n```unset\n\nstages:\n  - build\n  - test\n  - upload\n  - deploy\n\nvariables:\n  GITLAB_IMAGE: $CI_REGISTRY_IMAGE/main:$CI_COMMIT_SHORT_SHA\n  AR_IMAGE: $GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_LOCATION-docker.pkg.dev/$GOOGLE_ARTIFACT_REGISTRY_PROJECT_ID/$GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_NAME/main:$CI_COMMIT_SHORT_SHA\n\nbuild:\n  image: docker:24.0.5\n  stage: build\n  services:\n    - docker:24.0.5-dind\n  before_script:\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n  script:\n    - docker build -t $GITLAB_IMAGE .\n    - docker push $GITLAB_IMAGE\n\ninclude:\n  - template: Jobs/Dependency-Scanning.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Dependency-Scanning.gitlab-ci.yml\n  - template: Jobs/SAST.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/SAST.gitlab-ci.yml\n  - template: Jobs/Secret-Detection.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Secret-Detection.gitlab-ci.yml\n  - component: gitlab.com/google-gitlab-components/artifact-registry/upload-artifact-registry@main\n    inputs:\n      stage: upload\n      source: $GITLAB_IMAGE\n      target: $AR_IMAGE\n  - component: gitlab.com/google-gitlab-components/cloud-run/deploy-cloud-run@main\n    inputs:\n      stage: deploy\n      project_id: \"\u003Cyour-project-id>\" #replace\n      service: \"canadian-city\"\n      region: \"us-central1\"\n      image: $AR_IMAGE\n```\n\n\nThe pipeline consists of four stages:\n\n\n1. **Build**: Creates the Docker container with your AI agent  \n\n2. **Test**: Runs security scans (container scanning, dependency scanning, SAST)  \n\n3. **Upload**: Pushes the container to Artifact Registry  \n\n4. 
**Deploy**: Deploys to Cloud Run\n\n\nThe great thing about using [GitLab's CI/CD components](https://docs.gitlab.com/ci/components/) is that you only need to provide a few parameters - the components handle all the complex authentication and deployment logic.\n\n\n**Step 4: Deploy and test**\n\n\nWith everything configured, it's time to deploy:\n\n\n1. Commit your code and `.gitlab-ci.yml` to your GitLab repository.  \n\n2. The pipeline will automatically trigger.  \n\n3. Monitor the pipeline progress in GitLab's CI/CD interface.  \n\n4. Once complete, find your Cloud Run URL in the Google Cloud Console.\n\n\nYou'll see each stage execute:\n\n\n* Build stage creates your container.  \n\n* Test stage runs comprehensive security scans.  \n\n* Upload stage pushes to Artifact Registry.  \n\n* Deploy stage creates or updates your Cloud Run service.\n\n\n## Security benefits\n\n\nThis approach provides several security advantages:\n\n\n* **No long-lived credentials:** Workload Identity Federation eliminates service account keys.  \n\n* **Automated security scanning:** Every deployment is scanned for vulnerabilities.  \n\n* **Audit trail:** Complete visibility of who deployed what and when.  \n\n* **Principle of least privilege:** Fine-grained IAM roles limit access.\n\n\n## Summary\n\nBy combining GitLab's security features with Google Cloud's powerful AI and serverless platforms, you can deploy AI agents that are both secure and scalable. The integration between GitLab and Google Cloud eliminates much of the complexity traditionally associated with such deployments.\n\n> Use this tutorial's [complete code\nexample](https://gitlab.com/gitlab-partners-public/google-cloud/demos/ai-agent-deployment)\nto get started now. Not a GitLab customer yet? 
Explore the DevSecOps platform with [a free trial](https://about.gitlab.com/free-trial/).\n",[704,1248,9],{"featured":6,"template":684,"slug":1846},"fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab","content:en-us:blog:fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab.yml","Fast And Secure Ai Agent Deployment To Google Cloud With Gitlab","en-us/blog/fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab.yml","en-us/blog/fast-and-secure-ai-agent-deployment-to-google-cloud-with-gitlab",{"_path":1852,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1853,"content":1859,"config":1866,"_id":1868,"_type":13,"title":1869,"_source":15,"_file":1870,"_stem":1871,"_extension":18},"/en-us/blog/fast-python-flask-server-deployment-with-gitlab-google-cloud",{"title":1854,"description":1855,"ogTitle":1854,"ogDescription":1855,"noIndex":6,"ogImage":1856,"ogUrl":1857,"ogSiteName":669,"ogType":670,"canonicalUrls":1857,"schema":1858},"Fast Python Flask server deployment with GitLab + Google Cloud","This tutorial shows how to use GitLab’s Google Cloud integration to deploy a Python Flask server in less than 10 minutes, helping developers become more independent and efficient.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098427/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945_fJKX41PJHKCfSOWw4xQxm_1750098427691.png","https://about.gitlab.com/blog/fast-python-flask-server-deployment-with-gitlab-google-cloud","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Fast Python Flask server deployment with GitLab + Google Cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Noah Ing\"},{\"@type\":\"Person\",\"name\":\"Jerez Solis\"}],\n        \"datePublished\": \"2024-11-04\",\n      }",{"title":1854,"description":1855,"authors":1860,"heroImage":1856,"date":1862,"body":1863,"category":769,"tags":1864},[831,1861],"Jerez 
Solis","2024-11-04","Deploying an application to the cloud often requires assistance from production or DevOps engineers. GitLab's Google Cloud integration empowers developers to handle deployments independently. In this tutorial, you'll learn how to deploy a Python Flask server to Google Cloud in less than 10 minutes. Whether you’re a solo developer or part of a large team, this setup allows you to deploy applications efficiently.\n\nYou'll learn how to:\n\n- Create a new project in GitLab\n- Create a Flask server utilizing `main.py`\n- Utilize the Google Cloud integration to create a Service account\n- Utilize the Google Cloud integration to create Cloud Run via a merge request\n- Access your newly deployed Flask server\n- Clean up your environment\n\n## Prerequisites:\n- Owner access on a Google Cloud Platform project\n- Working knowledge of Python\n- Working knowledge of GitLab CI\n- 10 minutes\n\n## Step-by-step Python Flask server deployment to Google Cloud\n\n**1. Create a new project in GitLab.**\n\nWe decided to call our project \"python-flask-cloud-run\" for simplicity.\n\n![python flask server - create a new project in GitLab](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098438036.png)\n\n**2. Create a flask server utilizing main.py demo.**\n\nFind the `main.py` demo here: [https://gitlab.com/demos/applications/python-flask-cloud-run](https://gitlab.com/demos/applications/python-flask-cloud-run).\n\n```python\nimport os\n\nfrom flask import Flask\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef hello_world():\n    \"\"\"Example Hello World route.\"\"\"\n    name = os.environ.get(\"NAME\", \"World\")\n    return f\"Hello {name}!\"\n\nif __name__ == \"__main__\":\n    app.run(debug=True, host=\"0.0.0.0\", port=int(os.environ.get(\"PORT\", 8080)))\n```\n\n**3. 
Create a `requirements.txt` with the following dependencies.**\n\n```\nFlask==3.0.3\ngunicorn==22.0.0\nWerkzeug==3.0.3\n```\n\n**4. Utilizing the Google Cloud integration, create a Service account.**\n\nNavigate to **Operate > Google Cloud > Create Service account**.\n\n![python flask server - create service account](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098438037.png)\n\n**5. Also configure the region you would like the Cloud Run instance to deploy to.**\n\n![python flask server - configure the region](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098438038.png)\n\n**6. Utilizing the Google Cloud integration, configure Cloud Run via merge request.**\n\n![python flask server - deployments](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098438041.png)\n\n**7. This will open a merge request. Immediately merge this merge request.**\n\n![python flask server - enable deployments to Cloud Run](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750098438043.png)\n\n**Note:** `GCP_PROJECT_ID`, `GCP_REGION`, `GCP_SERVICE_ACCOUNT`, `GCP_SERVICE_ACCOUNT_KEY` will all be automatically populated from the previous steps.\n\n![python flask server - variables](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750098438044.png)\n\n**8. Voila! 
Check your pipeline and you will see you have successfully deployed to Google Cloud Run utilizing GitLab CI.**\n\n![python flask server - update dockerfile](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098438045.png)\n\n\u003Cbr>\u003C/br>\n\n![python flask server - dockerfile](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098438046.png)\n\n**9. Click the Service URL to view your newly deployed Flask server.**\n\nNavigate to **Operate > Environments** to see a list of deployments for your environments.\n\n![python flask server - deployments list](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098438047.png)\n\nBy clicking on the environment called **main**, you’ll be able to view a complete list of deployments specific to that environment.\n\n![python flask server - main job listing](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098438/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098438048.png)\n\n## Next steps\n\nTo get started with developing your Flask application, try adding another endpoint. For instance, in your `main.py` file, you can add a **/bye** endpoint as shown below:\n\n```\n@app.route(\"/\")\ndef hello_world():\n    \"\"\"Example Hello World route.\"\"\"\n    name = os.environ.get(\"NAME\", \"World\")\n    return f\"Hello {name}!\"\n\n```\n\nPush the changes to the repo, and watch the `deploy-to-cloud-run` job deploy the updates. Once it’s complete, go back to the Service URL and navigate to the **/bye** endpoint to see the new functionality in action.\n\n## Clean up\n\nTo prevent incurring charges on your Google Cloud account for the resources used in this tutorial, you can either delete the specific resources or delete the entire Google Cloud project. 
For detailed instructions, refer to the [cleanup guide](https://docs.gitlab.com/ee/tutorials/create_and_deploy_web_service_with_google_cloud_run_component/#clean-up).\n\n> For more DevSecOps capabilities, [start a free 60-day trial of GitLab Ultimate and GitLab Duo](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com/blog/%2F).",[9,1865,1248,1000],"cloud native",{"slug":1867,"featured":90,"template":684},"fast-python-flask-server-deployment-with-gitlab-google-cloud","content:en-us:blog:fast-python-flask-server-deployment-with-gitlab-google-cloud.yml","Fast Python Flask Server Deployment With Gitlab Google Cloud","en-us/blog/fast-python-flask-server-deployment-with-gitlab-google-cloud.yml","en-us/blog/fast-python-flask-server-deployment-with-gitlab-google-cloud",{"_path":1873,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1874,"content":1880,"config":1886,"_id":1888,"_type":13,"title":1889,"_source":15,"_file":1890,"_stem":1891,"_extension":18},"/en-us/blog/from-code-to-production-a-guide-to-continuous-deployment-with-gitlab",{"title":1875,"description":1876,"ogTitle":1875,"ogDescription":1876,"noIndex":6,"ogImage":1877,"ogUrl":1878,"ogSiteName":669,"ogType":670,"canonicalUrls":1878,"schema":1879},"From code to production: A guide to continuous deployment with GitLab","Learn how to get started building a robust continuous deployment pipeline in GitLab. 
Follow these step-by-step instructions, practical examples, and best practices.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659478/Blog/Hero%20Images/REFERENCE_-_Use_this_page_as_a_reference_for_thumbnail_sizes.png","https://about.gitlab.com/blog/from-code-to-production-a-guide-to-continuous-deployment-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"From code to production: A guide to continuous deployment with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Benjamin Skierlak\"},{\"@type\":\"Person\",\"name\":\"James Wormwell\"}],\n        \"datePublished\": \"2025-01-28\",\n      }",{"title":1875,"description":1876,"authors":1881,"heroImage":1877,"date":1510,"body":1884,"category":678,"tags":1885},[1882,1883],"Benjamin Skierlak","James Wormwell","Continuous deployment is a game-changing practice that enables teams to deliver value faster, with higher confidence. However, diving into advanced deployment workflows — such as GitOps, container orchestration with Kubernetes, or dynamic environments — can be intimidating for teams just starting out.\n\nAt GitLab, we're committed to making delivery seamless and scalable. By enabling teams to focus on the fundamentals, we empower them to build a strong foundation that supports growth into more complex strategies over time. This guide provides essential steps to begin implementing continuous deployment with GitLab, laying the foundation for your long-term success.\n\n## Start with a workflow plan\n\nBefore diving into the technical implementation, take time to map out your deployment workflow. Success lies in careful planning and a methodical approach.\n\n### Artifact management strategy\n\nIn the context of continuous deployment, artifacts are the packaged outputs of your build process that need to be stored, versioned, and deployed. 
These could be:\n\n- container images for your applications\n- packages\n- compiled binaries or executables\n- libraries\n- configuration files\n- documentation packages\n- other artifacts\n\nEach type of artifact plays a specific role in your deployment process. For example, a typical web application might generate:\n\n- a container image for the backend service\n- a ZIP archive of compiled frontend assets\n- SQL files for database changes\n- environment-specific configuration files\n\nManaging these artifacts effectively is crucial for successful deployments. Here's how to approach artifact management.\n\n#### Artifacts and releases versioning strategies\n\nA best practice to get you started with a clean structure is to establish a clear versioning strategy for your artifacts. When creating releases:\n\n- Use semantic versioning (major.minor.patch) for release tags\n  - Example: `myapp:1.2.3` for a stable release\n  - Major version changes (2.0.0) for breaking changes\n  - Minor version changes (1.3.0) for new features\n  - Patch version changes (1.2.4) for bug fixes\n- Maintain a 'latest' tag for the most recent stable version\n  - Example: `myapp:latest` for automated deployments\n- Include commit SHA for precise version tracking\n  - Example: `myapp:1.2.3-abc123f` for debugging\n- Consider branch-based tags for development environments\n  - Example: `myapp:feature-user-auth` for feature testing\n\n#### Build artifacts retention\n\nImplement defined retention rules:\n\n- Set explicit expiration timeframes for temporary artifacts\n- Define which artifacts need permanent retention\n- Configure cleanup policies to manage storage\n\n#### Registry access and authentication\n\nSecure your artifacts with proper access controls:\n\n- Implement Personal Access Tokens for developer access\n- Configure CI/CD variables for pipeline authentication\n- Set up proper access scopes\n\n### Environment strategy\n\nConsider your environments early, as they shape your entire 
deployment pipeline:\n\n- Development, staging, and production environment configurations\n- Environment-specific variables and secrets\n- Access controls and protection rules\n- Deployment tracking and monitoring approach\n\n### Deployment targets\n\nBe intentional as to where and how you'll deploy, these decisions matter and the benefits and drawbacks of each should be consider:\n\n- Infrastructure requirements (VMs, containers, cloud services)\n- Network access and security configurations\n- Authentication mechanisms (SSH keys, access tokens)\n- Resource allocation and scaling considerations\n\nWith our strategy defined and foundational decisions made, we can now translate these plans into a working pipeline. We'll build a practical example that demonstrates these concepts, starting with a simple application and progressively adding deployment capabilities.\n\n## Implementing your CD pipeline\n\n### A step-by-step example\n\nLet's walk through implementing a basic continuous deployment pipeline for a web application. We'll use a simple HTML application as an example, but these principles apply to any type of application. We’re also going to deploy our application as a Docker image on a simple virtual machine. This will allow us to lean on a curated image with minimum dependencies, and to ensure no environment specific requirements are unintentionally brought in. By working on a virtual machine, we won’t be leveraging GitLab’s native integrations, allowing us to work on an easier but less scalable setup to begin with.\n\n#### Prerequisites\n\nIn this example, we’ll aim to containerize an application that we’ll run on a virtual machine hosted on a cloud provider. We’ll also test this application locally on our machine. 
This list of prerequisites is only needed for this scenario.\n\n##### Virtual machine setup\n\n- Provision a VM in your preferred cloud provider (e.g., GCP, AWS, Azure)\n- Configure network rules to allow access on ports 22, 80, and 443\n- Record the machine's public IP address for deployment\n\n##### Set up SSH authentication:\n\n- Generate a public/private key pair for the machine\n- In GitLab, go to **Settings > CI/CD > Variables**\n- Create a variable called `GITLAB_KEY`\n- Set Type to \"File\" (required for SSH authentication)\n- Paste the private key in the Value field\n- Define a USER variable, this is the user logging in and running the scripts on your VM\n\n##### Configure deployment variables\n\n- Create variables for your deployment targets:\n  - `STAGING_TARGET`: Your staging server IP/domain\n  - `PRODUCTION_TARGET`: Your production server IP/domain\n\n##### Local development setup\n\n- Install Docker on your local machine for testing deployments\n\n##### GitLab Container Registry access\n\n- Locate your registry path:\n  - Navigate to **Deploy > Container Registry**\n  - Copy the registry path (e.g., registry.gitlab.com/group/project)\n- Set up authentication:\n  - Go to **Settings > Access Tokens**\n  - Create a new token with registry access\n  - Token expiration: Maximum 1 year\n  - Save the token securely\n- Configure local registry access:\n\n```\ndocker login registry.gitlab.com\n# The username if you are using a PAT is gitlab-ci-token\n# Password: your-access-token\n```\n\n#### 1. Create your application\n\nStart with a basic web application. For our example, we're using a simple HTML page:\n\n```\n\u003C!-- index.html -->\n\u003Chtml>\n  \u003Chead>\n    \u003Cstyle>\n      body {\n        background-color: #171321; /* GitLab dark */\n      }\n    \u003C/style>\n  \u003C/head>\n  \u003Cbody>\n    \u003C!-- Your content here -->\n  \u003C/body>\n\u003C/html>\n```\n\n#### 2. 
So for that same commit on main branch, `TAG_COMMIT` becomes: `registry.gitlab.com/your-group/your-project/main:abc123def456`.
Publish to the container registry\n\nAdd the publish job to your pipeline:\n\n```\npublish:\n  stage: publish\n  image: docker:latest\n  services:\n    - docker:dind\n  script:\n    - docker build -t $TAG_LATEST -t $TAG_COMMIT .\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - docker push $TAG_LATEST\n    - docker push $TAG_COMMIT\n```\n\nThis job:\n\n- Uses Docker-in-Docker to build images\n- Creates two tagged versions of your image\n- Authenticates with the GitLab registry\n- Pushes both versions to the registry \n\nNow that our images are safely stored in the registry, we can focus on deploying them to our target environments. Let's start with local testing to validate our setup before moving to production deployments.\n\n#### 5. Deploy to your environment\n\nBefore deploying to production, you can test locally. We just published our image to the GitLab repository, which we’ll pull locally. If you’re unsure of the exact path, navigate to **Deploy > Container Registry**, and you should see an icon to copy the path of your image at the end of the line for the container image you want to test.\n\n```\ndocker login registry.gitlab.com \ndocker run -p 80:80 registry.gitlab.com/your-project-path/main:latest\n```\n\nBy doing so you should be able to access your application locally on your localhost address through your web browser.\n\nYou can now add a deployment job to your pipeline:\n\n```\ndeploy:\n  stage: deploy\n  image: alpine:latest\n  script:\n    - chmod 400 $GITLAB_KEY\n    - apk add openssh-client\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - ssh -i $GITLAB_KEY -o StrictHostKeyChecking=no $USER@$TARGET_SERVER \n      docker pull $TAG_COMMIT &&\n      docker rm -f myapp || true &&\n      docker run -d -p 80:80 --name myapp $TAG_COMMIT\n```\n\nThis job:\n\n- Sets up SSH access to your deployment target\n- Pulls the latest image\n- Removes any existing container\n- Deploys the new 
version\n\n#### 6. Track deployments\n\nEnable deployment tracking by adding environment configuration:\n\n```\ndeploy:\n  environment:\n    name: production\n    url: https://your-application-url.com \n```\n\nThis creates an environment object in GitLab's **Operate > Environments** section, providing:\n\n- Deployment history\n- Current deployment status\n- Quick access to your application\n\nWhile a single environment pipeline is a good starting point, most teams need to manage multiple environments for proper testing and staging. Let's expand our pipeline to handle this more realistic scenario.\n\n#### 7. Set up multiple environments\n\nFor a more robust pipeline, configure staging and production deployments:\n\n```\nstages:\n  - publish\n  - staging\n  - release\n  - version\n  - production\n\nstaging:\n  stage: staging\n  rules:\n    - if: $CI_COMMIT_BRANCH == \"main\" && $CI_COMMIT_TAG == null\n  environment:\n    name: staging\n    url: https://staging.your-app.com\n  # deployment script here\n\nproduction:\n  stage: production\n  rules:\n    - if: $CI_COMMIT_TAG\n  environment:\n    name: production\n    url: https://your-app.com\n  # deployment script here\n```\n\nThis setup:\n\n- Deploys to staging from your main branch\n- Uses GitLab tags to trigger production deployments\n- Provides separate tracking for each environment\n\nHere and in our next step, we’re leveraging a very useful GitLab feature: tags. By manually creating a tag in the **Code > Tags** section, the `$CI_COMMIT_TAG` gets created, which allows us to trigger jobs accordingly.\n\n#### 8. Create automated release notes\n\nWe'll be using GitLab's release capabilities through our CI/CD pipeline. 
First, update your stages in `.gitlab-ci.yml`:\n\n```\nstages:\n\n- publish\n- staging\n- release # New stage for releases\n- version\n- production\n```\n\nNext, add the release job:\n\n```\nrelease_job:\n  stage: release\n  image: registry.gitlab.com/gitlab-org/release-cli:latest\n  rules:\n    - if: $CI_COMMIT_TAG                  # Only run when a tag is created\n  script:\n    - echo \"Creating release for $CI_COMMIT_TAG\"\n  release:                                # Release configuration\n    name: 'Release $CI_COMMIT_TAG'\n    description: 'Release created from $CI_COMMIT_TAG'\n    tag_name: '$CI_COMMIT_TAG'           # The tag to create\n    ref: '$CI_COMMIT_TAG'                # The tag to base release on\n```\n\nYou can enhance this by adding links to your container images:\n\n```\nrelease:\n  name: 'Release $CI_COMMIT_TAG'\n  description: 'Release created from $CI_COMMIT_TAG'\n  tag_name: '$CI_COMMIT_TAG'\n  ref: '$CI_COMMIT_TAG'\n  assets:\n    links:\n      - name: 'Container Image'\n        url: '$CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG'\n        link_type: 'image'\n```\n\nFor meaningful automated release notes:\n\n- Use conventional commits (feat:, fix:, etc.)\n- Include issue numbers (#123)\n- Separate subject from body with blank line\n\nIf you want custom release notes with deployment info:\n\n```\nrelease_job:\n  script:\n    - |\n      DEPLOY_TIME=$(date '+%Y-%m-%d %H:%M:%S')\n      CHANGES=$(git log $(git describe --tags --abbrev=0 @^)..@ --pretty=format:\"- %s\")\n      cat > release_notes.md \u003C\u003C EOF\n      ## Deployment Info\n      - Deployed on: $DEPLOY_TIME\n      - Environment: Production\n      - Version: $CI_COMMIT_TAG\n\n      ## Changes\n      $CHANGES\n\n      ## Artifacts\n      - Container Image: \\`$CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG\\`\n      EOF\n  release:\n    description: './release_notes.md'\n```\n\nOnce configured, releases will be created automatically when you create a Git tag. 
You can view them in GitLab under **Deploy > Releases**.\n\n#### 9. Put it all together\n\nThis is what our final YAML file looks like:\n\n```\nvariables:\n  TAG_LATEST: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_NAME:latest\n  TAG_COMMIT: $CI_REGISTRY_IMAGE/$CI_COMMIT_REF_NAME:$CI_COMMIT_SHA\n  STAGING_TARGET: $STAGING_TARGET    # Set in CI/CD Variables\n  PRODUCTION_TARGET: $PRODUCTION_TARGET  # Set in CI/CD Variables\n\nstages:\n  - publish\n  - staging\n  - release\n  - version\n  - production\n\n# Build and publish to registry\npublish:\n  stage: publish\n  image: docker:latest\n  services:\n    - docker:dind\n  rules:\n    - if: $CI_COMMIT_BRANCH == \"main\" && $CI_COMMIT_TAG == null\n  script:\n    - docker build -t $TAG_LATEST -t $TAG_COMMIT .\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - docker push $TAG_LATEST\n    - docker push $TAG_COMMIT\n\n# Deploy to staging\nstaging:\n  stage: staging\n  image: alpine:latest\n  rules:\n    - if: $CI_COMMIT_BRANCH == \"main\" && $CI_COMMIT_TAG == null\n  script:\n    - chmod 400 $GITLAB_KEY\n    - apk add openssh-client\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - ssh -i $GITLAB_KEY -o StrictHostKeyChecking=no $USER@$STAGING_TARGET \"\n        docker pull $TAG_COMMIT &&\n        docker rm -f myapp || true &&\n        docker run -d -p 80:80 --name myapp $TAG_COMMIT\"\n  environment:\n    name: staging\n    url: http://$STAGING_TARGET\n\n# Create release\nrelease_job:\n  stage: release\n  image: registry.gitlab.com/gitlab-org/release-cli:latest\n  rules:\n    - if: $CI_COMMIT_TAG\n  script:\n    - |\n      DEPLOY_TIME=$(date '+%Y-%m-%d %H:%M:%S')\n      CHANGES=$(git log $(git describe --tags --abbrev=0 @^)..@ --pretty=format:\"- %s\")\n      cat > release_notes.md \u003C\u003C EOF\n      ## Deployment Info\n      - Deployed on: $DEPLOY_TIME\n      - Environment: Production\n      - Version: $CI_COMMIT_TAG\n\n      ## Changes\n      $CHANGES\n\n    
  ## Artifacts\n      - Container Image: \\`$CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG\\`\n      EOF\n  release:\n    name: 'Release $CI_COMMIT_TAG'\n    description: './release_notes.md'\n    tag_name: '$CI_COMMIT_TAG'\n    ref: '$CI_COMMIT_TAG'\n    assets:\n      links:\n        - name: 'Container Image'\n          url: '$CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG'\n          link_type: 'image'\n\n# Version the image with release tag\nversion_job:\n  stage: version\n  image: docker:latest\n  services:\n    - docker:dind\n  rules:\n    - if: $CI_COMMIT_TAG\n  script:\n    - docker pull $TAG_COMMIT\n    - docker tag $TAG_COMMIT $CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - docker push $CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG\n\n# Deploy to production\nproduction:\n  stage: production\n  image: alpine:latest\n  rules:\n    - if: $CI_COMMIT_TAG\n  script:\n    - chmod 400 $GITLAB_KEY\n    - apk add openssh-client\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - ssh -i $GITLAB_KEY -o StrictHostKeyChecking=no $USER@$PRODUCTION_TARGET \"\n        docker pull $CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG &&\n        docker rm -f myapp || true &&\n        docker run -d -p 80:80 --name myapp $CI_REGISTRY_IMAGE/main:$CI_COMMIT_TAG\"\n  environment:\n    name: production\n    url: http://$PRODUCTION_TARGET\n```\n\nThis complete pipeline:\n\n- Publishes images to the registry (main branch)\n- Deploys to staging (main branch)\n- Creates releases (on tags)\n- Versions images with release tags\n- Deploys to production (on tags)\n\nKey benefits:\n\n- Clean reproducible, local development and testing environment\n- Clear path to production environments with structure to build confidence in what is deployed\n- Pattern to recover from unexpected failures, etc.\n- Ready to scale/adopt more complex deployment strategies\n\n### Best practices\n\nThroughout implementation, maintain these 
principles:\n\n- Document everything, from variable usage to deployment procedures\n- Use GitLab's built-in features (environments, releases, registry)\n- Implement proper access controls and security measures\n- Plan for failure with robust rollback procedures\n- Keep your pipeline configurations DRY (Don't Repeat Yourself)\n\n## Scale your deployment strategy\n\nWhat next? Here are some aspects to consider as your continuous deployment strategy matures.\n\n### Advanced security measures\n\nEnhance security through:\n\n- Protected environments with restricted access\n- Required approvals for production deployments\n- Integrated security scanning\n- Automated vulnerability assessments\n- Branch protection rules for deployment-related changes\n\n### Progressive delivery strategies\n\nImplement advanced deployment strategies:\n\n- Feature flags for controlled rollouts\n- Canary deployments for risk mitigation\n- Blue-green deployment strategies\n- A/B testing capabilities\n- Dynamic environment management\n\n### Monitoring and optimization\n\nEstablish robust monitoring practices:\n\n- Track deployment metrics\n- Set up performance monitoring\n- Configure deployment alerts\n- Establish deployment SLOs\n- Regular pipeline optimization\n\n## Why GitLab?\n\nGitLab's continuous deployment capabilities make it a standout choice for modern deployment workflows. The platform excels in streamlining the path from code to production, offering built-in container registry, environment management, and deployment tracking all within a single interface. GitLab's environment-specific variables, deployment approval gates, and rollback capabilities provide the security and control needed for production deployments, while features like review apps and feature flags enable progressive delivery approaches. 
Sign up for a [free, 60-day trial of GitLab Ultimate](https://about.gitlab.com/free-trial/devsecops/) to get started with continuous deployment today.
more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098858/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_282096522_securitycompliance.jpeg_1750098857843.jpg","https://about.gitlab.com/blog/get-to-know-the-security-and-governance-updates-in-gitlab-17-17-1","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Get to know the security and governance updates in GitLab 17, 17.1\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2024-07-17\",\n      }",{"title":1895,"description":1896,"authors":1901,"heroImage":1897,"date":1902,"body":1903,"category":814,"tags":1904},[1767],"2024-07-17","With every GitLab release we enhance and optimize security and governance solutions to ensure customers have the tools they need to produce secure and compliant software. Our values of [iteration](https://handbook.gitlab.com/handbook/values/#iteration) and [results for customers](https://handbook.gitlab.com/handbook/values/#results) drive our release cycles, and GitLab 17 is no exception. We have been releasing every month for the past 153 months straight!\n\nIn this article, you'll learn my favorite security and governance enhancements released in GitLab 17 and 17.1 and how they can benefit your organization’s security requirements. 
\n\n- [SAST analyzer streamlining](#sast-analyzer-streamlining)\n- [Android dependency scanning](#android-dependency-scanning)\n- [Custom roles and granular security permissions updates](#custom-roles-and-granular-security-permissions-updates)\n- [Secret detection updates](#secret-detection-updates)\n- [Container registry updates](#container-registry-updates)\n- [API security scanning updates](#api-security-scanning-updates)\n\n## SAST analyzer streamlining\n\nGitLab provides static application security testing ([SAST](https://docs.gitlab.com/ee/user/application_security/sast/)) to examine your source code for known vulnerabilities, detecting vulnerabilities such as SQL injections and cross-site scripting. When SAST kicks off, the programming language used is auto-detected and the appropriate scanner is loaded.\n\nIn GitLab 17, SAST scans the same languages, but now with fewer analyzers, [offering a simpler and more customizable experience](https://about.gitlab.com/releases/2024/05/16/gitlab-17-0-released/#streamlined-sast-analyzer-coverage-for-more-languages). Language-specific analyzers have been replaced with GitLab-managed rules in the Semgrep-based analyzer for the following languages:\n\n- C/C++\n- Swift (iOS)\n- Java/Kotlin (Android)\n- Node.js\n- PHP\n- Ruby\n\nHaving one analyzer for many different languages makes configurations and writing rules easier than ever. 
See the [supported languages and frameworks documentation](https://docs.gitlab.com/ee/user/application_security/sast/#supported-languages-and-frameworks) for more information.\n\nWatch this video to learn more:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/_80z6mZmzek?si=i9yPQttxuwVcb7Ye\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Android dependency scanning\n\nIn modern software development, many applications are built from multiple dependencies that are best at performing their intended function. For example, rather than writing a YAML parser, a developer will use a library that parses YAML. This allows developers to focus on the main goal of their application, rather than spending time on utility functions.\n\nWhile the use of dependencies speeds up efficiency, they can be difficult to manage and could introduce vulnerabilities to your application. For this, GitLab provides [dependency scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/), which analyzes dependencies for known vulnerabilities. \n\nMany organizations are using dependencies even when creating native mobile applications. In GitLab 17, we introduced [Android dependency scanning](https://about.gitlab.com/releases/2024/05/16/gitlab-17-0-released/#dependency-scanning-support-for-android) to bridge the gap. Android dependency scanning can be easily added as a [CI/CD catalog component](https://gitlab.com/explore/catalog/components/android-dependency-scanning) – just include the following code in your `.gitlab-ci.yml`:\n\n```\ninclude:\n  - component: gitlab.com/components/android-dependency-scanning/component@1.0.0\n    inputs:\n      stage: test\n```\n\nThis job will also generate a CycloneDX software bill of materials ([SBOM](https://about.gitlab.com/blog/the-ultimate-guide-to-sboms/)) report, which may be necessary for compliance. 
Learn more about SBOMs and dependencies with [our ultimate guide to SBOMs](https://about.gitlab.com/blog/the-ultimate-guide-to-sboms/).
Also, for those using GitLab self-managed, [custom roles are now managed at the instance level](https://about.gitlab.com/releases/2024/05/16/gitlab-17-0-released/#manage-custom-roles-at-self-managed-instance-level), allowing administrators to create the roles, and group owners to assign them.\n\nWatch this video to learn more:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/glvvCoc2hkc?si=dl_SwQ7tyVdzirH5\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThere have also been [several UX improvements](https://about.gitlab.com/releases/2024/05/16/gitlab-17-0-released/#ux-improvements-to-custom-roles) added to this feature along with the introduction of the following permissions:\n\n- assign security policy links\n- manage and assign compliance frameworks\n- manage webhooks\n- manage push rules\n- manage merge request settings (17.1)\n- manage integrations (17.1)\n- manage deploy tokens (17.1)\n- read CRM contacts (17.1)\n\nGitLab releases usually include new permissions to further enable the implementation of the principle of least privilege. To learn more about the available granular security permissions, [visit the available custom permission documentation](https://docs.gitlab.com/ee/user/custom_roles/abilities.html).\n\n## Secret detection updates\n\nDevelopers may accidentally commit secrets like keys or API tokens to Git repositories from time to time. After a sensitive value is pushed to a remote repository, anyone with access to the repository can impersonate the authorized user of the secret and cause mayhem. 
When this occurs, the exposed secrets must be revoked and replaced to address this risk, which can cause system downtime.
![Push block due to detected secret](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098874/Blog/Content%20Images/Blog/Content%20Images/2_aHR0cHM6_1750098873858.png)
The container registry UI has been updated to include accurate `last_published_at` timestamps, putting critical data at the top of view.
We are working to detect all vulnerabilities in the registry itself in future iterations.
fuzzing](https://about.gitlab.com/releases/2024/06/20/gitlab-17-1-released/#fuzz-testing-analyzer-updates) to allow:\n- creation of a comma-separated list of HTTP success status codes that define whether the job has passed\n- disabling of waiting for the target API to become available before scanning begins\n- specifying the expected status code for the API target availability check\n\nWatch this video to learn more:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/CcyOoBgSPUU?si=hAMQfmUTlLRKhPSg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Learn more about other enhancements\n\nGitLab 17 and 17.1 also introduced several other security and governance features and enhancements, too many to cover in this blog. Some of these features include:\n\n- [Updated filtering on the Vulnerability Report](https://about.gitlab.com/releases/2024/05/16/gitlab-17-0-released/#updated-filtering-on-the-vulnerability-report): You can now use the filtered search component to filter the Vulnerability Report by any combination of status, severity, tool, or activity.\n- [Toggle merge request approval policies to fail open or fail closed](https://about.gitlab.com/releases/2024/05/16/gitlab-17-0-released/#toggle-merge-request-approval-policies-to-fail-open-or-fail-closed): A new fail open option for merge request approval policies to offer flexibility to teams who want to ease the transition to policy enforcement as they roll out controls in their organization.\n- [Optional configuration for policy bot comment](https://about.gitlab.com/releases/2024/05/16/gitlab-17-0-released/#optional-configuration-for-policy-bot-comment): The security policy bot posts a comment on merge requests when they violate a policy to help users understand when policies are enforced on their project, when evaluation is completed, and if there are any violations blocking an MR, with 
guidance to resolve them.\n- [Merge request approval policies fail open/closed (policy editor)](https://about.gitlab.com/releases/2024/06/20/gitlab-17-1-released/#merge-request-approval-policies-fail-openclosed-policy-editor): Within the policy editor users can now toggle security policies to fail open or fail closed. This enhancement extends the YAML support to allow for simpler configuration within the policy editor view.\n- [Project owners receive expiring access token notifications](https://about.gitlab.com/releases/2024/06/20/gitlab-17-1-released/#project-owners-receive-expiring-access-token-notifications): Both project owners and maintainers with direct membership now receive email notifications when their project access tokens are close to expiring. This helps keep more people informed about upcoming token expiration.\n\nThese are some of the newest security and compliance enhancements provided in GitLab 17 and 17.1 that can be applied to strengthen your organization's security posture! 
To learn more about GitLab and the other ways we can strengthen your organization's security throughout all parts of the software development lifecycle, check out the following links:\n\n- [GitLab Security and Compliance](https://about.gitlab.com/solutions/security-compliance/)\n- [GitLab Application Security documentation](https://docs.gitlab.com/ee/user/application_security/)\n- [GitLab security and governance overview video](https://youtu.be/Y4RC-SW8Ric)\n- [GitLab Complete DevSecOps demo](https://gitlab.com/gitlab-da/tutorials/security-and-governance/devsecops/simply-vulnerable-notes)\n- [GitLab Complete DevSecOps tutorial](https://gitlab-da.gitlab.io/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/) \n- [Ultimate guide to the principle of least privilege](https://about.gitlab.com/blog/the-ultimate-guide-to-least-privilege-access-with-gitlab/)\n",[814,678,9,478,680],{"slug":1906,"featured":90,"template":684},"get-to-know-the-security-and-governance-updates-in-gitlab-17-17-1","content:en-us:blog:get-to-know-the-security-and-governance-updates-in-gitlab-17-17-1.yml","Get To Know The Security And Governance Updates In Gitlab 17 17 1","en-us/blog/get-to-know-the-security-and-governance-updates-in-gitlab-17-17-1.yml","en-us/blog/get-to-know-the-security-and-governance-updates-in-gitlab-17-17-1",{"_path":1912,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1913,"content":1919,"config":1923,"_id":1925,"_type":13,"title":1926,"_source":15,"_file":1927,"_stem":1928,"_extension":18},"/en-us/blog/getting-started-with-gitlab-how-to-import-your-projects-to-gitlab",{"title":1914,"description":1915,"ogTitle":1914,"ogDescription":1915,"noIndex":6,"ogImage":1916,"ogUrl":1917,"ogSiteName":669,"ogType":670,"canonicalUrls":1917,"schema":1918},"Getting started with GitLab: How to import your projects to GitLab","Learn how to import your projects from various sources, including Bitbucket, Gitea, GitHub, and GitLab 
Self-Managed.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097248/Blog/Hero%20Images/Blog/Hero%20Images/blog-getting-started-with-gitlab-banner-0497-option4-fy25_cFwd8DYFLekdnOLmbbChp_1750097247785.png","https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-import-your-projects-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting started with GitLab: How to import your projects to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2025-01-28\",\n      }",{"title":1914,"description":1915,"authors":1920,"heroImage":1916,"date":1510,"body":1921,"category":678,"tags":1922},[1570],"*Welcome to our \"Getting started with GitLab\" series, where we help newcomers get familiar with the GitLab DevSecOps platform.*\n\nKnowing how to import your projects to GitLab is an essential skill to make the most of the GitLab DevSecOps platform. You’ve [set up your account](https://university.gitlab.com/pages/getting-started), invited users, and [organized](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-manage-users/) them based on your use case or team structure. Now, you need to bring your existing projects into GitLab and start collaborating. These projects can be local files on your computer or hosted on a different source code management platform. Let's explore the options.\n\n## Importing local project files\n\nYou don't want to start from scratch every time you import a project. Follow these steps to get into GitLab existing legacy projects or applications that exist without version control or use version control.\n\n### Git project\n\n1. 
If Git is [already initiated](https://docs.gitlab.com/ee/topics/git/commands.html#git-init) in your local project, create a new project in GitLab and obtain the SSH or HTTPS URL by clicking on the **Code** button in the top right corner of your project page.\n\n![create a new project in GitLab with SSH/HTTPS URLs](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097254/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750097252717.png)\n\n2. Switch to your terminal and ensure you are in your project folder:\n\n```bash  \ncd /project_folder  \n```\n\n3. Back up your existing [Git origin](https://git-scm.com/book/ms/v2/Git-Basics-Working-with-Remotes):\n\n```bash\n\ngit remote rename origin old-origin\n\n```\n\n4. Add the [GitLab remote](https://git-scm.com/book/ms/v2/Git-Basics-Working-with-Remotes) URL for the new origin, when using SSH:\n\n```bash  \ngit remote add origin git@gitlab.com:gitlab-da/playground/abubakar/new-test-repo.git  \n```\n\nAnd for HTTPS: \n\n```bash  \ngit remote add origin https://gitlab.com/gitlab-da/playground/abubakar/new-test-repo.git  \n```\n\n5. Then push all existing [branches](https://docs.gitlab.com/ee/user/project/repository/branches/) and [tags](https://docs.gitlab.com/ee/user/project/repository/tags/) to GitLab:\n\n```bash  \ngit push --set-upstream origin --all  \ngit push --set-upstream origin --tags  \n```\n\nAll your project files, branches, and tags will be pushed to GitLab and you can start collaborating.\n\n### Non-Git project\n\nAlternatively, if you have not initiated Git in your project, you will need to initialize Git, commit existing files, and push to GitLab as follows:\n\n```bash  \ngit init --initial-branch=main  \ngit remote add origin git@gitlab.com:gitlab-da/playground/abubakar/new-test-repo.git  \ngit add .  
\ngit commit -m \"Initial commit\"  \ngit push --set-upstream origin main  \n```\n\n## Importing from online sources\n\nIf you have your project on GitLab.com or other platforms and you want to move it to another GitLab instance (like a self-managed instance) or from another platform to GitLab.com, GitLab provides the import project feature when you want to create a new project.\n\n![Create a new project screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097253/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750097252718.png)\n\nImporting a project migrates the project files and some other components of the project depending on the source. You can import from different sources like Bitbucket, GitHub, Gitea, and a GitLab instance, among other sources. Import sources are enabled by default on GitLab.com, but they need to be [enabled for self-managed](https://docs.gitlab.com/ee/administration/settings/import_and_export_settings.html#configure-allowed-import-sources) by an administrator. We will look at a few of these sources in the following sections.\n\n![Import project from third-party sources](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097253/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097252719.png)\n\n## GitLab sources\n\nYou can export projects from GitLab.com and GitLab Self-Managed instances using the Export project feature in a project’s settings. \n\n![Export project screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097253/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750097252720.png)\n\nTo access it:\n\n- Go to your project’s settings and click into the **General** area.\n- Scroll to and **Expand Advanced** section.\n- Select **Export project**.\n- A notification will be shown stating: “Project export started. 
A download link will be sent by email and made available on this page.”\n- After the export is generated, you can follow the link contained in the email or refresh the project settings page to reveal the “Download export” option.\n\n### Importing the project\n\n![Import an exported GitLab project](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097253/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750097252722.png)\n\n- Click on the **New project** button in your target GitLab instance.  \n- Select **Import project** and click on **GitLab Export** in the list of import sources.  \n- Specify a project name and select the export file, then click **Import project**.  \n- An \"import in progress\" page will be shown and once complete, you will be redirected to the imported project.\n\nDepending on the size of your project, the import time may vary. It's important to note that not everything in a project might be exported and a few things might change after import. Review the [documentation](https://docs.gitlab.com/ee/user/project/settings/import_export.html#export-a-project-and-its-data) to understand the limitations. If you want to migrate a whole group instead of individual projects, the [Direct Transfer method](https://docs.gitlab.com/ee/user/group/import/index.html) is recommended; this creates a copy of an entire group.\n\n## Third-party providers\n\nGitLab supports importing from Bitbucket Cloud, Bitbucket Server, FogBugz, Gitea, and GitHub. The import process is similar across all the supported third parties — the main difference is in the method of authentication. 
Let's look at a few of them.\n\n### GitHub\n\n![Authenticate with GitHub screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097253/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097252723.png)\n\nThere are three methods to import GitHub projects into GitLab:\n\n- [Using GitHub OAuth](https://docs.gitlab.com/ee/user/project/import/github.html#use-github-oauth)\n- [Using a GitHub personal access token](https://docs.gitlab.com/ee/user/project/import/github.html#use-a-github-personal-access-token)\n- [Using the API](https://docs.gitlab.com/ee/user/project/import/github.html#use-the-api)\n\nImporting using GitHub OAuth and personal access token are similar. The difference lies in how you authorize GitLab to access your repositories. The OAuth method is easier because you only need to click on the “Authorize with GitHub” button and you are redirected to your GitHub account to authorize the connection. Then the list of your projects is loaded for you to pick those you want to import.\n\n![Import repositories from GitHub screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097253/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097252725.png)\n\nAlternatively, you will need to generate a GitHub personal access token, selecting the `repo` and `read:org` scopes, and then provide it on the \"Import\" page.  
For API imports, you can use the same personal access token with our [Import REST API endpoints](https://docs.gitlab.com/ee/api/import.html#import-repository-from-github) in your script or application.\n\nIn this demo, GitLab Senior Developer Advocate Fernando Diaz explains how to import a project from GitHub using the OAuth method:\n\n\u003C!-- blank line -->  \n\u003Cfigure class=\"video_container\"> \n  \u003Ciframe src=\"https://www.youtube.com/embed/0Id5oMl1Kqs?si=esF6wbz2j2JlhDVL\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>  \n\u003C/figure>\n\u003C!-- blank line -->\n\nYou can learn about prerequisites, known issues, importing from GitHub Enterprise, and other valuable information from the GitLab [import documentation](https://docs.gitlab.com/ee/user/project/import/github.html).\n\n### Bitbucket\n\nImporting projects from Bitbucket is similar to importing them from GitHub. While using OAuth is applicable to [Bitbucket Cloud](https://docs.gitlab.com/ee/user/project/import/bitbucket.html), the SaaS version of Bitbucket, you'll need to provide a URL, username, and personal access token for [Bitbucket Server](https://docs.gitlab.com/ee/user/project/import/bitbucket_server.html), the enterprise self-hosted version. 
Clicking on the Bitbucket Cloud option on the \"Import\" screen automatically takes you to Atlassian authentication for Bitbucket.\n\n![Import project from BitBucket](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097253/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097252726.png)\n\nYou can also import Bitbucket projects using the [GitLab Import API](https://docs.gitlab.com/ee/api/import.html).\n\n### Gitea\n\n![Import project from Gitea](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097253/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097252727.png)\n\nImporting projects from [Gitea](https://docs.gitlab.com/ee/user/project/import/gitea.html) requires the creation of a [personal access token](https://docs.gitea.com/next/development/api-usage#authentication-via-the-api) on the Gitea platform and providing it along with the Gitea server URL on the GitLab import page. OAuth authentication is not supported. \n\n### Generic remote Git repository\n\n![Import project from remote Git repository](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097253/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097252728.png)\n\nWhere your Git provider is not supported or import is not possible using the supported methods, a repository can be imported using its accessible `https://` or `git://` URL.  If it's not publicly accessible, you will provide the repository URL along with username and password (or access token where applicable due to multifactor authentication).\n\nThis method can also be used for maintaining a copy of a remote project and keeping it in sync, i.e., [mirroring](https://docs.gitlab.com/ee/user/project/repository/mirror/). Mirroring allows you to maintain repositories across different platforms and keep them synced. 
This can be to separate private and public access to a project while ensuring both ends have the same copy, which is useful when open-sourcing internal projects. It can also be used when working with contractors and both parties use different platforms, and access to the codebase is necessary on both ends. \n\n## Summary\n\nImporting and migrating between GitLab instances and from other sources is an important process that needs to be planned to ensure the expectations are clear on what gets imported and with which method. While most third-party methods import project items, including files, issues, and merge requests, some methods have known issues and limitations. The [GitLab import section](https://docs.gitlab.com/ee/user/project/import/) of the documentation has detailed information on all the supported methods that can help you plan your migration.   \n\n> #### Want to take your learning to the next level? [Sign up for GitLab University courses](https://university.gitlab.com/). Or you can get going right away with [a free 60-day trial of GitLab Ultimate](https://about.gitlab.com/free-trial/devsecops/).\n\n## \"Getting started with GitLab\" series\n\n- [How to manage users](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-manage-users/)\n- [How to import your projects to GitLab](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-import-your-projects-to-gitlab/)  \n- [Mastering project management](https://about.gitlab.com/blog/getting-started-with-gitlab-mastering-project-management/)\n- [Automating Agile workflows with the gitlab-triage gem](https://about.gitlab.com/blog/automating-agile-workflows-with-the-gitlab-triage-gem/)\n- [Working with CI/CD 
variables](https://about.gitlab.com/blog/getting-started-with-gitlab-working-with-ci-cd-variables/)\n",[678,9,478],{"slug":1924,"featured":6,"template":684},"getting-started-with-gitlab-how-to-import-your-projects-to-gitlab","content:en-us:blog:getting-started-with-gitlab-how-to-import-your-projects-to-gitlab.yml","Getting Started With Gitlab How To Import Your Projects To Gitlab","en-us/blog/getting-started-with-gitlab-how-to-import-your-projects-to-gitlab.yml","en-us/blog/getting-started-with-gitlab-how-to-import-your-projects-to-gitlab",{"_path":1930,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1931,"content":1937,"config":1942,"_id":1944,"_type":13,"title":1945,"_source":15,"_file":1946,"_stem":1947,"_extension":18},"/en-us/blog/getting-started-with-gitlab-how-to-manage-users",{"title":1932,"description":1933,"ogTitle":1932,"ogDescription":1933,"noIndex":6,"ogImage":1934,"ogUrl":1935,"ogSiteName":669,"ogType":670,"canonicalUrls":1935,"schema":1936},"Getting started with GitLab: How to manage users","Learn how to manage users using groups, roles, and permissions. 
Walk through the setup of secure collaboration with proper project access.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097273/Blog/Hero%20Images/Blog/Hero%20Images/blog-getting-started-with-gitlab-banner-0497-option4-fy25_cFwd8DYFLekdnOLmbbChp_1750097273817.png","https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-manage-users","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting started with GitLab: How to manage users\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2025-01-14\",\n      }",{"title":1932,"description":1933,"authors":1938,"heroImage":1934,"date":1939,"body":1940,"category":678,"tags":1941},[1570],"2025-01-14","*Welcome to our \"Getting started with GitLab\" series, where we help newcomers get familiar with the GitLab DevSecOps platform.*\n\nEnsuring a safe, compliant, and collaborative environment starts with the most basic of tasks - managing users. In this tutorial, we show you how to establish project members, assign roles and permissions, and create groups and subgroups.\n\nNote: To follow along with this tutorial, you should have a GitLab account either through GitLab.com or your organization's self-managed instance. If you need help, visit our fundamentals area on [GitLab University](https://university.gitlab.com/).\n\nLet's get started.\n\nWhen you create GitLab users, they only have access to [their private projects, public projects, and projects set with internal visibility](https://docs.gitlab.com/ee/user/public_access.html). For the purposes of this tutorial, your project is super secret and only invited members should have access to it – at varying permissions settings. 
To ensure this, you can invite users as [members of the project](https://docs.gitlab.com/ee/user/project/members/).\n\n## Project members\n\n![Project members screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097278/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097278487.png)\n\nGitLab users can be invited to a project and [assigned a role](https://docs.gitlab.com/ee/user/permissions.html), which determines what they can do in the project. The owner of a project can delegate administrative tasks to other users as maintainers, who can do almost everything an owner does, aside from changes to a project such as deleting, archiving, or transferring a project.\n\n![Invite members screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097278/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097278487.png)\n\n[Maintainers](https://docs.gitlab.com/ee/user/permissions.html#roles) of the project can invite other members as developers who have access to all the features to create, build, and deploy software. Users who are not developers but need project management access can be invited to the project as [planners](https://about.gitlab.com/blog/introducing-gitlabs-new-planner-role-for-agile-planning-teams/), reporters, and guests with varying levels of permissions. These roles can also be used to determine who can make changes to certain branches with [protected branches](https://docs.gitlab.com/ee/user/project/repository/branches/protected.html).\n\nIf you are working with contractors or your use requires user permissions to expire, you can set an expiry date after which the user loses access to the project. Project members can also be identified as direct or indirect members, based on their [membership type](https://docs.gitlab.com/ee/user/project/members/#membership-types). 
Direct members are invited directly into the project, whereas indirect members are often inherited from a [GitLab group](https://docs.gitlab.com/ee/user/group/) a project belongs to.\n\nNow, let's look at Group memberships.\n\n## Group memberships\n\nGroups in GitLab can be a top level created at the root of a GitLab instance like the [gitlab.com/gitlab-org](http://gitlab.com/gitlab-org), which is a parent group used to organize other subgroups like [gitlab.com/gitlab-org/charts](http://gitlab.com/gitlab-org/charts). Groups are useful even if you only have one project.\n\nGroups can be used for different reasons:\n\n- organizing similar or related projects  \n- organizing users into groups for better team coordination\n\nWhen using groups to organize users, you can organize teams in groups and [invite a group to a project](https://docs.gitlab.com/ee/user/project/members/sharing_projects_groups.html) with a specific role for an entire team. You can have a `dev` group for the developers of the team, `pm` group for the project managers and `leads` for team leads. When inviting the groups, `dev` can be assigned the Developer role, `pm` the Planner role, and `leads` the Maintainer role. \n\n![Invite a group screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097279/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097278488.png)\n\nMembers of each group can be added or removed without needing to update project permissions. This is particularly useful when your team has grown to have several projects. 
However, it is important to [observe best practices](https://docs.gitlab.com/ee/user/project/members/sharing_projects_groups.html#setting-up-a-group-for-collaboration) for using groups for collaboration.\n\nAnother helpful aspect of having users organized in groups is that you can [mention](https://docs.gitlab.com/ee/user/discussions/#mentions) the entire group in issues, merge requests, or comments, which makes keeping an entire team informed easier.\n\n### Create subgroups\n\n[Subgroups](https://docs.gitlab.com/ee/user/group/subgroups/) can be used to further organize users in a group and you can keep adding subgroups up to 20 nested levels. Users in a subgroup inherit the permissions they have in a parent group. If you want to grant a user in a subgroup a role higher than what they inherited, you will need to [invite them to the subgroup](https://docs.gitlab.com/ee/user/group/subgroups/#override-ancestor-group-membership) with the new higher role. Note: You cannot give them a lower role in the subgroup.\n\n### Manage groups\n\nGroup Owners have several management options to determine how users function in a group. For instance, you can set how a user can request access to a group, enable/disable [group mentions](https://docs.gitlab.com/ee/user/group/manage.html#disable-group-mentions), [restrict access](https://docs.gitlab.com/ee/user/group/manage.html#turn-on-restricted-access), or [moderate users](https://docs.gitlab.com/ee/user/group/moderate_users.html), among other options. An exciting new feature, which is still under development at the time of this article's publication, is the [automatic removal of dormant users](https://docs.gitlab.com/ee/user/group/moderate_users.html#automatically-remove-dormant-members) after a minimum of 90 days and a maximum of five years. This will help keep groups clean and better manage the release of license seats.\n\n## Learn more\n\nManaging users on GitLab depends on your use case. 
If your organization is larger with more advanced workflows and user management, GitLab provides more advanced ways to [manage enterprise users](https://docs.gitlab.com/ee/user/enterprise_user/index.html). You can also explore more options on how to [manage your organization](https://docs.gitlab.com/ee/topics/set_up_organization.html) and with [GitLab Ultimate](https://about.gitlab.com/pricing/ultimate/), you get more granularity and compliance features.\n\n> #### Want to take your learning to the next level? [Sign up for GitLab University courses](https://university.gitlab.com/). Or you can get going right away with [a free 60-day trial of GitLab Ultimate](https://about.gitlab.com/free-trial/).\n\n## \"Getting started with GitLab\" series\nRead more articles in our \"Getting started with GitLab\" series:\n\n- [How to import your projects to GitLab](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-import-your-projects-to-gitlab/)  \n- [Mastering project management](https://about.gitlab.com/blog/getting-started-with-gitlab-mastering-project-management/)\n- [Automating Agile workflows with the gitlab-triage gem](https://about.gitlab.com/blog/automating-agile-workflows-with-the-gitlab-triage-gem/)\n- [Understanding CI/CD](https://about.gitlab.com/blog/getting-started-with-gitlab-understanding-ci-cd/)\n- [Working with CI/CD variables](https://about.gitlab.com/blog/getting-started-with-gitlab-working-with-ci-cd-variables/)\n",[478,680,9,980,678],{"slug":1943,"featured":90,"template":684},"getting-started-with-gitlab-how-to-manage-users","content:en-us:blog:getting-started-with-gitlab-how-to-manage-users.yml","Getting Started With Gitlab How To Manage 
Users","en-us/blog/getting-started-with-gitlab-how-to-manage-users.yml","en-us/blog/getting-started-with-gitlab-how-to-manage-users",{"_path":1949,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1950,"content":1956,"config":1961,"_id":1963,"_type":13,"title":1964,"_source":15,"_file":1965,"_stem":1966,"_extension":18},"/en-us/blog/getting-started-with-gitlab-mastering-project-management",{"title":1951,"description":1952,"ogTitle":1951,"ogDescription":1952,"noIndex":6,"ogImage":1953,"ogUrl":1954,"ogSiteName":669,"ogType":670,"canonicalUrls":1954,"schema":1955},"Getting started with GitLab: Mastering project management","Discover the key components of project management and how to put them to use for better organization and tracking.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097294/Blog/Hero%20Images/Blog/Hero%20Images/blog-getting-started-with-gitlab-banner-0497-option4-fy25_cFwd8DYFLekdnOLmbbChp_1750097293924.png","https://about.gitlab.com/blog/getting-started-with-gitlab-mastering-project-management","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting started with GitLab: Mastering project management\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2025-02-11\",\n      }",{"title":1951,"description":1952,"authors":1957,"heroImage":1953,"date":1958,"body":1959,"category":678,"tags":1960},[852],"2025-02-11","*Welcome to our \"Getting started with GitLab\" series, where we help newcomers get familiar with the GitLab DevSecOps platform.*\n\nGitLab is much more than just a place to store your code. It is an AI-powered DevSecOps platform with tools to help you plan, organize, track, and successfully deliver your projects. 
This post will guide you through GitLab's key project management features and show you how to leverage them effectively.\n\n## Why GitLab for project management?\n\nImagine having your code repository, issue tracker, and communication platform all seamlessly integrated in one place. That's the power of GitLab. By centralizing everything, you can streamline your workflow, enhance collaboration, and keep your projects moving forward. No more jumping between different tools and losing track of information. GitLab brings it all together, making it easier to manage your projects from start to finish.\n\n## Key components of GitLab project management\n\nLet's break down the essential elements:\n\n* [Epics](https://docs.gitlab.com/ee/user/group/epics/): Think of epics as the big picture. They represent major features, overarching goals, or long-term initiatives within your project. Need to revamp your website? That's an epic! Epics help you organize your work into larger, manageable chunks.  \n* [Issues](https://docs.gitlab.com/ee/user/project/issues/): Issues are the individual tasks or work items that contribute to your project goals. Each issue represents a specific action, like \"design the homepage\" or \"write the 'about us' page.\" Issues are the building blocks of your project, and they provide a clear way to track individual tasks.  \n* [Labels](https://docs.gitlab.com/ee/user/project/labels.html): Labels are like tags that help you categorize and filter your work. You can use labels to indicate priority (e.g., high, medium, low), status (e.g., to do, in progress, done), or assign issues to specific teams or individuals. Labels provide a flexible way to organize and prioritize your work.  \n* Boards: GitLab's issue boards are your visual workspace. They provide a Kanban-style view of your project, allowing you to see the status of all your issues at a glance. 
Drag and drop issues across different lists (e.g., \"To Do,\" \"Doing,\" \"Done\") to visualize your workflow and track progress. In GitLab, you can create boards for [issues](https://docs.gitlab.com/ee/user/project/issue_board.html) and [Epics](https://docs.gitlab.com/ee/user/group/epics/epic_boards.html).  \n* [Milestones](https://docs.gitlab.com/ee/user/project/milestones/): Milestones mark significant checkpoints or target dates within your project. They help you track progress towards specific goals and deadlines. For example, you might have milestones for completing a major feature, releasing a beta version, or launching the final product.  \n* [Tasks](https://docs.gitlab.com/ee/user/tasks.html): For those extra granular steps, break down your issues into smaller tasks. This helps with delegation, clarifies individual responsibilities, and ensures nothing gets overlooked. Tasks provide a way to create checklists within issues, making it easier to track progress on complex tasks.\n\n## Deep dive into the features\n\n### 1. Epics: The big picture\n\n* Creating epics: Navigate to your group's \"Epics\" menu under “Plan”. Click **New epic** and give it a descriptive title and a clear description outlining the goal. You can also specify the start and end date of the epic – this is useful when using [Roadmaps](https://docs.gitlab.com/ee/user/group/roadmap/).\n\n![Epic creation page](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097301/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097300817.png)\n\n* [Roadmaps](https://docs.gitlab.com/ee/user/group/roadmap/): Add your epics to a roadmap to visualize your project timeline and long-term goals. 
Roadmaps provide a bird's-eye view of your project plan, making it easy to see the big picture and track progress towards major milestones.\n\n![Roadmap view](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097301/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750097300818.png)\n\n### 2. Issues: Getting things done\n\n* Creating issues: In your project, go to the \"Issues\" menu under “Plan” and click **New issue**. Provide a concise and descriptive title like \"Design Homepage Wireframes,\" assign it to a team member, set a due date, and add a detailed description outlining the task's requirements.  \n* GitLab Duo: You can leverage the power of [GitLab Duo to create detailed issue descriptions](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#populate-an-issue-with-issue-description-generation) with just a little hint of what you want to achieve.  \n* Weighting: Estimate the effort required for each issue by assigning weights. This helps with planning and prioritization. For example, a simple task might have a weight of **1**, while a more complex task might have a weight of **5**.\n\n![Issue with weight of 4 assigned](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097301/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097300819.png)\n\n### 3. Labels: Organizing your work\n\n* Creating labels: Go to your project's \"Issues\" tab and click Labels. Create custom labels with clear names to categorize your issues. For example, create labels like **Priority: High**, **Status: In Progress**, or **Team: Design**. Apply these labels to your issues to keep them organized and easily filterable.\n\n![Labels screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097301/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097300820.png)\n\n### 4. Boards: Visualizing your workflow\n\n* Kanban boards: GitLab's boards provide a Kanban-style view of your project. 
Create lists like \"To Do,\" \"Doing,\" and \"Done\" to represent the stages of your workflow. Drag and drop issues across these lists to visualize their progress.\n* Customizing boards: Tailor your boards to match your specific workflow. Add more columns, filter issues by labels or assignees, and set up swim lanes to categorize issues by epics or other criteria.\n\n![Visualize workflow with issue boards](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097301/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097300820.png)\n\n### 5. Tasks: Breaking down the work\n\n* Creating tasks: Within an issue, use the checklist markdown syntax to create a task list. Each item in the list represents a smaller step within the larger issue. For example, in the issue \"Design Homepage Wireframes,\" you might have tasks like \"Sketch initial concepts,\" \"Create digital wireframes,\" and \"Get feedback from stakeholders.\" To create a Task, click on the **Add** button in the \"Child Items\" section of an issue’s page. Then, enter the title of the task, and click **Create Task**.\n\n![Issue with create task button](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097301/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750097300822.png)\n\n### 6. Milestones: Tracking progress\n* Setting milestones: Define milestones to mark significant points in your project, like completing a specific feature or reaching a key deadline. Give your milestones clear titles and due dates.\n* Associating with issues: Link issues and epics to milestones to track progress towards those goals. This helps you see how individual tasks contribute to the overall project plan.\nCreating a milestone: Under the \"Plan\" dropdown menu, click on **Milestones > New milestone**. 
Specify the milestone title, description, and start and due dates.\n\n![New milestone screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097301/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097300823.png)\n\n\u003Cbr>\u003C/br>\n\n![New page with milestone on it](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097301/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097300823.png)\n\n### 7. [Iterations](https://docs.gitlab.com/ee/user/group/iterations/): Working in sprints\n\n* Defining iterations: If you're using an Agile workflow, define iterations (sprints) with specific start and end dates. This helps you break down your work into smaller, more manageable time boxes.  \n* Assigning issues: Assign issues to iterations to plan your work in shorter cycles and focus on delivering incremental value.\n\n### 8. [Time tracking](https://docs.gitlab.com/ee/user/project/time_tracking.html): Measuring effort\n\n* Logging time: Within an issue, use the \"/spend\" quick action followed by the time spent (e.g., \"/spend 2h 30m\") to log your work. This helps you track the actual time spent on each task.  \n* Analyzing data: Generate time tracking reports to gain insights into project progress, team efficiency, and identify potential bottlenecks.\n\n![Time tracking report](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097301/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750097300824.png)\n\n### 9. Dependencies: Managing workflow\n\n* [Linking issues](https://docs.gitlab.com/ee/user/project/issues/related_issues.html): Create dependencies between issues to ensure tasks are completed in the correct order. For example, if issue A must be completed before issue B can begin, you can create a dependency between them. This helps you visualize the workflow and avoid potential roadblocks.\n\n### 10. 
Templates: Streamlining issue creation\n\n* [Creating templates](https://docs.gitlab.com/ee/user/project/description_templates.html): Create issue templates to standardize the information captured for common tasks, saving you time and ensuring consistency. For example, you could create a template for bug reports that includes fields for steps to reproduce, expected behavior, and actual behavior.\n\n### Collaboration is key\n\nGitLab fosters collaboration through the following:\n\n* [Comments](https://docs.gitlab.com/ee/user/discussions/): Discuss issues and epics directly within GitLab. Use comments to provide updates, ask questions, and share feedback.  \n* [Mentions](https://docs.gitlab.com/ee/user/discussions/#mentions): Use **@** to mention specific team members and notify them of updates or request their input.  \n* Discussions: Engage in threaded discussions within issues and epics to brainstorm ideas, solve problems together, and keep everyone informed.\n\n### Ready to get started?\n\nNow that you've explored the power of GitLab's project management features, it's time to put them into practice! Create a sample project, experiment with different features, and discover how GitLab can transform your workflow. You can also learn more about how GitLab can help you facilitate [Kanban](https://docs.gitlab.com/ee/tutorials/kanban/) and [Scrum](https://docs.gitlab.com/ee/tutorials/scrum_events/) in the GitLab documentation.\n\n> #### Want to take your learning to the next level? [Sign up for GitLab University courses](https://university.gitlab.com/). 
Or you can get going right away with a [free 60-day trial of GitLab Ultimate](https://about.gitlab.com/free-trial/).\n\n## \"Getting started with GitLab\" series\nRead more articles in our \"Getting started with GitLab\" series:\n\n- [How to manage users](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-manage-users/)\n- [How to import your projects to GitLab](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-import-your-projects-to-gitlab/)  \n- [Automating Agile workflows with the gitlab-triage gem](https://about.gitlab.com/blog/automating-agile-workflows-with-the-gitlab-triage-gem/)\n- [Understanding CI/CD](https://about.gitlab.com/blog/getting-started-with-gitlab-understanding-ci-cd/)\n- [Working with CI/CD variables](https://about.gitlab.com/blog/getting-started-with-gitlab-working-with-ci-cd-variables/)",[9,678,478,980],{"slug":1962,"featured":6,"template":684},"getting-started-with-gitlab-mastering-project-management","content:en-us:blog:getting-started-with-gitlab-mastering-project-management.yml","Getting Started With Gitlab Mastering Project Management","en-us/blog/getting-started-with-gitlab-mastering-project-management.yml","en-us/blog/getting-started-with-gitlab-mastering-project-management",{"_path":1968,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1969,"content":1974,"config":1979,"_id":1981,"_type":13,"title":1982,"_source":15,"_file":1983,"_stem":1984,"_extension":18},"/en-us/blog/getting-started-with-gitlab-understanding-ci-cd",{"title":1970,"description":1971,"ogTitle":1970,"ogDescription":1971,"noIndex":6,"ogImage":972,"ogUrl":1972,"ogSiteName":669,"ogType":670,"canonicalUrls":1972,"schema":1973},"Getting started with GitLab: Understanding CI/CD","Learn the basics of continuous integration/continuous delivery in this beginner's guide, including what CI/CD components are and how to create them.","https://about.gitlab.com/blog/getting-started-with-gitlab-understanding-ci-cd","\n                        {\n     
   \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting started with GitLab: Understanding CI/CD\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2025-04-25\"\n      }",{"title":1970,"description":1971,"authors":1975,"heroImage":972,"date":1976,"body":1977,"category":678,"tags":1978},[852],"2025-04-25","*Welcome to our \"Getting started with GitLab\" series, where we help newcomers get familiar with the GitLab DevSecOps platform.*\n\nImagine a workflow where every code change is automatically built, tested, and deployed to your users. That's the power of [Continuous Integration/Continuous Delivery (CI/CD)](https://about.gitlab.com/topics/ci-cd/)! CI/CD helps you catch bugs early, ensures code quality, and delivers software faster and more frequently.\n\n### What is CI/CD?\n\n* **Continuous Integration** is a development practice where developers integrate code changes into a shared repository frequently, preferably several times a day. Each integration is then verified by an automated build and test process, allowing teams to detect problems early.  \n* **Continuous Delivery** extends CI by automating the release pipeline, ensuring that your code is *always* in a deployable state. You can deploy your application to various environments (e.g., staging, production) with a single click or automatically.  \n* **Continuous Deployment** takes it a step further by automatically deploying *every successful build* to production. This requires a high degree of confidence in your automated tests and deployment process.\n\n### Why GitLab CI/CD?\n\nGitLab CI/CD is a powerful, integrated system that comes built-in with GitLab. It offers a seamless experience for automating your entire software development lifecycle. With GitLab CI/CD, you can:\n\n* **Automate everything:** Build, test, and deploy your applications with ease.  
\n* **Catch bugs early:** Detect and fix errors before they reach production.  \n* **Get faster feedback:** Receive immediate feedback on your code changes.  \n* **Improve collaboration:** Work together more effectively with automated workflows.  \n* **Accelerate delivery:** Release software faster and more frequently.  \n* **Reduce risk:** Minimize deployment errors and rollbacks.\n\n### The elements of GitLab CI/CD\n\n* `.gitlab-ci.yml`**:** This [YAML file](https://docs.gitlab.com/ee/ci/yaml/), located in your project's root directory, defines your CI/CD pipeline, including stages, jobs, and runners.  \n* [**GitLab Runner**](https://docs.gitlab.com/runner/)**:** This agent executes your CI/CD jobs on your infrastructure (e.g. physical machines, virtual machines, Docker containers, or Kubernetes clusters).  \n* [**Stages**](https://docs.gitlab.com/ee/ci/yaml/#stages)**:** Stages define the order of execution for your jobs (e.g. build, test, and deploy).  \n* [**Jobs**](https://docs.gitlab.com/ee/ci/yaml/#job-keywords)**:** Jobs are individual units of work within a stage (e.g. compile code, run tests, and deploy to staging).\n\n### Setting up GitLab CI\n\nGetting started with GitLab CI is simple. 
Here's a basic example of a `.gitlab-ci.yml` file:\n\n```yaml\n\nstages:\n  - build\n  - test\n  - deploy\n\nbuild_job:\n  stage: build\n  script:\n    - echo \"Building the application...\"\n\ntest_job:\n  stage: test\n  script:\n    - echo \"Running tests...\"\n\ndeploy_job:\n  stage: deploy\n  script:\n    - echo \"Deploying to production...\"\n  environment:\n    name: production\n\n```\n\nThis configuration defines three stages: \"build,\" \"test,\" and \"deploy.\" Each stage contains a job that executes a simple script.\n\n### CI/CD configuration examples\n\nLet's explore some more realistic examples.\n\n**Building and deploying a Node.js application**\n\nThe pipeline definition below outlines using npm to build and test a Node.js application and [dpl](https://docs.gitlab.com/ci/examples/deployment/) to deploy the application to Heroku. The deploy stage of the pipeline makes use of [GitLab CI/CD variables](https://docs.gitlab.com/ci/variables/), which allow developers to store sensitive information (e.g. credentials) and securely use them in CI/CD processes. In this example, an API key to deploy to Heroku is stored under the variable key name `$HEROKU_API_KEY` used by the dpl tool.\n\n```yaml\n\nstages:\n  - build\n  - test\n  - deploy\n\nbuild:\n  stage: build\n  image: node:latest\n  script:\n    - npm install\n    - npm run build\n\ntest:\n  stage: test\n  image: node:latest\n  script:\n    - npm run test\n\ndeploy:\n  stage: deploy\n  image: ruby:latest\n  script:\n    - gem install dpl\n    - dpl --provider=heroku --app=$HEROKU_APP_NAME --api-key=$HEROKU_API_KEY\n\n```\n\n**Deploying to different environments (staging and production)**\n\nGitLab also offers the idea of [Environments](https://docs.gitlab.com/ci/environments/) with CI/CD. This feature allows users to track deployments from CI/CD to infrastructure targets. In the example below, the pipeline adds stages with an environment property for a staging and production environment. 
While the deploy_staging stage will always run its script, the deploy_production stage requires manual approval to prevent accidental deployment to production.  \n\n```yaml\n\nstages:\n  - build\n  - test\n  - deploy_staging\n  - deploy_production\n\nbuild:\n  # ...\n\ntest:\n  # ...\n\ndeploy_staging:\n  stage: deploy_staging\n  script:\n    - echo \"Deploying to staging...\"\n  environment:\n    name: staging\n\ndeploy_production:\n  stage: deploy_production\n  script:\n    - echo \"Deploying to production...\"\n  environment:\n    name: production\n  when: manual  # Requires manual approval\n\n```\n\n### GitLab Auto DevOps\n\n[GitLab Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) simplifies CI/CD by providing a pre-defined configuration that automatically builds, tests, and deploys your applications. It leverages best practices and industry standards to streamline your workflow.\n\nTo enable Auto DevOps:\n\n1. Go to your project's **Settings > CI/CD > General pipelines**.  \n2. Enable the **Auto DevOps** option.\n\nAuto DevOps automatically detects your project's language and framework and configures the necessary build, test, and deployment stages. You don’t even need to create a `.gitlab-ci.yml` file.\n\n### CI/CD Catalog\n\nThe [CI/CD Catalog](https://about.gitlab.com/blog/faq-gitlab-ci-cd-catalog/) is a list of projects with published [CI/CD components](https://docs.gitlab.com/ee/ci/components/) you can use to extend your CI/CD workflow. Anyone can create a component project and add it to the CI/CD Catalog or contribute to an existing project to improve the available components. 
You can find published components in the [CI/CD Catalog](https://gitlab.com/explore/catalog) on GitLab.com.\n\n> [Tutorial: How to set up your first GitLab CI/CD component](https://about.gitlab.com/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component/)\n\n### CI templates\n\nYou can also create your own [CI templates](https://docs.gitlab.com/ee/ci/examples/) to standardize and reuse CI/CD configurations across multiple projects. This promotes consistency and reduces duplication.\n\nTo create a CI template:\n\n1. Create a `.gitlab-ci.yml` file in a dedicated project or repository.  \n2. Define your CI/CD configuration in the template.  \n3. In your project's `.gitlab-ci.yml` file, use the `include` keyword to include the template.\n\n## Take your development to the next level\n\nGitLab CI/CD is a powerful tool that can transform your development workflow. By understanding the concepts of CI/CD, configuring your pipelines, and leveraging features like Auto DevOps, the CI/CD Catalog, and CI templates, you can automate your entire software development lifecycle and deliver high-quality software faster and more efficiently.\n\n> Want to take your learning to the next level? Sign up for [GitLab University courses](https://university.gitlab.com/). 
Or you can get going right away with a [free 60-day trial of GitLab Ultimate](https://about.gitlab.com/free-trial/).\n\n## \"Getting Started with GitLab\" series\n\nCheck out more articles in our \"Getting Started with GitLab\" series:\n\n- [How to manage users](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-manage-users/)\n- [How to import your projects to GitLab](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-import-your-projects-to-gitlab/)  \n- [Mastering project management](https://about.gitlab.com/blog/getting-started-with-gitlab-mastering-project-management/)\n- [Automating Agile workflows with the gitlab-triage gem](https://about.gitlab.com/blog/automating-agile-workflows-with-the-gitlab-triage-gem/)\n- [Working with CI/CD variables](https://about.gitlab.com/blog/getting-started-with-gitlab-working-with-ci-cd-variables/)\n",[108,771,772,478,678,9],{"slug":1980,"featured":90,"template":684},"getting-started-with-gitlab-understanding-ci-cd","content:en-us:blog:getting-started-with-gitlab-understanding-ci-cd.yml","Getting Started With Gitlab Understanding Ci Cd","en-us/blog/getting-started-with-gitlab-understanding-ci-cd.yml","en-us/blog/getting-started-with-gitlab-understanding-ci-cd",{"_path":1986,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":1987,"content":1992,"config":1998,"_id":2000,"_type":13,"title":2001,"_source":15,"_file":2002,"_stem":2003,"_extension":18},"/en-us/blog/getting-started-with-gitlab-working-with-ci-cd-variables",{"title":1988,"description":1989,"ogTitle":1988,"ogDescription":1989,"noIndex":6,"ogImage":972,"ogUrl":1990,"ogSiteName":669,"ogType":670,"canonicalUrls":1990,"schema":1991},"Getting started with GitLab: Working with CI/CD variables","Learn what CI/CD variables are, why they are important in DevSecOps, and best practices for utilizing them.","https://about.gitlab.com/blog/getting-started-with-gitlab-working-with-ci-cd-variables","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting started with GitLab: Working with CI/CD variables\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab Team\"}],\n        \"datePublished\": \"2025-05-27\"\n      }",{"title":1988,"description":1989,"authors":1993,"heroImage":972,"date":1995,"body":1996,"category":678,"tags":1997},[1994],"GitLab Team","2025-05-27","*Welcome to our \"Getting started with GitLab\" series, where we help newcomers get familiar with the GitLab DevSecOps platform.*\n\nIn an earlier article, we explored [GitLab CI/CD](https://about.gitlab.com/blog/getting-started-with-gitlab-understanding-ci-cd/). Now, let's dive deeper into the world of **CI/CD variables** and unlock their full potential.\n\n### What are CI/CD variables?\n\nCI/CD variables are dynamic key-value pairs that you can define at different levels within your GitLab environment (e.g., project, group, or instance). These variables act as placeholders for values that you can use in your `.gitlab-ci.yml` file to customize your pipelines, securely store sensitive information, and make your CI/CD configuration more maintainable.\n\n### Why are CI/CD variables important?\n\nCI/CD variables offer numerous benefits:\n\n* **Flexibility** - Easily adapt your pipelines to different environments, configurations, or deployment targets without modifying your core CI/CD script.  \n* **Security** - Securely store sensitive information like API keys, passwords, and tokens, preventing them from being exposed directly in your code.  \n* **Maintainability** - Keep your CI/CD configuration clean and organized by centralizing values in variables, making updates and modifications easier.  
\n* **Reusability** - Define variables once and reuse them across multiple projects, promoting consistency and reducing duplication.\n\n### Scopes of CI/CD variables: Project, group, and instance\n\nGitLab allows you to define CI/CD variables with different scopes, controlling their visibility and accessibility:\n\n* **Project-level variables** - These variables are specific to a single project and are ideal for storing project-specific settings, such as:\n  * Deployment URLs: Define different URLs for staging and production environments.  \n  * Database credentials: Store database connection details for testing or deployment.  \n  * Feature flags: Enable or disable features during different stages of your pipeline.  \n  * Example: You have a project called \"MyWebApp\" and want to store the production deployment URL. You create a project-level variable named `DPROD_DEPLOY_URL` with the value `https://mywebapp.com`.  \n* **Group-level variables** - These variables are shared across all projects within a GitLab group. They are useful for settings that are common to multiple projects, such as:\n\n  * API keys for shared services: Store API keys for services like AWS, Google Cloud, or Docker Hub that are used by multiple projects within the group.  \n  * Global configuration settings: Define common configuration parameters that apply to all projects in the group.  \n  * Example: You have a group called \"Web Apps\" and want to store an API key for Docker Hub. You create a group-level variable named `DOCKER_HUB_API_KEY` with the corresponding API key value.  \n* **Instance-level variables** - These variables are available to all projects on a GitLab instance. They are typically used for global settings that apply across an entire organization such as:\n\n  * Default runner registration token: Provide a default token for registering new [runners](https://docs.gitlab.com/runner/).  \n  * License information: Store license keys for GitLab features or third-party tools.  
\n  * Global environment settings: Define environment variables that should be available to all projects.  \n  * Example: You want to set a default Docker image for all projects on your GitLab instance. You create an instance-level variable named `DEFAULT_DOCKER_IMAGE` with the value `ubuntu:latest`.\n\n### Defining CI/CD variables\n\nTo define a CI/CD variable:\n\n1. Click on the **Settings > CI/CD** buttons for  your project, group, or instance.  \n2. Go to the **Variables** section.  \n3. Click **Add variable**.  \n4. Enter the **key** (e.g., `API_KEY`) and **value**.  \n5. Optionally, check the **Protect variable** box for sensitive information. This ensures that the variable is only available to pipelines running on protected branches or tags.  \n6. Optionally, check the **Mask variable** box to hide the variable's value from job logs, preventing accidental exposure.  \n7. Click **Save variable**.\n\n### Using CI/CD variables\n\nTo use a CI/CD variable in your `.gitlab-ci.yml` file, simply prefix the variable name with `$`:\n\n```yaml\ndeploy_job:\n  script:\n    - echo \"Deploying to production...\"\n    - curl -H \"Authorization: Bearer $API_KEY\" https://api.example.com/deploy\n```\n\n### Predefined CI/CD variables\n\nGitLab provides a set of [predefined CI/CD variables](https://docs.gitlab.com/ci/variables/predefined_variables/) that you can use in your pipelines. These variables provide information about the current pipeline, job, project, and more.\n\nSome commonly used predefined variables include:\n\n* `$CI_COMMIT_SHA`: The commit SHA of the current pipeline.  \n* `$CI_PROJECT_DIR`: The directory where the project is cloned.  \n* `$CI_PIPELINE_ID`: The ID of the current pipeline.  \n* `$CI_ENVIRONMENT_NAME`: The name of the environment being deployed to (if applicable).\n\n### Best practices\n\n* Securely manage sensitive variables: Use protected and masked variables for API keys, passwords, and other sensitive information.  
\n* Avoid hardcoding values: Use variables to store configuration values, making your pipelines more flexible and maintainable.  \n* Organize your variables: Use descriptive names and group related variables together for better organization.  \n* Use the appropriate scope: Choose the correct scope (project, group, or instance) for your variables based on their intended use and visibility.\n\n### Unlock the power of variables\n\nCI/CD variables are a powerful tool for customizing and securing your GitLab pipelines. By mastering variables and understanding their different scopes, you can create more flexible, maintainable, and efficient workflows.\n\nWe hope you found it helpful and are now well-equipped to leverage the power of GitLab for your development projects.\n\n> Get started with CI/CD variables today with a [free, 60-day trial of GitLab Ultimate with Duo Enterprise](https://about.gitlab.com/free-trial/).\n\n## \"Getting Started with GitLab\" series\nRead more articles in our \"Getting Started with GitLab\" series:\n\n- [How to manage users](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-manage-users/)\n-  [How to import your projects to GitLab](https://about.gitlab.com/blog/getting-started-with-gitlab-how-to-import-your-projects-to-gitlab/)  \n- [Mastering project management](https://about.gitlab.com/blog/getting-started-with-gitlab-mastering-project-management/)\n- [Automating Agile workflows with the gitlab-triage gem](https://about.gitlab.com/blog/automating-agile-workflows-with-the-gitlab-triage-gem/)\n- [Understanding CI/CD](https://about.gitlab.com/blog/getting-started-with-gitlab-understanding-ci-cd/)\n",[678,9,771,772,108,680],{"slug":1999,"featured":90,"template":684},"getting-started-with-gitlab-working-with-ci-cd-variables","content:en-us:blog:getting-started-with-gitlab-working-with-ci-cd-variables.yml","Getting Started With Gitlab Working With Ci Cd 
Variables","en-us/blog/getting-started-with-gitlab-working-with-ci-cd-variables.yml","en-us/blog/getting-started-with-gitlab-working-with-ci-cd-variables",{"_path":2005,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2006,"content":2012,"config":2018,"_id":2020,"_type":13,"title":2021,"_source":15,"_file":2022,"_stem":2023,"_extension":18},"/en-us/blog/getting-started-with-value-streams-dashboard",{"title":2007,"description":2008,"ogTitle":2007,"ogDescription":2008,"noIndex":6,"ogImage":2009,"ogUrl":2010,"ogSiteName":669,"ogType":670,"canonicalUrls":2010,"schema":2011},"Getting started with the new GitLab Value Streams Dashboard","Benchmark your value stream lifecycle, DORA, and vulnerabilities metrics to gain valuable insights and uncover patterns for continuous improvements.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749671793/Blog/Hero%20Images/16_0-cover-image.png","https://about.gitlab.com/blog/getting-started-with-value-streams-dashboard","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Getting started with the new GitLab Value Streams Dashboard\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Haim Snir\"}],\n        \"datePublished\": \"2023-06-12\"\n      }",{"title":2007,"description":2008,"authors":2013,"heroImage":2009,"date":2015,"body":2016,"category":769,"tags":2017},[2014],"Haim Snir","2023-06-12","\n\n\u003Ci>This is part two of our multipart series introducing you to the capabilities within GitLab Value Stream Management and the Value Streams Dashboard. 
In part one, [learn about the Total Time Chart](https://about.gitlab.com/blog/value-stream-total-time-chart/) and how to simplify top-down optimization flow with Value Stream Management.\u003C/i>\n\nGetting started with GitLab [Value Streams Dashboard](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html), a customizable dashboard that enables decision-makers to identify trends, patterns, and opportunities for digital transformation improvements, is easy. If you're already using GitLab Value Stream Management, simply navigate to your project's or group's Analytics tab, and within [Value stream analytics](https://docs.gitlab.com/ee/user/group/value_stream_analytics/#view-value-stream-analytics), click on the \"Value Streams Dashboard - DORA\" link. This will open a new page with the Value Streams Dashboard.\n\n![image of DORA Metrics console](https://about.gitlab.com/images/blogimages/vsdCover.png){: .shadow}\nDORA metrics comparison panel\n{: .note.text-center}\n\nGitLab Value Stream Management allows customers to visualize their end-to-end DevSecOps workstreams, manage their software development processes, and gain insight into how digital transformation and technological investments are delivering value and driving business results. GitLab Value Stream Management is able to do this because GitLab provides an entire DevOps platform as a single application and, therefore, holds all the data needed to provide end-to-end visibility throughout the entire software development lifecycle. So now your decisions rely on actual data rather than blind estimation or gut feelings. 
Additionally, because GitLab is the place where work happens, GitLab Value Stream Management insights are also actionable, allowing your users to move from \"understanding\" to \"fixing\" at any time, from within their workflow and without losing context.\n\nThe centralized UI in Value Streams Dashboard acts as the single source of truth (SSOT), where all stakeholders can access and view the same set of metrics that are relevant to the organization. The SSOT views ensure consistency, eliminate discrepancies, and provide a reliable and unified source of data for decision-making and analysis.\n\nThe first iteration of the GitLab Value Streams Dashboard was focused on enabling teams to continuously improve software delivery workflows by benchmarking [value stream lifecycle metrics, DORA metrics, and vulnerabilities metrics](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html#dashboard-metrics-and-drill-down-reports). One of the key features is a new DevSecOps metrics comparison panel that displays the metrics for a group or project in the month-to-date, last month, the month before, and the past 180 days.\n\nThis comparison enables managers to track team improvements in the context of the other DevSecOps metrics to find patterns or trends over time. 
The data is presented in a clear and concise manner, ensuring that you can quickly grasp the significance of the metrics.\n\n![The Value Streams Dashboard helps you get a high-level custom view over multiple DevOps metrics and understand whether they are improving month-over-month](https://about.gitlab.com/images/blogimages/2023-05-18_vsd_1.gif){: .shadow}\nValue Streams Dashboard metrics comparison panel\n{: .note.text-center}\n\nAdditionally, from each metric you can drill down to a detailed report to investigate the underlying data, understand what is affecting the team performance, and identify actionable insights.\n\nWe understand that every organization has its own set of subgroups and projects, each with specific processes and terminology. That's why we designed our dashboard to be flexible and adaptable. Users have the power to [customize](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html#customize-the-dashboard-panels) their dashboard by including panels from different subgroups or projects. \n\nTracking and comparing these metrics over a period of time helps teams catch downward trends early, drill down into individual projects/metrics, take remedial actions to maintain their software delivery performance, and track progress of their innovation investments. Value Streams Dashboard's intuitive interface reduces the learning curve and eliminates the need for extensive training. Everyone can now immediately leverage the platform's unified data store power, maximizing their productivity and saving precious time and resources.\n\n## Value Streams Dashboard roadmap\nWe are just getting started with delivering new capabilities in our Value Streams Dashboard. 
The roadmap includes planned features and functionality that will continue to improve decision-making and operational efficiencies.\n\nSome of the capabilities we plan to focus on next include:\n\n- adding an [executive-level summary](https://gitlab.com/groups/gitlab-org/-/epics/9558) of key metrics related to software performance and flow of value across the organization\n- adding a [\"DORA Performers score\"](https://gitlab.com/groups/gitlab-org/-/epics/10416) panel with the DORA metrics health from all the organization's groups and projects\n- adding [filter by label to the comparison panel](https://gitlab.com/gitlab-org/gitlab/-/issues/388890) - we recognize that every team does not follow the same flow so we are adding them to slice and dice the dashboard views with GitLab labels as filters\n\nTo help us improve the Value Stream Management Dashboard, please share feedback about your experience in this [survey](https://gitlab.fra1.qualtrics.com/jfe/form/SV_50guMGNU2HhLeT4).\n\n## Learn more\n* Find out what's next on the [Value Stream Management direction page](https://about.gitlab.com/direction/plan/value_stream_management/#whats-next-and-why).\n\n* Learn how to use the new dashboard using the [Value Streams Dashboard documentation](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html).\n\n* Watch this short video on Value Streams Dashboards:\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/EA9Sbks27g4\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\nCheck out part three of this multipart series: \"[GitLab's 3 steps to optimizing software value streams](https://about.gitlab.com/blog/three-steps-to-optimize-software-value-streams/)\".\n\n\u003Ci>Disclaimer: This blog contains information related to upcoming products, features, and functionality. 
It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.\u003C/i>\n",[9,680,728,940,980],{"slug":2019,"featured":6,"template":684},"getting-started-with-value-streams-dashboard","content:en-us:blog:getting-started-with-value-streams-dashboard.yml","Getting Started With Value Streams Dashboard","en-us/blog/getting-started-with-value-streams-dashboard.yml","en-us/blog/getting-started-with-value-streams-dashboard",{"_path":2025,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2026,"content":2032,"config":2037,"_id":2039,"_type":13,"title":2040,"_source":15,"_file":2041,"_stem":2042,"_extension":18},"/en-us/blog/github-to-gitlab-migration-made-easy",{"title":2027,"description":2028,"ogTitle":2027,"ogDescription":2028,"noIndex":6,"ogImage":2029,"ogUrl":2030,"ogSiteName":669,"ogType":670,"canonicalUrls":2030,"schema":2031},"GitHub to GitLab migration the easy way","Learn how easy it is to migrate from GitHub to GitLab using GitLab's project import functionality.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749668776/Blog/Hero%20Images/julia-craice-faCwTallTC0-unsplash.jpg","https://about.gitlab.com/blog/github-to-gitlab-migration-made-easy","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitHub to GitLab migration the easy way\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2023-07-11\",\n      }",{"title":2027,"description":2028,"authors":2033,"heroImage":2029,"date":2034,"body":2035,"category":1103,"tags":2036},[1767],"2023-07-11","\nIf you are using different CI/CD 
tools and are considering migrating over to GitLab, you may be wondering about\nthe difficulty of the migration process. Migration is usually a concern for [DevSecOps](https://about.gitlab.com/topics/devsecops/) teams when considering a new solution. This is due to the fact that migrating may involve heavy lifting. However, migrating to the GitLab AI-powered DevSecOps Platform can be extremely simple and I will show you how step by step. \n\nIn this blog post, we will go over how to migrate from GitHub to GitLab using our [project import](https://docs.gitlab.com/ee/user/project/import/) functionality. Manually migrating GitHub Actions to GitLab pipelines will be covered as well. I have also created a video going over the migration process for those who prefer that format:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/0Id5oMl1Kqs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## What data can be migrated from GitHub to GitLab?\nGitLab's built-in importer allows for GitHub projects to be automatically migrated into GitLab. The built-in importer\nis accessed directly from GitLab's project creation UI. 
From the UI, you can select what data you wish to migrate to GitLab.\n\nThe data that can be migrated includes the following:\n* Repository description\n* Git repository data\n* Branch protection rules\n* Collaborators (members)\n* Issues\n* Pull requests\n* Wiki pages\n* Milestones\n* Labels\n* Release notes content\n* Release notes attachments\n* Comment attachments\n* Issue description attachments\n* Pull request description attachments\n* Pull request review comments\n* Regular issue and pull request comments\n* Git Large File Storage (LFS) objects\n* Pull request reviews\n* Pull request assigned reviewers\n* Pull request “merged by” information\n* Pull request comments replies in discussions\n* Pull request review comments suggestions\n* Issue events and pull requests events\n\nGitHub and GitLab have different naming conventions and concepts, so a mapping must be performed during the migration. For example, when collaborators/members are migrated, roles from GitHub are mapped to the appropriate GitLab roles as follows:\n\n| GitHub role | GitLab role |\n| ----------- | ----------- |\n| Read        | Guest       |\n| Triage      | Reporter    |\n| Write       | Developer   |\n| Maintain    | Maintainer  |\n| Admin       | Owner       |\n\n## Prerequisites\nNow that you have an understanding of what can be imported, let's review the prerequisites for performing the migration.\n\nWith the GitLab importer, you can either import your projects from **GitHub.com** or **GitHub Enterprise** to either **GitLab.com** or **Self-managed GitLab** as long as you meet the following requirements:\n* You must be a Maintainer on the GitLab destination group you are importing to from GitHub\n* Each GitHub author and assignee in the repository must have a public-facing email address on GitHub that matches their GitLab email address\n* GitHub accounts must have a public-facing email address that is populated\n* [GitHub import 
source](https://docs.gitlab.com/ee/administration/settings/visibility_and_access_controls.html#configure-allowed-import-sources) must be enabled (Self-managed GitLab only)\n\nWhen migrating a user, GitLab uses the public-facing email address in GitHub to verify the user with the same email on GitLab. Because email ownership is unique, you'll know you have set a valid user with valid permissions.\n\n## Performing the import\nNow let's go over how to perform the migration. I will be migrating my project, the [Reddit sentiment analyzer](https://github.com/fishtoadsoft/reddit-sentiment-analyzer), from GitHub to GitLab. The Reddit sentiment analyzer contains a pull request (called a merge request in GitLab), issues, and comments. \n\n**Note:** While you may not have permissions to my project, the step-by-step process applies to any project you own. I am using my project so you can see how I migrate GitHub Actions in the next section. Now, let's get started!\n\n1) Create a new project in GitLab using the [Project Creation Interface](https://gitlab.com/projects/new).\n\n2) Select the **Import Project** box. This allows you to migrate data from external sources.\n\n![Import project box](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/import_project.png)\n\n3) Under **Import project from**, press the **GitHub** button. This will take you to the **Authenticate with GitHub** page.\n\n4) Press the **Authenticate with GitHub** button. You can also use a [personal access token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens) from GitHub with the **repo scope** if you prefer. 
This will take you to the GitHub authorization app.\n\n5) From here, you can grant access to [GitHub organization(s)](https://docs.github.com/en/organizations/collaborating-with-groups-in-organizations/about-organizations) where the projects you wish to migrate are located.\n\n![GitHub authorization app](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/github_authorize_app.png)\n\n6) Press the **Grant** button for the organization where the project you wish to migrate is stored.\n\n7) Press the **Authorize gitlabhq** button to grant GitLab access to the organization(s) selected. You will then be taken to the import selection page.\n\n8) From here, you can select the items you wish to import. \n\n![Import selection](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/import_selection.png)\n\n**Note:** The more items you choose to migrate, the longer the import will take.\n\n9) Then you must set the GitLab location you want to migrate the GitHub project to.\n\n![Set the GitLab location to migrate to](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/import_to.png)\n\n10) Press the **Import** button and the import will begin. You can see the progress in the UI. Once the import is complete the status will be changed to \"complete.\"\n\n![Import progress status](/images/blogimages/2023-july-github-to-gitlab-migration/import_progress.png)\n\nNow you should have the imported project in your workspace. Mine is called [https://gitlab.com/awkwardferny/reddit-sentiment-analyzer](https://gitlab.com/awkwardferny/reddit-sentiment-analyzer). 
When examining the imported project, you can see the following:\n\n**Repository has been migrated**\n\n![Repository has been migrated](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/migrated_data.png)\n\n**Issue has been migrated**\n\n![Issue has been migrated](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/migrated_issue.png)\n\n**Merge request has been migrated**\n\n![Merge request has been migrated](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/migrated_merge_request.png)\n\n## Migrating GitHub Actions over to GitLab CI/CD\nNow that you have migrated the project over from GitHub, notice that none of the GitHub Actions are running. Don't worry, they are very easy to migrate manually. So let's start the migration process for Actions.\n\n1) Examine the GitHub Actions within the **.github/workflows** folder. In the [project you just imported](https://gitlab.com/awkwardferny/reddit-sentiment-analyzer/-/tree/master/.github/workflows), you should see three different Action files:\n\n#### lint.yml\nThis file contains the Action, which performs linting on the source code using flake8. It uses the python:3.10 Docker image and installs the application requirements before performing the lint.\n\n```yaml\nname: \"Lint\"\n\non:\n  push:\n    branches: [ master ]\n  pull_request:\n    branches: [ master ]\n\njobs:\n  lint:\n    runs-on: ubuntu-latest\n    steps:\n    - uses: actions/checkout@v3\n    - name: Set up Python 3.10\n      uses: actions/setup-python@v4\n      with:\n        python-version: \"3.10\"\n    - name: Install dependencies\n      run: |\n        python -m pip install --upgrade pip\n        pip install flake8 pytest\n        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi\n    - name: Lint with flake8\n      run: |\n        # stop the build if there are Python syntax errors or undefined names\n        flake8 . 
--count --select=E9,F63,F7,F82 --show-source --statistics\n        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide\n        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics\n```\n\n#### smoke.yml\nThis file contains the action which performs a smoke test by just running the CLI help menu. It uses the python:3.10 Docker image and installs the application requirements before performing the smoke test.\n\n```yaml\nname: \"Smoke Tests\"\n\non:\n  push:\n    branches: [ master ]\n  pull_request:\n    branches: [ master ]\n\njobs:\n  smoke-tests:\n    runs-on: ubuntu-latest\n    steps:\n    - uses: actions/checkout@v3\n    - name: Set up Python 3.10\n      uses: actions/setup-python@v4\n      with:\n        python-version: \"3.10\"\n    - name: Install dependencies\n      run: |\n        python -m pip install --upgrade pip\n        pip install setuptools\n        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi\n    - name: Install Sentiment Analysis Application\n      run: |\n        python setup.py install\n    - name: Run smoke tests\n      run: |\n        reddit-sentiment --help\n```\n\n#### unit.yml\nThis file contains the Action, which performs unit tests using pytest. 
It uses the python:3.10 Docker image and installs the application requirements running the unit tests.\n\n```yaml\nname: \"Unit Tests\"\n\non:\n  push:\n    branches: [ master ]\n  pull_request:\n    branches: [ master ]\n\njobs:\n  unit-tests:\n    runs-on: ubuntu-latest\n    steps:\n    - uses: actions/checkout@v3\n    - name: Set up Python 3.10\n      uses: actions/setup-python@v4\n      with:\n        python-version: \"3.10\"\n    - name: Install dependencies\n      run: |\n        python -m pip install --upgrade pip\n        pip install pytest\n        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi\n    - name: Test with pytest\n      run: |\n        python -m pip install --upgrade pip\n        if [ -f test-requirements.txt ]; then pip install -r test-requirements.txt; fi\n        pytest tests/\n```\n\nNow let's go ahead and migrate these Actions over to GitLab.\n\n2) Go to the recently imported project on GitLab and open up the [WebIDE](https://docs.gitlab.com/ee/user/project/web_ide/).\n\n3) Create a file at the root called [**.gitlab-ci.yml**](https://docs.gitlab.com/ee/ci/yaml/gitlab_ci_yaml.html).\nThis file defines the GitLab pipeline.\n\n4) Add the following configuration, which will add the GitHub Actions as Jobs in the GitLab pipeline. Notice the comments I added describing each section.\n\n```yaml\n# This creates the stages in which the jobs will run. By default all\n# jobs will run in parallel in the stage. Once the jobs are completed\n# successfully then you move on to the next stage. The way jobs run\n# is completely configurable.\nstages:\n  - test\n\n# With the include statement, you can quickly add jobs which have\n# been pre-defined in external YAMLs. 
The SAST job I included below\n# is provided and maintained by GitLab and adds Static Application\n# Security Testing (SAST) to your pipeline.\ninclude:\n  - template: Jobs/SAST.gitlab-ci.yml\n\n# This is the unit test job which does exactly what is defined in\n# the GitHub Action in unit.yml. You can see it uses the python:3.10\n# Docker image, installs the application dependencies, and then runs\n# the unit tests with pytest. It was added with a simple copy and\n# paste and minor syntax changes.\nunit:\n  image: python:3.10\n  stage: test\n  before_script:\n    - python -m pip install --upgrade pip\n    - pip install pytest\n    - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi\n  script:\n    - pytest tests/\n\n# This is the lint job which does exactly what is defined in the\n# GitHub Action in lint.yml. You can see it uses the python:3.10\n# Docker image, installs the application dependencies, and then\n# performs the linting with flake8. It was added with a simple copy\n# and paste and minor syntax changes.\nlint:\n  image: python:3.10\n  stage: test\n  before_script:\n    - python -m pip install --upgrade pip\n    - pip install flake8\n    - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi\n  script:\n    - flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics\n    - flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics\n\n# This is the smoke test job which does exactly what is defined in\n# the GitHub Action in smoke.yml. You can see it uses the python:3.10\n# Docker image, installs the application dependencies, and then runs\n# the smoke tests with the Reddit sentiment analysis CLI. 
It was\n# added with a simple copy and paste and minor syntax changes.\nsmoke:\n  image: python:3.10\n  stage: test\n  before_script:\n    - python -m pip install --upgrade pip\n    - pip install setuptools\n    - if [ -f requirements.txt ]; then pip install -r requirements.txt; fi\n    - python setup.py install\n  script:\n    - reddit-sentiment --help\n```\n\nYou can see that scripts being executed in GitLab match those scripts within the GitHub Actions. The only thing that has really changed is the syntax setting up the jobs and stages. To learn more on how to create and configure pipelines, check out the [GitLab CI/CD documentation](https://docs.gitlab.com/ee/ci/).\n\n5) Let's check in the code. From the WebIDE click on the Source Control Tab in the side panel of the WebIDE. It is the [third icon from the top](https://code.visualstudio.com/docs/sourcecontrol/overview#_commit). Then press the **Commit to 'main'** button, select **Continue**, and voila, you should now have a running pipeline.\n\n6) Examine the pipeline and make sure the jobs are running properly. Go back to your project and click on the [pipeline](https://docs.gitlab.com/ee/ci/pipelines/) icon. You can see the four jobs we created have run.\n\n![Four jobs have run](https://about.gitlab.com/images/blogimages/2023-july-github-to-gitlab-migration/gitlab_jobs.png)\n\n7) Click on the **Unit** job and you can see that the unit tests were run successfully.\n\n```bash\n$ pytest tests/\n============================= test session starts ==============================\nplatform linux -- Python 3.10.11, pytest-7.3.1, pluggy-1.0.0\nrootdir: /builds/awkwardferny/reddit-sentiment-analyzer\ncollected 2 items\ntests/test_scraper.py ..                                                 
[100%]\n============================== 2 passed in 0.09s ===============================\nCleaning up project directory and file based variables\n00:00\nJob succeeded\n```\n\nAnd that's how simple it is to migrate a project over from GitHub to GitLab!\n\n## What other platforms can GitLab import from?\nThe GitLab importer allows one-click migration from several other platforms. These platforms include:\n* [Bitbucket Cloud](https://docs.gitlab.com/ee/user/project/import/bitbucket.html)\n* [Bitbucket Server (Stash)](https://docs.gitlab.com/ee/user/project/import/bitbucket_server.html)\n* [FogBugz](https://docs.gitlab.com/ee/user/project/import/fogbugz.html)\n* [Gitea](https://docs.gitlab.com/ee/user/project/import/gitea.html)\n* [Repository by URL](https://docs.gitlab.com/ee/user/project/import/repo_by_url.html)\n* [Uploading a manifest file (AOSP)](https://docs.gitlab.com/ee/user/project/import/manifest.html)\n* [Jira (issues only)](https://docs.gitlab.com/ee/user/project/import/jira.html)\n\nWe also have documentation covering how to migrate from these platforms:\n* [SVN](https://docs.gitlab.com/ee/user/project/import/#import-from-subversion)\n* [ClearCase](https://docs.gitlab.com/ee/user/project/import/clearcase.html)\n* [CVS](https://docs.gitlab.com/ee/user/project/import/cvs.html)\n* [Perforce](https://docs.gitlab.com/ee/user/project/import/perforce.html)\n* [TFVC](https://docs.gitlab.com/ee/user/project/import/tfvc.html)\n\n---\n\nThanks for reading! Now you know how easy it is to migrate from GitHub over to GitLab. 
For more information on GitLab\nand migrating from GitHub, follow the links below:\n\n* [GitHub-to-GitLab project migration documentation](https://docs.gitlab.com/ee/user/project/import/github.html)\n* [Available project importers](https://docs.gitlab.com/ee/user/project/import/#available-project-importers)\n* [GitHub-to-GitLab migration video](https://youtu.be/0Id5oMl1Kqs)\n\nAlso, read how GitLab has been named a leader in the DevOps platforms space by [Gartner](https://about.gitlab.com/blog/gitlab-leader-gartner-magic-quadrant-devops-platforms/) and the integrated software delivery platforms space by [Forrester](https://about.gitlab.com/blog/gitlab-leader-forrester-wave-integrated-software-delivery-platforms/).\n\n_Cover image by [Julia Craice](https://unsplash.com/@jcraice?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/migration?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)_\n",[108,9,835,478],{"slug":2038,"featured":6,"template":684},"github-to-gitlab-migration-made-easy","content:en-us:blog:github-to-gitlab-migration-made-easy.yml","Github To Gitlab Migration Made Easy","en-us/blog/github-to-gitlab-migration-made-easy.yml","en-us/blog/github-to-gitlab-migration-made-easy",{"_path":2044,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2045,"content":2051,"config":2056,"_id":2058,"_type":13,"title":2059,"_source":15,"_file":2060,"_stem":2061,"_extension":18},"/en-us/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes",{"title":2046,"description":2047,"ogTitle":2046,"ogDescription":2047,"noIndex":6,"ogImage":2048,"ogUrl":2049,"ogSiteName":669,"ogType":670,"canonicalUrls":2049,"schema":2050},"GitLab Duo + Amazon Q: Transform ideas into code in minutes","The new GitLab Duo with Amazon Q integration analyzes your issue descriptions and automatically generates complete working code solutions, accelerating development 
workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097127/Blog/Hero%20Images/Blog/Hero%20Images/Screenshot%202024-11-27%20at%204.55.28%E2%80%AFPM_4VVz6DgGBOvbGY8BUmd068_1750097126673.png","https://about.gitlab.com/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo + Amazon Q: Transform ideas into code in minutes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2025-04-28\",\n      }",{"title":2046,"description":2047,"authors":2052,"heroImage":2048,"date":2053,"body":2054,"category":702,"tags":2055},[699],"2025-04-28","Have you ever spent days or even weeks converting a complex issue into working code? We've all been there. You start with a solid idea and a clear set of requirements, but the path from that initial concept to deployable code can be frustratingly long. Your productivity gets bogged down in implementation details, and projects that should move quickly end up dragging on.\n\nThis is where the power of [agentic AI](https://about.gitlab.com/topics/agentic-ai/) capabilities comes in. [GitLab Duo with Amazon Q](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/), which combines the comprehensive AI-powered DevSecOps platform with the deepest set of cloud computing capabilities, is designed to dramatically accelerate your application development process, all within your familiar GitLab workflow. By streamlining your path from idea to deployment, this powerful integration can propose implementation solutions based on your issue descriptions alone – transforming what used to take days into something that happens in minutes.\n\n## How it works: From issue to working code\n\nLet's walk through how this agentic AI feature works in practice. 
Imagine you're a developer tasked with creating a mortgage calculator application. Here's how GitLab Duo with Amazon Q helps you get it done:\n\n1. **Create an issue with detailed requirements:** Start by creating a standard [GitLab issue](https://docs.gitlab.com/user/project/issues/). In the description, you'll provide a comprehensive list of requirements that your service needs to meet. This becomes the blueprint for your solution.\n\n2. **Invoke Amazon Q with a quick action:** Once your issue is created, simply add a comment with a quick action, “/q dev”, to invoke Amazon Q. This is where the magic begins. \n\n3. **Let AI generate your implementation:** GitLab Duo with Amazon Q analyzes the issue description you've provided and the context of your source code, then autonomously generates code that meets all your stated requirements. It doesn't stop there – it actually commits those changes in a merge request, ready for your review.\n\n![GitLab Duo  with Amazon Q activity pop-up screenshot](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097156/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097156018.png)\n\n4. **Review the generated application**: Navigate into the merge request to review the generated code. You can verify that all your requirements have been met and make any necessary adjustments.\n\n5. **Test the proposed application**: Finally, check that the application runs successfully. With minimal effort on your part, you now have working code that implements your original requirements.\n\n## Improve your development process\n\nGitLab Duo with Amazon Q completely transforms this process, including dramatically decreasing the time it takes to carry out complex developer tasks, through intelligent automation. 
By leveraging an agentic AI approach, you can accelerate your path from idea to deployment, freeing development teams to focus on more strategic work.\n\nWith GitLab Duo and Amazon Q, you'll develop software faster, more efficiently, and with less manual coding effort. This integration helps you:\n\n* **Save valuable development time** by automating implementation based on requirements  \n* **Maintain consistency** in code generation across your projects  \n* **Reduce the cognitive load** of translating requirements into working code  \n* **Accelerate your release cycles** by removing implementation bottlenecks  \n* **Focus your expertise** on reviewing and optimizing, rather than writing boilerplate code\n\nReady to see GitLab Duo with Amazon Q in action? Watch our demo video to discover how you can transform your development workflow today.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/jxxzNst3jpo?si=j_LQdZhUnwqoQEst\" title=\"GitLab Duo with Amazon Q demo video for dev workflow\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n> To learn more about GitLab Duo with Amazon Q visit us at an upcoming [AWS Summit in a city near you](https://about.gitlab.com/events/aws-summits/) or [reach out to your GitLab representative](https://about.gitlab.com/partners/technology-partners/aws/#form).\n\n## GitLab Duo with Amazon Q resources\n\n- [GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/)\n- [GitLab and AWS partner page](https://about.gitlab.com/partners/technology-partners/aws/)\n- [GitLab Duo with Amazon Q 
documentation](https://docs.gitlab.com/user/duo_amazon_q/)",[704,794,9,478,678,230],{"slug":2057,"featured":90,"template":684},"gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes","content:en-us:blog:gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes.yml","Gitlab Duo Amazon Q Transform Ideas Into Code In Minutes","en-us/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes.yml","en-us/blog/gitlab-duo-amazon-q-transform-ideas-into-code-in-minutes",{"_path":2063,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2064,"content":2070,"config":2075,"_id":2077,"_type":13,"title":2078,"_source":15,"_file":2079,"_stem":2080,"_extension":18},"/en-us/blog/gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant",{"title":2065,"description":2066,"ogTitle":2065,"ogDescription":2066,"noIndex":6,"ogImage":2067,"ogUrl":2068,"ogSiteName":669,"ogType":670,"canonicalUrls":2068,"schema":2069},"GitLab Duo Chat 101: Get more done on GitLab with our AI assistant","In this first article in our series learn how Chat can improve developer productivity – for example, by summarizing issues – and how to improve prompts to get better answers faster.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099126/Blog/Hero%20Images/Blog/Hero%20Images/GitLab_Duo_Blog_Hero_1800x945_r2_B%20%281%29_6a2UB7TOQk3JKxyb5yqYtc_1750099126039.png","https://about.gitlab.com/blog/gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo Chat 101: Get more done on GitLab with our AI assistant\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2024-05-29\",\n      }",{"title":2065,"description":2066,"authors":2071,"heroImage":2067,"date":2072,"body":2073,"category":702,"tags":2074},[1570],"2024-05-29","GitLab Duo Chat became [generally 
available](https://about.gitlab.com/blog/gitlab-duo-chat-now-generally-available/) in [GitLab 16.11](https://about.gitlab.com/releases/2024/04/18/gitlab-16-11-released/) and its power as a personal assistant can not be overstated. On a DevSecOps platform, more has to happen than just generating code; planning, discussions, security, compliance, and technical reviews are all critical to developing secure software faster. Issues, epics, merge requests, and other sections of GitLab are where this work happens, with knowledge often buried deep in comment threads. It can take a lot of time to get up to speed on these threads, especially when they've grown to hundreds of comments and interactions and when you've been away from them for a while. This is where GitLab Duo Chat can help.\n\nIn this first part of our GitLab Duo Chat 101 series, we'll introduce you to Chat's capabilities and then dig into how to use Chat to summarize comment threads.\n\n> Live demo! Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Register today](https://about.gitlab.com/seventeen/)!\n\n## GitLab Duo Chat's capabilities\n\nWith Chat, you can refactor [existing code](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html#refactor-code-in-the-ide), learn how a [block of code works](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html#explain-code-in-the-ide), and write [tests](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html#write-tests-in-the-ide) for your code, learn about your issues and epics, and much more. Depending on your prompts, you can make Chat do impressive things that boost developer productivity. 
In the video below, I showcased how you can use GitLab Duo Chat to interact with GitLab and learn about your issues and epics.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/RJezT5_V6dI?si=XlXGs2DHAYa8Awzs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Use cases   \n\nGitLab Duo Chat’s capabilities allow for productivity gains across multiple parts of the software development lifecycle:\n\n- Product and project managers can use Chat’s issues and epics capabilities to gain insights into discussions and plan faster.\n- Developers can create solutions faster with code suggestions and refactoring capabilities. When it comes to working with legacy code or code from other team members, less time is spent on research with the ` /explain` capability providing the necessary insights to understand the code.\n- Quality assurance and test engineers can generate tests and check for vulnerabilities\n- New employees can get a better understanding of their code base and get started solving problems.\n- Beginner programmers can understand and pick up a language or framework quickly and create solutions with Chat providing next steps and insights.\n\n> Check out \"[10 best practices for using GitLab Duo Chat](https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat/)\" for tips and tricks to craft AI prompts. \n\n## Summarizing issues\n\nWhen you encounter an issue, especially one with a lot of comments, you skim through the issue description, along with a couple of comments, but can't always get the complete picture of the conversations. GitLab Duo Chat can get you up to speed fast. In the image below, I asked Chat to summarize an issue along with a follow-up question. 
In two prompts, I got what I needed to understand what is going on in the issue without spending hours reading through the comments.\n\n![Chat summarizing an issue](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099137/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099137154.png)\n\nYou can use GitLab Duo Chat on the GitLab interface, as well as [the WebIDE, Visual Studio Code, and JetBrains interfaces](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html#use-gitlab-duo-chat-in-the-web-ide).\n\n## Prompts and context\n\nGetting the best responses from any AI tool requires carefully crafting the questions asked. Sometimes, you need to give examples of the responses you expect to prime the large language models (LLMs) toward a desired response. Here are some areas to focus on to get desired responses.\n\n### Context\n\nHere are three prompts with similar objectives but worded differently:\n\n| Prompt 1     | Prompt 2     | Prompt 3    |\n| ---------- | ---------- | ---------- |\n| ![Prompt: Can you summarize this issue's description?](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099137/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750099137154.png)       | ![Prompt: Can you provide a high-level summary of this issue?](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099137/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099137155.png)      | ![Prompt: Why is this issue popular?](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099137/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099137156.png)      |\n\nThe context, “this issue,” is common among the three prompts; this tells Chat what resource to use in looking for answers. Prompt 1 gives additional context on what to focus on: the description of the issue. 
Prompt 2 is not limited in its scope, which means the LLMs will spend more time going through the description and all the comments to provide a more detailed summary of the whole issue. (Note: As of the publication of this blog, there were more than 90 comments in that issue.) Prompt 3 got a poorer response because not much expectation was set for the type of response expected. \n\n[Low-context communication](https://handbook.gitlab.com/handbook/company/culture/all-remote/effective-communication/#understanding-low-context-communication) is critical in crafting your prompt for the best responses, as all information needed for the LLMs to provide an informed response is provided.\n\n### Simplicity\n\nThe wordiness of prompts can sometimes lead to incorrect or no responses. In the image below, you can see that rephrasing a prompt from “Customers have mentioned why this issue is important to them. Can you list the top 3 reasons they mentioned?” to “Why is this issue important to customers?” led to the expected response. When you don’t get the response you desire, simplifying or changing the words used in your prompt can improve the quality of responses.\n\n![Wordy Chat prompts](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099137/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750099137158.png)\n\n### Follow-up questions\n\nGitLab Duo Chat can have follow-up conversations – an essential capability. In the image below, I continued asking how the issue in question can be solved in GitLab's code along with a follow-up question asking for code samples.\n\n![Streamlined Chat prompt shown](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099137/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099137158.png)\n\nFollow-up questions allow the application to maintain context and provide faster responses. A recommendation is to provide parts of Chat’s previous responses in the next prompt. 
In the example above, I mentioned “Rails App,” as previously suggested. \n\n## Get started with GitLab Duo Chat \n\nGitLab Duo Chat does more than help you write better code, it helps you navigate through problems and quickly find solutions. With the right prompts and context, you can build secure software faster.\n\n> Want to try GitLab Duo Chat? [Start your free trial](https://about.gitlab.com/gitlab-duo/#free-trial) today.",[704,478,835,9],{"slug":2076,"featured":90,"template":684},"gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant","content:en-us:blog:gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant.yml","Gitlab Duo Chat 101 Get More Done On Gitlab With Our Ai Assistant","en-us/blog/gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant.yml","en-us/blog/gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant",{"_path":2082,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2083,"content":2089,"config":2094,"_id":2096,"_type":13,"title":2097,"_source":15,"_file":2098,"_stem":2099,"_extension":18},"/en-us/blog/gitlab-duo-chat-gets-agentic-ai-makeover",{"title":2084,"description":2085,"ogTitle":2084,"ogDescription":2085,"noIndex":6,"ogImage":2086,"ogUrl":2087,"ogSiteName":669,"ogType":670,"canonicalUrls":2087,"schema":2088},"GitLab Duo Chat gets agentic AI makeover  ","Our new Duo Chat experience, currently an experimental release, helps developers onboard to projects, understand assignments, implement changes, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099203/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2820%29_2bJGC5ZP3WheoqzlLT05C5_1750099203484.png","https://about.gitlab.com/blog/gitlab-duo-chat-gets-agentic-ai-makeover","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Duo Chat gets agentic AI makeover  \",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Torsten Linz\"}],\n        \"datePublished\": \"2025-05-29\"\n      }",{"title":2084,"description":2085,"authors":2090,"heroImage":2086,"date":2091,"body":2092,"category":702,"tags":2093},[1327],"2025-05-29","Generative AI chat assistants have become standard in software development, helping create and fix code just to start. But what if your chat assistant could understand the artifacts of your entire development process, not just your code? What if that chat assistant could help you work through issues and project documentation before it helps you write code, and could access CI/CD pipelines and merge requests to help you finish coding tasks properly? \n\n**Meet the next generation of GitLab Duo Chat – GitLab Duo Agentic Chat, a significant evolution in AI-native development assistance and the newest addition to our platform, now in [experimental release](https://docs.gitlab.com/policy/development_stages_support/#experiment).** GitLab Duo Agentic Chat is currently available as an experimental feature in VS Code to all users on GitLab.com that have any one of these add-ons: Duo Core, Duo Pro, or Duo Enterprise.\n\nAgentic Chat transforms chat from traditional conversational AI to a chat experience that takes action on your behalf, breaking down complex problems into discrete tasks that it can complete. 
Instead of simply responding to questions with the context you provide, Agentic Chat can:\n\n* **Autonomously determine** what information it needs to answer your questions  \n* **Execute a sequence of operations** to gather that information from multiple sources  \n* **Formulate comprehensive responses** by combining insights from across your project  \n* **Create and modify files** to help you implement solutions\n\nAnd all of this is done while keeping the human developer within the loop.\n\nAgentic Chat is built on the Duo Workflow architecture, which is [currently in private beta](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/). The architecture comprises agents and tools that take on specific tasks like finding the right context for a given question or editing files. \n\n**Use cases for GitLab Duo Agentic Chat**\n\nHere are some real-world and common use cases for Agentic Chat:\n\n* Onboard to new projects faster by having AI help you familiarize yourself with a new codebase.\n\n* Jump into assigned work immediately, even when issue descriptions are unclear, because Agentic Chat can help you connect the dots between requirements and existing implementations.\n\n* When it's time to make changes, Agentic Chat can handle the implementation work by creating and editing multiple files across your project.\n\n* At release time, Agentic Chat can help you verify that your solution actually addresses the original requirements by analyzing your merge requests against the initial issue or task.\n\n![agentic chat - example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099210/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099210429.png)\n\n\u003Ccenter>\u003Ci>Agentic Chat making code edits\u003C/i>\u003C/center>\n\n## From learning to shipping: A complete workflow demonstration in four steps\n\nTo show how Agentic Chat transforms the development experience, let's walk through a real 
scenario from our engineering teams. Imagine you're a new team member who's been assigned an issue but knows nothing about the codebase. You can follow along with this video demonstration:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/uG9-QLAJrrg?si=kaOhYylMIaWkIuG8j\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n**Step 1: Understand the project**\n\nInstead of manually exploring files and documentation, you can prompt Agentic Chat:\n\n```unset\nI am new to this project. Could you read the project structure and explain it to me?\n```\n\nAgentic Chat provides a comprehensive project overview by:  \n- Exploring the directory structure  \n- Reading README files and documentation  \n- Identifying key components and applications\n\n**Step 2: Understand your assigned task**\n\nNext, you need to understand your specific assignment, so you can enter this prompt:\n\n```unset\nI have been assigned Issue 1119. Could you help me understand this task, specifically where do I need to apply the refactoring?\n```\n\nAgentic Chat explains the task and proposes a refactoring approach by:\n- Retrieving and analyzing the issue details from the remote GitLab server  \n- Examining relevant project files  \n- Identifying the specific locations requiring changes\n\n**Step 3: Implement the solution**\n\nRather than doing the work manually, you can request:\n\n```unset\nCould you make the edits for me? Please start with steps one, two, three.\n```\n\nAgentic Chat then:  \n- Creates new directories and files as needed \n- Extracts and refactors code across multiple locations  \n- Ensures consistency across all modified files  \n- Provides a summary of all changes made\n\n**Step 4: Verify completion**\n\nFinally, after creating your merge request, you can verify your work:\n\n```unset\nDoes my MR fully address Issue 1119? 
\n```\n\nAgentic Chat confirms whether all requirements have been met by analyzing both your merge request and the original issue.\n\n## Try it today and share your feedback\n\nGitLab Duo Agentic Chat is currently available as an experimental feature in VS Code to all users on GitLab.com that have any one of these add-ons: Duo Core, Duo Pro, or Duo Enterprise. See our [setup documentation](https://docs.gitlab.com/user/gitlab_duo_chat/agentic_chat/) for prerequisites and configuration steps.\n\nAs an experimental feature, Agentic Chat has some known limitations we're actively addressing, including slower response times due to multiple API calls, keyword-based rather than semantic search, and limited support for new local folders or non-GitLab projects. **Your feedback is crucial in helping us prioritize improvements and bring Agentic Chat to general availability so please share your experience in [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/542198).**\n\n## What's next?\n\nWe are fully focused on improving Agentic Chat, including bringing it to general availability. In the meantime, we are aiming to improve response times and are adding capabilities that GitLab Duo Chat currently has, such as using self-hosted models or supporting JetBrains and Visual Studio in addition to VS Code. Once we have switched Duo Chat to this new architecture we plan to also bring Agentic Chat to the chat in the GitLab web application. We also plan to add a lot more functionality, such as editing GitLab artifacts, supporting context from custom Model Context Protocol, or MCP, servers, and offering commands to run in the terminal.\n\n> Ready to experience autonomous development assistance but not yet a GitLab customer? Try Agentic Chat today as part of [a free, 60-day trial of GitLab Ultimate with Duo Enterprise](https://about.gitlab.com/free-trial/) and help shape the future of AI-powered development. 
Follow these [setup steps for VS Code](https://docs.gitlab.com/user/gitlab_duo_chat/agentic_chat/#use-agentic-chat-in-vs-code).\n>\n> And make sure to join the GitLab 18 virtual launch event to learn about our agentic AI plans and more. [Register today!](https://about.gitlab.com/eighteen/)\n\n***Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab.***\n\n## Learn more\n\n- [GitLab Duo Workflow: Enterprise visibility and control for agentic AI](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n- [Agentic AI guides and resources](https://about.gitlab.com/blog/agentic-ai-guides-and-resources/)\n",[704,875,680,478,678,9],{"slug":2095,"featured":90,"template":684},"gitlab-duo-chat-gets-agentic-ai-makeover","content:en-us:blog:gitlab-duo-chat-gets-agentic-ai-makeover.yml","Gitlab Duo Chat Gets Agentic Ai Makeover","en-us/blog/gitlab-duo-chat-gets-agentic-ai-makeover.yml","en-us/blog/gitlab-duo-chat-gets-agentic-ai-makeover",{"_path":2101,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2102,"content":2107,"config":2113,"_id":2115,"_type":13,"title":2116,"_source":15,"_file":2117,"_stem":2118,"_extension":18},"/en-us/blog/gitlab-gdk-remote-development",{"title":2103,"description":2104,"ogTitle":2103,"ogDescription":2104,"noIndex":6,"ogImage":1842,"ogUrl":2105,"ogSiteName":669,"ogType":670,"canonicalUrls":2105,"schema":2106},"Contributor how-to: Remote Development workspaces and GitLab Developer Kit","This 
tutorial helps you get GDK working inside Remote Development workspaces to begin contributing to GitLab.","https://about.gitlab.com/blog/gitlab-gdk-remote-development","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Contributor how-to: Remote Development workspaces and GitLab Developer Kit\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Raimund Hook\"}],\n        \"datePublished\": \"2023-07-31\",\n      }",{"title":2103,"description":2104,"authors":2108,"heroImage":1842,"date":2110,"body":2111,"category":769,"tags":2112},[2109],"Raimund Hook","2023-07-31","\nOpen source is fundamental to GitLab. We believe that [everyone can contribute](https://about.gitlab.com/company/mission/#mission).\nTypically, we recommend that anyone contributing anything more than basic changes to GitLab run the [GitLab Development Kit](https://gitlab.com/gitlab-org/gitlab-development-kit) (GDK). Because contributors can't always meet the GDK's resource demands, we're working to enable GDK inside the cloud-based GitLab Remote Development workspaces.\n\nIn this article, I'll explain how I used a Remote Development workspace running in my Kubernetes cluster to make working with the GDK faster and easier.\n\n## A preliminary note\nFirst, keep in mind that as of this writing the [Remote Development workspaces](https://about.gitlab.com/direction/create/ide/remote_development/) feature is still in Beta. My example here is therefore very much a proof of concept — and as such, it has some rough edges.\n\nBefore getting started, I followed the \"[Set up a workspace](https://docs.gitlab.com/ee/user/workspace/#set-up-a-workspace)\" prerequisites guide in the GitLab docs. 
For a more detailed set of instructions, see Senior Developer Evangelist Michael Friedrich's tutorial on [how to set up infrastructure for cloud development environments](https://about.gitlab.com/blog/set-up-infrastructure-for-cloud-development-environments/).\n\n## Getting started with workspaces\nTo start using workspaces, you will need a project configured with a `.devfile.yaml`. GitLab team members have curated [a number of example projects](https://gitlab.com/gitlab-org/remote-development/examples) you can review.\n\nInitially, I tried to do this with a fork of the GitLab project itself, but I ran into [some issues](https://gitlab.com/gitlab-org/gitlab/-/issues/414011) when the workspace begins cloning the repository.\n\nTo figure out what was causing my problems, I looked more closely at what happens behind the scenes when a workspace is created.\n\n## Behind the scenes with Remote Development workspaces\nWhen you create a new workspace, the following happens:\n1. The GitLab agent for Kubernetes creates a new namespace in your cluster. The agent dynamically generates a name for and assumes management of the namespace.\n1. Inside the namespace, a new deployment is created, specifying the container you chose in your `.devfile.yaml` as the image to use.\n1. This deployment is configured with some [init containers](https://kubernetes.io/docs/concepts/workloads/pods/init-containers/) that perform some actions:\n    1. Cloning the repository into `/project/${project_path}`.\n    1. Injecting the VS Code server binary into your container.\n1. Once those init containers are complete, your container starts and the workspace becomes available.\n\n## The clone problem\nWhen cloning a repository, `git` tends to do much of the work in memory. This can be a challenge on larger projects/repositories, as it can require significant amounts of RAM. When cloning the GitLab project, for instance, git consumes approximately 1.6GB of RAM. 
This number is only going to increase with time. Sure, strategies like [shallow clones](https://git-scm.com/docs/git-clone#Documentation/git-clone.txt---depthltdepthgt) can help reduce this, but these are perhaps less suited to active use by a developer as they can increase the amount of time required to perform ongoing git operations.\n\nIn fact, creating a workspace using our `.devfile.yaml` in a fork of the GitLab project failed for this reason. The init container performing the clone is currently hard-limited to 128MiB of RAM, after which the memory management processes on the node kill the container.\n\nTo overcome this limitation, move the `.devfile.yaml` into the root of a fork of the GDK repository. This project clones more quickly (and does so using fewer resources), so it's a perfect starting point for running GDK itself. Another (bonus) advantage: You're then primed to contribute to the GDK itself, in addition to any of the other GitLab projects that the GDK clones.\n\n## Components of a GDK installation\nGDK clones the following projects from the GitLab 'family':\n* [GitLab](https://gitlab.com/gitlab-org/gitlab)\n* [Gitaly](https://gitlab.com/gitlab-org/gitaly)\n* [GitLab shell](https://gitlab.com/gitlab-org/gitlab-shell)\n\nThis allows you to work on any items in those directories as a part of your \"live\" installation.\n\n## Getting GDK installed and running in a workspace\nOnce I had a workspace up and running, my next step was to get GDK installed and running *in* that workspace. The GDK's documentation presents [several routes for doing this](https://gitlab.com/gitlab-org/gitlab-development-kit/#installation).\n\nA complete installation can take some time, as GDK needs to bootstrap itself and install a number of prerequisites. This is less than ideal in the context of a Remote Development workspace, as one of remote development's primary benefits is enabling access to a development environment rapidly. 
Requiring a user to bootstrap an environment that takes 50 minutes (or longer) doesn't help achieve this goal.\n\nTo combat this, I built a container image that effectively bootstraps and installs GDK, pre-building the GDK prerequisites and pre-seeding the database. This image and its associated tooling are currently [in review](https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/3231).\n\n## Pre-building\nPre-building the container and running the bootstrap process on a scheduled basis allows us to perform that process once, without requiring the user to wait for something that can essentially be \"pre-canned\" for their use.\n\nOnce the workspace is running, we still need to \"reinstall\" the GDK environment with the latest version of our GitLab repository, but this step doesn't take quite as long as a complete bootstrap.\n\n## Generating a gdk.yml file\nTo work properly, GDK also requires a [`gdk.yml` file](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/doc/configuration.md#gdkyml). This file tells GDK how to configure GitLab to return the correct URLs and other items. To get GDK running in Remote Development, Rails needs to return URLs in a certain scheme (otherwise your browser won't know where to connect). To help this along, we [inject an environment variable](https://gitlab.com/gitlab-org/gitlab/-/issues/415328) into the workspace container. This variable helps us determine the URL in use (which is dynamically generated for each workspace).\n\nWe [now have a script](https://gitlab.com/gitlab-org/gitlab-development-kit/-/blob/main/support/gitlab-remote-dev/remote-dev-gdk-bootstrap.sh?ref_type=heads) in GDK that will generate your `gdk.yml` file based on your workspace.\n\n## Creating our devfile\nThe contents of my `.devfile.yaml` looks like this:\n\n```yaml\nschemaVersion: 2.2.0\ncomponents:\n  - name: tooling-container\n    attributes:\n      gl/inject-editor: true\n    container:\n      # NB! 
This image is only in use until https://gitlab.com/gitlab-org/gitlab-development-kit/-/merge_requests/3231 is merged!\n      image: registry.gitlab.com/gitlab-org/gitlab-development-kit/gitlab-remote-workspace:stingrayza-gdk-remote-dev-add-container\n      memoryRequest: 10240M\n      memoryLimit: 16384M\n      cpuRequest: 2000m\n      cpuLimit: 6000m\n      endpoints:\n        - name: ssh-2222\n          targetPort: 2222\n        - name: gdk-3000\n          targetPort: 3000\n        - name: docs-3005\n          targetPort: 3005\n        - name: pages-3010\n          targetPort: 3010\n        - name: webpack-3808\n          targetPort: 3808\n        - name: devops-5000\n          targetPort: 5000\n        - name: jaeger-5778\n          targetPort: 5778\n        - name: objects-9000\n          targetPort: 9000\n        - name: shell-9122\n          targetPort: 9122\n```\n\nThis definition comes straight out of the [Workspace docs](https://docs.gitlab.com/ee/user/workspace/#devfile), and opens a number of ports that GDK uses. (For now, I've only tested the port `gdk-3000`, which is the link to our instance of GDK.)\n\n## From Workspace to GDK\nOnce we have a project with a `.devfile.yaml`, our final step is to [create a new workspace](https://docs.gitlab.com/ee/user/workspace/#create-a-workspace).\n\nAs a part of this step, your cluster will pull the image as defined in the `.devfile.yaml` and start it up. For the GDK image we pre-built, this can take a few minutes.\n\nOnce the workspace is ready, the last step is to follow the link from the UI to connect to the workspace. This will open up a familiar VS Code IDE, with our GDK fork checked out.\n\nBut wait, where's GDK?\n\nWell, the pre-build did most of the work for us, but we still need to take a few final steps before we can claim that GDK is up and running. 
These have been built into a script we can run from the integrated terminal within the workspace.\n\nTo open a terminal, we can click on the VS Code Hamburger menu (top left), navigate to `Terminal` and select `New Terminal`.\n\nNow we execute the following script, which completes the setup and copies a couple of files over from the pre-built folders:\n\n```shell\nsupport/gitlab-remote-dev/remote-dev-gdk-bootstrap.sh\n```\n\nThis can take up to 15 minutes, but when it's done it should output the magic words — something like the following (note the 3000 in the URL; we specified that in the `.devfile.yaml` earlier):\n\n```shell\nSuccess! You can access your GDK here: https://3000-workspace-62637-2083197-apglwp.workspace.my-workspace.example.net/\n```\n\n## Connect to your GDK\nFollow the link as displayed using Cmd-click or Ctrl-click. After a couple of moments (GDK boot time), you should reach a familiar GitLab login screen.\n\nCongratulations! GDK is now running inside your Remote Development workspace.\n\nTo log in, type `gdk` in your terminal and you'll see the default admin credentials displayed near the bottom:\n\n```shell\n# Development admin account: xxxx / xxxx\n\nFor more information about GitLab development see\nhttps://docs.gitlab.com/ee/development/index.html.\n```\n\nLog into your GDK with the default credentials, change the admin user password, and you're all set!\n\n## Demo of workspace launch\nHere's a demo of launching a workspace in my personal cluster:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/iXq1NnTjnX0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## How to contribute to GitLab\nIn this article I explained how to get GDK up and running in Remote Development workspaces. 
This is not without its challenges, but the end result should mean that contributing to GitLab (especially in resource-constrained environments) is quicker and easier.\n\nDo you want to contribute to GitLab? Come and join in the conversation in the `#contribute` channel on GitLab's [Discord](https://discord.gg/gitlab), or just pop in and say \"hello.\"\n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._",[9,773,940,1865,1290,727],{"slug":2114,"featured":6,"template":684},"gitlab-gdk-remote-development","content:en-us:blog:gitlab-gdk-remote-development.yml","Gitlab Gdk Remote Development","en-us/blog/gitlab-gdk-remote-development.yml","en-us/blog/gitlab-gdk-remote-development",{"_path":2120,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2121,"content":2127,"config":2132,"_id":2134,"_type":13,"title":2135,"_source":15,"_file":2136,"_stem":2137,"_extension":18},"/en-us/blog/gitlab-importers",{"title":2122,"description":2123,"ogTitle":2122,"ogDescription":2123,"noIndex":6,"ogImage":2124,"ogUrl":2125,"ogSiteName":669,"ogType":670,"canonicalUrls":2125,"schema":2126},"How to migrate data to GitLab using main importers","Learn about the capabilities of main importers, which are used to import data from external tools and from other GitLab instances.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679170/Blog/Hero%20Images/migration-data.jpg","https://about.gitlab.com/blog/gitlab-importers","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n       
 \"headline\": \"How to migrate data to GitLab using main importers\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-02-13\",\n      }",{"title":2122,"description":2123,"authors":2128,"heroImage":2124,"date":2129,"body":2130,"category":769,"tags":2131},[1080],"2023-02-13","\n\nA typical organization looking to adopt GitLab already uses many other tools. Artifacts such as code, build pipelines, issues, and epics will already exist and be changed daily. A seamless transition of work in progress is, therefore, critically important when importing data. GitLab importers aim to make this process easy and reliable, ensuring data is imported quickly and with maximum care.\n\nAt GitLab, a dedicated development team, named group:import, creates a seamless experience when importing data into GitLab or from one GitLab instance to another. This team continuously develops and improves the importing experience and keeps our importers up to date with new features and capabilities.\n\n## Migrate groups by direct transfer\n\nUsing group migration, you can import groups from one GitLab instance to another instance. The most common use case is to import groups from self-managed GitLab instances to GitLab.com (GitLab SaaS). With the group migration, you can migrate many groups in a single click.\n\n### Which items are imported?\n\nThe group migration imports the entire group structure, including all the sub groups and projects in them. Currently, to import projects as part of the group migration on self-managed GitLab, the administrator needs to enable the feature flag named `bulk_import_projects`. On GitLab.com, our SaaS offering, migration of both groups and projects is available. More information can be found in our [documentation](https://docs.gitlab.com/ee/user/group/import/#migrate-groups-by-direct-transfer-recommended).\n\nThe team continuously adds objects to the migration, but not all group items are imported. 
The docs cover the [items that are imported](https://docs.gitlab.com/ee/user/group/import/#migrated-group-items). \n\n### How can groups be imported?\n\nIt is very simple to import groups between two instances. Here are the steps: \n\n- Create a new group or subgroup in the designated instance \n- Select \"Import group\" \n- Connect to the remote instance with your [personal access token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html)\n- Select the source groups you want to import \n- Click \"Import xyz groups\"\n\n![bulk_imports_v14_1](https://about.gitlab.com/images/blogimages/2022-11-15-gitlab-importers/bulk_imports_v14_1.png)\n\n## File-based import/export (the previously used method)\n\nGroup migration is the preferred method to migrate content from one GitLab instance to another, as it automates the process and you can import many groups in a single click. However, for some use cases, such as air-gapped networks when you don't have network connection between the two instances, or when you have environments with limited connectivity, the group migration won't help because it requires connection between the two instances. File-based export/import for [groups](https://docs.gitlab.com/ee/user/group/settings/import_export.html) and [projects](https://docs.gitlab.com/ee/user/project/settings/import_export.html) can be used when there is no connectivity between the instances. \n\nFile-based export/import is a manual process and requires a few steps in order to migrate each group or project. The file-based import/export is available from the UI and in the API. The team plans to disable it by a feature flag soon to encourage users to use group migration. However, you will be able to enable the feature flag in your instance if your use case requires the file-based import/export. 
More info can be found in this [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/363406).\n\n## Import projects from external tools  \n\nGitLab has built-in support for import projects from [a variety of tools](https://docs.gitlab.com/ee/user/project/import/).\n\nThe GitHub importer is the most common importer and, therefore, the team invests a lot of effort to add more migrated components. GitLab and GitHub have different structure and architecture, so sometimes it is tricky to import objects from GitHub when the migrated components are implemented differently in GitLab. So the team needs to find creative ways to map some of the features or configurations. This is an example [epic](https://gitlab.com/groups/gitlab-org/-/epics/8585 ) with a proposal to map rules for protected branches when migrating GitHub protected rules. \n\n\n### What can be imported from GitHub to GitLab?\n\n- Repository description\n- Git repository data\n- Branch protection rules\n- Issues\n- Pull requests\n- Wiki pages\n- Milestones\n- Labels\n- Pull request review comments\n- Regular issue and pull request comments\n- Attachments for\n    - Release notes\n    - Comments and notes\n    - Issue description\n    - Merge Request description\n- Git Large File Storage (LFS) objects\n- Pull request reviews \n- Pull request “merged by” information \n- Pull request comments replies in discussions \n- Diff notes suggestions \n- Release note descriptions\n\nHere is a [full list of imported data](https://docs.gitlab.com/ee/user/project/import/github.html#imported-data).\n\nRead what's next in our [GitHub Epic](https://gitlab.com/groups/gitlab-org/-/epics/2984). \n\n### Repository by URL\n\nAn alternative way to import external projects is the Repository by URL option. 
You can import any Git repository through HTTP from the *Import Project* page, by choosing \"Repository by URL\".\n\nTo learn more about the Importer direction, roadmap, etc., refer to [Category Direction - Importers](/direction/manage/import_and_integrate/importers/).\n\n_Cover image by [Conny Schneider](https://unsplash.com/@choys_?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyTex) on [Unsplash](https://unsplash.com/s/photos/data-migration?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)_\n",[9,773,728],{"slug":2133,"featured":6,"template":684},"gitlab-importers","content:en-us:blog:gitlab-importers.yml","Gitlab Importers","en-us/blog/gitlab-importers.yml","en-us/blog/gitlab-importers",{"_path":2139,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2140,"content":2146,"config":2153,"_id":2155,"_type":13,"title":2156,"_source":15,"_file":2157,"_stem":2158,"_extension":18},"/en-us/blog/gitlab-instance-security-best-practices",{"title":2141,"description":2142,"ogTitle":2141,"ogDescription":2142,"noIndex":6,"ogImage":2143,"ogUrl":2144,"ogSiteName":669,"ogType":670,"canonicalUrls":2144,"schema":2145},"GitLab instance: security best practices","Default settings on products can be massively helpful. 
However, when it comes to hardening your GitLab instance, we’ve got some helpful configuration recommendations from our security team.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667057/Blog/Hero%20Images/configs_unsplash.jpg","https://about.gitlab.com/blog/gitlab-instance-security-best-practices","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab instance: security best practices\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Mark Loveless\"}],\n        \"datePublished\": \"2020-05-20\",\n      }",{"title":2141,"description":2142,"authors":2147,"heroImage":2143,"date":2149,"body":2150,"category":814,"tags":2151},[2148],"Mark Loveless","2020-05-20","GitLab is a feature-rich and powerful collaboration tool that is easy to use, and our self-managed installation is intended to be ready-to-go right out of the box. Exposing *any* service to the internet can create its own challenges from a security perspective, and as a result an administrator might have a bit of head-scratching over how to set things up safely.\n\nFortunately, we have a large number of security features and options that can be used to help lock things down. In this blog post, we’ve highlighted a few important features that will certainly help an administrator harden that new GitLab instance - particularly one facing the internet.\n\n## Access basics\n\nDuring the initial GitLab installation, you will be asked to set up a root password. Obviously, we highly recommend a long password, unique to your GitLab instance that is not easily guessable with a mixture of uppercase and lowercase along with numbers and special characters. For a working example, see how we advise GitLab team members to create, store and manage [passwords](/handbook/security/#accounts-and-passwords).\n\nTo help simplify your installation, consider using environment variables. 
The root password can also be set this way. For example:\n\n`GITLAB_ROOT_PASSWORD=hunter2 GITLAB_HOST=https://hunter2.instance apt install gitlab-ee`\n\nThis has the added advantage of kicking off the entire [letsencrypt](https://letsencrypt.org/) process to ensure up-to-date certificates are used for your instance.\n\nYou will also want to ensure that users of your instance are also using strong, unique passwords, and you will want to ensure that the methods they use to access your instance are solid. Again, refer to our documentation on passwords for some ideas to share.\n\nThere are some choices you can make to limit access to data and restrict access to authorized users. In **Admin Area > Settings > General** you will want to expand the \"visibility and access controls\" section and make a few changes.\n\nTo help secure SSH access, RSA SSH keys should be allowed, as well as ED25519. Without going *too* deep, the open source crowd seems to prefer ED25519 as everything about it is open source (well-documented, trustworthy elliptical curve parameters), whereas other algorithms do not specify or go into details as to why they chose certain values. DSA also has a theoretical attack that could be used against it, although RSA could in theory fall to the same attack but is more resistant. Ah, but I digress! The main reason to support both RSA and ED25519 is that older systems that will connect may not be set up for ED25519, but will still support RSA, so at least both are recommended. With respect to RSA, encourage your users to use 2048 bits or higher when configuring keys.\n\nWe highly recommend using passwordless SSH authentication over password authentication. 
The communications are more secure (passwordless SSH authentication uses public/private key cryptography), it allows for an easier workflow, and it is one less password to worry about.\n\nFor more on SSH keys, see our documentation on [ssh keys restrictions](https://docs.gitlab.com/ee/security/ssh_keys_restrictions.html), as well as the additional [visibility and access control](https://docs.gitlab.com/ee/administration/settings/visibility_and_access_controls.html) settings that can be configured.\n\n## Restricting how and who\n\nThere are a few settings we recommend tweaking to help define how users access our instance and who we even allow to have access. You’ll want to check out three areas in particular under the **Admin Area > Settings > General** settings.\n\n**Sign up restrictions:**\n* Ensure open sign-up is disabled on your instance. Open registration is disabled by default on self-managed instances with GitLab 13.6 and above installed. If new sign-up is enabled and your instance is open to the internet, anyone can sign up and access data. Administrators who would like to further restrict access on their instance can [follow our documentation on how to configure user access](https://docs.gitlab.com/ee/administration/settings/sign_up_restrictions.html#disable-new-sign-ups).\n* Make sure that Send confirmation email on sign-up\" is checked. This adds a level of assurance that the user is in fact a real user.\n* If you want to restrict access to a sub-group such as the users in your organization, consider configuring a whitelist for your organization’s domain, (e.g., \"example.com\") which will allow them to sign up.\n* Minimum password length: 12. For users that are allowed access, make sure they will be using longer passwords. 
See our [password length limits documentation](https://docs.gitlab.com/ee/security/password_length_limits.html) for details.\n* For more detailed information, see our [documentation around sign up restrictions](https://docs.gitlab.com/ee/administration/settings/sign_up_restrictions.html)\n\n**Sign in restrictions:**\n* Make sure that Require 2FA is enabled. Multifactor authentication is the more secure method of protecting authentication to a user's account, and is strongly encouraged.\n* Disable \"password authentication enabled for Git over HTTP(S)\" if for some reason you can’t require MFA. This will require users to use a personal access token, further securing the user accounts.\n* For more detailed information, check our [documentation around sign in restrictions](https://docs.gitlab.com/ee/administration/settings/sign_in_restrictions.html).\n\n**Visibility and privacy:**\nEnsure project visibility is set to [\"Private\"](https://docs.gitlab.com/ee/user/public_access.html) on [existing projects](https://docs.gitlab.com/ee/user/public_access.html) and [by default for *new* projects](https://docs.gitlab.com/ee/administration/settings/visibility_and_access_controls.html#default-project-visibility). Private projects can only be cloned, downloaded, or viewed by project members, newly registered users will not be able to access these projects.\n\n## Improving performance and network tweaks\n\nThere are a few settings that will allow you to help protect your system from various network usage spikes, making your system a lot more stable and accessible for users.\n\n#### User and IP rate limits\nGoing to **Admin Area > Network > User and IP rate limits** allows you to make a few adjustments. 
Specifically you will want all three items checked:\n\n* \"Enable unauthenticated request rate limit\"\n* \"Enable authenticated API request rate limit\"\n* \"Enable authenticated web request rate limit\"\n\nThe default values associated with those items should be fine under most conditions. For more information, see our [documentation around user and IP rate limits](https://docs.gitlab.com/ee/administration/settings/user_and_ip_rate_limits.html).\n\n#### Webhooks\nWebhooks are a useful feature with a lot of power. Unless there is a legitimate need to allow webhooks to communicate with internal services, they should be restricted to services that are publicly reachable, which you can verify in **Admin Area > Network > Outbound Requests**. While the \"allow requests to the local network from web hooks and services\" is disabled by default, you should also uncheck \"allow requests to the local network from system hooks\" as well. For more detail, including some of the dangers inherent in webhooks, see our [webhooks documentation](https://docs.gitlab.com/ee/security/webhooks.html).\n\n#### Protected paths\nIn **Admin Area > Network > Protected Paths** ensure that \"Enable protected paths rate limit\" has been checked. Default values should be more than sufficient. For details, check out our [protected paths documentation](https://docs.gitlab.com/ee/administration/settings/protected_paths.html).\n\n## Customize your configuration, harden your instance\n\nWe understand with security there is always a balance between protection and agility. In the cases of customers with internet-facing GitLab instances, there are often choices driven by a combination of different business drivers and needs. 
However, with the help of a few configuration tweaks you can harden your instance and better protect your organization, while still remaining open to the internet.\n\nAdditional settings, including those with security implications, can be found in the [Admin Area](https://docs.gitlab.com/ee/administration/settings/). You'll want to explore those to really fine-tune your setup and make it your own. For some of you, these will have their own security implications that may be unique to your organization. Have fun exploring and securing your instance!\n\nCover image by [Alexey Ruban](https://unsplash.com/@intelligenciya) on [Unsplash](https://unsplash.com/)\n{: .note}\n",[814,2152,9],"security research",{"slug":2154,"featured":6,"template":684},"gitlab-instance-security-best-practices","content:en-us:blog:gitlab-instance-security-best-practices.yml","Gitlab Instance Security Best Practices","en-us/blog/gitlab-instance-security-best-practices.yml","en-us/blog/gitlab-instance-security-best-practices",{"_path":2160,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2161,"content":2167,"config":2173,"_id":2175,"_type":13,"title":2176,"_source":15,"_file":2177,"_stem":2178,"_extension":18},"/en-us/blog/gitlab-mitre-attack-navigator",{"title":2162,"description":2163,"ogTitle":2162,"ogDescription":2163,"noIndex":6,"ogImage":2164,"ogUrl":2165,"ogSiteName":669,"ogType":670,"canonicalUrls":2165,"schema":2166},"Use GitLab and MITRE ATT&CK Navigator to visualize adversary techniques","This tutorial helps build and deploy a customized version of MITRE's ATT&CK Navigator using GitLab CI/CD and GitLab Pages.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665667/Blog/Hero%20Images/built-in-security.jpg","https://about.gitlab.com/blog/gitlab-mitre-attack-navigator","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Use GitLab and MITRE ATT&CK Navigator to visualize adversary 
techniques\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chris Moberly\"}],\n        \"datePublished\": \"2023-08-09\",\n      }",{"title":2162,"description":2163,"authors":2168,"heroImage":2164,"date":2170,"body":2171,"category":814,"tags":2172},[2169],"Chris Moberly","2023-08-09","\nIf you use [MITRE ATT&CK](https://attack.mitre.org/) for classifying cybersecurity incidents, you may want to visualize your coverage across a matrix. This blog will show you how to do this automatically with GitLab by deploying the [ATT&CK Navigator](https://github.com/mitre-attack/attack-navigator) web application pre-populated with your own annotated matrices.\n\nWe make this easy by providing a fully working [example project](https://gitlab.com/gitlab-com/gl-security/threatmanagement/redteam/redteam-public/gitlab-hosted-attack-navigator) for you to fork and customize. When you're finished, you'll have an interactive visualization tool that displays your coverage of techniques across the ATT&CK framework.\n\n![Image showing ATT&CK Navigator deployed from example project](https://about.gitlab.com/images/blogimages/2023-08-15-gitlab-mitre-attack-navigator/navigator-portal.png)\nATT&CK Navigator deployed from our example project\n{: .note.text-center}\n\n## About MITRE ATT&CK framework\nMITRE ATT&CK is a framework to classify and describe cybersecurity attacks based on real-world observations. It provides a common language that can be used by different groups inside a security organization to collaborate on security initiatives.\n\nFor example, when a company's Red Team emulates an attack based on the techniques of a relevant adversary, they deliver a report that includes a list of the specific technique IDs involved in the exercise. 
The team in charge of detecting and responding to these attacks can use those IDs to research and implement improved defensive capabilities.\n\nBoth of these groups may want to track their coverage of offensive and defensive capabilities across one of [MITRE's ATT&CK matrices](https://attack.mitre.org/matrices/enterprise/). These matrices are charts that visualize attack tactics and techniques relevant to specific industries and technologies. For example, a company like GitLab may be interested in understanding which techniques in the [Cloud matrix](https://attack.mitre.org/matrices/enterprise/cloud/) we have emulated to test our detection and response capabilities.\n\nMITRE provides a free interactive web application, ATT&CK Navigator, to visualize, annotate, and explore these matrices.\n\nAt GitLab, [our Red Team](https://about.gitlab.com/handbook/security/threat-management/red-team/) produces a new Navigator matrix at the completion of each operation. The matrix highlights which attack techniques we've conducted. We find it useful to view all of these matrices in a single location, with the addition of a combined matrix showing all of the techniques we have conducted across all historical operations. This helps us understand trends and identify potential gaps to cover in future operations.\n\n## Fork our example project to get started\nYou can fork our example project to get going on your own. First, you will need an account on [GitLab.com](https://gitlab.com), or on a self-hosted instance [with GitLab Pages enabled](https://docs.gitlab.com/ee/administration/pages/).\n\nNext, browse to [our example project](https://gitlab.com/gitlab-com/gl-security/threatmanagement/redteam/redteam-public/gitlab-hosted-attack-navigator) and click on the \"Forks\" button to create a new fork. Fill in the form shown below by choosing a name, location, and description for your copy of the project. 
You may wish to change \"Visibility level\" to \"Private,\" which will require authentication to view your deployed application.\n\n![Example project showing where to click on the Forks button](https://about.gitlab.com/images/blogimages/2023-08-15-gitlab-mitre-attack-navigator/fork-project.png)\n\nAfter that, take a look inside the `layers/` folder of your new project. Each file in this folder will be used to pre-populate a new tab in the deployed web application where the listed attack techniques are annotated in green. We provide two example files, as well as an empty template file inside `templates/template.yml`.\n\nTemplates should list specific MITRE ATT&CK \"technique\" IDs as bullets below their corresponding MITRE ATT&CK tactic. You only need to include the tactics for which you will add techniques. Here is a short example of techniques across four tactics:\n\n```yaml\nname: Operation 1\ndescription: Example of a Red Team operation - not real data\ntechniques:\n  initial-access:\n    - T1190\n  privilege-escalation:\n    - T1611\n    - T1055\n  lateral-movement:\n    - T1210\n    - T1021\n  exfiltration:\n    - T1041\n```\n\nYou can edit the example files or delete them and add new files of your own.\n\nThe next step is to build and deploy the web application. If you made any changes to the `layers/` folder inside your default branch, this build process should have started automatically. If you just want to deploy the application using the provided sample data, browse to \"Build\" -\\> \"Pipelines\" using the project's sidebar and click the \"Run pipeline\" button.\n\n![Image showing how to build and deploy app with run pipeline button](https://about.gitlab.com/images/blogimages/2023-08-15-gitlab-mitre-attack-navigator/run-pipeline.png)\n\n\nYou can check to see if a pipeline has completed successfully by browsing to \"Build\" -\\> \"Jobs\" in the sidebar. 
There should be at least two jobs with a status of \"passed\" - one named `build_navigator` and one named `pages`.\n\nOnce both jobs are complete, browse to \"Deploy\" -\\> \"Pages\" in the project's sidebar. Check the \"Use unique domain\" box and click the \"Save changes\" button.\n\n![Image showing complete jobs with save changes button](https://about.gitlab.com/images/blogimages/2023-08-15-gitlab-mitre-attack-navigator/pages-settings.png)\n\n\nThat's it! Your application should now be accessible using the URL provided under \"Access pages\" on the same page.\n\n## How the example project works\nThere are three main components inside the example project:\n- The `gitlab-ci.yml` file. This defines what jobs run, and when they run. The file contains rules to automatically run the jobs whenever you push changes to the `layers/` folder inside the project's default branch, or whenever you manually run a pipeline as described above.\n- The `create-layers.py` file. This Python script takes the custom YAML files you provide, and then generates individual and combined JSON files in the format that ATT&CK Navigator expects. You can customize this file to change the annotation color (`HIGHLIGHT_COLOR`) as well as some of the application defaults (`JSON_BLOB`). In our internal version, for example, we remove some of the platform filters that don't apply to us.\n- The `build-navigator.sh` file. The shell script is responsible for downloading the ATT&CK Navigator application, injecting the custom layers we create, and building the application. The output is a static website that we then host using GitLab Pages.\n\n## More features of the ATT&CK Navigator app\nOnce deployed, the application is interactive. 
This blog focuses on using a colored annotation to visualize coverage, but the [application is capable of much more](https://github.com/mitre-attack/attack-navigator/blob/master/USAGE.md).\n\nHere are some features we use often:\n- In the \"layer controls\" bar up top, click on the button that looks like an asterisk surrounded by up and down arrows. The name is \"expand annotated sub-techniques.\" This will ensure that all techniques in your YAML file are easily viewable, as subtechniques may be hidden away when the matrix first loads.\n- If you want to hide all unannotated techniques, click anywhere in the matrix and select \"select unannotated.\" Then, under \"technique controls,\" click on the \"toggle state\" button. This will give you a much cleaner matrix, showing only the specific techniques you've provided in your custom YAML files.\n\n![ATT&CK Navigator with unannotated techniques hidden](https://about.gitlab.com/images/blogimages/2023-08-15-gitlab-mitre-attack-navigator/navigator-collapsed.png)\nATT&CK Navigator with unannotated techniques hidden\n{: .note.text-center}\n\nATT&CK is not a bingo card, and for most organizations the goal shouldn't be to have green squares across the board. Instead, you should use this to understand your own coverage and how it relates to the threats most relevant to you.\n\n## Share your feedback\nMITRE ATT&CK Navigator is a great tool for visualizing coverage of attack techniques across a matrix. Using our example project, you can easily deploy an automated solution that builds this tool and pre-populates it with your own data.\n\nOn our Red Team, we love to find creative use cases for GitLab, and this is one we use ourselves. If you find this useful, or if you have any ideas to improve it, we would love to hear from you! 
Feel free to open an issue or a merge request inside our [example project](https://gitlab.com/gitlab-com/gl-security/threatmanagement/redteam/redteam-public/gitlab-hosted-attack-navigator).\n",[814,727,9,108],{"slug":2174,"featured":6,"template":684},"gitlab-mitre-attack-navigator","content:en-us:blog:gitlab-mitre-attack-navigator.yml","Gitlab Mitre Attack Navigator","en-us/blog/gitlab-mitre-attack-navigator.yml","en-us/blog/gitlab-mitre-attack-navigator",{"_path":2180,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2181,"content":2187,"config":2194,"_id":2196,"_type":13,"title":2197,"_source":15,"_file":2198,"_stem":2199,"_extension":18},"/en-us/blog/gitlab-pages-features-review-apps-and-multiple-website-deployment",{"title":2182,"description":2183,"ogTitle":2182,"ogDescription":2183,"noIndex":6,"ogImage":2184,"ogUrl":2185,"ogSiteName":669,"ogType":670,"canonicalUrls":2185,"schema":2186},"GitLab Pages features review apps and multiple website deployment","GitLab Pages helps organizations reap the rewards of knowledge management, including better collaboration and accessibility. 
Learn how to use a new feature, Parallel Deployments.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674550/Blog/Hero%20Images/blog-image-template-1800x945__1_.png","https://about.gitlab.com/blog/gitlab-pages-features-review-apps-and-multiple-website-deployment","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab Pages features review apps and multiple website deployment\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matthew Macfarlane\"},{\"@type\":\"Person\",\"name\":\"Janis Altherr\"}],\n        \"datePublished\": \"2024-09-23\",\n      }",{"title":2182,"description":2183,"authors":2188,"heroImage":2184,"date":2190,"body":2191,"category":678,"tags":2192,"updatedDate":2193},[2189,1529],"Matthew Macfarlane","2024-09-23","[GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/) has long been a popular choice for hosting static websites, allowing users to showcase their projects, blogs, and documentation directly from their repositories.\n\nBefore GitLab 17.4, you could only have a single version of your GitLab Pages website. So you couldn’t preview your changes or have multiple versions of your website deployed simultaneously. Now, with a Premium or Ultimate license, you can do both!\n\n### Introducing Parallel Deployments\n\nWith Parallel Deployments, users can now easily preview changes and manage multiple environments for their GitLab Pages sites. This enhancement allows seamless experimentation with new ideas, enabling users to confidently test and refine their sites. By catching any issues early, users can ensure the live site remains stable and polished, building on the already great foundation of GitLab Pages.\n\n### Why Parallel Deployments is a game-changer\n\n1. 
**Version control made easy**\\\n   If your project involves software development or documentation that covers multiple versions (such as user guides for different software releases), Parallel Deployments makes it easy to manage. Or you can use the feature to localize your website for different languages.\n2. **Flexibility to experiment**\\\n   Want to try out a new design or feature? Parallel Deployments lets you experiment freely. You can create a separate version of your site to test new ideas without impacting the current site. This flexibility encourages creativity and continuous improvement.\n\n### How to add review apps to your GitLab Pages project\n\nTo add a review app to your GitLab Pages project, edit your `.gitlab-ci.yml` file to create a deployment for each merge request (MR). Let’s assume you start with a `.gitlab-ci.yml` file somewhat like this:\n\n```yaml\ncreate-pages:\n  stage: deploy\n  script:\n    - npm run build\n  pages: \n    publish: dist # the name of the folder containing the pages files\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH # only run this job when there's a commit to the default branch\n```\n\nTo also run the pages pipeline when there’s an MR being opened or updated, we can add another rule to `pages.rules`:\n\n```yaml\n- if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n```\n\nIf we only add this rule, however, each Pages job will always replace the main deployment – each time an MR is opened! You likely don’t want that to happen.\n\nTo provide each individual deployment with its own URL, we’ve introduced the new `pages.path_prefix` property.\n\nA Pages deployment with this configuration...\n\n```yaml\ncreate-pages:\n  script:\n    - ...\n  pages:\n    ...\n    path_prefix: my-review-app\n```\n\n...will be available at `https://my-pages-app-7fe824.gitlab.io/my-review-app`, or, with unique domains disabled, `https://my-group.gitlab.io/my-project/my-review-app`.\n\nBut there’s no need to hardcode the path_prefix. 
You can dynamically generate it using CI variables. That’s particularly useful for review apps – to create a path for each MR, use the `CI_MERGE_REQUEST_IID variable`:\n\n```yaml\ncreate-pages:\n  script:\n    - ...\n  pages:\n    ...\n    path_prefix: mr-$CI_MERGE_REQUEST_IID\n```\n\nAn MR with the ID 114 would then automatically create a deployment at `https://my-pages-app-7fe824.gitlab.io/mr-114`.\n\nWith those concepts at hand, we’d like our pipeline to dynamically create either a main deployment for the default branch, or a path_prefixed-review app for MR events.\n\nFirst, let’s add a `create-pages-review-app` job to our pipeline config:\n\n```yaml\ncreate-pages-deployment:\n  # This job will create a pages deployment without path_prefix\n  # when there is a commit to the default branch\n  stage: deploy\n  script:\n    - npm run build\n  pages: \n    publish: dist \n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n\ncreate-pages-review-app:\n  # This job will create a pages deployment with a path_prefix\n  # when there a merge request is created or updated.\n  stage: deploy\n  script:\n    - npm run build\n  pages:\n    publish: dist \n    path_prefix: 'mr-$CI_MERGE_REQUEST_IID' # Prefix with the mr-\u003Ciid>, like `mr-123`\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n```\n\nNow you’re creating a deployment both when pushing to the default branch, and prefixed parallel deployments when creating or updating MRs!\n\nFor the best experience, add the URL to the environment job property. 
This will add a link to the review app to the MR page:\n\n```yaml\ncreate-pages-deployment:\n  # This job will create a pages deployment without path_prefix\n  # when there is a commit to the default branch\n  stage: deploy\n  script:\n    - npm run build\n  pages: \n    publish: dist \n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n\ncreate-pages-review-app:\n  # This job will create a pages deployment with a path_prefix\n  # when there a merge request is created or updated.\n  stage: deploy\n  script:\n    - npm run build\n  pages:\n    publish: dist \n    path_prefix: 'mr-$CI_MERGE_REQUEST_IID' # Prefix with the mr-\u003Ciid>, like `mr-123`\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n  environment:\n    name: \"Pages Review MR ${CI_MERGE_REQUEST_IID}\"\n    url: $CI_PAGES_URL\n```\n\nCongratulations, you’ve now set up MR review apps for your Pages site.\n\n## How to deploy documentation for different versions of your product\n\nThe Parallel Deployments feature is also a useful tool if you maintain the documentation of multiple versions of your software simultaneously.\n\nThe below CI config will not only create a pages deployment when there is a commit to the default branch, but also for any commit to branches named `v1`, `v2`, or `v3`.\n\n```yaml\ncreate-pages:\n  stage: deploy\n  script:\n    - ...\n  variables:\n    PAGES_PREFIX: \"$CI_COMMIT_BRANCH\" # Use the branch name by default\n  pages:\n    path_prefix: \"$PAGES_PREFIX\" # use whatever value is set in the variable\n  environment:\n    name: \"Pages ${PAGES_PREFIX}\"\n    url: $CI_PAGES_URL\n  artifacts:\n    paths:\n    - public\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n      variables:\n        PAGES_PREFIX: '' # No prefix\n    - if: $CI_COMMIT_BRANCH == 'v1'\n    - if: $CI_COMMIT_BRANCH == 'v2'\n    - if: $CI_COMMIT_BRANCH == 'v3'\n```\n\nBy using the `$CI_COMMIT_BRANCH` variable as the path_prefix value, each of these branches will deploy their 
documentation to their own sub-path of your website:\n\n- The branch named v1 has its docs published to \u003Cmy-domain>/v1.\n- The branch named v2 has its docs published to \u003Cmy-domain>/v2.\n- The branch named v3 has its docs published to \u003Cmy-domain>/v3.\n\nA new commit to one of these branches will then trigger a new deployment to its respective path, keeping the documentation of multiple versions up to date.\n\nThe Parallel Deployments feature is a significant upgrade to GitLab Pages, offering a more flexible and efficient way to manage your knowledge. Whether you're working on a small project or a large-scale site with multiple versions, this new capability will make your workflow smoother and more efficient\n\n> Visit our [Parallel Deployments documentation](https://docs.gitlab.com/ee/user/project/pages/#create-multiple-deployments) to get started today!\n\n### Feedback\n\nShare your ideas and other comments in our [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/482040)!\n",[980,108,835,680,678,9],"2025-04-09",{"slug":2195,"featured":6,"template":684},"gitlab-pages-features-review-apps-and-multiple-website-deployment","content:en-us:blog:gitlab-pages-features-review-apps-and-multiple-website-deployment.yml","Gitlab Pages Features Review Apps And Multiple Website Deployment","en-us/blog/gitlab-pages-features-review-apps-and-multiple-website-deployment.yml","en-us/blog/gitlab-pages-features-review-apps-and-multiple-website-deployment",{"_path":2201,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2202,"content":2208,"config":2215,"_id":2217,"_type":13,"title":2218,"_source":15,"_file":2219,"_stem":2220,"_extension":18},"/en-us/blog/gitops-with-gitlab-auto-devops",{"title":2203,"description":2204,"ogTitle":2203,"ogDescription":2204,"noIndex":6,"ogImage":2205,"ogUrl":2206,"ogSiteName":669,"ogType":670,"canonicalUrls":2206,"schema":2207},"Connecting Kubernetes clusters to GitLab with Auto DevOps","This is the 6th article in a series of 
tutorials on how to do GitOps with GitLab","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663397/Blog/Hero%20Images/logoforblogpost.jpg","https://about.gitlab.com/blog/gitops-with-gitlab-auto-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps with GitLab: Connecting GitLab with a Kubernetes cluster - Auto DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2022-02-08\",\n      }",{"title":2209,"description":2204,"authors":2210,"heroImage":2205,"date":2212,"body":2213,"category":769,"tags":2214},"GitOps with GitLab: Connecting GitLab with a Kubernetes cluster - Auto DevOps",[2211],"Viktor Nagy","2022-02-08","\n_It is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. These easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them. You can find the entire \"Ultimate guide to GitOps with GitLab\" tutorial series [here](/blog/the-ultimate-guide-to-gitops-with-gitlab/)._\n\nIn this article we will look at how one can use Auto DevOps with all its bells and whistles to easily manage deployments.\n\n## Prerequisites\n\nThis article builds upon the previous tutorials in this series. We will assume that you have a Kubernetes cluster connected to GitLab using the GitLab Agent for Kubernetes, and you understand how the CI/CD tunnel works.\n\nIf this is not the case, I recommend to follow the previous articles to have a similar setup from where we will start today.\n\n## What is Auto DevOps\n\nAuto DevOps is GitLab's answer to the complexity of software application delivery. It is a set of opinionated templates that can be used \"as-is\" or can be used to fast-track your own pipeline building. 
For some setups it works from testing through various security and compliance checks to canary deployments. Even if you have a less supported setup, you should be able to reuse some of its components, from security linting to deployment.\n\nYou can read more about the various [features built into Auto DevOps in our documentation](https://docs.gitlab.com/ee/topics/autodevops/).\n\n## The plan for building and deploying a minimul application\n\nThe plan for this article is to build and deploy a minimal application. The focus will be on showing how you can get started quickly, without any modifications on the Auto Deploy pipelines.\n\nThis setup will use the already known CI/CD tunnel. There will be a separate article that shows how to replace the \"Auto Deploy\" part of Auto DevOps with GitOps style deployments.\n\nIn this article, we will deploy a simple hello world application. This is not a tutorial about Auto DevOps, so we will only focus on the setup needed when used together with the GitLab Agent for Kubernetes.\n\nYou can see the final repository under https://gitlab.com/gitlab-examples/ops/gitops-demo/hello-world-service/.\n\n## How to build the application\n\nIn this section we will create our super simple hello world application and put a Dockerfile beside it.\n\n1. Start a new project.\n1. Add `src/main.py` with the following content:\n    ```python\n    # From https://gist.github.com/davidbgk/b10113c3779b8388e96e6d0c44e03a74\n    import http.server\n    import socketserver\n    from http import HTTPStatus\n\n    class Handler(http.server.SimpleHTTPRequestHandler):\n        def do_GET(self):\n            self.send_response(HTTPStatus.OK)\n            self.end_headers()\n            self.wfile.write(b'Hello world')\n\n    httpd = socketserver.TCPServer(('', 5000), Handler)\n    httpd.serve_forever()\n    ```\n1. 
Create the `Dockerfile` with:\n   ```\n   FROM python:3.9.10-slim-bullseye\n\n   WORKDIR /app\n\n   COPY ./src .\n\n   EXPOSE 5000\n\n   CMD [ \"python\", \"main.py\" ]\n   ```\n1. Commit the change to the repository.\n\n## How to set up Auto DevOps\n\n1. [Share the CI/CD tunnel](https://docs.gitlab.com/ee/user/clusters/agent/work_with_agent.html) with the hello-world project. Note, that the Agent configuration project amd the application project should be in the same project hierarchy and the Agent configuration project needs to be higher in this hierarchy.\n    ```yaml\n    ci_access:\n      # This agent is accessible from CI jobs in projects in these groups\n      projects:\n        - id: \u003Cpath>/\u003Cto>/\u003Cyour>/\u003Cproject>\n    ```\n1. Find out the Kubernetes context name. The agent context name is `\u003Cnamespace>/\u003Cgroup>/\u003Cproject>:\u003Cagent-name>`. You can see the available contexts in CI with the following job:\n    ```yaml\n    contexts:\n      stage: .pre\n      image:\n        name: bitnami/kubectl:latest\n        entrypoint: [\"\"]\n      script:\n        - kubectl config get-contexts \n    ```\n1. 
Create your `.gitlab-ci.yml` file to have Auto DevOps working:\n    ```yaml\n    include:\n        template: Auto-DevOps.gitlab-ci.yml\n\n    variables:\n        # KUBE_INGRESS_BASE_DOMAIN is the application deployment domain and should be set as a variable at the group or project level.\n        KUBE_INGRESS_BASE_DOMAIN: 74.220.23.215.nip.io\n        KUBE_CONTEXT: \"gitlab-examples/ops/gitops-demo/k8s-agents:demo-agent\"\n        KUBE_NAMESPACE: \"demo-agent\"\n\n        # Feel free to enable any of these\n        TEST_DISABLED: \"true\"\n        CODE_QUALITY_DISABLED: \"true\"\n        LICENSE_MANAGEMENT_DISABLED: \"true\"\n        BROWSER_PERFORMANCE_DISABLED: \"true\"\n        LOAD_PERFORMANCE_DISABLED: \"true\"\n        SAST_DISABLED: \"true\"\n        SECRET_DETECTION_DISABLED: \"true\"\n        DEPENDENCY_SCANNING_DISABLED: \"true\"\n        CONTAINER_SCANNING_DISABLED: \"true\"\n        DAST_DISABLED: \"true\"\n        REVIEW_DISABLED: \"true\"\n        CODE_INTELLIGENCE_DISABLED: \"true\"\n        CLUSTER_IMAGE_SCANNING_DISABLED: \"true\"\n        POSTGRES_ENABLED: \"false\"\n    ```\n1. Commit the changes.\n\nAs you can see, I disabled many Auto DevOps functionalities in the above CI YAML. I did this for two reasons:\n\n1. Some of these features require a Premium or Ultimate license or tests in the repo. I wanted to keep this tutorial \"stable\" for everyone.\n1. Every use case differs a little bit and Auto DevOps allows a large number of customizations. I wanted to highlight this by showing you the most basic ones. Read more about [customizing Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/customize.html). 
If you would like [Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) support, just remove the `REVIEW_DISABLED` line.\n\nThere are actually only three settings to get the Auto DevOps pipeline up and running:\n\n- The `KUBE_CONTEXT` specifies the context used for the connection, it's provided by the GitLab Agent for Kubernetes.\n- The `KUBE_NAMESPACE` specifies the Kubernetes namespace to target with the deployments. This namespace will be used as we apply the Helm charts used behind the hood.\n- The `KUBE_INGRESS_BASE_DOMAIN` sets up an Ingress and enables user friendly access to the deployed service. \n\n## Recap\n\nA very common setup I see with GitLab customers is that the development team is responsible for writing the application code and packaging it into a Docker container. During this process, they take care of basic testing as well, but they are not familiar with all the security and compliance requirements or the deployment pipelines used within the company. The presented setup and the Auto DevOps suite of templates serves these teams. 
As you can see, the teams need minimal GitLab CI setup to run a complex pipeline that can take care of many of their requirements.\n\n## What's next\n\nIn the next article, I will show you how to deploy an application project with a GitOps style workflow.\n\n_[Click here](/blog/the-ultimate-guide-to-gitops-with-gitlab/) for the next tutorial._\n",[533,1225,9],{"slug":2216,"featured":6,"template":684},"gitops-with-gitlab-auto-devops","content:en-us:blog:gitops-with-gitlab-auto-devops.yml","Gitops With Gitlab Auto Devops","en-us/blog/gitops-with-gitlab-auto-devops.yml","en-us/blog/gitops-with-gitlab-auto-devops",{"_path":2222,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2223,"content":2229,"config":2235,"_id":2237,"_type":13,"title":2238,"_source":15,"_file":2239,"_stem":2240,"_extension":18},"/en-us/blog/gitops-with-gitlab-manage-the-agent",{"title":2224,"description":2225,"ogTitle":2224,"ogDescription":2225,"noIndex":6,"ogImage":2226,"ogUrl":2227,"ogSiteName":669,"ogType":670,"canonicalUrls":2227,"schema":2228},"Self-managing Kubernetes agent installation with GitOps","This is the eighth and last article in a series of tutorials on how to do GitOps with GitLab.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670178/Blog/Hero%20Images/GitLab-Ops.png","https://about.gitlab.com/blog/gitops-with-gitlab-manage-the-agent","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitOps with GitLab: Turn a GitLab agent for Kubernetes installation to manage itself\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2022-03-30\",\n      }",{"title":2230,"description":2225,"authors":2231,"heroImage":2226,"date":2232,"body":2233,"category":769,"tags":2234},"GitOps with GitLab: Turn a GitLab agent for Kubernetes installation to manage itself",[2211],"2022-03-30","\n\n_It is possible to use GitLab as a best-in-class GitOps 
tool, and this blog post series is going to show you how. These easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them. You can find the entire \"Ultimate guide to GitOps with GitLab\" tutorial series [here](/blog/the-ultimate-guide-to-gitops-with-gitlab/)._\n\nIn this article, we will build upon the first few articles, and will turn a GitLab agent for Kubernetes installation to manage itself. This is highly recommended for production usage as it puts your `agentk` deployment under your GitOps project, and enables flawless and simple upgrades.\n\n## Prerequisites\n\nThis article builds on a few previous articles from this series and makes the following assumptions:\n\n- You have [an agent connection set up using the `kpt` based method](/blog/gitops-with-gitlab-connecting-the-cluster/).\n- You have [set up Bitnami's Sealed secrets](/blog/gitops-with-gitlab-secrets-management/).\n- You understand [how to use `kustomize` with the agent](/blog/gitops-with-gitlab/).\n\n## The goal\n\nThe goal of this tutorial is to manage a GitLab agent for Kubernetes deployment using that given agent. This has several benefits, including: \n\n- By turning the agent to manage itself, the agent configuration and deployment is managed in code. As a result, all the code-oriented tools, including Merge Requests, Approvals, and branching are there to support your processes and policies.\n- Managing a fleet of agent installations in code enables simple upgrades of the deployments.\n\n### Upgrading GitLab and the GitLab agent for Kubernetes\n\nA single GitLab instance might have dozens of agent connections. How should you upgrade all these deployments in a coordinated way? 
Turning everything into code simplifies the upgrade process a lot.\n\nWe have the GitLab - Agent [version compatibility documented](https://docs.gitlab.com/ee/user/clusters/agent/install/index.html#upgrades-and-version-compatibility). The recommended approach is to first upgrade GitLab together with `KAS`, the GitLab-side component of the connection, and then upgrade all the `agentk` deployments. \n\nIf you manage the `agentk` deployments in code, the upgrade requires only bumping the version number in code and the `agentk` instances will take care of upgrading themselves.\n\n## Turning an agent installation to manage itself\n\nLet's do a quick recap and an overview how we wil use the tools.\n\nWe use `kpt` to check out tagged `agentk` deployment manifests. As the manifests are a set of `kustomize` layers, we can extend them with our own overlays if needed, or just customize the setup per our requirements. The agent connection requires a token to authenticate with GitLab. We can use Bitnami's Sealed Secrets to store an encrypted sycret in the repo.\n\nAll the above code can be put under version control safely. Moreover, we can use GitLab CI/CD to dehydrate the `kustomize` package into vanilla Kubernetes manifests that the agent can deal with.\n\nLet's see the above in action!\n\n### Kustomize layer with encrypted secret\n\nBased on the previous articles, we have the `kpt` package checked out under `packages/gitlab-agent`. We would like to store the vanilla Kubernetes manifests in the repository. 
We can run `kustomize build packages/gitlab-agent/cluster > kubernetes/gitlab-agent.yaml` to get the manifests, but this will include the unencrypted authentication token too.\n\nTo never output the unencrypted token, we should turn it into a sealed secret.\n\nNavigate to the `gitlab-agent` Terraform project, and create a Kubernetes secret from the token `terraform output -raw token_secret | kubectl create secret generic gitlab-agent-token -n gitlab-agent --dry-run=client --type=Opaque --from-file=token=/dev/stdin -o yaml > ../../ignored/gitlab-agent-token.yaml`. If you followed the instructions in the previous articles, the files under the `ignored` directory are never committed to `git`.\n\nWe will turn this unencrypted secret into a sealed secret. As the secret will already exist in the cluster, we should instruct the Bitnami Sealed Secret controller to pull it under its management. Moreover, as kustomize applies a random hash to every secret name, we should enable renaming the secret within the namespace. We can achieve these by adding two annotations to the unencrypted secrets object.\n\nAdd the following annotations to `ignored/gitlab-agent-token.yaml`\n\n```\nannotations:\n  sealedsecrets.bitnami.com/managed: \"true\"\n  sealedsecrets.bitnami.com/namespace-wide: \"true\"\n```\n\nNext, we should create an encrypred secret from the ignored, unencrypted one running `bin/seal-secret ignored/gitlab-agent-token.yaml > packages/gitlab-agent/sealed-secret` in the root of our project. This creates the encrypted secret under `packages/gitlab-agent/sealed-secret/SealedSecret.gitlab-agent-token.yaml`. Now, we need a kustomize layer that will use this secret instead of the original one that came with `kpt`. 
Let's create the following files around the encrypted secret:\n\n- Create `packages/gitlab-agent/sealed-secret/kustomization.yaml` as:\n\n```yaml\napiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\nresources:\n- ../base\n- SealedSecret.gitlab-agent-token.yaml\ncomponents:\n- ../cluster/components/gitops-read-all\n- ../cluster/components/gitops-write-all\n- ../cluster/components/cilium-alert-read\nconfigurations:\n- configuration/sealed-secret-config.yaml\nsecretGenerator:\n- name: gitlab-agent-token\n  behavior: replace\n  type: Opaque\n  namespace: gitlab-agent\n  options:\n    annotations:\n      sealedsecrets.bitnami.com/managed: \"true\"\n      sealedsecrets.bitnami.com/namespace-wide: \"true\"\n```\n\n- Create `packages/gitlab-agent/sealed-secret/configuration/sealed-secret-config.yaml` as:\n\n```yaml\nnameReference:\n- kind: Secret\n  fieldSpecs:\n  - kind: SealedSecret\n    path: metadata/name\n  - kind: SealedSecret\n    path: spec/template/metadata/name\n```\n\nThis configuration enables us to reference the name of the Sealed Secret in the `secretGenerator`.\n\nWe created a new `kustomize` overlay that builds on the `base` and `cluster` layers, but will use the sealed secret. We can hydrate this into vanilla manifests using `kustomize build packages/gitlab-agent/sealed-secret > kubernetes/gitlab-agent.yaml`. This configuration does not include any unencrypted, sensitive data. As a result, we can commit it freely using `git commit`.\n\n### Adopt the agent by the agent\n\nRight now the agent configuration file looks similar to: \n\n```yaml\ngitops:\n  # Manifest projects are watched by the agent. 
Whenever a project changes,\n  # GitLab deploys the changes using the agent.\n  manifest_projects:\n  - id: path/to/your/project\n    default_namespace: gitlab-agent\n    # Paths inside of the repository to scan for manifest files.\n    # Directories with names starting with a dot are ignored.\n    paths:\n    - glob: 'kubernetes/test_config.yaml'\n    - glob: 'kubernetes/**/*.yaml'\n```\n\nIf we would push the previously hydrated manifests, `agentk` would fail applying them complaining about missing inventories. We can easily fix this by temporarily setting a looser inventory policy:\n\n```yaml\ngitops:\n  # Manifest projects are watched by the agent. Whenever a project changes,\n  # GitLab deploys the changes using the agent.\n  manifest_projects:\n  - id: path/to/your/project\n    default_namespace: gitlab-agent\n    inventory_policy: adopt_all\n    # Paths inside of the repository to scan for manifest files.\n    # Directories with names starting with a dot are ignored.\n    paths:\n    - glob: 'kubernetes/test_config.yaml'\n    - glob: 'kubernetes/**/*.yaml'\n```\n\nWith the inventory policy configured, we can commit and push our changes to GitLab. The agent will see the new configuration and resources, and will apply them into the cluster. From now on, you can change the code in the repository, push it to git, and the changes will be automatically applied into your cluster.\n\n#### What are inventory policies?\n\nThe GitLab agent for Kubernetes knows about the managed resources using so-called inventory objects. In technical terms, an inventory object is just a `ConfigMap` with a unique label. Whenever the agent sees an object that it should manage, it applies the same label. 
This way, every agent can easily find the resources that it manages.\n\nYou can read more about the possible [inventory policy configurations in the documentation](https://docs.gitlab.com/ee/user/infrastructure/clusters/deploy/inventory_object.html).\n\n\n#### A word about RBAC\n\nDepending on the authorization rights given to the `agentk` deployment, not every change might be possible. For example, if you would like to create new `ClusterRole` and `ClusterRoleBinding` in a new `kustomize` overlay, and apply that with the Agent, that might fail. It will fail, if your current role-based access control (RBAC) does not allow your `agentk` deployment to create these resources. In this case, you should either provide higher rights to your `agentk` service account first or you should apply the changes manually from your command line.\n\n### Automatic hydration\n\nNow, if you want to change something in your agent deployment, you need to take two actions:\n\n- change the code in the `kpt` package\n- run `kustomize build` to hydrate the results\n\nLet's automate the second step so you can focus on your main job only. Following the setup of [a GitOps-style Auto DevOps pipeline](/blog/gitops-with-gitlab/#hydrating-the-manifests), we need to extend the `hydrate-packages` job:\n\n\n```yaml\nhydrate-packages:\n      ...\n      script:\n      - mkdir -p new_manifests\n      ...\n      - kustomize build packages/gitlab-agent/sealed-secret > new_manifests/gitlab-agent.yaml\n```\n\nWe can re-use all the other automation as presented in the previous articles.\n\n## How to upgrade `agentk`?\n\nJust to provide a practical example, let's see how we can use the above setup to easily upgrade an `agentk` deployment to a newer version.\n\nBy running `kustomize cfg set packages/gitlab-agent agent-version v14.9.1` we set the intended `agentk` version to be version `14.9.1`. 
You can commit and push this change to git, and lay back in your chair to see how the changes are being rolled out across your clusters. You can point several agent configurations at the same `kubernetes/gitlab-agent.yaml` manifest, and upgrade all of them at once.\n\n## Recap\n\nIn this article we have seen:\n\n- how to turn an Agent deployment to manage itself\n- how to extend the default `kpt` project with a custom `kustomize` overlay to customize the `agentk` deployment\n- how to easily upgrade a set of `agentk` deployments\n- how to pull already existing objects to be managed by the Agent using inventory policies\n\n_Note: This is the final installment in this series of [how to do GitOps with GitLab](/blog/the-ultimate-guide-to-gitops-with-gitlab)._\n\n\n",[533,1225,9],{"slug":2236,"featured":6,"template":684},"gitops-with-gitlab-manage-the-agent","content:en-us:blog:gitops-with-gitlab-manage-the-agent.yml","Gitops With Gitlab Manage The Agent","en-us/blog/gitops-with-gitlab-manage-the-agent.yml","en-us/blog/gitops-with-gitlab-manage-the-agent",{"_path":2242,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2243,"content":2249,"config":2256,"_id":2258,"_type":13,"title":2259,"_source":15,"_file":2260,"_stem":2261,"_extension":18},"/en-us/blog/go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss",{"title":2244,"description":2245,"ogTitle":2244,"ogDescription":2245,"noIndex":6,"ogImage":2246,"ogUrl":2247,"ogSiteName":669,"ogType":670,"canonicalUrls":2247,"schema":2248},"Go tools and GitLab: How to do continuous integration like a boss","How the team at Pantomath makes their lives easier with GitLab CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667978/Blog/Hero%20Images/go-tools-and-gitlab.jpg","https://about.gitlab.com/blog/go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Go 
tools and GitLab: How to do continuous integration like a boss\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Julien Andrieux\"}],\n        \"datePublished\": \"2017-11-27\",\n      }",{"title":2244,"description":2245,"authors":2250,"heroImage":2246,"date":2252,"body":2253,"category":769,"tags":2254},[2251],"Julien Andrieux","2017-11-27","\n\nAt [Pantomath](https://pantomath.io/), we use [GitLab](/) for all our development work. The purpose of this paper is not to present GitLab and all [its features](/pricing/feature-comparison/), but to introduce how we use these tools to ease our lives. So what is it all about? To automate everything that is related to your development project, and let you focus on your code.\n\n\u003C!-- more -->\n\nWe’ll cover the [lint](https://en.wikipedia.org/wiki/Lint_(software)), [unit tests](https://en.wikipedia.org/wiki/Unit_testing), [data race](https://en.wikipedia.org/wiki/Race_condition), [memory sanitizer](https://clang.llvm.org/docs/MemorySanitizer.html), [code coverage](https://en.wikipedia.org/wiki/Code_coverage), and build.\n\nAll the source code shown in this post is available at [gitlab.com/pantomath-io/demo-tools](https://gitlab.com/pantomath-io/demo-tools). So feel free to get the repository, and use the tags to navigate in it. The repository should be placed in the `src` folder of your `$GOPATH`:\n\n```bash\n$ go get -v -d gitlab.com/pantomath-io/demo-tools\n$ cd $GOPATH/src/gitlab.com/pantomath-io/demo-tools\n```\n\n### Go tools\n\nLuckily, `Go` — the open source programming language also known as golang — comes with a [lot of useful tools](https://golang.org/cmd/go/), to build, test, and check your code. In fact, it’s all there. We’ll just add extra tools to glue them together. But before we go there, we need to take them one by one, and see what they do.\n\n#### Package list\n\nYour Go project is a collection of packages, as described in the [official doc](https://golang.org/doc/code.html). 
Most of the following tools will be fed with these packages, and thus the first command we need is a way to list the packages. Hopefully, the `Go` language covers our back with the `list` subcommand ([read the fine manual](https://golang.org/cmd/go/#hdr-List_packages) and this [excellent post from Dave Cheney](https://dave.cheney.net/2014/09/14/go-list-your-swiss-army-knife)):\n\n```bash\n$ go list ./...\n```\n\nNote that we want to avoid applying our tools on external packages or resources, and restrict it to **our** code. So we need to get rid of the [vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories):\n\n```bash\n$ go list ./... | grep -v /vendor/\n```\n\n#### Lint\n\nThis is the very first tool we use on the code: the linter. Its role is to make sure that the code respects the code style. This may sounds like an optional tool, or at least a “nice-to-have” but it really helps to keep consistent style over your project.\n\nThis linter is not part of Go *per se*, so you need to grab it and install it by hand (see [official doc](https://github.com/golang/lint)).\n\nThe usage is fairly simple: you just run it on the packages of your code (you can also point the `.go` files):\n\n```bash\n$ golint -set_exit_status $(go list ./... | grep -v /vendor/)\n```\n\nNote the `-set_exit_status` option. By default, `golint` only prints the style issues, and returns (with a 0 return code), so the CI never considers something went wrong. If you specify the `-set_exit_status`, the return code from `golint` will be different from 0 if any style issue is encountered.\n\n#### Unit test\n\nThese are the most common tests you can run on your code. For each `.go` file, we need to have an associated `_test.go` file holding the unit tests. You can run the tests for all the packages with the following command:\n\n```bash\n$ go test -short $(go list ./... 
| grep -v /vendor/)\n```\n\n#### Data race\n\nThis is usually a hard subject to cover, but the `Go` tool has it by default (but only available on `linux/amd64`, `freebsd/amd64`, `darwin/amd64` and `windows/amd64`). For more information about data race, see [this article](https://golang.org/doc/articles/race_detector.html). Meanwhile, here is how to run it:\n\n```bash\n$ go test -race -short $(go list ./... | grep -v /vendor/)\n```\n\n#### Memory sanitizer\n\nClang has a nice detector for uninitialized reads called [MemorySanitizer](https://clang.llvm.org/docs/MemorySanitizer.html). The `go test` tool is kind enough to interact with this Clang module (as soon as you are on `linux/amd64` host and using a recent version of Clang/LLVM (`>=3.8.0`). This command is how to run it:\n\n```bash\n$ go test -msan -short $(go list ./... | grep -v /vendor/)\n```\n\n#### Code coverage\n\nThis is also a must have to evaluate the health of your code, and see what the part of code is under unit tests and what part is not. [Rob Pike](https://twitter.com/rob_pike) wrote a [full post on that very subject](https://blog.golang.org/cover).\n\nTo calculate the code coverage ratio, we need to run the following script:\n\n```bash\n$ PKG_LIST=$(go list ./... 
| grep -v /vendor/)\n$ for package in ${PKG_LIST}; do\n    go test -covermode=count -coverprofile \"cover/${package##*/}.cov\" \"$package\" ;\ndone\n$ tail -q -n +2 cover/*.cov >> cover/coverage.cov\n$ go tool cover -func=cover/coverage.cov\n```\n\nIf we want to get the coverage report in HTML format, we need to add the following command:\n\n```bash\n$ go tool cover -html=cover/coverage.cov -o coverage.html\n```\n\n#### Build\n\nLast but not least, once the code has been fully tested, we might want to compile it to make sure we can build a working binary.\n\n```bash\n$ go build -i -v gitlab.com/pantomath-io/demo-tools\n```\n\n### Makefile\n\n*git tag:* [init-makefile](https://gitlab.com/pantomath-io/demo-tools/tags/init-makefile)\n\n![](https://cdn-images-1.medium.com/max/1600/1*Ip_q_6I-kNpUjuPMOutuTA.jpeg)\n*\u003Csmall>Photo by [Matt Artz](https://unsplash.com/photos/qJE5Svhs2ek?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\u003C/small>*\n\nNow we have all the tools that we may use in the context of continuous integration, we can wrap them all in a [Makefile](https://gitlab.com/pantomath-io/demo-tools/blob/init-makefile/Makefile), and have a consistent way to call them.\n\nThe purpose of this doc is not to present `make`, but you can refer to [official documentation](https://www.gnu.org/software/make/manual/make.html) to learn more about it.\n\n    PROJECT_NAME := \"demo-tools\"\n    PKG := \"gitlab.com/pantomath-io/$(PROJECT_NAME)\"\n    PKG_LIST := $(shell go list ${PKG}/... | grep -v /vendor/)\n    GO_FILES := $(shell find . 
-name '*.go' | grep -v /vendor/ | grep -v _test.go)\n\n    .PHONY: all dep build clean test coverage coverhtml lint\n\n    all: build\n\n    lint: ## Lint the files\n      @golint -set_exit_status ${PKG_LIST}\n\n    test: ## Run unittests\n      @go test -short ${PKG_LIST}\n\n    race: dep ## Run data race detector\n      @go test -race -short ${PKG_LIST}\n\n    msan: dep ## Run memory sanitizer\n      @go test -msan -short ${PKG_LIST}\n\n    coverage: ## Generate global code coverage report\n      ./tools/coverage.sh;\n\n    coverhtml: ## Generate global code coverage report in HTML\n      ./tools/coverage.sh html;\n\n    dep: ## Get the dependencies\n      @go get -v -d ./...\n\n    build: dep ## Build the binary file\n      @go build -i -v $(PKG)\n\n    clean: ## Remove previous build\n      @rm -f $(PROJECT_NAME)\n\n    help: ## Display this help screen\n      @grep -h -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | awk 'BEGIN {FS = \":.*?## \"}; {printf \"\\033[36m%-30s\\033[0m %s\\n\", $$1, $$2}'\n\nWhat do we have now? One target for any tool previously presented, and three more targets for:\n\n* installation of dependencies (`dep`);\n* housekeeping of the project (`clean`);\n* some nice and shiny help (`help`).\n\nNote that we also had to create a script for the code coverage work. This is because implementing loops over files in a Makefile is a pain. So the work is done in a `bash` script, and the Makefile only triggers this script.\n\nYou can try the Makefile with the following commands:\n\n    $ make help\n    $ make lint\n    $ make coverage\n\n### Continuous integration\n\n*git tag:* [init-ci](https://gitlab.com/pantomath-io/demo-tools/tags/init-ci)\n\nNow the tools are in place, and we can run various tests on our code, we’d like to automate these, on your repository. Luckily, GitLab offers [CI pipelines](/solutions/continuous-integration/) just for this. 
And the setup for this is pretty straightforward: all you create is a `.gitlab-ci.yml` file at the root of the repository.\n\nThe [full documentation](https://docs.gitlab.com/ee/ci/yaml/) on this Yaml file presents all the options, but you can start with this `.gitlab-ci.yml`:\n\n```yaml\nimage: golang:1.9\n\ncache:\n  paths:\n    - /apt-cache\n    - /go/src/github.com\n    - /go/src/golang.org\n    - /go/src/google.golang.org\n    - /go/src/gopkg.in\n\nstages:\n  - test\n  - build\n\nbefore_script:\n  - mkdir -p /go/src/gitlab.com/pantomath-io /go/src/_/builds\n  - cp -r $CI_PROJECT_DIR /go/src/gitlab.com/pantomath-io/pantomath\n  - ln -s /go/src/gitlab.com/pantomath-io /go/src/_/builds/pantomath-io\n  - make dep\n\nunit_tests:\n  stage: test\n  script:\n    - make test\n\nrace_detector:\n  stage: test\n  script:\n    - make race\n\nmemory_sanitizer:\n  stage: test\n  script:\n    - make msan\n\ncode_coverage:\n  stage: test\n  script:\n    - make coverage\n\ncode_coverage_report:\n  stage: test\n  script:\n    - make coverhtml\n  only:\n  - master\n\nlint_code:\n  stage: test\n  script:\n    - make lint\n\nbuild:\n  stage: build\n  script:\n    - make\n```\n\nIf you break down the file, here are some explanations on its content:\n\n* The first thing is to choose what Docker image will be used to run the CI. Head to the [Docker Hub](https://hub.docker.com/) to choose the right image for your project.\n* Then, you specify some folders of this image [to be cached](https://docs.gitlab.com/ee/ci/yaml/#cache). The goal here is to avoid downloading the same content several times. Once a job is completed, the listed paths will be archived, and next job will use the same archive.\n* You define the different `stages` that will group your jobs. In our case, we have two [stages](https://docs.gitlab.com/ee/ci/yaml/#stages) (to be processed in that order): `test` and `build`. 
We could have other stages, such as `deploy`.\n* The `before_script` [section](https://docs.gitlab.com/ee/ci/yaml/#before_script) defines the commands to run in the Docker container right before the job is actually done. In our context, the commands just copy or link the repository deployed in the `$GOPATH`, and install dependencies.\n* Then come the actual [jobs](https://docs.gitlab.com/ee/ci/jobs/), using the `Makefile` targets. Note the special case for `code_coverage_report` where execution is restricted to the `master` branch (we don’t want to update the code coverage report from feature branches for instance).\n\nAs we commit/push the `.gitlab-ci.yml` file in the repository, the CI is [automatically triggered](https://gitlab.com/pantomath-io/demo-tools/pipelines/13481935). And the pipeline fails. Howcome?\n\nThe `lint_code` [job](https://gitlab.com/pantomath-io/demo-tools/-/jobs/38690212) fails because it can’t find the `golint` binary:\n\n```bash\n$ make lint\nmake: golint: Command not found\nMakefile:11: recipe for target 'lint' failed\nmake: *** [lint] Error 127\n```\n\nSo, [update](https://gitlab.com/pantomath-io/demo-toolscommit/17a0206eb626504e559f56773e2d81c7b5808dbe) your `Makefile` to install `golint` as part of the `dep` target.\n\nThe `memory_sanitizer` [job](https://gitlab.com/pantomath-io/demo-tools/-/jobs/38690209) fails because `gcc` complains:\n\n```bash\n$ make msan\n# runtime/cgo\ngcc: error: unrecognized argument to -fsanitize= option: 'memory'\nMakefile:20: recipe for target 'msan' failed\nmake: *** [msan] Error 2\n```\n\nBut remember we need to use Clang/LLVM `>=3.8.0` to enjoy the `-msan` option in `go test` command.\n\nWe have two options here:\n\n* either we set up Clang in the job (using `before_script`);\n* or we use a Docker image with Clang installed by default.\n\nThe first option is nice, but that implies to have this setup done **for every single job**. This is going to be so long, we should do it once and for all. 
So we prefer the second option, which is a good way to play with [GitLab Registry](https://docs.gitlab.com/ee/user/packages/container_registry/index.html).\n\n*git tag:* [use-own-docker](https://gitlab.com/pantomath-io/demo-tools/tags/use-own-docker)\n\nWe need to create a [Dockerfile](https://gitlab.com/pantomath-io/demo-tools/blob/use-own-docker/Dockerfile) for the container (as usual: read the [official documentation](https://docs.docker.com/engine/reference/builder) for more options about it):\n\n    # Base image:\n    FROM golang:1.9\n    MAINTAINER Julien Andrieux \u003Cjulien@pantomath.io>\n\n    # Install golint\n    ENV GOPATH /go\n    ENV PATH ${GOPATH}/bin:$PATH\n    RUN go get -u github.com/golang/lint/golint\n\n    # Add apt key for LLVM repository\n    RUN wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -\n\n    # Add LLVM apt repository\n    RUN echo \"deb https://apt.llvm.org/stretch/ llvm-toolchain-stretch-5.0 main\" | tee -a /etc/apt/sources.list\n\n    # Install clang from LLVM repository\n    RUN apt-get update && apt-get install -y --no-install-recommends \\\n        clang-5.0 \\\n        && apt-get clean \\\n        && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/*\n\n    # Set Clang as default CC\n    ENV set_clang /etc/profile.d/set-clang-cc.sh\n    RUN echo \"export CC=clang-5.0\" | tee -a ${set_clang} && chmod a+x ${set_clang}\n\nThe container built out of this Dockerfile will be based on [golang:1.9](https://hub.docker.com/_/golang/) image (the one referenced in the `.gitlab-ci.yml` file).\n\nWhile we’re at it, we install `golint` in the container, so we have it available. 
Then we follow the [official way](http://apt.llvm.org/) of installing Clang 5.0 from LLVM repository.\n\nNow we have the Dockerfile in place, we need to build the container image and make it available for GitLab:\n\n```bash\n$ docker login registry.gitlab.com\n$ docker build -t registry.gitlab.com/pantomath-io/demo-tools .\n$ docker push registry.gitlab.com/pantomath-io/demo-tools\n```\n\nThe first command connects you to the GitLab Registry. Then you build the container image described in the Dockerfile. And finally, you push it to the GitLab Registry.\n\nTake a look at the [Registry for your repository](https://gitlab.com/pantomath-io/demo-tools/container_registry), you’ll see your image, ready to be used. And to have the CI using your image, you just need to update the `.gitlab-ci.yml` file:\n\n    image: golang:1.9\n\nbecomes\n\n    image: registry.gitlab.com/pantomath-io/demo-tools:latest\n\nOne last detail: you need to tell the CI to use the proper compiler (i.e. the `CC` environment variable), so we add the variable initialization in the `.gitlab-ci.yml` file:\n\n    export CC=clang-5.0\n\nOnce the modifications are done, next commit will trigger the pipeline, which now works:\n\n[gitlab.com/pantomath-io/demo-tools/pipelines/13497136](https://gitlab.com/pantomath-io/demo-tools/pipelines/13497136)\n\n### Badges\n\n*git tag:* [init-badges](https://gitlab.com/pantomath-io/demo-tools/tags/init-badges)\n\n![](https://cdn-images-1.medium.com/max/1600/1*0pY_6oCiHZ_eLh0vfg5rDA.jpeg)\n\n*\u003Csmall>Photo by [Jakob Owens](https://unsplash.com/photos/ZBadHaTUkP0?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\u003C/small>*\n\nNow the tools are in place, every commit will launch a test suite, and you probably want to show it, and that’s legitimate :) The best way to do so is to use badges, and the best place for it is the `README` 
[file](https://gitlab.com/pantomath-io/demo-tools/blob/init-badges/README.md).\n\nEdit it and add the four following badges:\n\n* Build Status: the status of the last pipeline on the `master` branch:\n\n```\n[![Build Status](https://gitlab.com/pantomath-io/demo-tools/badges/master/build.svg)](https://gitlab.com/pantomath-io/demo-tools/commits/master)\n```\n\n* Coverage Report: the percentage of source code covered by tests\n\n```\n[![Coverage Report](https://gitlab.com/pantomath-io/demo-tools/badges/master/coverage.svg)](https://gitlab.com/pantomath-io/demo-tools/commits/master)\n```\n\n* Go Report Card:\n\n```\n[![Go Report Card](https://goreportcard.com/badge/gitlab.com/pantomath-io/demo-tools)](https://goreportcard.com/report/gitlab.com/pantomath-io/demo-tools)\n```\n\n* License:\n\n```\n[![License MIT](https://img.shields.io/badge/License-MIT-brightgreen.svg)](https://img.shields.io/badge/License-MIT-brightgreen.svg)\n```\n\nThe coverage report needs a special configuration. You need to tell GitLab how to get that information, considering that there is a job in the CI that *displays* it when it runs.\u003Cbr> There is a [configuration](https://gitlab.com/help/user/project/pipelines/settings#test-coverage-parsing) to provide GitLab with a regexp, used in any job’s output. If the regexp matches, GitLab considers the match to be the code coverage result.\n\nSo head to `Settings > CI/CD` in your repository, scroll down to the `Test coverage parsing` setting in the `General pipelines settings` section, and use the following regexp:\n\n    total:\\s+\\(statements\\)\\s+(\\d+.\\d+\\%)\n\nYou’re all set! Head to the [overview of your repository](https://gitlab.com/pantomath-io/demo-tools/tree/init-badges), and look at your `README`:\n\n### Conclusion\n\nWhat’s next? Probably more tests in your CI. You can also look at the CD ([Continuous Deployment](https://docs.gitlab.com/ee/ci/environments/index.html)) to automate the deployment of your builds. 
The documentation can be done using [GoDoc](https://godoc.org/-/about). Note that you generate a coverage report with the `code_coverage_report`, but don’t use it in the CI. You can make the job copy the HTML file to a web server, using `scp` (see this [documentation](https://docs.gitlab.com/ee/ci/ssh_keys/) on how to use SSH keys).\n\nMany thanks to [Charles Francoise](https://dev.to/loderunner) who co-wrote this paper and [gitlab.com/pantomath-io/demo-tools](https://gitlab.com/pantomath-io/demo-tools).\n\n## About the Guest Author\n\nJulien Andrieux is currently working on Pantomath. Pantomath is a modern, open source monitoring solution, built for performance, that bridges the gaps across all levels of your company. The wellbeing of your infrastructure is everyone’s business. [Keep up with the project](http://goo.gl/tcxtXq).\n\n *[Go tools & GitLab - how to do Continuous Integration like a boss](https://medium.com/pantomath/go-tools-gitlab-how-to-do-continuous-integration-like-a-boss-941a3a9ad0b6) was originally published on Medium.*\n\n*Cover photo by [Todd Quackenbush](https://unsplash.com/photos/IClZBVw5W5A?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)*\n{: .note}\n",[108,2255,9],"user stories",{"slug":2257,"featured":6,"template":684},"go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss","content:en-us:blog:go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss.yml","Go Tools And Gitlab How To Do Continuous Integration Like A 
Boss","en-us/blog/go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss.yml","en-us/blog/go-tools-and-gitlab-how-to-do-continuous-integration-like-a-boss",{"_path":2263,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2264,"content":2270,"config":2277,"_id":2279,"_type":13,"title":2280,"_source":15,"_file":2281,"_stem":2282,"_extension":18},"/en-us/blog/google-cloud-integrations-for-secure-cloud-run-deployments-at-gitlab",{"title":2265,"description":2266,"ogTitle":2265,"ogDescription":2266,"noIndex":6,"ogImage":2267,"ogUrl":2268,"ogSiteName":669,"ogType":670,"canonicalUrls":2268,"schema":2269},"Google Cloud integrations for secure Cloud Run deployments at GitLab","This tutorial demonstrates how to use GitLab’s Google Artifact Management integration to deploy to Google Cloud Run, a serverless runtime for containers application.\n","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099336/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945_fJKX41PJHKCfSOWw4xQxm_1750099336757.png","https://about.gitlab.com/blog/google-cloud-integrations-for-secure-cloud-run-deployments-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Google Cloud integrations for secure Cloud Run deployments at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Regnard Raquedan\"},{\"@type\":\"Person\",\"name\":\"Matt Genelin\"}],\n        \"datePublished\": \"2025-01-15\",\n      }",{"title":2265,"description":2266,"authors":2271,"heroImage":2267,"date":2273,"body":2274,"category":678,"tags":2275},[1841,2272],"Matt Genelin","2025-01-15","*This tutorial is from a recent Arctiq, GitLab, and Google in-person workshop. The goal was to explore common security challenges faced by organizations as they journey to the cloud.*\n\nThis tutorial will help you learn about the [Google Cloud integrations in GitLab](https://cloud.google.com/docs/gitlab). 
These features are meant to help accelerate and improve security of deployments to Google Cloud.\n\n![Google integrations list](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099345/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099345112.png)\n\n## Prerequisites\n\n1. [Google Cloud project](https://cloud.google.com/resource-manager/docs/creating-managing-projects)  \n2. Appropriate [IAM permissions](https://cloud.google.com/iam/docs/) for security, Artifact Registry, and Cloud Run usage. For this tutorial, ensure you have the \"Owner\" role with the aforementioned project.\n\n## Setting up Workload Identity Federation\n\nIn this step, we configure GitLab to connect Google Cloud's Workload Identity Federation to reduce the need for service accounts and let the two platforms use short-lived credentials on-demand.\n\n1. On the left sidebar, select **Search** or go to and find your group or project. If you configure this in a group, settings apply to all projects within by default.  \n2. Select **Settings \\> Integrations**.  \n3. Select **Google Cloud IAM**.  \n4. Input the Project ID and Project number in the respective fields. This information can be obtained from the Google Cloud console [Welcome](https://console.cloud.google.com/welcome) page of your project.  \n5. Input the desired Pool ID and Provider ID in the respective fields. These are values that you provide and must be unique from other Pool and Provider IDs.  \n6. Copy the generated command and then go to the **Google Cloud console**.  \n7. Run **Cloud Shell** and execute the generated command from the Workload Identity Federation integration page.  \n8. 
Once successful, the **Google Cloud IAM** integration will be designated as active in the Integrations list at the GitLab project.\n\n## Artifact Registry configuration\n\nAs an alternative to GitLab's own place to host artifacts, deploying to Google Cloud's Artifact Registry is another way to leverage their infrastructure. This section will provide steps on how to use GitLab's native integration with Artifact Registry. Note that Workload Identity Federation must already be configured prior to this.\n\n1. At the **Google Cloud** console, go to **Artifact Registry** via search or the main navigation.  \n2. Create a new repository by clicking the **\"+\"** icon. At the creation page, provide a name and keep the **Docker** format and **Standard** mode selected. Select **Region** and choose **us-central1**. Leave the rest at the default settings and click **Create**.  \n3. Once the repository is created and confirmed, go back to your GitLab project.  \n4. In your GitLab project, on the left sidebar, select **Settings > Integrations**. Then select **Google Artifact Registry**.  \n5. Under Enable integration, select the **Active** checkbox, then complete the fields:  \n   * Google Cloud project ID: The ID of the Google Cloud project where your Artifact Registry repository is located.  \n   * Repository name: The name of your Artifact Registry repository.  \n   * Repository location: The location of your Artifact Registry repository. (`us-central1` is assumed.)  \n6. In **Configure Google Cloud IAM policies**, follow the onscreen instructions to set up the IAM policies in Google Cloud. These policies are required to use the Artifact Registry repository in your GitLab project. Select **Save** changes.  \n7. To view your Google Cloud artifacts, on the left sidebar, select **Deploy > Google Artifact Registry**.\n\n## Cloud Run configuration\n\n1. Enable the Cloud Run API, if not done already. Go to **APIs & Services > Enabled APIs & Services**. 
From there, click **Enable APIs & Services** at the top and search for **Cloud Run Admin API**. Select the search result and enable the API.  \n2. Configure the IAM policies in Google Cloud to grant permissions to allow the Cloud Run CI/CD component to deploy to Cloud Run.\n\n```\nGCP_PROJECT_ID=\"\u003CPROJECT ID>\"\nGCP_PROJECT_NUMBER=\"\u003CPROJECT NUMBER>\"\nGCP_WORKLOAD_IDENTITY_POOL=\"\u003CPOOL ID>\"\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/run.admin'\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/iam.serviceAccountUser'\n\ngcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \\\n  --member=\"principalSet://iam.googleapis.com/projects/${GCP_PROJECT_NUMBER}/locations/global/workloadIdentityPools/${GCP_WORKLOAD_IDENTITY_POOL}/attribute.developer_access/true\" \\\n  --role='roles/cloudbuild.builds.editor'\n```\n\n## Deploy to Cloud Run\n\nIn this section, you will use Gitlab's CI/CD components to deploy to Cloud Run, Google Cloud's serverless runtime for containers.\n\n1. Go to the GitLab project and from the list of files in the source code, find `.gitlab-ci.yaml`. Click the **file name** and the single file editor will show up. Click the **Edit** button and select the **Open in Web IDE** option.  \n2. In Web IDE, copy-paste the following code:\n\n```\nstages:\n    - build\n    - upload\n    - deploy\n```\n\nThis code snippet sets up three stages in the pipeline: build, upload, and deploy.\n\n1. 
The next step is to create two CI/CD variables in the same YAML file:\n\n```\nvariables:\n    GITLAB_IMAGE: $CI_REGISTRY_IMAGE/main:$CI_COMMIT_SHORT_SHA\n    AR_IMAGE: $GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_LOCATION-docker.pkg.dev/$GOOGLE_ARTIFACT_REGISTRY_PROJECT_ID/$GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_NAME/main:$CI_COMMIT_SHORT_SHA\n```\n\nThe first variable, `GITLAB\\_IMAGE`, denotes the container image that the pipeline creates by default. The second one, `AR\\_IMAGE`, denotes the location at Google Cloud's Artifact Registry where the container image will be pushed to.\n\n2. Next, define the code that will build the container image:\n\n```\nbuild:\n    image: docker:24.0.5\n    stage: build\n    services:\n        - docker:24.0.5-dind\n    before_script:\n        - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    script:\n        - docker build -t $GITLAB_IMAGE .\n        - docker push $GITLAB_IMAGE\n```\n\nThis code uses [pre-defined CI/CD variables](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html) for the Docker commands.\n\n3. The final step is using two CI/CD components to deploy to Google Cloud. The first component integrates with Artifact Registry and the second is the deployment to Cloud Run:\n\n```\ninclude:\n    - component: gitlab.com/google-gitlab-components/artifact-registry/upload-artifact-registry@main\n      inputs:\n        stage: upload\n        source: $GITLAB_IMAGE\n        target: $AR_IMAGE\n\n    - component: gitlab.com/google-gitlab-components/cloud-run/deploy-cloud-run@main\n      inputs:\n        stage: deploy\n        project_id: \"\u003CPROJECT_ID>\"\n        service: \"tanuki-racing\"\n        region: \"\u003CREGION>\"\n        image: $AR_IMAGE\n```\n\nReplace \u003CPROJECT_ID> with your Google Cloud Project ID. Replace with the [Google Cloud region](https://cloud.google.com/compute/docs/regions-zones) most appropriate to your location. 
`us-central1` is assumed.\n\nCommit the changes and push to the main branch. For reference, the final `.gitlab-ci.yaml` should look like this, noting to replace the \u003CPROJECT ID> and \u003CREGION> with the appropriate values:\n\n```\nstages:\n    - build\n    - upload\n    - deploy\nvariables:\n    GITLAB_IMAGE: $CI_REGISTRY_IMAGE/main:$CI_COMMIT_SHORT_SHA\n    AR_IMAGE: $GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_LOCATION-docker.pkg.dev/$GOOGLE_ARTIFACT_REGISTRY_PROJECT_ID/$GOOGLE_ARTIFACT_REGISTRY_REPOSITORY_NAME/main:$CI_COMMIT_SHORT_SHA\n\nbuild:\n    image: docker:24.0.5\n    stage: build\n    services:\n        - docker:24.0.5-dind\n    before_script:\n        - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    script:\n        - docker build -t $GITLAB_IMAGE .\n        - docker push $GITLAB_IMAGE\n\ninclude:\n    - component: gitlab.com/google-gitlab-components/artifact-registry/upload-artifact-registry@main\n      inputs:\n        stage: upload\n        source: $GITLAB_IMAGE\n        target: $AR_IMAGE\n\n    - component: gitlab.com/google-gitlab-components/cloud-run/deploy-cloud-run@main\n      inputs:\n        stage: deploy\n        project_id: \"\u003CPROJECT_ID>\"\n        service: \"tanuki-racing\"\n        region: \"\u003CREGION>\"\n        image: $AR_IMAGE\n```\n\n1. Go back to the main GitLab project and view the pipeline that was just initiated. Take note of the stages that should be the same stages that were defined in Step 2.  \n2. Once the pipeline is complete, go to the Google Cloud console and then **Cloud Run** via search or navigation. A new Cloud Run service called `tanuki-racing` should be created.  \n3. Click the **service name** and then go to the **Security** tab. Ensure that the service is set to **Allow unauthenticated invocations**. This will make the deployed app publicly available. 
The app URL posted on screen is now available and should open a new browser tab when clicked.\n\nBy utilizing GitLab’s CI/CD pipelines to build and push a containerized application to Google Artifact Registry, you can see the power of GitLab’s AI-powered DevSecOps Platform as a means to building secure applications. GitLab also deployed the containerized application to Google’s Cloud Run as a low-cost running application on the public internet. Using GitLab to instrument building an application, pushing a container and triggering a cloud run deployment allows DevOps engineers to have the assurance that secure applications are being run on the public-facing internet.\n\n> [Sign up for a 60-day free trial of GitLab Ultimate](https://about.gitlab.com/free-trial/devsecops/) to begin working with these integrations. Also, check out our [solutions architecture area](https://about.gitlab.com/blog/tags/solutions-architecture/) for more Gitlab and Google Cloud tutorials.",[230,1248,2276,9,1000],"GKE",{"slug":2278,"featured":6,"template":684},"google-cloud-integrations-for-secure-cloud-run-deployments-at-gitlab","content:en-us:blog:google-cloud-integrations-for-secure-cloud-run-deployments-at-gitlab.yml","Google Cloud Integrations For Secure Cloud Run Deployments At Gitlab","en-us/blog/google-cloud-integrations-for-secure-cloud-run-deployments-at-gitlab.yml","en-us/blog/google-cloud-integrations-for-secure-cloud-run-deployments-at-gitlab",{"_path":2284,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2285,"content":2291,"config":2296,"_id":2298,"_type":13,"title":2299,"_source":15,"_file":2300,"_stem":2301,"_extension":18},"/en-us/blog/guide-to-fulfilling-soc-2-security-requirements-with-gitlab",{"title":2286,"description":2287,"ogTitle":2286,"ogDescription":2287,"noIndex":6,"ogImage":2288,"ogUrl":2289,"ogSiteName":669,"ogType":670,"canonicalUrls":2289,"schema":2290},"Guide to fulfilling SOC 2 security requirements with GitLab","Understand the application security 
features in the GitLab DevSecOps platform that map to System and Organization Controls 2 requirements.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099576/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_1172300481_IGPi3TS4VzFgcqhvEdBlR_1750099575518.jpg","https://about.gitlab.com/blog/guide-to-fulfilling-soc-2-security-requirements-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Guide to fulfilling SOC 2 security requirements with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2025-01-22\",\n      }",{"title":2286,"description":2287,"authors":2292,"heroImage":2288,"date":2293,"body":2294,"category":814,"tags":2295},[1767],"2025-01-22","For businesses that handle sensitive customer information, achieving SOC 2 (System and Organization Controls 2) compliance is not just a good practice — it's often a necessity. SOC 2 is a rigorous auditing standard developed by the American Institute of Certified Public Accountants that assesses a service organization's controls related to security, availability, processing integrity, confidentiality, and privacy.\n\nWhile SOC 2 is not legally mandated, it has become increasingly important, in part due to breaches consistently seen in news headlines. Obtaining SOC 2 compliance allows customers to build trust with service organizations because they know their data is being properly stored and security controls have been assessed by a third party.\n\nIn this guide, we'll review the requirements for obtaining SOC 2 compliance and how GitLab can help your organization meet the highest standards for application security.\n\n## What requirements are set by SOC 2\n\nThe compliance process involves an audit by an independent auditor who evaluates the design and operating effectiveness of an organization's controls. 
This process can be very costly, and many organizations are not sufficiently prepared before an audit. With the SOC 2 audit process typically taking close to a year, it is important to establish an efficient pre-audit process.\n\nTo obtain SOC 2 compliance, an organization must meet requirements based on the Trust Services Criteria:\n\n| Criteria | Requirements |\n| :---- | :---- |\n| Security | - Implement controls to protect against unauthorized access \u003Cbr> - Establish procedures for identifying and mitigating risks\u003Cbr> - Set up systems for detecting and addressing security incidents |\n| Availability | - Ensure systems are accessible for operation as agreed\u003Cbr> - Monitor current usage and capacity \u003Cbr> - Identify and address environmental threats that could affect system availability |\n| Processing integrity | - Maintain accurate records of system inputs and outputs \u003Cbr> - Implement procedures to quickly identify and correct system errors \u003Cbr> - Define processing activities to ensure products and services meet specifications |\n| Confidentiality | - Identify and protect confidential information \u003Cbr> - Establish policies for data retention periods \u003Cbr> - Implement secure methods for destroying confidential data after retention periods expire |\n| Privacy | - Obtain consent before collecting sensitive personal information \u003Cbr> - Communicate privacy policies clearly and in plain language \u003Cbr> - Collect data only through legal means and from reliable sources |\n\u003Cbr>\n\nNote that these requirements are not one-time achievements, but rather a continuous process. 
Auditors will require control effectiveness over time.\n\n## How to achieve and maintain the security requirements\n\nGitLab provides several features out of the box to get you started with assuring SOC 2 security needs are met:\n\n| Security Requirement | Addressing Feature |\n| :---- | :---- |\n| Implement controls to protect against unauthorized access | - Confidential Issues and Merge Requests \u003Cbr> - Custom Roles and Granular Permissions \u003Cbr> - Security Policies \u003Cbr> - Verified Commit \u003Cbr> - Signed Container Images \u003Cbr> - CodeOwners \u003Cbr> - Protected Branches |\n| Set up systems for detecting and addressing security incidents | - Vulnerability Scanning \u003Cbr> - Merge Request Security Widget \u003Cbr> - Vulnerability Insights Compliance Center \u003Cbr> - Audit Events \u003Cbr> - Vulnerability Report Dependency List \u003Cbr> - AI: Vulnerability Explanation \u003Cbr> - AI: Vulnerability Resolution |\n| Establish procedures for identifying and mitigating risks | All the above tools can be used by a security team to establish a procedure around what to do when security vulnerabilities are identified and how they are mitigated. |\n\u003Cbr>\nLet’s go through each section and highlight the security features that address these requirements. Note that a [GitLab Ultimate subscription](https://about.gitlab.com/free-trial/) and the correct Role and Permissions are required to access many of the features listed. Be sure to check out the appropriate documentation for more information.\n\n## Implement controls to protect against unauthorized access\n\nImplementing robust access controls is essential for protecting an organization's assets, ensuring regulatory compliance, maintaining operational continuity, and fostering trust. GitLab allows you to implement controls to follow the [principle of least privilege](https://about.gitlab.com/blog/the-ultimate-guide-to-least-privilege-access-with-gitlab/), securing against unauthorized access. 
I will briefly cover:\n\n* [Security policies](#security-policies)  \n* [Custom roles and granular permissions](#custom-roles-and-granular-permissions)  \n* [Branch protections and CodeOwners](#branch-protections-and-codeowners)  \n* [Verified commits](#verified-commits)\n\n### Security policies\n\nGitLab's security policies, known as guardrails, enable security and compliance teams to implement consistent controls across their organization, helping prevent security incidents, maintain compliance standards, and reduce risk by automatically enforcing security best practices at scale.\n\n![Merge request approval policy in action](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099597/Blog/Content%20Images/Blog/Content%20Images/merge_request_approval_policy_aHR0cHM6_1750099596925.png)\n\n\u003Ccenter>\u003Ci>Merge request approval policy in action\u003C/i>\u003C/center>\u003Cbr>\n\nThe following policy types are available:\n\n* Scan execution policy: Enforce security scans, either as part of the pipeline or on a specified schedule  \n* Merge request approval policy: Enforce project-level settings and approval rules based on scan results  \n* Pipeline execution policy: Enforce CI/CD jobs as part of project pipelines  \n* Vulnerability management policy: Automate vulnerability management workflows\n\nHere is an example of ensuring compliance with the pipeline execution policy:\n\n1. Create a project that houses multiple compliance jobs. An example of a job can be to check permissions of files that are deployed. These jobs should be generic enough that they can be applied to multiple applications.\n2. Limit the project's permissions to only security/compliance officers; don’t allow developers to remove jobs. This allows for separation of duties.\n3. Inject the compliance jobs in batch to the projects where they are required. Force them to run no matter what, but allow approval from team lead to not block development. 
This will ensure compliance jobs are always run and cannot be removed by developers, and that your environment remains compliant.\n\n> ##### Learn how to create security policies with our [security policy documentation](https://docs.gitlab.com/ee/user/application_security/policies/).\n\n### Custom roles and granular permissions\n\nCustom permissions in GitLab allow organizations to create fine-grained access controls beyond the standard role-based permissions, providing benefits such as:\n\n* more precise access control  \n* better security compliance  \n* reduced risk of accidental access  \n* streamlined user management  \n* support for complex organizational structures\n\n![GitLab custom roles](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099597/Blog/Content%20Images/Blog/Content%20Images/custom_roles_aHR0cHM6_1750099596926.png)\n\n\u003Ccenter>\u003Ci>Roles and permissions settings, including custom roles\u003C/i>\u003C/center>\n\n> ##### Learn how to create custom roles with granular permissions using our [custom role documentation](https://docs.gitlab.com/ee/user/custom_roles.html).\n\n### Branch protections and CodeOwners\n\nGitLab helps you further control who can change your code using two key features:\n* Branch Protection, which lets you set rules about who can update specific branches – like requiring approval before merging changes.\n* Code Ownership, which automatically finds the right people to review code changes by matching files to their designated owners.\n\nTogether, these features help keep your code secure and high-quality by making sure the right people review and approve changes.\n\n![Protected branches](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099597/Blog/Content%20Images/Blog/Content%20Images/protected_branches_aHR0cHM6_1750099596928.png)\n\n\u003Ccenter>\u003Ci>Protected branch settings\u003C/i>\u003C/center>\n\n> ##### Learn how to create protected branches along with CodeOwners using [protected 
branch](https://docs.gitlab.com/ee/user/project/repository/branches/protected.html) and [codeowner](https://docs.gitlab.com/ee/user/project/codeowners/) documentation.\n\n### Verified commits\n\nWhen you sign your commits digitally, you prove they really came from you, not someone pretending to be you. Think of a digital signature like a unique stamp that only you can create. When you upload your public GPG key to GitLab, it can check this stamp. If the stamp matches, GitLab marks your commit as `Verified`. You can then set up rules to reject commits that aren't signed, or block all commits from users who haven't verified their identity.\n\n![Commit signed with verified signature](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099597/Blog/Content%20Images/Blog/Content%20Images/signed_commit_aHR0cHM6_1750099596929.png)\n\n\u003Ccenter>\u003Ci>Commit signed with verified signature\u003C/i>\u003C/center>\u003Cbr>\n\nCommits can be signed with:\n\n* SSH key  \n* GPG key  \n* Personal x.509 certificate\n\n> ##### Learn more about verified commits with our [signed commits documentation](https://docs.gitlab.com/ee/user/project/repository/signed_commits/).\n\n## Set up systems for detecting and addressing security incidents\n\nSetting up systems for detecting and addressing security incidents is vital for maintaining a robust security posture, ensuring regulatory compliance, minimizing potential damages, and enabling organizations to respond effectively to the ever-evolving threat landscape.\n\nGitLab provides security scanning and vulnerability management for the complete application lifecycle. 
I will briefly cover:\n\n* [Security scanning and vulnerability management](#security-scanning-and-vulnerability-management)  \n* [Software bill of materials](#software-bill-of-materials)  \n* [System auditing and security posture review](#system-auditing-and-security-posture-review)\n* [Compliance and security posture oversight](#compliance-and-security-posture-oversight)\n\n### Security scanning and vulnerability management\n\nGitLab provides a variety of different security scanners that cover the complete lifecycle of your application:\n\n* Static Application Security Testing (SAST)  \n* Dynamic Application Security Testing (DAST)\n* Container Scanning  \n* Dependency Scanning  \n* Infrastructure as Code (IaC) Scanning  \n* Coverage-guided Fuzzing\n* Web API Fuzzing\n\nThese scanners can be added to your pipeline via the use of templates. For example, to run SAST and dependency scanning jobs in the test stage, simply add the following to your .gitlab-ci.yml:\n\n```yaml  \nstages:  \n   - test\n\ninclude:  \n  - template: Jobs/Dependency-Scanning.gitlab-ci.yml  \n  - template: Jobs/SAST.gitlab-ci.yml  \n``` \n\nThese jobs are fully configurable via environment variables and using GitLab job syntax. Once a pipeline kicks off, the security scanners run and detect vulnerabilities in the diff between the current branch and the target branch. The vulnerability can be seen in a merge request (MR), providing detailed oversight before the code is merged to the target branch. 
The MR will provide the following information on a vulnerability:\n\n* description  \n* status  \n* severity  \n* evidence  \n* identifiers  \n* URL (if applicable)  \n* request/response (if applicable)  \n* reproduction assets (if applicable)  \n* training (if applicable)  \n* code flow (if using advanced SAST)\n\n![MR view of introduced vulnerability](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099597/Blog/Content%20Images/Blog/Content%20Images/no_sql_injection_vulnerability_mr_view_aHR0cHM6_1750099596931.png)\n\n\u003Ccenter>\u003Ci>MR view of introduced vulnerability\u003C/i>\u003C/center>\u003Cbr>\n\nDevelopers can use this data to remediate vulnerabilities without slowing down security team workflows. Developers can dismiss a vulnerability with reasoning, speeding up the review process, or they can create a confidential issue to track the vulnerability.\n\nIf the code in an MR is merged to the default (usually production-level) branch, then the vulnerability report is populated with the security scanner results. These results can be used by security teams to manage and triage the vulnerabilities found in production.\n\n![Vulnerability report with Batch Status setting](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099597/Blog/Content%20Images/Blog/Content%20Images/vulnerability_report_aHR0cHM6_1750099596936.png)\n\n\u003Ccenter>\u003Ci>Vulnerability report with Batch Status setting\u003C/i>\u003C/center>\u003Cbr>\n\nWhen clicking on a vulnerability description within the vulnerability report, you are provided with the vulnerability page, which contains the same vulnerability data as the MR, allowing for a single source of truth when assessing impact and performing remediation. 
From the vulnerability page, [GitLab Duo](https://about.gitlab.com/gitlab-duo/) AI features can be used to explain the vulnerability and also create an MR to remediate, speeding up resolution time.\n\n> ##### Learn more about the security scanners included with GitLab and how to manage vulnerabilities in our [application security documentation](https://docs.gitlab.com/ee/user/application_security/).\n\n### Software bill of materials\n\nGitLab can create a detailed list of everything your software uses – kind of like an ingredients list for your code. This list, called a software bill of materials ([SBOM](https://about.gitlab.com/blog/the-ultimate-guide-to-sboms/)), shows you all the external code your project depends on, including the parts you directly use and their own dependencies. For each item, you can see which version you're using, what license it has, and whether it has any known security problems. This helps you keep track of what's in your software and spot potential risks.\n\n![Group-level dependency list (SBOM)](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099597/Blog/Content%20Images/Blog/Content%20Images/sbom_aHR0cHM6_1750099596937.png)\n\n\u003Ccenter>\u003Ci>Group-level dependency list (SBOM)\u003C/i>\u003C/center>\n\n> ##### Learn how to access and use the dependency list with our [dependency list documentation](https://docs.gitlab.com/ee/user/application_security/dependency_list/).\n\n### System auditing and security posture review\n\nGitLab keeps track of everything that happens in your system such as who made changes, what they changed, and when they did it. Think of it like a security camera for your code. This record helps you:\n\n* spot any suspicious activity  \n* show regulators you're following the rules  \n* figure out what happened if something goes wrong  \n* see how people are using GitLab\n\nAll of this information is stored in one place, making it easy to review and investigate when needed. 
For example, you can use audit events to track:\n\n* who changed the permission level of a particular user for a GitLab project, and when  \n* who added a new user or removed a user, and when\n\n![Project-level audit events](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099597/Blog/Content%20Images/Blog/Content%20Images/audit_events_aHR0cHM6_1750099596938.png)\n\n\u003Ccenter>\u003Ci>Project-level audit events\u003C/i>\u003C/center>\n\n> ##### Learn more about audit events, see the [audit events documentation](https://docs.gitlab.com/ee/user/compliance/audit_events.html).\n\n## Compliance and security posture oversight\n\nGitLab's Security Dashboard works like a control room that shows you all your security risks in one place. Instead of checking different security tools separately, you can see all their findings together on one screen. This makes it easy to spot and fix security problems across all your projects.\n\n![Group-level Security Dashboard](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099597/Blog/Content%20Images/Blog/Content%20Images/security_dashboard_aHR0cHM6_1750099596939.png)\n\u003Ccenter>\u003Ci>Group-level security dashboard\u003C/i>\u003C/center>\n\n> ##### Learn more about security dashboards with our [security dashboard documentation](https://docs.gitlab.com/ee/user/application_security/security_dashboard/).\n\n## Establish procedures for identifying and mitigating risks\n\nVulnerabilities go through a specific lifecycle. For example, a part of the procedure can be to require approval for any vulnerable code to be merged to protected branches using security policies. Then the procedure can state that vulnerable code detected in production must be prioritized, assessed, remediated, and then validated: \n\n* The criteria for prioritization can be by the severity of the vulnerability provided by GitLab scanners.  
\n* The assessment can be done using exploitation details provided by the AI: Vulnerability Explanation.  \n* Once the vulnerability is remediated, then it can be validated using built-in GitLab regression tests and scanners.\n\nWhile every organization's needs are different, leveraging GitLab as a platform, risks can be quickly identified and addressed with reduced risk when compared to using a sprawl of disparate tools.\n\n### Best practices for SOC 2 compliance\n\n* Establish a strong security culture: Foster a culture of security awareness and accountability throughout your organization.  \n* Document everything: Maintain thorough documentation of policies, procedures, and controls.  \n* Automate where possible: Use automation tools to streamline compliance processes and reduce errors.  \n* Communicate effectively: Keep stakeholders informed about your compliance efforts.  \n* Seek expert guidance: Consider partnering with a qualified consultant to assist with your SOC 2 journey.\n\nAchieving SOC 2 compliance is a significant undertaking, but the benefits are undeniable. 
By demonstrating your commitment to application security and operational excellence, you can build trust with customers, enhance your reputation, and gain a competitive edge in the marketplace.\n\n## Read more\n\nTo learn more about GitLab and how we can help achieve SOCv2 compliance while enhancing your security posture, check out the following resources:\n\n* [GitLab Ultimate](https://about.gitlab.com/pricing/ultimate/)  \n* [GitLab Security and Compliance Solutions](https://about.gitlab.com/solutions/security-compliance/)  \n* [GitLab Application Security Documentation](https://docs.gitlab.com/ee/user/application_security/)  \n* [GitLab DevSecOps Tutorial Project](https://gitlab.com/gitlab-da/tutorials/security-and-governance/devsecops/simply-vulnerable-notes)\n",[9,814,478,680,678],{"slug":2297,"featured":90,"template":684},"guide-to-fulfilling-soc-2-security-requirements-with-gitlab","content:en-us:blog:guide-to-fulfilling-soc-2-security-requirements-with-gitlab.yml","Guide To Fulfilling Soc 2 Security Requirements With Gitlab","en-us/blog/guide-to-fulfilling-soc-2-security-requirements-with-gitlab.yml","en-us/blog/guide-to-fulfilling-soc-2-security-requirements-with-gitlab",{"_path":2303,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2304,"content":2310,"config":2316,"_id":2318,"_type":13,"title":2319,"_source":15,"_file":2320,"_stem":2321,"_extension":18},"/en-us/blog/hosting-vuejs-apps-using-gitlab-pages",{"title":2305,"description":2306,"ogTitle":2305,"ogDescription":2306,"noIndex":6,"ogImage":2307,"ogUrl":2308,"ogSiteName":669,"ogType":670,"canonicalUrls":2308,"schema":2309},"How to host VueJS apps using GitLab Pages","Follow this tutorial, including detailed configuration guidance, to quickly get your application up and running for free.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683489/Blog/Hero%20Images/hosting.png","https://about.gitlab.com/blog/hosting-vuejs-apps-using-gitlab-pages","\n                        {\n        
\"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to host VueJS apps using GitLab Pages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sophia Manicor\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2023-09-13\",\n      }",{"title":2305,"description":2306,"authors":2311,"heroImage":2307,"date":2313,"body":2314,"category":769,"tags":2315},[2312,831],"Sophia Manicor","2023-09-13","\nIf you use VueJS to build websites, then you can host your website for free with GitLab Pages. This short tutorial walks you through a simple way to host and deploy your VueJS applications using GitLab CI/CD and GitLab Pages.\n\n## Prequisites\n- A VueJS application\n- Working knowledge of GitLab CI\n- 5 minutes\n\n## Setting up your VueJS application\n\n1) Install vue-cli.\n\n```bash\nnpm install -g @vue/cli\n# OR\nyarn global add @vue/cli\n```\nYou can check you have the right version of Vue with:\n\n```bash\nvue --version\n```\n\n2) Create your application using:\n\n```bash\nvue create name-of-app\n```\n\nWhen successfully completed, you will have a scaffolding of your VueJS application.\n\n## Setting up .gitlab-ci.yml for GitLab Pages\nBelow is the [GitLab CI configuration](https://gitlab.com/demos/applications/vuejs-gitlab-pages/-/blob/main/.gitlab-ci.yml) necessary to deploy to GitLab Pages. Put this file into your root project. 
GitLab Pages always deploys your website from a specific folder called `public`.\n\n```yaml\nimage: \"node:16-alpine\"\n\nstages:\n  - build\n  - test\n  - deploy\n\nbuild:\n  stage: build\n  script:\n    - yarn install --frozen-lockfile --check-files --non-interactive\n    - yarn build\n  artifacts:\n    paths:\n      - public\n\npages:\n  stage: deploy\n  script:\n    - echo 'Pages deployment job'\n  artifacts:\n    paths:\n      - public\n  only:\n    - main\n\n```\n\n## Vue config (vue.config.js)\nIn Vue, the artifacts are built in a folder called dist, in order for GitLab to deploy to Pages, we need to change the path of the artifacts. One way to do this is by changing the [Vue config file](https://gitlab.com/demos/applications/vuejs-gitlab-pages/-/blob/main/vue.config.js), `vue.config.js`.\n\n```\nconst { defineConfig } = require('@vue/cli-service')\n\nfunction publicPath () {\n  if (process.env.CI_PAGES_URL) {\n    return new URL(process.env.CI_PAGES_URL).pathname\n  } else {\n    return '/'\n  }\n}\n\nmodule.exports = defineConfig({\n  transpileDependencies: true,\n  publicPath: publicPath(),\n  outputDir: 'public'\n})\n```\n\nHere we have set `outputDir` to `public` so that GitLab will pick up the build artifacts and deploy to Pages. Another important piece when creating this configuration file is to change the `publicPath`, which is the base URL your application will be deployed at. In this case, we have create a function `publicPath()` that checks if the CI_PAGES_URL environment variable is set and returns the correct base URL.\n\n## Run GitLab CI\n\n![vuejs-gitlab-pages-pipeline](https://about.gitlab.com/images/blogimages/2023-05-11-hosting-vuejs-apps-using-gitlab-pages/vuejs-gitlab-pages-pipeline.png){: .shadow}\n\n\n## Check Pages to get your URL\n\n![gitlab-pages-domain](https://about.gitlab.com/images/blogimages/2023-05-11-hosting-vuejs-apps-using-gitlab-pages/gitlab-page-domain.png){: .shadow}\n\nVoila! 
You have set up a VueJS project with a fully functioning CI/CD pipeline. Enjoy your VueJS application hosted by GitLab Pages!\n\n## References\n- [https://cli.vuejs.org/guide/installation.html](https://cli.vuejs.org/guide/installation.html)\n- [https://cli.vuejs.org/guide/creating-a-project.html](https://cli.vuejs.org/guide/creating-a-project.html)\n- [https://gitlab.com/demos/applications/vuejs-gitlab-pages](https://gitlab.com/demos/applications/vuejs-gitlab-pages)\n\n",[108,9,771,772],{"slug":2317,"featured":6,"template":684},"hosting-vuejs-apps-using-gitlab-pages","content:en-us:blog:hosting-vuejs-apps-using-gitlab-pages.yml","Hosting Vuejs Apps Using Gitlab Pages","en-us/blog/hosting-vuejs-apps-using-gitlab-pages.yml","en-us/blog/hosting-vuejs-apps-using-gitlab-pages",{"_path":2323,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2324,"content":2330,"config":2335,"_id":2337,"_type":13,"title":2338,"_source":15,"_file":2339,"_stem":2340,"_extension":18},"/en-us/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code",{"title":2325,"description":2326,"ogTitle":2325,"ogDescription":2326,"noIndex":6,"ogImage":2327,"ogUrl":2328,"ogSiteName":669,"ogType":670,"canonicalUrls":2328,"schema":2329},"Developing GitLab Duo: Secure and thoroughly test AI-generated code","Learn step-by-step how to enhance AI-generated code reliability and security using GitLab Duo and GitLab Pages (includes code samples and prompts).","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097183/Blog/Hero%20Images/Blog/Hero%20Images/blog-hero-banner-1-0178-820x470-fy25_7JlF3WlEkswGQbcTe8DOTB_1750097183481.png","https://about.gitlab.com/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Developing GitLab Duo: Secure and thoroughly test AI-generated code\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2024-05-30\",\n      }",{"title":2325,"description":2326,"authors":2331,"heroImage":2327,"date":722,"body":2333,"category":702,"tags":2334},[2332],"David O'Regan","___Generative AI marks a monumental shift in the software development industry, making it easier to develop, secure, and operate software. Our new blog series, written by our product and engineering teams, gives you an inside look at how we create, test, and deploy the AI features you need integrated throughout the enterprise. Get to know new capabilities within GitLab Duo and how they will help DevSecOps teams deliver better results for customers.___\n\nAs AI becomes increasingly integral to software development, ensuring the security and thorough testing of AI-generated code is paramount. This article is a step-by-step guide to combining [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI features to powering the DevSecOps workflow, and [GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/) to secure and thoroughly test AI-generated code. You'll discover how to mitigate common risks, including how to automatically generate tests, test code, and deploy test reports – all to enhance the reliability of your AI-generated code.\n\n> Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Watch today!](https://about.gitlab.com/seventeen/)\n\n## Challenges in AI-generated code\n\nAI-generated code often faces issues such as:\n\n- Mismatched algorithms: Incorrect or suboptimal algorithms can be generated.\n- Dependency problems: AI may include dependencies that are outdated or incompatible.\n- Security vulnerabilities: AI might generate code with potential security flaws.\n\nAI-generated code often faces issues such as mismatched algorithms, dependency problems, and security vulnerabilities. 
A [recent study published by the Association of Computing Machinery](https://dl.acm.org/doi/pdf/10.1145/3613904.3642596) on ChatGPT’s responses to programming questions found that 52% of the answers contained incorrect information, and 77% were overly verbose. Despite these shortcomings, users preferred ChatGPT’s comprehensive and well-articulated answers 35% of the time, even overlooking misinformation 39% of the time. Addressing these challenges requires the use of advanced tools and frameworks.\n\n## GitLab’s approach to AI security and testing\n\nGitLab has a comprehensive content strategy focused on integrating security measures within the development workflow. By leveraging GitLab Duo for AI-powered code generation and GitLab Pages for embedding test reports, developers can ensure their AI-generated code is both secure and reliable.\n\nHere is a step-by-step guide to pair GitLab Duo and GitLab Pages to ensure secure and thoroughly tested AI-generated code by implementing a [Flask web server](https://flask.palletsprojects.com/en/3.0.x/).\n\n### 1. Create a new project on GitLab.com\n\n- Go to [GitLab.com](http://GitLab.com).\n- Click on the \"New project\" button.\n- Choose \"Create blank project\".\n- Enter a project name (e.g., AI_Code_Security).\n- Set the visibility level (Public, Internal, or Private).\n- Click \"Create project\".\n\n### 2. Enable GitLab Duo Code Suggestions\n\n- Navigate to your project.\n- Click on the \"Web IDE\" button to open the Web IDE.\n- Ensure that GitLab Duo features like Code Suggestions and Duo Chat are enabled. \n- Start coding in the [Web IDE](https://docs.gitlab.com/ee/user/project/web_ide/). As you type, GitLab Duo will provide code suggestions to help you write code more efficiently.\n\n### 3. 
Create a Flask web server\n\nYou can create a Flask web server using the comment (highlighted in green) in the screenshot below.\n\n![DGD testing - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097192/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097192520.png)\n\n### 4. Generate tests with GitLab Duo\n\nUnit tests are essential for validating the functionality of the generated code. Use GitLab Duo’s `/tests` command to [generate testing suggestions directly in the Web IDE](https://docs.gitlab.com/ee/user/gitlab_duo_chat_examples.html#write-tests-in-the-ide). This command can be tailored with additional instructions to focus on specific aspects such as performance, regression, or using particular frameworks.\n\n#### Example usage in Web IDE:\n\n- Select the code for which you want to generate tests.\n- Use the command `/tests` followed by additional instructions if needed.\n\n![DGD testing - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097192/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097192521.png)\n\n### 5. Identify issues with AI-generated code using GitLab Duo Chat\n\nUse GitLab Duo Chat to review and refine AI-generated code. For instance, let's check our Flask web server code for security vulnerabilities:\n\n```unset\nPrompt: Review this code for potential security vulnerabilities and dependency issues.\n\n```\n\n![DGD testing - image 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097192/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097192523.png)\n\nGitLab Duo Chat can help identify the vulnerabilities in the above code.\n\n### 6. 
Generate test reports\nAfter running your tests, generate a test report that will be deployed using GitLab Pages.\n\n```unset\n\nPrompt: Write me a python script to generate a test report that will be deployed using \nGitLab Pages.\n\n```\n\n![DGD testing - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097192/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097192525.png)\n\nWhat's happening here?\n\n- The script ensures the test_reports directory exists.\n- It runs the `test_server.py` file using `subprocess.run()`, capturing the output.\n- The raw output is saved to `test_reports/test_output.txt`.\n- An HTML report is generated, embedding the test output within `\u003Cpre>` tags for readability, and saved as `test_reports/index.html`.\n\n### 7. Deploy the test report with GitLab Pages\n\nUse [GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/) to host and display the test report. Here’s the configuration for our `.gitlab-ci.yml` file to deploy the test report.\n\n```python\n\nstages:\n  - test\n  - deploy\ntest_job:\n  stage: test\n  script:\n    - python generate_test_report.py\n  artifacts:\n    paths:\n      - test_reports/\npages:\n  stage: deploy\n  script:\n    - mv test_reports public\n  artifacts:\n    paths:\n      - public\n\n ```\n\nWith this setup, the `test_job` stage runs the Python script to generate the test report. The `pages` stage moves the `test_reports` directory to `public`, which GitLab Pages uses to serve the content.\n\n### 8. Embedding test reports in MR widgets\n\nEmbedding the [test reports in MR widgets](https://docs.gitlab.com/ee/ci/testing/unit_test_reports.html) provides immediate visibility into the testing outcomes, ensuring transparency and reliability. 
This can be done by including the test report as an artifact in your CI/CD pipeline configuration:\n\n```python\n\nstages:\n  - build\n  - test\n  - deploy\n\nbuild_job:\n  stage: build\n  script:\n    - echo \"Building the project...\"\n    - # Your build commands here\n\ntest_job:\n  stage: test\n  script:\n    - mkdir -p test-reports\n    - python test_server.py > test-reports/results.xml\n  artifacts:\n    when: always\n    reports:\n      junit: test-reports/results.xml\n    paths:\n      - test-reports/results.xml\n\npages:\n  stage: deploy\n  script:\n    - mkdir .public\n    - mv test-reports .public/\n  artifacts:\n    paths:\n      - .public\n\n```\nBy including the test report as an artifact and specifying it in the reports section, GitLab will automatically display the test results in the MR widgets, providing immediate visibility into the testing outcomes and enhancing transparency and reliability.\n\n### Case study: AI reliability with security policies and scanners\n\nImagine a scenario where an AI-generated code snippet introduces a dependency that has known vulnerabilities. By using GitLab Duo and its security policies, this dependency would be flagged during the code generation process. Let’s consider an example where a snippet was generated by AI:\n\n```python\n\nimport os\nfrom flask import Flask, request\n\napp = Flask(__name__)\n\n@app.route('/search')\ndef search():\n    query = request.args.get('query')\n    execute_os_command(query)\n    return 'You searched for: ' + query\n\ndef execute_os_command(command):\n    os.system(command)\n\nif __name__ == '__main__':\n    app.run()\n\n```\n\nIn this example, the search endpoint is vulnerable to OS command injection. 
By leveraging GitLab's Static Application Security Testing ([SAST](https://docs.gitlab.com/ee/user/application_security/sast/)) component, this vulnerability would be detected during the CI/CD pipeline.\n\n#### Integrate SAST scanning to detect vulnerabilities\n\nGitLab SAST automatically analyzes your code for security vulnerabilities. Here’s how it can be integrated into your `.gitlab-ci.yml` file to scan for issues:\n\n```python\n\nstages:\n  - build\n  - test\n  - sast\n  - deploy\n\nbuild_job:\n  stage: build\n  script:\n    - echo \"Building the project...\"\n    - # Your build commands here\n\ntest_job:\n  stage: test\n  script:\n    - python test_server.py > test-reports/results.xml\n  artifacts:\n    when: always\n    reports:\n      junit: test-reports/results.xml\n    paths:\n      - test-reports/results.xml\n\nsast_job:\n  stage: sast\n  script:\n    - echo \"Running SAST...\"\n  artifacts:\n    reports:\n      sast: gl-sast-report.json\n  only:\n    - branches\n\npages:\n  stage: deploy\n  script:\n    - mv test-reports public\n  artifacts:\n    paths:\n      - public\n\n```\n\nIn this configuration, the `sast_job` stage runs SAST to detect vulnerabilities in the code, producing a report (`gl-sast-report.json`) that will be included in the pipeline artifacts! By integrating security policies and robust testing frameworks, GitLab Duo helps customers ensure that their AI-generated code is both efficient and secure.\n\n## Get started today\nThe integration of AI in software development brings significant benefits but also introduces new challenges. By using tools like GitLab Duo and GitLab Pages, developers can ensure that their AI-generated code is secure, well-tested, and reliable. 
Explore these tools today and join the conversation on enhancing AI security and testing!\n\n> [Start a 30-day trial of GitLab Ultimate](https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/blog&glm_content=default-saas-trial) today to access GitLab Duo and GitLab Pages.\n\n## Read more of the \"Developing GitLab Duo\" series\n\n- [Developing GitLab Duo: How we validate and test AI models at scale](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-validate-and-test-ai-models-at-scale/)\n- [Developing GitLab Duo: AI Impact analytics dashboard measures the ROI of AI](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/)\n- [Developing GitLab Duo: How we are dogfooding our AI features](https://about.gitlab.com/blog/developing-gitlab-duo-how-we-are-dogfooding-our-ai-features/)\n- [Developing GitLab Duo: Blending AI and Root Cause Analysis to fix CI/CD pipelines](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/)\n",[704,835,9,814],{"slug":2336,"featured":6,"template":684},"how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code","content:en-us:blog:how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code.yml","How Gitlab Duo Helps Secure And Thoroughly Test Ai Generated Code","en-us/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code.yml","en-us/blog/how-gitlab-duo-helps-secure-and-thoroughly-test-ai-generated-code",{"_path":2342,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2343,"content":2348,"config":2353,"_id":2355,"_type":13,"title":2356,"_source":15,"_file":2357,"_stem":2358,"_extension":18},"/en-us/blog/how-gitlab-supports-the-fedramp-authorization-journey",{"title":2344,"description":2345,"ogTitle":2344,"ogDescription":2345,"noIndex":6,"ogImage":1586,"ogUrl":2346,"ogSiteName":669,"ogType":670,"canonicalUrls":2346,"schema":2347},"How GitLab supports the FedRAMP authorization 
journey","This comprehensive guide dives into the FedRAMP certification process, explaining how GitLab offers guidance and best practices for configuration and compliance.","https://about.gitlab.com/blog/how-gitlab-supports-the-fedramp-authorization-journey","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab supports the FedRAMP authorization journey\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christian Nnachi\"}],\n        \"datePublished\": \"2024-08-07\",\n      }",{"title":2344,"description":2345,"authors":2349,"heroImage":1586,"date":2350,"body":2351,"category":814,"tags":2352},[1347],"2024-08-07","The Federal Risk and Authorization Management Program (FedRAMP) is a U.S. government program that standardizes security assessment, authorization, and continuous monitoring for cloud products and services. Achieving FedRAMP authorization allows cloud service providers (CSPs) to offer their services to federal agencies, ensuring that these services meet stringent security and privacy requirements.\n\nIn this article, you'll learn how to GitLab can help guide you on your FedRAMP authorization journey, including:\n* the key steps of the FedRAMP certification process\n* highlights of GitLab’s role in supporting FedRAMP requirements\n* best practices for configuration and compliance\n\nBy leveraging GitLab’s features and adhering to recommended practices, organizations can streamline their path to FedRAMP authorization and ensure secure and compliant software development.\n\n## Key requirements and compliance levels\n\nFedRAMP categorizes security requirements into [three levels based on the impact of data](https://www.fedramp.gov/understanding-baselines-and-impact-levels/) being handled:\n\n* **Low:** Impact on operations, assets, or individuals is limited.\n* **Moderate:** Impact on operations, assets, or individuals is serious.\n* **High:** Impact on operations, 
assets, or individuals is severe or catastrophic.\n\n## Security and privacy controls from NIST 800-53\n\nFedRAMP's security controls are derived from the [National Institute of Standards and Technology (NIST) Special Publication 800-53](https://csrc.nist.gov/pubs/sp/800/53/r5/upd1/final). Key areas include:\n\n* **Vulnerability scanning and patching SLAs:** Regular scanning and timely patching of vulnerabilities.\n* **Secure software supply chain:** Ensuring that the software and its components are secure.\n* **Change management:** Restricting unauthorized software or system changes through merge request (MR) approval rules.\n\n## Importance of FedRAMP for organizations\n\nFor CSPs, achieving FedRAMP authorization is crucial for doing business with federal agencies. Authorized services are listed on the [FedRAMP Marketplace](https://marketplace.fedramp.gov/products), enhancing their visibility and credibility.\n\n## Steps to achieve FedRAMP certification\n\nThe FedRAMP process is evolving, and a [new roadmap](https://www.fedramp.gov/2024-03-28-a-new-roadmap-for-fedramp/) has been introduced. To stay up to date on the latest changes, [subscribe to General Service Administration (GSA) list](https://public.govdelivery.com/accounts/USGSA/subscriber/new).\n\n### Walkthrough of the certification process\n\n#### 1\\. **Preparation and readiness**\n\n* **Preparation**\n  * Understand FedRAMP requirements and prepare documentation.\n* **Readiness assessment**\n  * CSPs can pursue the optional FedRAMP Ready designation by working with an accredited Third-Party Assessment Organization (3PAO). 
The 3PAO conducts a readiness assessment and documents the CSP's capability to meet federal security requirements in the Readiness Assessment Report (RAR).\n* **Pre-authorization**\n  * CSPs formalize partnerships with an agency as outlined in the FedRAMP Marketplace: Designations for Cloud Service Providers.\n  * CSPs prepare for the authorization process by making necessary technical and procedural adjustments to meet federal security requirements and prepare the required security deliverables for authorization.\n\n#### 2\\. **Authorization package submission and assessment**\n\n* **Authorization package submission**\n  * Historically: Submit the assessment package to the FedRAMP Joint Authorization Board (JAB) or a federal agency sponsor.\n  * [**New process**](https://www.fedramp.gov/2024-03-28-a-new-roadmap-for-fedramp/)**:** Submit to the FedRAMP Board within the GSA, replacing the JAB. The process integrates Agile principles and uses threat-based analysis for control selection and implementation.\n* **Full security assessment**\n  * The 3PAO conducts an independent audit of the CSP's system. Before this, the CSP should complete the System Security Plan (SSP) and have it reviewed and approved by the agency customer.\n  * The 3PAO develops the Security Assessment Plan (SAP) with input from the authorizing agency. After testing, the 3PAO creates a Security Assessment Report (SAR) detailing their findings and providing a recommendation for FedRAMP Authorization.\n* **Agency authorization process**\n  * The agency reviews the security authorization package, including the SAR, and may require CSP remediation.\n  * The agency performs a risk analysis, accepts the risk, and issues an Authority to Operate based on its risk tolerance, with the option to implement, document, and test customer-responsible controls either before or after the ATO issuance.\n\n#### 3\\. 
**Post-authorization and continuous monitoring**\n\n* **Continuous monitoring**\n  * The continuous monitoring phase involves post-authorization activities to maintain FedRAMP-compliant security authorization.\n* **New tool**\n  * [**automate.fedramp.gov**](https://www.fedramp.gov/2024-07-11-new-website-launch-automate-fedramp-gov/)**:** Provides detailed technical documentation, best practices, and guidance for creating and managing digital authorization packages with Open Security Controls Assessment Language ([OSCAL](https://pages.nist.gov/OSCAL/)). It supports a digital-first approach, offering faster documentation updates, enhanced user experience, and community collaboration.\n\nDetailed steps are available on the [FedRAMP Agency Authorization page](https://www.fedramp.gov/agency-authorization/). \n\n### Common challenges and pitfalls\n\n1. **Vulnerability management:** Ensuring timely and effective vulnerability management.\n2. **System boundaries:** Clearly defining and documenting system boundaries.\n3. **Software security practices:** Implementing and maintaining robust software security practices.\n4. **FIPS 140-2 cryptography:** Ensuring cryptographic modules are FIPS 140-2 compliant (details available in [GitLab's FIPS Compliance documentation](https://docs.gitlab.com/ee/development/fips_compliance.html)).\n\n## Role of self-managed GitLab in FedRAMP compliance\n\n### Supporting FedRAMP requirements\n\nSelf-managed GitLab can play a critical role in achieving FedRAMP compliance by providing tools and features that support secure code development and deployment within FedRAMP authorization boundaries.\n\n### Specific features of GitLab aligned with FedRAMP standards\n\n1\\. **Security configuration**\n\nYou can configure [CI/CD pipelines](https://docs.gitlab.com/ee/topics/build_your_application.html) to continuously test code while it ships and simultaneously enforce security policies. 
GitLab includes a suite of security tools that you can incorporate into the development of customer applications, including but not limited to:\n\n* [Security configuration](https://docs.gitlab.com/ee/user/application_security/configuration/index.html)\n* [Container scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/index.html)\n* [Dependency scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/index.html)\n* [Static application security testing](https://docs.gitlab.com/ee/user/application_security/sast/index.html)\n* [Infrastructure as code (IaC) scanning](https://docs.gitlab.com/ee/user/application_security/iac_scanning/index.html)\n* [Secret detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/index.html)\n* [Dynamic application security testing (DAST)](https://docs.gitlab.com/ee/user/application_security/dast/index.html)\n* [API fuzzing](https://docs.gitlab.com/ee/user/application_security/api_fuzzing/index.html)\n* [Coverage-guided fuzz testing](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/index.html)\n\n2\\. **Access control and authentication**\n\nAccess management in a GitLab deployment varies for each customer. GitLab offers extensive documentation on deployments using both identity providers and GitLab's native authentication configurations. It is crucial to evaluate your organization's specific requirements before deciding on an authentication approach for your GitLab instance.\n\n3\\. **[Identity providers](https://docs.gitlab.com/ee/security/hardening_nist_800_53.html#identity-providers)**\n\nTo comply with FedRAMP requirements, ensure your existing identity provider is FedRAMP-authorized and listed on the FedRAMP Marketplace, and for requirements like personal identity verification (PIV), use an identity provider rather than relying on native authentication in self-managed GitLab.\n\n4\\. 
**[Native GitLab user authentication configurations](https://docs.gitlab.com/ee/security/hardening_nist_800_53.html#native-gitlab-user-authentication-configurations)**\n\nGitLab enables administrators to monitor users with different levels of sensitivity and access requirements.\n\n5\\. [**Audits and accountability**](https://docs.gitlab.com/ee/administration/audit_event_streaming/)\n\nGitLab provides a wide array of security events and streaming capabilities for comprehensive logging and monitoring that can be routed to a Security Information and Event Management (SIEM) solution.\n\n* [Event types](https://docs.gitlab.com/ee/security/hardening_nist_800_53.html#event-types)\n\n6\\. **Incident response**\n\nAfter configuring audit events, it's crucial to monitor them. GitLab offers [tools](https://docs.gitlab.com/ee/operations/incident_management/index.html) for alert management, incident tracking, and status reporting through a centralized interface, allowing you to compile system alerts from SIEM or other security tools, triage incidents, and keep stakeholders informed.\n\n* [alerts](https://docs.gitlab.com/ee/operations/incident_management/alerts.html)\n* [incidents](https://docs.gitlab.com/ee/operations/incident_management/incidents.html)\n* [on-call schedules](https://docs.gitlab.com/ee/operations/incident_management/oncall_schedules.html)\n* [status page](https://docs.gitlab.com/ee/operations/incident_management/status_page.html)\n\n7\\. **Configuration management**\n\nAt its core, GitLab meets [configuration management](https://docs.gitlab.com/ee/security/hardening_nist_800_53.html#configuration-management-cm) needs with robust CI/CD pipelines, approval workflows, and change control, primarily using issues and MRs to manage changes.\n\n8\\. 
**Federal Information Processing Standard (FIPS) compliance**\n\nGitLab supports [FIPS compliance](https://docs.gitlab.com/ee/development/fips_compliance.html) by offering versions that use FIPS-validated cryptographic modules such as OpenSSL, BoringSSL, or other CMVP-validated modules. This ensures that cryptographic operations meet FIPS requirements, making it suitable for use in environments that require high levels of security compliance, such as those seeking FedRAMP authorization. Additionally, GitLab's documentation provides detailed instructions for installing and configuring FIPS-compliant deployments, including a hybrid approach using omnibus and cloud native components.\n\n9\\. [**NIST 800-53 R5 security and privacy controls management project template**](https://gitlab.com/gitlab-org/project-templates/nist_80053r5)\n\nThe project template helps track and manage compliance with NIST 800-53 R5 using GitLab issues, based on [NIST 800-53R5 specifications](https://csrc.nist.gov/pubs/sp/800/53/r5/upd1/final). It includes pre-configured issues, issue boards, and a notional example pipeline to run tests using OpenSCAP (OSCAP) and update issues with artifacts and labels, creating a controls management project within GitLab. This template centralizes compliance efforts, automates control testing, and facilitates a seamless workflow for both project teams and auditors.\n\n## Best practices for using GitLab in the FedRAMP process\n\n### Recommended configurations and setups\n\nTo align self-managed GitLab with NIST 800-53 controls and FedRAMP requirements, consider the following best practices:\n\n1. **Security hardening:** Follow GitLab’s [security hardening guidance](https://docs.gitlab.com/ee/security/hardening_nist_800_53.html).\n2. **Access control:** Implement role-based access control (RBAC) and enforce [the principle of least privilege](https://about.gitlab.com/blog/the-ultimate-guide-to-least-privilege-access-with-gitlab/).\n3. 
**CI/CD pipelines:** Configure pipelines to include security testing and approval stages.\n4. **Audit logging:** Enable comprehensive audit logging and integrate with a SIEM system.\n5. **Backup and recovery:** Establish robust backup and recovery processes.\n\n### NIST 800-53 compliance\n\nGitLab provides various compliance features to help automate critical controls and workflows. Administrators should work with customer solutions architects to configure GitLab instances to meet applicable [NIST 800-53 controls](https://docs.gitlab.com/ee/security/hardening_nist_800_53.html).\n\n## Start your FedRAMP compliance journey\n\nAchieving FedRAMP authorization is a complex but strategic process for CSPs looking to provide services to federal agencies. Self-managed GitLab offers a comprehensive suite of tools and features that can support this journey, ensuring secure and compliant software development and operations. By following best practices and leveraging GitLab’s capabilities, organizations can navigate the challenges of FedRAMP compliance and successfully achieve authorization.\n\n> Learn more about [GitLab's solutions for the public sector](https://about.gitlab.com/solutions/public-sector/).",[9,183,835],{"slug":2354,"featured":90,"template":684},"how-gitlab-supports-the-fedramp-authorization-journey","content:en-us:blog:how-gitlab-supports-the-fedramp-authorization-journey.yml","How Gitlab Supports The Fedramp Authorization 
Journey","en-us/blog/how-gitlab-supports-the-fedramp-authorization-journey.yml","en-us/blog/how-gitlab-supports-the-fedramp-authorization-journey",{"_path":2360,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2361,"content":2366,"config":2372,"_id":2374,"_type":13,"title":2375,"_source":15,"_file":2376,"_stem":2377,"_extension":18},"/en-us/blog/how-gitlabs-red-team-automates-c2-testing",{"title":2362,"description":2363,"ogTitle":2362,"ogDescription":2363,"noIndex":6,"ogImage":2164,"ogUrl":2364,"ogSiteName":669,"ogType":670,"canonicalUrls":2364,"schema":2365},"How GitLab's Red Team automates C2 testing ","Learn how to apply professional development practices to Red Teams using open source command and control tools.","https://about.gitlab.com/blog/how-gitlabs-red-team-automates-c2-testing","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How GitLab's Red Team automates C2 testing \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Josh Feehs\"}],\n        \"datePublished\": \"2023-11-28\",\n      }",{"title":2362,"description":2363,"authors":2367,"heroImage":2164,"date":2369,"body":2370,"category":814,"tags":2371},[2368],"Josh Feehs","2023-11-28","At GitLab, our [Red Team](https://handbook.gitlab.com/handbook/security/threat-management/red-team/) conducts security exercises that emulate real-world threats. By emulating real-world threats, we help assess and improve the effectiveness of the people, processes, and technologies used to keep our organization secure. To operate effectively, we must utilize professional development practices like the threat actors we emulate.\n\n[Threat actors](https://www.securonix.com/blog/threat-labs-security-advisory-new-starkvortex-attack-campaign-threat-actors-use-drone-manual-lures-to-deliver-merlinagent-payloads/) use open source command and control (C2) tools such as [Merlin](https://github.com/Ne0nd0g/merlin). 
While convenient, these tools have intentionally detectable features to discourage illegitimate use. Red Teams often need to customize and combine different open source options to evade detections in the environments they target.\n\nIn this blog, you'll learn how our team applies professional development practices to using open source C2 tools. We'll share how we implement continuous testing for the Mythic framework, our design philosophy, and a public project you can fork and use yourself.\n\nOur solution, available in [this public project](https://gitlab.com/gitlab-com/gl-security/threatmanagement/redteam/redteam-public/continuousmage), improves our Red Team operations in two ways. First, it contains a suite of **pytest** tests for the Mythic C2 framework. These validate functionality of both the Mythic server and multiple Mythic-compatible agents. Second, it leverages **GitLab CI/CD pipelines** to automatically run these tests after each code change. This enables iterative development and rapid validation of updates to Mythic or Mythic-compatible C2 agents.\n\n## Prerequisites\n\nCurrently, a few prerequisites fall outside the scope of test automation:\n\n- A Linux VM with Mythic, its Python requirements, and the HTTP profile installed. See the [Mythic installation guide](https://docs.mythic-c2.net/installation). We suggest binding Mythic's admin interface to localhost only.\n- A fork of [the ContinuousMage GitLab project](https://gitlab.com/gitlab-com/gl-security/threatmanagement/redteam/redteam-public/continuousmage) in GitLab.com or your own GitLab instance. You'll build on top of this to run your own automation. We highly suggest making this fork private, so you don't expose your test infrastructure or C2 code changes.\n- GitLab Runner installed on the VM (configured with the [shell executor](https://docs.gitlab.com/runner/executors/shell.html)) and registered with your GitLab instance. 
See the docs on [installing](https://docs.gitlab.com/runner/install/) and [registering](https://docs.gitlab.com/runner/register/) a runner or follow the instructions provided when configuring your pipeline later in this blog. You'll assign this runner to your project when we configure CI/CD.\n- Your forked project cloned onto your VM. This allows testing code changes (or new tests) before triggering the pipeline.\n\n## Project structure\n\nThe project contains three main portions that we will detail in this blog post:\n\n1. `pytest` test code for running integration tests for Mythic and Mythic-compatible C2 agents\n2. The source of those Mythic-compatible C2 agents, as git submodules\n3. The GitLab CI/CD pipeline configuration that ties it all together\n\n## Part 1: pytests\n\n[pytest](https://docs.pytest.org/en/7.4.x/) is a framework for writing tests in Python. We can leverage pytest to do integration testing of Mythic since it has its own [Python package](https://pypi.org/project/mythic/). The test suite goals are:\n\n1. Be simple and atomic.\n2. Provide adequate coverage to validate tool readiness.\n\nWe'll walk through a simple test verifying an agent can run the `ls` command, highlighting key code sections for customization.\n\n### Implementation\n\n#### pytest file\n\nWhen run on a directory, `pytest` automatically discovers tests in files prefixed with `test_` and test functions starting with `test_`. Our tests are asynchronous, needing the `pytest.mark.asyncio` decorator, because the Mythic APIs we are testing are asynchronous. If your machine is missing test dependencies, run `python3 -m pip install mythic pytest pytest-asyncio`.\n\nA test function skeleton is as follows:\n\n```python\n@pytest.mark.asyncio\nasync def test_agent_ls():\n    # Will do the test here\n    continue\n```\n\n#### The GlMythic class\n\nThe `GlMythic` class wraps Mythic APIs for ease of use in testing. 
Because its `init` function is async, a coroutine creates the object:\n\n```python\n@pytest.mark.asyncio\nasync def test_agent_ls():\n    glmythic = await gl_mythic.create_glmythic()\n```\n\nBy default, it connects to the Mythic DB using the `MYTHIC_ADMIN_PASSWORD` environment variable and is configured to test the agent specified via the `AGENT_TYPE` environment variable. We will set these in the CI/CD config later.\n\n#### Interacting with Mythic via GlMythic\n\nWe'll include the remainder of the test code here, with comments, and then discuss the most important parts.\n\nAs a reminder, one of the key goals of this project was to make completely atomic tests. Each test only relies on a running Mythic server with the specific agent and HTTP containers loaded. As the test suite grows, it may be worth running a secondary set of tasks that relies on an already-existing agent connection. Currently, every test creates, downloads, and executes a new agent.\n\n### Test and deploy\n\n```python\n@pytest.mark.asyncio\nasync def test_agent_ls():\n\n    glmythic = await gl_mythic.create_glmythic()\n\n    # Unique payload_path per test\n    payload_path = \"/tmp/test_agent_ls\"\n\n    # Wraps agent create, download, and execute\n    proc = await glmythic.generate_and_run(payload_path=payload_path)\n\n    # Wait for callback\n    time.sleep(10)\n\n    # Uses the display_id field to determine most recent callback\n    # Assumes that the most recent callback is the one created by this test\n    callback = await glmythic.get_latest_callback()\n\n    # Issue the ls command, blocking on output\n    output = await mythic.issue_task_and_waitfor_task_output(\n        mythic=glmythic.mythic_instance,\n        command_name=\"ls\",\n        parameters=\"\",\n        callback_display_id=callback[\"display_id\"],\n        timeout=20,\n    )\n\n    # Clean up (no longer need the agent)\n    proc.terminate() \n    os.remove(payload_path)\n\n    # If the ls failed, there will be no output\n    
# This test could also look for files in the repo (where the agent runs)\n    assert len(output) > 0\n\n```\n\nThe longest running portion of this test will be the call to `generate_and_run`, as agent builds within Mythic can take from seconds to minutes or even hang altogether. For your initial set of tests, sign in to the Mythic server and watch the **Payloads** screen for potential issues. In our testing, agent builds failed to complete around 5% of the time, depending on the agent. If you experience repeated build failures, reload your agent container with `sudo ./mythic-cli install folder \u003Cagent_directory> -f`.\n\nTo run the tests, run `pytest \u003Ctestfile_directory>`.\n\n## Part 2: Agent source as submodules\n\nBecause Mythic agents are often updated, we include the agent repos as git submodules in our test project. This allows us to update to new agent versions when they are released and use our project's version control to keep tool versions static for known good builds. These submodules are all located in the `agents` folder.\n\nWe'll discuss adding more agents to this project later in this blog.\n\n## Part 3: GitLab CI/CD pipeline\n\nNow that you have working pytests, you can automate your tests to run whenever you want. In our case, we chose to run our tests on merge requests and tagged commits (which are likely to be tool releases). We will be using [GitLab CI/CD pipelines](https://docs.gitlab.com/ee/ci/pipelines/) to perform our automated tests.\n\n### Configuring the pipeline\n\nNow is the time to set your GitLab CI/CD settings. To find these settings, go to your repository -> `Settings` -> `CI/CD`.\n\nThe first setting you'll want to set is your `Runner`. If you set up a runner as one of your prerequisite steps earlier, you can assign it here. If not, click `New project runner` and work through that process to create and set up your runner on your Mythic server. 
When you are prompted to choose a runner type on install, choose the [shell executor](https://docs.gitlab.com/runner/executors/shell.html). If your team uses shared runners for other CI/CD pipelines, you will want to make sure that shared runners are disabled for this project, given that your shared runners are unlikely to be able to talk to Mythic directly.\n\n![runner-settings](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683075/Blog/Content%20Images/runner-settings.png)\n\nNext, you need to set your `Variables`. The `GlMythic` class uses the `MYTHIC_ADMIN_PASSWORD` environment variable to be able to actually sign into Mythic, so you need to make sure that the pipeline runner's environment is set up correctly.\n\nTo do this, click the `Add variable` button and add the `MYTHIC_ADMIN_PASSWORD` variable with the appropriate value. If you don't know your Mythic admin password, on the Mythic server in the directory where you installed Mythic, `cat .env | grep MYTHIC_ADMIN_PASSWORD` will give you the password.\n\nBecause GitLab handles merge requests in a detached state, you need to unclick the `Protect Variable` box, because that would prevent the pipeline from viewing the variable on a merge request otherwise. Because the variable is not protected, any branch committed back to your server can access your CI variables. This may pose a security risk if you allow remote access to your Mythic server (versus binding to localhost) and if you allow arbitrary users to access your repository. For this reason, our public repo does not have the environment variables. We use a private copy to perform testing, and suggest you do the same.\n\nAdditionally, set the `AGENT_TYPE` variable to the name of the agent you want to use. At time of release, valid agent types are `poseidon` or `merlin`. The section about adding more agents to the test suite will go into more detail.\n\nYou can also document commands to lint the code or run tests. 
These steps help to ensure high code quality and reduce the likelihood that the changes inadvertently break something. Having instructions for running tests is especially helpful if it requires external setup, such as starting a Selenium server for testing in a browser.\n\nNow that the pipeline is configured to use the runner and pick up the environment variables that you need, the only thing left to do is to set up your pipeline. This step is quite simple: If you add the `.gitlab-ci.yml` file to the root of your repository, GitLab will pick that up as the pipeline config on your next commit. Here is our example pipeline, which we will explain momentarily.\n\n```yaml\ninstall:\n  stage: install\n  script:\n    - sudo /opt/Mythic/mythic-cli install folder \"${CI_PROJECT_DIR}\"/agents/\"${AGENT_TYPE}\" -f\n  rules:\n    - if: $CI_PIPELINE_SOURCE == 'merge_request_event'\n    - if: $CI_COMMIT_TAG\n\ntest:\n  stage: test\n  script:\n    - pytest \"${CI_PROJECT_DIR}\"/mythic-test\n  rules:\n    - if: $CI_PIPELINE_SOURCE == 'merge_request_event'\n    - if: $CI_COMMIT_TAG\n```\n\nAll of the variables set above are made available by GitLab as part of every pipeline. This pipeline has two stages, `install` and `test`. Both stages are set to only run on merge requests or if the commit being evaluated has a specific tag. The `install` stage will install your C2 agent into Mythic using its local folder install. This makes sure that the Mythic server has your latest C2 code changes installed. Next, the `test` stage runs the set of pytest tests that we created. The `install` stage will run very quickly, and the `test` stage will run a little more slowly, given that it's doing the work of creating and interacting with Mythic agents.\n\n### Pipeline in action\n\nYou can do a couple of things to validate that your pipeline is working. First, if you are performing a merge request, there will be a section at the beginning of the merge request that will link to the pipeline. 
The screenshot below shows that the pipeline has passed, but you can click into the pipeline by clicking on its number even when it's running.\n\n![Pipeline passing](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683075/Blog/Content%20Images/merge-pipeline-pass.png)\n\nYou can then click into the stage that's running (or one that has already run) to view its output.\n\n![Pipeline task output](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683075/Blog/Content%20Images/pipeline-task-output.png)\n\nAnd there you are! You now have working `pytest` tests for a Mythic agent that run every time you make a merge request.\n\n## Adapting for other agents\n\nWe tested our test suite against Poseidon and Merlin. Although the initial tests (generate, download and exec, ls) work the same for both agents, Poseidon and Merlin require different parameters for their `upload` commands. Unfortunately, this means that not all tests will be agent agnostic.\n\nAs a result, each `GlMythic` object that is created is told what type of agent it is testing. The coroutine for creating an object allows you to pass in the agent type as a variable, and defaults to using the `AGENT_TYPE` environment variable to determine which agent is being tested.\n\n```python\nasync def create_glmythic(  username=\"mythic_admin\",\n                            password=os.getenv(\"MYTHIC_ADMIN_PASSWORD\"),\n                            server_ip=\"127.0.0.1\",\n                            server_port=7443,\n                            agent_type=os.getenv(\"AGENT_TYPE\")):\n```\n\n### Agent source\n\nTo add more agents for testing, the first thing to do is to import your agent as a git submodule:\n\n```bash\ncd agents\ngit submodule add \"${URL_TO_YOUR_AGENT}\"\n```\n\nCommit your changes, and your agent is tracked as part of the repo.\n\n### Test compatibility\n\nYou'll need to validate that existing tests work with your agent. 
For tests to work, the parameters passed to the commands must match those in the test suite, with `upload` being the most likely to fail.\n\nThis is okay! Within the `test_agent_upload` test function, you'll see example code that specifies a different upload command for Merlin and Poseidon. Simply follow this structure for your own agent, passing your agent's parameters to the `mythic.issue_task_and_waitfor_task_output` function call.\n\nIf you are using another open source C2 and are unsure of the correct parameters to pass, you can use the Mythic UI. Interact with one of your agents and run the `upload` command to see what params you need to pass. If you do this for Poseidon, it will look like the following:\n\n![upload-parameters](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683075/Blog/Content%20Images/upload-parameters.png)\n\nOur test suite should be pretty easy to add to any Linux-based Mythic agent that supports the [HTTP C2 profile](https://github.com/MythicC2Profiles/http). Because the GitLab Runner installs the agent into Mythic (and Mythic is made to run on Linux), the runner is expecting to be on a Linux machine. Additional effort and test modifications will be required to run the test suite against a Windows or MacOS agent.\n\n## A quick win\n\nAs we worked on this project, we were continuously running our test suite against both Poseidon and Merlin. Unexpectedly, in early October 2023, our test for Poseidon's `upload` function started to fail. After a quick investigation, we identified that a bug had been introduced, present in Poseidon 2.0.2, that caused file uploads to fail.\n\nWe took our information to one of the Poseidon developers, Cody Thomas ([@its_a_feature_](https://twitter.com/its_a_feature_)), and he quickly identified the underlying issue and [fixed the problem](https://github.com/MythicAgents/poseidon/commit/83de4712448d7ed948b3e2d2b2f378d530b3a42a).\n\nThis highlights the usefulness of continuous testing. 
Instead of running into a potential bug during a Red Team exercise, we identified the issue beforehand and were able to report the bug so the issue was fixed.\n\nWe sincerely thank the Mythic, Merlin, and Poseidon developers for open sourcing their hard work. Many Red Teams around the world are able to perform high-quality security assessments in part because of the hard work of C2 developers who open source their tools. We also want to specifically thank Cody Thomas for addressing this bug within 20 minutes of notification. His responsiveness and attention to detail are unmatched.\n\n## Share your feedback\n\nThis post has demonstrated both the value of continuous testing and shown how to implement continuous testing for your own use, using GitLab. If you have worked alongside these examples, you've implemented some continuous testing for the Mythic framework and have tests that you can use for Merlin, Poseidon, or your own Mythic agent(s).\n\nAt GitLab, we always seek feedback on our work. If you have any questions or comments, please open an issue on [our project](https://gitlab.com/gitlab-com/gl-security/threatmanagement/redteam/redteam-public/continuousmage). You can also propose improvements via a merge request. 
We believe that everyone should be able to contribute, so we welcome any contributions, big or small.\n\n> [Try GitLab Ultimate for free today.](https://gitlab.com/-/trials/new)\n\n## Related reading\n- [Stealth operations: The evolution of GitLab's Red Team](https://about.gitlab.com/blog/stealth-operations-the-evolution-of-gitlabs-red-team/)\n- [How we run Red Team operations remotely](https://about.gitlab.com/blog/how-we-run-red-team-operations-remotely)\n- [Use GitLab and MITRE ATT&CK Navigator to visualize adversary techniques](https://about.gitlab.com/blog/gitlab-mitre-attack-navigator)\n- [Monitor web attack surface with GitLab](https://about.gitlab.com/blog/monitor-web-attack-surface-with-gitlab)\n",[814,1041,230,9],{"slug":2373,"featured":90,"template":684},"how-gitlabs-red-team-automates-c2-testing","content:en-us:blog:how-gitlabs-red-team-automates-c2-testing.yml","How Gitlabs Red Team Automates C2 Testing","en-us/blog/how-gitlabs-red-team-automates-c2-testing.yml","en-us/blog/how-gitlabs-red-team-automates-c2-testing",{"_path":2379,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2380,"content":2386,"config":2392,"_id":2394,"_type":13,"title":2395,"_source":15,"_file":2396,"_stem":2397,"_extension":18},"/en-us/blog/how-start-ospo-ten-minutes-using-gitlab",{"title":2381,"description":2382,"ogTitle":2381,"ogDescription":2382,"noIndex":6,"ogImage":2383,"ogUrl":2384,"ogSiteName":669,"ogType":670,"canonicalUrls":2384,"schema":2385},"Start an open source center of excellence in 10 minutes using GitLab","Launch your own open source program office using the OSPO Alliance's tools on GitLab","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682593/Blog/Hero%20Images/opensign.jpg","https://about.gitlab.com/blog/how-start-ospo-ten-minutes-using-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Start an open source center of excellence in 10 minutes using 
GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Boris Baldassari\"}],\n        \"datePublished\": \"2023-01-30\",\n      }",{"title":2381,"description":2382,"authors":2387,"heroImage":2383,"date":2389,"body":2390,"category":769,"tags":2391},[2388],"Boris Baldassari","2023-01-30","\nNow that open source has finally become a mainstream topic of conversation in the software industry, many organizations are increasingly curious about best practices for consuming, using, managing, and contributing to open source software projects. Open source software can seem alien and intimidating for organizations unfamiliar with it, and participating meaningfully and effectively in the open source ecosystem can be challenging.\n\nOrganizations especially serious about working in open source have formed [open source program offices](https://opensource.com/business/16/5/whats-open-source-program-office) (OSPOs) to spearhead their efforts. These offices are centers of excellence for an organization's ongoing work in open source. They help the organization realize the benefits of working with open source communities to accelerate innovation and build more secure tools.\n\nPerhaps your organization is considering establishing an OSPO. If it is, you likely have questions about how to get started – and especially about the best ways to help your organization become a valuable participant in the open source ecosystem.\n\nThe [OSPO Alliance](https://ospo.zone/) can help. Formed in 2021, the OSPO Alliance connects [experienced open source practitioners](https://ospo.zone/membership/) with organizations in need of seasoned guides to the open source world. 
Since the organization's founding, its members have composed a corpus of best open source practices called the [Good Governance Initiative Handbook](https://ospo.zone/ggi/), which explores various legal, cultural, and strategic considerations organizations face when working with open source software (and, naturally, the handbook itself is openly licensed, so anyone can contribute to it).\n\nTo celebrate the launch of the GGI Handbook Version 1.1, the OSPO Alliance went a step further: We have released the [MyGGI project](https://gitlab.ow2.org/ggi/my-ggi-board), which allows organizations to quickly create the infrastructure for their own open source program offices using GitLab.\n\nNow, let's look at what the MyGGI project can help your organization accomplish, including how to use the tool to establish an OSPO built on GGI principles — in only 10 minutes.\n\n## Working with the GGI Handbook\n\nThe GGI Handbook defines 25 activities, or best practices, organized according to various goals an organization may seek to accomplish with open source. Examples of activities include recommendations like \"Manage open source skills and resources,\" \"Manage software dependencies,\" \"Upstream first,\" or \"Engage with open source projects.\" Each of these activities, then, has a corresponding description and rationale, and the handbook provides resources, tools, and hints for successfully implementing them.\n\nActivities are intentionally generic and must be adapted to your organization's specific, unique, local context. The GGI Handbook offers tools for doing this, too: scorecards. Scorecards allow you to assess your organization's engagement in and progress with various open source best practices.\n\nSo working with the GGI Handbook in your organization might look something like this:\n\n1. 
Evaluate the open source-related activities the handbook proposes and remove those that don't fit your specific context (maybe some activities will require a bit of adptation to be more relevant to the domain, while some others may just be discarded).\n1. Identify the activities that would be most beneficial to reaching your organization's goals in engaging with open source.\n1. Construct an Agile-like, iterative process for working on a small set of these activities. Do this in the form of sprints by tracking your progress with scorecards, and adapt the activity to your local context, team cultures, and available resources as you go.\n1. At the end of each iteration, review the activities your teams have completed, select a new scope for improvement, and repeat the process.\n\nThe MyGGI project provides a push-button infrastructure for doing this work. Next, let's examine how to deploy it on GitLab.\n\n### Deploying the GGI Handbook on GitLab\n\nThe OSPO Alliance wanted to provide a quick and straightforward way for organizations to establish their own open source program activities using a dashboard, so they can start implementing the GGI Handbook's methods without delay. We didn't want to reinvent the wheel with some heavy custom tooling. Instead, we decided to build the project using tools already available to us. We had already used GitLab issues to model activities during the early stages of handbook development, so reusing this GitLab feature made most sense. By simply adding some scripting to automate the initialization of activities and updating a static website on GitLab Pages, we were able to launch the project so others could easily deploy it in their own GitLab instances.\n\nInstructions for deploying the program are available in the project's [README](https://gitlab.ow2.org/ggi/my-ggi-board/-/blob/main/README.md). Let's review them here and start your own OSPO together.\n\nFirst, we need to create a new project on our GitLab instance. 
Select `Import project`, then `From repository by URL`. \n\nNext, we will need to provide a remote URL. Copy the existing MyGGI project by using the [URL](https://gitlab.com/gitlab-com/marketing/community-relations/open-source-program/gitlab-open-source-partners/publications-and-presentations/-/tree/main) `[https://gitlab.ow2.org/ggi/my-ggi-board.git](https://gitlab.ow2.org/ggi/my-ggi-board.git)`.\n\nThen we will give our project a unique name and choose a visibility level. Here's an example of how it might look:\n\n![Repository by URL](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-1.png){: .shadow}\n\nWhen you have configured your desired settings, click `Create project` to continue.\n\nOur next step is to configure access privileges. Go to `Project Settings > Access Tokens` and create a `Project Access Token` with `API` privilege and `Maintainer` role. The project's scripts will use these to create the issues and generate the static website dashboard for your OSPO.\n\nWhen the token is created, copy it to a safe place, as **you will never be able to see it again**. Note that some GitLab instances prefer to disable the Project Access Token feature in favor of Personal Access Tokens. This is perfectly okay; the preference won't affect the deployment of this project (see the instructions for more details).\n\nHere's an example of what you will see at this stage:\n\n![Project access tokens](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-2.png){: .shadow}\n\nWe will then provide this access token to the pipelines and scripts by creating a CI/CD environment variable. Go to `Project Settings` and then `CI/CD`. Scroll to the `Variables` section and add a new variable with name `GGI_GITLAB_TOKEN`. Input the access token you created in the last step as the value. 
Here's an example:\n\n![Add variable screen](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-3.png){: .shadow}\n\nWe can now execute the pipeline to begin the process of creating your OSPO infrastructure. Go to `CI/CD`, then `Pipelines`, and click on `Run pipeline`. After a couple of minutes, the pipeline should finish and the website will deploy. You will see something like this when the pipeline finishes:\n\n![Pipeline passed screen](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-4.png){: .shadow}\n\nInfrastructure for your open source program office is now ready!\n\n### Using the tools\n\nThe MyGGI project creates a set of 25 activities, along with a nice project board to help you visualize them:\n\n![Project board](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-5.png){: .shadow}\n\nUsers can click on specific activities (rendered as issues) to read the description of the activity, understand the tools and resources that might help them complete it, and begin completing relevant scorecards. Users can also define their own perspectives on the activities, as they see them from the organization's specific context. Then they can create tasks to narrow the scope of each activity so they can iterate on it and track progress. \n\nTheir work is displayed on a static website hosted on GitLab Pages and updated nightly according to the organization's progress on various activities and tasks. This web page is especially useful to present the program and its day-to-day evolution to the organization (or the world); participants, stakeholders, and executives can review it to learn more about the various initiatives, see what work is underway, and track the overall development of the organization's open source program office. 
The initial website looks like this:\n\n![Welcome screen of website](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-6.png){: .shadow}\n\n### Running your OSPO\n\nSelecting an open source program manager to oversee the work on the project boards is beneficial at this step. That person will:\n\n- Assign issues to team members to start working on new activities, create scorecards to track the work and associated tasks, and label them as \"In Progress\" instead of \"Not Started\".\n- Oversee the evolution of the work as it moves through various iterations, completing the scorecards with local resources and information, and closing issues as tasks are complete.\n- Ensure that issues keep making progress and, as team members complete them, assign new ones.\n\nAs changes occur in both the project and its issues, your OSPO's static website will regularly update to reflect the current status of activities, tasks, and the overall progress. After some time, for instance, the dashboard may look like this:\n\n![Dashboard with current status](https://about.gitlab.com/images/blogimages/ospointenminutes/screenshot-9.png){: .shadow}\n\nYou're now on your way to establishing your organization's open source program office. 
Don't hesitate to connect with the [OSPO Alliance](https://ospo.zone/) for help and support as you continue your journey!\n\n_Boris Baldassari is an open source consultant at the Eclipse Foundation Europe, and an active contributor to the OSPO Alliance._\n\nCover image by [Clay Banks](https://unsplash.com/@claybanks?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://www.unsplash.com)\n{: .note}\n\n",[727,9,773],{"slug":2393,"featured":6,"template":684},"how-start-ospo-ten-minutes-using-gitlab","content:en-us:blog:how-start-ospo-ten-minutes-using-gitlab.yml","How Start Ospo Ten Minutes Using Gitlab","en-us/blog/how-start-ospo-ten-minutes-using-gitlab.yml","en-us/blog/how-start-ospo-ten-minutes-using-gitlab",{"_path":2399,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2400,"content":2406,"config":2411,"_id":2413,"_type":13,"title":2414,"_source":15,"_file":2415,"_stem":2416,"_extension":18},"/en-us/blog/how-to-access-gitlab-on-a-private-network-with-tailscale",{"title":2401,"description":2402,"ogTitle":2401,"ogDescription":2402,"noIndex":6,"ogImage":2403,"ogUrl":2404,"ogSiteName":669,"ogType":670,"canonicalUrls":2404,"schema":2405},"How to access GitLab on a private network with Tailscale","If issues around a private network were preventing a permanent GitLab installation, Brendan O'Leary has the solution with Tailscale.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679348/Blog/Hero%20Images/locks.jpg","https://about.gitlab.com/blog/how-to-access-gitlab-on-a-private-network-with-tailscale","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to access GitLab on a private network with Tailscale\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2022-07-21\",\n      
}",{"title":2401,"description":2402,"authors":2407,"heroImage":2403,"date":2408,"body":2409,"category":769,"tags":2410},[1221],"2022-07-21","\nGitLab provides an easy-to-install package for most Linux distributions and even for devices like the [Raspberry Pi](https://docs.gitlab.com/omnibus/settings/rpi.html). However, if you want to install GitLab in a home lab or similar private network, you would then be faced with a new issue: how do you access the instance from outside that private network?\n\nTraditionally, you would set up your router to forward traffic from your public IP address to the server inside your network. However, this comes with several drawbacks:\n\n- Opening a port on your home or private network comes with a sustainable amount of risk.\n- It can be hard or impossible for folks to do depending on their internet service provider and what routing equipment they use.\n- It can be especially tough if your ISP doesn't provide you with a statically assigned IP address which means your address can change from time to time, and you'll need to either update DNS manually or through some third-party [dynamic DNS](https://www.cloudflare.com/learning/dns/glossary/dynamic-dns/) service.\n\nFor me, all of these challenges have meant that I've only ever really run GitLab \"for fun\" on my local network. Given the challenges above, running a permanent installation wasn't an option. That is until [Tailscale](https://tailscale.com) entered my life.\n\n## Tailscale\n\nTailscale isn't necessarily the \"newest\" technology. In fact, it is based on the [WireGuard protocol](https://www.wireguard.com/), which has existed in one form or another since 2015 and has seen native kernel support added to various Linux distributions as well as the kernel itself over the past several years.  Wireguard VPN technology makes considerable improvements in the usability and setup of virtual private networks over earlier protocols like IPsec. 
Even with being easier to use, the \"problem\" with WireGuard, at least for me, was always that it was still too complex to set up and maintain. Much like configuring my ISP's router for port forwarding, it wasn't _impossible_, but it just wasn't practical.\n\nEnter Tailscale. Tailscale provides a simple piece of client software, available for Linux, Mac, and Windows (and iOS and Android!), which implements the WireGuard protocol and allows you to control your VPN network from a handy web interface. Not only that, it's [free to use](https://tailscale.com/pricing/) for individuals and small networks. When I started using Tailscale, it was to make sure I could connect back to my home network and troubleshoot it while traveling for work. As the only system administrator in my house, this was fantastic.\n\nHowever, Tailscale also offers the ability to easily access services inside of various networks as well by setting up a mesh VPN between them, all with IP addresses in the 100.x.y.z range. That means for any web service or other service on my network, I can access it with a statically assigned IP address from any other device connected to Tailscale, and create a DNS record to have a domain point to the IP address. At last, I could run GitLab (and other open source tools) at home and safely connect to them from outside my house with as little hassle as possible. So how did I get it to work?\n\n## Tailscale and GitLab together\n\nAssuming you already have a GitLab [installation](/install/) up and running on your network, getting it working through Tailscale involves a few steps:\n\n- Installing Tailscale\n- Setting up DNS for the private address\n- Configuring HTTPS encryption\n\n### Installing Tailscale\n\nPackages are [available](https://tailscale.com/kb/1031/install-linux/) for many Linux distributions. To install Tailscale, you can select your [specific distribution](https://tailscale.com/kb/1031/install-linux/) for detailed instructions. 
There are also [static binaries](https://tailscale.com/kb/1053/install-static/) if you can't find your particular distribution - they are available for x86 and ARM CPUs for both 32- and 64-bit variants.\n\nOnce Tailscale is installed, getting it running is as simple as running the following command on the CLI:\n\n```bash\nsudo tailscale up\n```\n\nThe setup dialogue will walk you through the authentication process and get Tailscale running. After that process, you can see your new IP address for this node on your network with the CLI command `tailscale ip -4`. You'll need that IP address for the next steps.\n\nBy default, Tailscale will set an expiration date for the token it issues to your device during the authentication process. This is desirable for typical devices that may be transient or portable. However, suppose your device is secured inside your home or another secure place AND is a server you're not accessing all the time. In that case, you can optionally [disable key expiry](https://tailscale.com/kb/1028/key-expiry/) for that particular device.\n\n### Setting up DNS\n\nYou should be able to now access your device from any other Tailscale-connected device via the IP address from the last step. However, my goal was to make it easy for me to connect to GitLab, reference it by an URL, and encrypt the traffic end-to-end with TLS. As the next step I set up DNS.\n\nEven though the 100.x.y.z address is a private IP address, you can still create a public DNS record and have the hostname to point to it. That won't mean the whole world can access your server - it just means once you're connected to your Tailscale network, you can resolve that hostname to the IP address and access the web server. 
For me, I set up an A record for `gitpi.boleary.dev` to resolve to an IPv4 address:\n\n```\n;; QUESTION SECTION:\n;gitpi.boleary.dev.\t\tIN\tA\n\n;; ANSWER SECTION:\ngitpi.boleary.dev.\t300\tIN\tA\t100.64.205.40\n```\n\nAn important note here is that I use Cloudflare as my DNS provider - and I usually love Cloudflare's proxying service to make my \"real\" IP addresses private. In this case, you have to disable that proxying to make sure that you can resolve the correct address - Cloudflare can't proxy traffic into your Tailscale network.\n\n### Configuring HTTPS\n\nLastly, configuring HTTPS for your GitLab instance will ensure that all traffic is encrypted end-to-end. While Tailscale encrypts the traffic over the network, this will ensure there are no gaps between your device and your GitLab server.\n\nTo accomplish this, we'll use [`certbot`](https://certbot.eff.org/) from the EFF that lets us create and manage [Let's Encrypt](https://letsencrypt.org/) certificates. First, install `certbot` with `sudo apt install certbot` or follow the [instructions for your distribution](https://certbot.eff.org/instructions).\n\nAfter certbot is installed, issue a certificate to use with GitLab using a DNS challenge. 
Follow the steps to complete the DNS challenge after running this command:\n\n```bash\nsudo certbot certonly --manual --preferred-challenges dns\n```\n\nThe output will show you the specific location of the certificate it created (in my case, in a `gitpi.boleary.dev` folder), and you should link that certificate to GitLab's SSL directory by running:\n\n```bash\nsudo mkdir /etc/gitlab/ssl/\nsudo ln -s /etc/letsencrypt/live/gitpi.boleary.dev/fullchain.pem /etc/gitlab/ssl/gitpi.boleary.dev.crt\nsudo ln -s /etc/letsencrypt/live/gitpi.boleary.dev/privkey.pem /etc/gitlab/ssl/gitpi.boleary.dev.key\n```\n\nNext, configure GitLab to use the new certificate by opening the `gitlab.rb` with\n\n```bash\nsudo vi /etc/gitlab/gitlab.rb\n```\n\nAnd change the `external_url` value to match the URL for the certificate (e.g. `https://gitpi.boleary.dev`). That \"https\" will tell GitLab to enable TLS/SSL and use your linked certificate.\n\n## Finishing up\n\nThat's it! Now with a simple `gitlab-ctl reconfigure`, GitLab will pick up the new certificate and start responding to requests at that URL. From any device - iOS, Android, laptop, etc. 
- connected to your Tailscale network, you can access your GitLab installation (securely) from anywhere!\n",[9,727,230],{"slug":2412,"featured":6,"template":684},"how-to-access-gitlab-on-a-private-network-with-tailscale","content:en-us:blog:how-to-access-gitlab-on-a-private-network-with-tailscale.yml","How To Access Gitlab On A Private Network With Tailscale","en-us/blog/how-to-access-gitlab-on-a-private-network-with-tailscale.yml","en-us/blog/how-to-access-gitlab-on-a-private-network-with-tailscale",{"_path":2418,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2419,"content":2425,"config":2431,"_id":2433,"_type":13,"title":2434,"_source":15,"_file":2435,"_stem":2436,"_extension":18},"/en-us/blog/how-to-automate-creation-of-runners",{"title":2420,"description":2421,"ogTitle":2420,"ogDescription":2421,"noIndex":6,"ogImage":2422,"ogUrl":2423,"ogSiteName":669,"ogType":670,"canonicalUrls":2423,"schema":2424},"How to automate the creation of GitLab Runners","Follow this step-by-step guide for automating runner setup using new runner creation workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664087/Blog/Hero%20Images/tanukicover.jpg","https://about.gitlab.com/blog/how-to-automate-creation-of-runners","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to automate the creation of GitLab Runners\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darren Eastman\"}],\n        \"datePublished\": \"2023-07-06\",\n      }",{"title":2420,"description":2421,"authors":2426,"heroImage":2422,"date":2428,"body":2429,"category":769,"tags":2430},[2427],"Darren Eastman","2023-07-06","\n\nAutomating the creation of GitLab Runners is an essential tactic in optimizing the operations and management of a runner fleet. 
Since announcing the [deprecation and planned removal of the legacy runner registration token](https://docs.gitlab.com/ee/architecture/blueprints/runner_tokens/#next-gitlab-runner-token-architecture) last year, there have been various questions by customers and the user community regarding the impact of the new workflow on any automation they rely on for creating and registering runners. This is a step-by-step guide for automating runner setup using the new runner creation workflows as depicted in the sequence diagram.\n\n![GitLab Runner create - sequence diagram](https://about.gitlab.com/images/blogimages/2023-06-19-how-to-automate-creating-runners/runner_create_sequence_diagram.png){: .shadow}\n\n## New terminology and concepts\nBefore we dive into the automation steps, let’s first review a few new concepts with the runner creation process and how that differs from the registration token-based method. With the `registration token` method, a `registration token` is available for the instance, for each group, and for each project. Therefore, in a large GitLab installation, with many groups, sub-groups, and projects, you can have tens of hundreds of registration tokens that any authorized user can use to connect a runner. There are two steps to authorizing a runner (the application that you install on a target computing platform) to a GitLab instance:\n1. Retrieve a registration token.\n2. 
Run the register command in the runner application using the previously retrieved registration token.\n\nThe workflow images below depict the runner setup steps using the registration token compared with the new runner creation process.\n\n![GitLab Runner registration workflows](https://about.gitlab.com/images/blogimages/2023-06-19-how-to-automate-creating-runners/runner_registration_workflows.png){: .shadow}\n\n### Reusable runner configurations\nNow, in the registration token method, if you authenticated multiple runners using the same registration token (a valid use case), each runner entity would be visible in the UI in a separate row in the list view. The new creation method introduces the concept of a reusable runner configuration. For example, if you have to deploy multiple runners at the instance level, each with the same configuration (executor type, tags, etc.), you simply create a runner and configuration **once**, then register each individual runner with the same authentication token that you retrieved from the first runner creation. Each of these runners is now displayed in the UI in a nested hierarchy.\n\n![Runner detailed view with shared configurations](https://about.gitlab.com/images/blogimages/2023-06-19-how-to-automate-creating-runners/runner_detail_shared_configs.png){: .shadow}\n\nWe heard from many of you that your Runners view was cluttered because each runner created received its own row in the table, even if they were the exact same configuration as 100 others. With this change, our intent is to ensure that you have the flexibility you need to configure a runner fleet at scale while ensuring that you can still easily understand and manage the fleet in the GitLab Runners view. We understand that this is a paradigm shift that may take some getting used to.\n\n## Automation steps for creating a runner\nHere are the automation steps to create a runner.\n\n### Step 1: Create an access token\nYou will first need to create an access token. 
A [personal access token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html) for an administrator account will allow you to create runners at the instance, group, and project levels.\n\nIf you only need to create a group or project runner, then it is best to use a group access token or project access token, respectively. For a group or project, navigate to `Settings / Access Tokens` and create a token. You must specify a name, the token expiration date, role, and scope. For the role, select `Owner`; for the scopes, select `create_runner`.\n\nNote: The access token is only visible once in the UI. You will need to store this token in a secure location - for example, a secrets management solution such as [Hashicorp Vault](https://docs.gitlab.com/ee/ci/examples/authenticating-with-hashicorp-vault/) or the [Keeper Secrets Manager Terraform plugin](https://docs.keeper.io/secrets-manager/secrets-manager/integrations/terraform).\n\n![GitLab Runner registration workflows](https://about.gitlab.com/images/blogimages/2023-06-19-how-to-automate-creating-runners/project_access_token.png){: .shadow}\n\n### Step 2: Use the access token to create a runner in the GitLab instance\nNow that you have an access token scoped to the instance, group, or project, the next step is to use that token to create a runner automatically. In this example, we will simply invoke a POST REST endpoint in a terminal using CURL.\n\n```\ncurl -sX POST https://gitlab.example.com/api/v4/user/runners --data runner_type=group_type --data \"group_id=\u003Ctarget_group_or_project_id>\" --data \"description=software-eng-docker-builds-runner\" --data \"tag_list=\u003Cyour comma-separated tags>\" --header \"PRIVATE-TOKEN: \u003Cyour_access_token>\"\n```\n\nOnce this step is complete, the newly created runner configuration is visible in the GitLab UI. 
As the actual runner has not yet been configured, the status displayed is `Never contacted`.\n\nThe API will return a message with the following fields: `id`, `token`, and `token_expires_at`. You must save the value for the `token` as it will only be displayed once. \n\nAs mentioned above, a critical point to note in the new runner creation is that you can reuse the runner token value to register multiple runners. If you choose to do that, runners created with the same token will be grouped in the Runners list. Whichever runner contacted GitLab most recently will be the one whose unique data (IP address, version, last contact time and status) displays in the list. You can still view all the runners in that group _and_ compare all of their unique data by going to the details page for that runner. Each runner in the group is uniquely identified by their `system_id`.\n\nAt this point, you might ask yourself, what’s the difference between this new workflow and the workflow that relies on the registration token? The benefits are:\n1. You can now quickly identify the user that created a runner configuration. Not only does this add a layer of security compared to the old method, but it also simplifies troubleshooting runner performance issues, especially when your fleet expands.\n1. Only the creator of the runner or administrator(s) can edit crucial configuration details like tags, the ability to run untagged jobs, the setting to lock to only run jobs in the current projects it is shared with, and more.\n\n## Automation of runner install and registration\nWith the runner configuration creation steps completed, you now have a runner or runners configured in your GitLab instance and valid runner tokens that you can use to register a runner. You can manually install the runner application to a target compute host or automate the runner application installation. 
If you plan to host the runner on a public cloud virtual machine instance – for example, [Google Cloud Compute Engine](https://cloud.google.com/compute/docs/instances) – then a good [example pattern](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/1932#note_1172713979) provided by one of our customers for automating the runner install and registration process is as follows:\n1. Use [Terraform infrastructure as code](https://docs.gitlab.com/ee/user/infrastructure/iac/) to install the runner application to a virtual machine hosted on GCP.\n1. Use the [GCP Terraform provider](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_instance) and specifically the `metadata` key to automatically add the runner authentication token to the runner configuration file on the newly created GCP virtual machine.\n1. Register the newly installed runner with the target GitLab instance using a [cloud-init](https://cloudinit.readthedocs.io/en/latest/index.html#) script populated from the GCP terraform provider.\n\n**Example cloud-init script**\n\n```shell\n#!/bin/bash\napt update\n\ncurl -L \"https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh\" | bash\nGL_NAME=$(curl 169.254.169.254/computeMetadata/v1/instance/name -H \"Metadata-Flavor:Google\")\nGL_EXECUTOR=$(curl 169.254.169.254/computeMetadata/v1/instance/attributes/gl_executor -H \"Metadata-Flavor:Google\")\napt update\napt install -y gitlab-runner\ngitlab-runner register --non-interactive --name=\"$GL_NAME\" --url=\"https://gitlab.com\" --token=\"$RUNNER_TOKEN\" --request-concurrency=\"12\" --executor=\"$GL_EXECUTOR\" --docker-image=\"alpine:latest\"\nsystemctl restart gitlab-runner\n```\n\n## What's next?\nSo there you have it, an overview of how to automate runner creation, installation, and registration. To summarize in three simple steps:\n1. Use the API to create a runner token and configuration.\n1. 
Store the retrieved authentication token in a secrets management solution.\n1. Use infrastructure as code to install the runner application on a target compute host.\n\nOur long-term vision is to directly incorporate this automation lifecycle into the product to simplify your day-to-day runner fleet management operations.\n",[9,940,728],{"slug":2432,"featured":6,"template":684},"how-to-automate-creation-of-runners","content:en-us:blog:how-to-automate-creation-of-runners.yml","How To Automate Creation Of Runners","en-us/blog/how-to-automate-creation-of-runners.yml","en-us/blog/how-to-automate-creation-of-runners",{"_path":2438,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2439,"content":2445,"config":2450,"_id":2452,"_type":13,"title":2453,"_source":15,"_file":2454,"_stem":2455,"_extension":18},"/en-us/blog/how-to-automate-localization-for-flutter-apps",{"title":2440,"description":2441,"ogTitle":2440,"ogDescription":2441,"noIndex":6,"ogImage":2442,"ogUrl":2443,"ogSiteName":669,"ogType":670,"canonicalUrls":2443,"schema":2444},"How to automate localization for Flutter apps","Follow this tutorial to learn how to simplify the localization process on GitLab with Localizely.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679465/Blog/Hero%20Images/flutterbanner.png","https://about.gitlab.com/blog/how-to-automate-localization-for-flutter-apps","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to automate localization for Flutter apps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2021-12-10\",\n      }",{"title":2440,"description":2441,"authors":2446,"heroImage":2442,"date":2447,"body":2448,"category":769,"tags":2449},[852],"2021-12-10","\n\nLocalization is an indispensable part of today's software. Almost all successful companies strive to adapt their products to different languages, regions, and cultures. 
Customer satisfaction is crucial for business. However, that often comes at a cost in terms of the higher complexity in software development and maintenance. In addition to regular activities, you must also take care of translation, its synchronization with development processes, and the like.\n\nThe question is: Can we somehow simplify the localization process and make it more agile? The answer is “yes.\" Below, you can see how GitLab and the [Localizely](https://localizely.com/) platform can help. For that purpose, we will use a simple Flutter project. However, the same approach can be applied to other programming languages and frameworks.\n\n## A few words about the Flutter project\n\nFlutter is an open-source framework developed by Google for building multi-platform apps from a single codebase. It has become quite popular lately, as it solves some things much better than some other solutions (hot-reload, performance, etc.). Since the point of this post is the automation of localization, we will not deal with Flutter too much. But we will certainly highlight some important things regarding localization in Flutter projects.\n\nWhatever approach you used to create and localize your Flutter project, its structure would probably be similar to the one below. \n\n![Flutter project structure](https://about.gitlab.com/images/blogimages/fluttergraphic.png){: .shadow.small.left}\n\nAbove, you can see the l10n folder with the two [ARB](https://localizely.com/flutter-arb/) files. Each ARB file contains translations for one language in the Flutter project (i.e. intl_de.arb for German and intl_en.arb for English). Whenever we want to add, modify, or remove a translation, we need to update those files. In other words, those files are the basis of localization in Flutter projects. 
They separate programming from translation but require synchronization with your code so that each message has a corresponding translation.\n\n## The usual way of localization\n\nThere is no exact rule or process that describes the usual way of localization. However, we could roughly describe it as the routine of a few steps:\n\n1. The developer updates code and the main ARB file.\n2. The developer sends ARB files to the project manager.\n3. The project manager sends ARB files to translators (e.g. email, upload to localization platform, etc.).\n4. The translators work on translations.\n5. The project manager forwards translated ARB files to the developer.\n6. The developer updates the Flutter project with new translations.\n\nIn this simplified case of localization, we can already notice some tasks that drain a lot of time and can be a bottleneck. Those are steps 2, 3, and 5. Moreover, these six steps can be frequent (e.g. update of the UI, new feature, etc.), which is not exactly the optimal solution. And that is even truer for medium and large teams. Just imagine how much time is wasted on file sharing when you have to coordinate in a team of 10+ people. Not to mention the problem with outdated ARB files.\n\n## Automated localization\n\nSince you've seen some flaws in the usual way of localization, let's see how we can optimize that.\n\n1. The developer updates code, the main ARB file, and pushes changes to GitLab.\n2. GitLab informs Localizely via webhook regarding new changes. \n3. Localizely fetches ARB files from GitLab and lets translators work on translations.\n4. The project manager pushes updated ARB files to GitLab via [MR](https://docs.gitlab.com/ee/user/project/merge_requests/).\n5. The developer updates the Flutter project with new translations (merge MR).\n\nThis way of working enables everyone to do their job more efficiently. 
Developers can be focused on the development of the product, translators on translations, managers on management, and similar. It should also be noted that with this type of workflow, you can easily accelerate the development and delivery of new features, which is in everyone's interest.\n\nTo make this workflow possible, you need to adjust a few things. In the following, you can see the necessary settings.\n\n1. Add a [localizely.yml](https://localizely.com/configuration-file/) config file to the root of your Flutter project. \n2. Set up [GitLab integration](https://localizely.com/gitlab-integration/) on the Localizely platform. \n3. Add a webhook to the GitLab repository.\n\nAnd that’s all. You have automated localization on your Flutter project. Whenever the developer pushes the changes to GitLab, the translators will see new string keys on the Localizely. Once the translation is done, a single click on the button creates a new MR with the latest translations on GitLab. There is no need for a mediator, waiting, or sending ARB files for every little thing. Now you can have more time for other things as this tedious work is automated.\n\n## Final thoughts\n\nIn this post, you have seen the most common steps of localization in Flutter projects and how to automate some of them. Knowing how important efficiency is today, we should strive to automate repetitive tasks as much as possible. 
As someone once said, “Lost time is never found again”.\n\n",[773,1351,9],{"slug":2451,"featured":6,"template":684},"how-to-automate-localization-for-flutter-apps","content:en-us:blog:how-to-automate-localization-for-flutter-apps.yml","How To Automate Localization For Flutter Apps","en-us/blog/how-to-automate-localization-for-flutter-apps.yml","en-us/blog/how-to-automate-localization-for-flutter-apps",{"_path":2457,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2458,"content":2464,"config":2470,"_id":2472,"_type":13,"title":2473,"_source":15,"_file":2474,"_stem":2475,"_extension":18},"/en-us/blog/how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci",{"title":2459,"description":2460,"ogTitle":2459,"ogDescription":2460,"noIndex":6,"ogImage":2461,"ogUrl":2462,"ogSiteName":669,"ogType":670,"canonicalUrls":2462,"schema":2463},"How to automatically create a new MR on GitLab with GitLab CI","With this script, every time we push a commit, GitLab CI checks if the branch that commit belongs to already has an open MR and, if not, creates one.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679783/Blog/Hero%20Images/whats-next-for-gitlab-ci.jpg","https://about.gitlab.com/blog/how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to automatically create a new MR on GitLab with GitLab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Riccardo Padovani\"}],\n        \"datePublished\": \"2017-09-05\",\n      }",{"title":2459,"description":2460,"authors":2465,"heroImage":2461,"date":2467,"body":2468,"category":769,"tags":2469},[2466],"Riccardo Padovani","2017-09-05","\n\nAt [fleetster](https://www.fleetster.net/), we have our own instance of [GitLab](https://gitlab.com/) and we rely a lot on [GitLab CI](/solutions/continuous-integration/). How could it be otherwise? 
We are a small team, with a lot of different projects (only in the last month, we had more than **13,000 commits** over **25 different projects**, and we are only 10 people – with myself working part time). Automating as many development steps as possible (from build to QA to deploy) is helping us a lot, but sometimes we write some code and then forget about it. This is a disaster! We have some bug fix or some new feature ready, but it is forgotten in some branch somewhere.\n\n\u003C!-- more -->\n\nThis is why we have a policy to push as soon as possible to open a new MR, mark it as WIP, and assign it to ourselves; in this way GitLab will remind us we have an MR.\n\nYou need to do three steps to achieve that:\n\n* Push the code\n* Click on the link that appears on your terminal\n* Fill a form\n\nBut we are nerds. We are lazy. So one night, after a couple of beers, [Alberto Urbano](https://www.linkedin.com/in/alberto-urbano-047a4b19/) and I spent some hours to automate a task that requires 10 seconds.\n\nActually, the experience was quite fun, it was the first time we used GitLab APIs and we learned things we will apply to other scripts as well.\n\n![Image via Riccardo's blog](https://about.gitlab.com/images/blogimages/automating-tasks-expectation-versus-reality.png){: .shadow}\u003Cbr>\n*Image by Randall Munroe, [xkcd.com](https://imgs.xkcd.com/comics/automation.png)*\n\n### The script\n\nWith this script, every time we push a commit, GitLab CI checks if the branch that commit belongs to already has an open MR and, if not, it creates it. 
It then assigns the MR to you, and puts **WIP** in the title to mark it as a work in progress.\n\nIn this way you cannot forget about that branch, and when you’ve finished writing code on it, you just need to remove the WIP from the title and assign to the right person to review it.\n\nIn the end, this is the script we came out with (when you add to your project, remember to make it executable):\n\n```\n#!/usr/bin/env bash\n# Extract the host where the server is running, and add the URL to the APIs\n[[ $HOST =~ ^https?://[^/]+ ]] && HOST=\"${BASH_REMATCH[0]}/api/v4/projects/\"\n\n# Look which is the default branch\nTARGET_BRANCH=`curl --silent \"${HOST}${CI_PROJECT_ID}\" --header \"PRIVATE-TOKEN:${PRIVATE_TOKEN}\" | python3 -c \"import sys, json; print(json.load(sys.stdin)['default_branch'])\"`;\n\n# The description of our new MR, we want to remove the branch after the MR has\n# been closed\nBODY=\"{\n    \\\"id\\\": ${CI_PROJECT_ID},\n    \\\"source_branch\\\": \\\"${CI_COMMIT_REF_NAME}\\\",\n    \\\"target_branch\\\": \\\"${TARGET_BRANCH}\\\",\n    \\\"remove_source_branch\\\": true,\n    \\\"title\\\": \\\"WIP: ${CI_COMMIT_REF_NAME}\\\",\n    \\\"assignee_id\\\":\\\"${GITLAB_USER_ID}\\\"\n}\";\n\n# Require a list of all the merge request and take a look if there is already\n# one with the same source branch\nLISTMR=`curl --silent \"${HOST}${CI_PROJECT_ID}/merge_requests?state=opened\" --header \"PRIVATE-TOKEN:${PRIVATE_TOKEN}\"`;\nCOUNTBRANCHES=`echo ${LISTMR} | grep -o \"\\\"source_branch\\\":\\\"${CI_COMMIT_REF_NAME}\\\"\" | wc -l`;\n\n# No MR found, let's create a new one\nif [ ${COUNTBRANCHES} -eq \"0\" ]; then\n    curl -X POST \"${HOST}${CI_PROJECT_ID}/merge_requests\" \\\n        --header \"PRIVATE-TOKEN:${PRIVATE_TOKEN}\" \\\n        --header \"Content-Type: application/json\" \\\n        --data \"${BODY}\";\n\n    echo \"Opened a new merge request: WIP: ${CI_COMMIT_REF_NAME} and assigned to you\";\n    exit;\nfi\n\necho \"No new merge request 
opened\";\n```\n\n### GitLab CI\n\nThe variables used in the script are passed to it by our `.gitlab_ci.yml` file:\n\n```\nstages:\n    - openMr\n    - otherStages\n\nopenMr:\n    before_script: []   # We do not need any setup work, let's remove the global one (if any)\n    stage: openMr\n    only:\n      - /^feature\\/*/   # We have a very strict naming convention\n    script:\n        - HOST=${CI_PROJECT_URL} CI_PROJECT_ID=${CI_PROJECT_ID} CI_COMMIT_REF_NAME=${CI_COMMIT_REF_NAME} GITLAB_USER_ID=${GITLAB_USER_ID} PRIVATE_TOKEN=${PRIVATE_TOKEN} ./utils/autoMergeRequest.sh # The name of the script\n```\n\nAll these environment variables are set by GitLab itself, except the PRIVATE-TOKEN. A master of the project has to create it in their own profile and add it to the project settings.\n\nTo create the personal token you can go to `/profile/personal_access_tokens` on your GitLab instance, and then you add it to your pipeline following this guide.\n\n### Ways to improve\n\nThe script is far from perfect.\n\nFirst of all, it has two API calls, one to take the list of MR and one to take the default branch, to use it as target. Of course you can hardcode the value (in the end it shouldn’t change often), but hardcoding is always bad.\n\nAlso, it uses python3 to extract the name of the target branch – this is just one of many possible solutions, just use what is available on your system. Apart from that, the script doesn’t have any external dependency.\n\nThe other thing is how you need to set up the secret token to call the APIs. Luckily, GitLab’s developers are working on a [new way](https://gitlab.com/gitlab-org/gitlab-ce/issues/12729) to manage secret tokens.\n\n### Conclusion\n\nThis was a very small and very simple example of how powerful Continuous Integration can be. 
It takes some time to set up everything, but in the long run it will save your team a lot of headache.\n\nIn fleetster we use it not only for running tests, but also for having automatic versioning of the software and automatic deploys to testing environments. We are working to automate other jobs as well (building apps and publish them on the Play Store and so on).\n\nSpeaking of which, **do you want to work in a young and dynamic office with me and a lot of other amazing people?** Take a look at the [open positions at fleetster](https://www.fleetster.net/fleetster-team.html)!\n\nKudos to the GitLab team (and other guys who help in their free time) for their awesome work!\n\nIf you have any question or feedback about this blog post, please drop me an email at riccardo@rpadovani.com :-)\n\nBye for now,\nA. & R.\n\nP.S: if you have found this article helpful and you’d like we write others, do you mind to help us reaching the Ballmer’s peak and buy us a [beer](https://rpadovani.com/donations)?\n\nThis post originally appeared on [*rpadovani.com*](https://rpadovani.com/open-mr-gitlab-ci).\n\n## About the Guest Author\n\nRiccardo is a university student and a part-time developer at [fleetster](http://www.fleetster.net/). 
When not busy with university or work, he likes to contribute to open-source projects.\n",[108,2255,9],{"slug":2471,"featured":6,"template":684},"how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci","content:en-us:blog:how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci.yml","How To Automatically Create A New Mr On Gitlab With Gitlab Ci","en-us/blog/how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci.yml","en-us/blog/how-to-automatically-create-a-new-mr-on-gitlab-with-gitlab-ci",{"_path":2477,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2478,"content":2484,"config":2490,"_id":2492,"_type":13,"title":2493,"_source":15,"_file":2494,"_stem":2495,"_extension":18},"/en-us/blog/how-to-choose-the-right-security-scanning-approach",{"title":2479,"description":2480,"ogTitle":2479,"ogDescription":2480,"noIndex":6,"ogImage":2481,"ogUrl":2482,"ogSiteName":669,"ogType":670,"canonicalUrls":2482,"schema":2483},"How to choose the right security scanning approach","GitLab offers multiple scanning methods for CI/CD pipelines, including compliance frameworks and scan and pipeline execution policies. 
Learn the basics, configurations, and advantages/disadvantages.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097969/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_282096522_securitycompliance.jpeg_1750097968823.jpg","https://about.gitlab.com/blog/how-to-choose-the-right-security-scanning-approach","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to choose the right security scanning approach\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matt Genelin\"},{\"@type\":\"Person\",\"name\":\"Mathias Ewald\"}],\n        \"datePublished\": \"2024-08-26\",\n      }",{"title":2479,"description":2480,"authors":2485,"heroImage":2481,"date":2487,"body":2488,"category":814,"tags":2489},[2272,2486],"Mathias Ewald","2024-08-26","Integrating security scans into your CI/CD pipeline is crucial for maintaining robust and secure applications. But who's responsible for those scans? Who is responsible for adding them into every CI/CD pipeline for all projects? And who decides which identified vulnerability may pass or needs fixing? For organizations in regulated industries, these are critical questions.\n\nIn this article, you'll learn how GitLab [CI/CD](https://about.gitlab.com/topics/ci-cd/) enables each person in the software development lifecycle to incorporate security scanning. You'll also discover the advantages and disadvantages of the various options available to add scanning to GitLab project pipelines. 
Code examples will help you kickstart security scanning on the GitLab DevSecOps platform.\n\nArticle contents:\n- [The basics of setting up security scanning](#the-basics-of-setting-up-security-scanning)\n- [Pipeline includes](#pipeline-includes)\n- [Compliance frameworks](#compliance-frameworks)\n- [Policies](#policies)\n- [Get started with security scanning](#get-started-with-security-scanning)\n\n## The basics of setting up security scanning\n\nGitLab uses [fictional personas](https://handbook.gitlab.com/handbook/product/personas/#user-personas) to describe the individual team member who would typically use a given security feature or approach. By exploring the perspective of a **Software Developer (Sasha)**, **Application Security Engineer (Amy)**, or **Platform Engineer (Priyanka)**, you can better understand the needs of each role on your team.\n\nGitLab follows a \"pipeline-per-project\" principle, stored in the file named `.gitlab-ci.yml`. This file contains the project's CI/CD pipeline definition and is revision controlled like any other file in the project. You'll learn about these project pipelines, as well as compliance pipelines and policy pipelines. While compliance pipelines and policy pipelines also refer to the YAML files in GitLab projects, they typically have a different file name and serve a different purpose.\n\nReaders already familiar with security scanning in GitLab will find clarity in the security pipeline choices available in the context of your team/organization. Therefore, we will discuss each of the approaches with respect to the following criteria:\n\n- **Ease of use:** How easy is it to add security scanning to project pipelines? Is it a reasonable task for Sasha, or something that Amy and Priyanka should handle?\n\n- **Customization:** How deeply can scanner configurations be customized using that approach? 
While default configurations that make sense and cover a wide range of customer needs are worth gold, the time often comes when scanner configurations need adjustments.\n\n- **Enforcement:** Is this approach suitable to companies operating in regulated industries or that otherwise have global policies in place? Can we ensure each relevant project runs Scanner X with Configuration Y?\n\n## Pipeline includes\n\n[GitLab project pipeline includes](https://docs.gitlab.com/ee/ci/yaml/includes.html) are a mechanism that allows the integration of external pipelines into the `.gitlab-ci.yaml` project pipeline. This is similar to including a library in many programming languages. This powerful feature enables the seamless incorporation of your own templates, as well as GitLab-provided templates, to be used as building blocks for your pipelines. Includes can be used in project pipelines or other pipeline files. An example of a commonly included external pipeline is including a security scanning pipeline into a GitLab project pipeline.\n\nHere are the common types of includes, which use the security scanner example.\n\n### Templates\n\nGitLab offers ready-to-use [templates](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates/Jobs) that can be included in a project pipeline to make it easier for teams to add in various pre-built elements. 
The following is example code:\n\n```yaml\ninclude:\n  - template: Jobs/Secret-Detection.gitlab-ci.yml\n  - template: Jobs/SAST.gitlab-ci.yml\n  - template: Jobs/Dependency-Scanning.gitlab-ci.yml\n  - template: Jobs/Container-Scanning.gitlab-ci.yml\n```\n\nThis code includes GitLab's templates for [Secret Detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/), [Static Application Security Testing](https://docs.gitlab.com/ee/user/application_security/sast/), [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/), and [Container Scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/) – all in only five lines of code. \n\nTo modify the behavior of jobs included via templates, you can either use variables or use [GitLab's property merging capabilities](https://docs.gitlab.com/ee/ci/yaml/includes.html#merge-method-for-include).\n\nYou will find an example of modifying the GitLab Container Scanning pipeline using variables below. The [template for Container Scanning](https://gitlab.com/gitlab-org/gitlab/-/blob/59f08760feaab1eb0489f694d4f28408af9c2e8d/lib/gitlab/ci/templates/Jobs/Container-Scanning.gitlab-ci.yml) needs to know the location of the image and uses a variable named `CS_IMAGE` for that as is documented in the template code linked above.\n\n```yaml\nvariables:\n  CS_IMAGE: \"$CI_REGISTRY_IMAGE:$CI_COMMIT_SHORT_SHA\"\n\ninclude:\n  - template: Jobs/Container-Scanning.gitlab-ci.yml\n```\n\nThe project pipeline variables are available to included job templates by defining the `CS_IMAGE` variable before the included pipeline template. The Container Scanning template inherits the `CS_IMAGE` variable value. 
\n\nIf we wanted to make changes to the [`allow_failure` property defined here](https://gitlab.com/gitlab-org/gitlab/-/blob/59f08760feaab1eb0489f694d4f28408af9c2e8d/lib/gitlab/ci/templates/Jobs/Container-Scanning.gitlab-ci.yml#L38), we would need to resort to property merging since the job templates employ no variable for the value. (The `allow_failure` property is a property generally available on every GitLab pipeline job. Please check the [documentation](https://docs.gitlab.com/ee/ci/yaml/#allow_failure) for details.)\n\nIn this example, `allow_failure` is set to `false`, meaning the entire pipeline stops on a container scanning failure. This stops any unscanned containers from moving forward in the pipeline.\n\n```yaml\ninclude:\n  # Includes a job called \"container_scanning\"\n  - template: Jobs/Container-Scanning.gitlab-ci.yml\n\n# Define a job with same name for merging\ncontainer_scanning:\n  allow_failure: false\n```\n\nGitLab will load the job template and – as defined in the template code – register a job called `container_scanning`. As the pipeline definition declares another job with that name, GitLab will merge that specification with the already registered job.\n\nWhile this feature offers many possibilities, it also makes it impossible to protect certain properties from being overwritten. We are only at the point of modifying the project pipeline, so there's no control over that anyway. But later on, you will see that this can pose a challenge when security needs to be enforced on a project.\n\n### Components\n\nTemplates are a great start for sharing repeatable GitLab pipelines. To further abstract reusable code across an entire organization or a GitLab instance, [GitLab introduced components](https://docs.gitlab.com/ee/ci/components/). Components are the next logical step in GitLab's evolution of pipelines. 
Components are designed to simplify the creation and use of functional building blocks to use in pipelines, or even to package and ship entire pipelines if needed. They offer a well-defined interface, which accepts \"inputs\" for configuration. Otherwise, the component is completely isolated, which makes them a great candidate to share work within an organization and to be searchable and reusable building blocks.\n\nDevelopers can use the [CI/CD Catalog](https://gitlab.com/explore/catalog) to browse and search the collection of publicly available GitLab components, which are components officially built and maintained by GitLab. GitLab uses the CI/CD Catalog [to publish our shipped components](https://gitlab.com/components) such as security scanners alongside community-provided components.\n\nComponents are consumed similarly to templates via the `include` keyword. In an example above, we showed how the container scanning job requires knowledge of the image location. This \"input\" uses the component for [container scanning](https://gitlab.com/components/container-scanning/-/blob/19fd5b83bc631cb9890b4fadb08d31b3150853ce/templates/container-scanning.yml) is called `cs_image`. The configuration equivalent to the previous example looks like this:\n\n```yaml\ninclude:\n  - component: $CI_SERVER_FQDN/components/sast/sast@2.0.2\n  - component: $CI_SERVER_FQDN/components/dependency-scanning/cargo@0.2.0\n  - component: $CI_SERVER_FQDN/components/secret-detection/secret-detection@1.1.2\n  - component: $CI_SERVER_FQDN/components/container-scanning/container-scanning@4.1.0\n    inputs:\n      cs_image: \"$CI_REGISTRY_IMAGE:$CI_COMMIT_SHORT_SHA\"\n```\nIn this example, the SAST component is pinned at Version 2.0.2, the Dependency Scanning component at Version 0.2.0, the Secret Detection component at Version 1.1.2, and the Container Scanning component at Version 4.1.0. 
`~latest` [and more tags are available](https://docs.gitlab.com/ee/ci/components/#component-versions) for bleeding-edge component usage and other development needs.\n\nWhether you use templates or components, your pipeline might look like the image below. The top four jobs in the test stage are the result of the four include statements in the code above.\n\n![An example pipeline](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097984/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097983863.png)\n\n### Advantages and disadvantages of using pipeline includes\n\n#### Ease of use\n\nOne of the benefits of using pipeline includes in GitLab is their ease of use. We have seen how, with essentially six lines of code, we included four commonly used security scanners. All the complex logic and setup are handled within the templates or components, saving Sasha time and effort by providing a ready-to-use solution.\n\n#### Customization\n\nWhile templates offer the highest flexibility (variables and merging), it's important to remember that with \"great power comes great responsibility.\" The flexibility of templates supports extensive customization, but requires careful management and oversight to avoid unexpected results.\n\nIn contrast, components provide a more structured mechanism for authoring, sharing, and maintaining building blocks for a broader audience. Components, while not as customizable, enhance stability and reliability, and are a valuable, reusable, and repeatable feature. \n\n#### Enforcement\n\nAs the name _include_ suggests, it is the GitLab project pipeline that needs to include templates or components. While scanner templates are straightforward to use, Amy and Priyanka cannot be sure Sasha has included them properly, or even at all. 
Enforcement of scanner usage is needed.\n\nFor regulated industries, managing security in project pipelines is not an approach that provides the necessary audit trail or enforcement.\n\n## Compliance frameworks\n\nGitLab identified the gap between the ability to enforce security scans on project pipelines and the need to [adhere to regulatory compliance frameworks](https://about.gitlab.com/blog/meet-regulatory-standards-with-gitlab/) such as PCI DSS, NIST, and many more. The introduction of compliance frameworks as functionality caters to precisely this challenge.\n\nAt first glance, a compliance framework in GitLab is merely a label attached to a project, which would typically be named after the regulatory framework it is supposed to implement. The magic is added with the link between that label and a compliance pipeline YAML file, which is responsible for implementing the necessary steps to ensure compliance. \n\nThe mechanism is straightforward: Every time the project pipeline is triggered, GitLab executes the compliance pipeline instead. The compliance pipeline runs with both the [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/) and [predefined CI/CD variables](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html) of the project pipeline.\n\nThis allows for two main design patterns: a \"wrapping pipeline,\" where the compliance pipeline includes the project pipeline, and an \"overriding pipeline,\" where it does not. \n\n**Note:** Compliance pipelines have been deprecated in GitLab Version 17.3 and are scheduled for removal in Version 19.0. At this point, we cannot recommend implementing this approach for new development platforms. However, you might already be using them, making it worth reading this section.\n\n### Wrapping pipelines\n\nIn the wrapping approach, the compliance pipeline defines its own jobs according to specific compliance needs. 
It includes the project pipeline in the same way we have seen templates included in the previous section. This setup is possible because the predefined CI/CD variables originate from the project pipeline, allowing the system to identify the pipeline definition's location for inclusion.\n\nHere is an example of what a simple compliance pipeline might look like. \n\n```yaml\ninclude:\n  - component: $CI_SERVER_FQDN/components/sast/sast@2.0.2\n  - component: $CI_SERVER_FQDN/components/dependency-scanning/cargo@0.2.0\n  - component: $CI_SERVER_FQDN/components/secret-detection/secret-detection@1.1.2\n  - component: $CI_SERVER_FQDN/components/container-scanning/container-scanning@4.1.0\n  - project: '$CI_PROJECT_PATH'\n    file: '$CI_CONFIG_PATH'\n    ref: '$CI_COMMIT_SHA'\n```\n\nThe last three lines include the project pipeline based on available variables.\n\n### Overriding pipelines\n\nUnlike wrapping pipelines, which include the project pipeline, overriding pipelines ignore it entirely and run only their own jobs. 
This type of pipeline defines each step, encompassing all necessary jobs to build, test, and deploy the application.\n\nBelow we see a mock compliance pipeline that illustrates this approach.\n\n```yaml\nstages: [\"build\", \"test\", \"deploy\"]\n\ninclude:\n  - component: $CI_SERVER_FQDN/components/sast/sast@2.0.2\n  - component: $CI_SERVER_FQDN/components/dependency-scanning/cargo@0.2.0\n  - component: $CI_SERVER_FQDN/components/secret-detection/secret-detection@1.1.2\n  - component: $CI_SERVER_FQDN/components/container-scanning/container-scanning@4.1.0\n\nbuild-job:\n  stage: build\n  script: echo \"Building the container image\"\n\ntest-job:\n  stage: test\n  script: echo \"Running unit tests\"\n\ndeploy-job:\n  stage: deploy\n  script: echo \"Deploying app\"\n```\n\n### Advantages and disadvantages of compliance frameworks\n\n#### Ease of use\n\nWhile compliance frameworks aren't terribly complicated, they aren't as straightforward and simple as pipeline includes. They're meant to be written and assigned to projects by Amy and Priyanka, who now need to interact with pipeline YAML code. A framework needs to be declared in the top-level namespace and compliance pipelines need to be created and maintained, and compliance frameworks need to be attached to the right projects. \n\n#### Customization\n\nAmy and Priyanka are the authors of compliance pipelines. Like Sasha in the previous section on includes, they have full control over what they include and how they include it, giving them maximum customizability of compliance jobs such as security scanners.\n\n#### Enforcement\nThis aspect of enforcing pipelines raises the question of whether developers can tamper with security jobs. In an environment with a strong separation of duties, this nuance requires some extra attention. To answer this, we need to look at each pattern separately:\n\n##### Wrapping pipelines\nAs seen before, project pipelines are included in compliance pipelines. 
In addition to group- or project-level CI/CD variables, every element of that project pipeline must be considered a potential threat to the compliance pipeline. Obviously, variables and jobs stick out as primary candidates. And, in fact, they can and will influence security job behavior if used maliciously.\n\nHere is a simple example to illustrate the issue.\n\nCompliance pipeline:\n```yaml\ninclude:\n  - template: Jobs/SAST.gitlab-ci.yml\n  - template: Jobs/Secret-Detection.gitlab-ci.yml\n  - project: '$CI_PROJECT_PATH'\n    file: '$CI_CONFIG_PATH'\n    ref: '$CI_COMMIT_SHA'\n```\n\nProject pipeline:\n```yaml\nvariables:\n  SECRET_DETECTION_DISABLED: true\n\nsemgrep-sast:\n  rules:\n    - when: never\n```\n\nThis project pipeline declares a variable `SECRET_DETECTION_DISABLED` (this could be done via project or group-level CI/CD variables, too), which is evaluated in the included secret detection template. Further, the last three lines use the merging mechanism discussed previously, to not execute the job at all. Kind of redundant, we know.\n\nBoth overrides could be prevented using components, but you get the idea. Components, too, are receptive to such attacks via their inputs' default values, which often use variables, too! 
Let's take a look at how this could be taken advantage of.\n\nCompliance pipeline:\n```yaml\ninclude:\n  - component: $CI_SERVER_FQDN/components/sast/sast@2.0.2\n  - component: $CI_SERVER_FQDN/components/secret-detection/secret-detection@1.1.2\n  - project: '$CI_PROJECT_PATH'\n    file: '$CI_CONFIG_PATH'\n    ref: '$CI_COMMIT_SHA'\n```\n\nProject pipeline:\n```yaml\nvariables:\n  CI_TEMPLATE_REGISTRY_HOST: \"docker.io\"\n```\n\nTo understand what is happening here, look at the [SAST scanner component's Line 6](https://gitlab.com/components/sast/-/blob/main/templates/sast.yml?ref_type=heads#L6):\n\n```yaml\nspec:\n  inputs:\n    stage:\n      default: test\n    image_prefix:\n      default: \"$CI_TEMPLATE_REGISTRY_HOST/security-products\"\n```\n\nThe `image_prefix` input uses the `CI_TEMPLATE_REGISTRY_HOST` to build the default value. By setting this variable to a false value in the same way we set `SECRET_DETECTION_DISABLED` to `true` before, Sasha may cause the job to load a wrong image and break SAST testing.\n\nTo prevent this override ability by the developer role, avoid templates in favor of components. This approach covers many developer-induced loopholes. To be certain of compliance, hardcode values for component inputs.\n\n##### Overriding pipelines\n\nThis type is an entirely different beast. Developers get no chance of injecting actual pipeline code into the compliance pipeline. However, compliance pipelines do run with the project's CI/CD variables. Hence, any variable specified on the group- or project-level might modify the compliance pipeline's behavior. 
With `SECRET_DETECTION_DISABLED` set to `true` in the project CI/CD variables, the following compliance pipeline can be modified again:\n\n```yaml\nstages: [\"build\", \"test\", \"deploy\"]\n\ninclude:\n  - template: Jobs/SAST.gitlab-ci.yml\n  - template: Jobs/Secret-Detection.gitlab-ci.yml\n\nbuild-job: ...\ntest-job: ...\ndeploy-job: ...\n```\n\nComponents can solve this particular problem, but, as before, component inputs may use CI/CD variables developers can set. Compliance pipeline authors need to identify and take care of these situations. \n\n## Policies\n\nCompliance pipelines' shortcomings have led to the next step for managing compliance: [policies](https://docs.gitlab.com/ee/user/application_security/policies/).\n\nGitLab introduced [policies](https://docs.gitlab.com/ee/user/application_security/policies/) as the way forward. Authors store a set of policies in a separate project as YAML files and apply them to projects on the group or project level. This gives Amy and Priyanka the flexibility to target individual projects with specific requirements but also to ensure compliance across the entire organization if needed. Access to the policy project can be controlled within the policy project and audited within GitLab.\n\nPolicies come in different types for different purposes. The types we are interested in right now are scan execution policies (SEP) and pipeline execution policies (PEP).\n\n### Scan execution policies\n\nAs the name suggests, SEPs require a particular scan – or set of scans – to be executed as part of the project pipeline and inject the respective scan jobs into the pipelines of associated projects. They include the respective [template](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates/Jobs) in the pipeline according to variables and rules set by Amy and Priyanka.\n\nGitLab supports policy authors with a comprehensive user interface in addition to a YAML-based Git workflow. 
The following screenshot and code snippet illustrate a very basic example of a SEP:\n\n![Scan execution policy example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097984/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097983864.png)\n\n```yaml\nname: Secret Scanner\ndescription: ''\nenabled: true\nactions:\n- scan: secret_detection\nrules:\n- type: pipeline\n  branches:\n  - \"*\"\n```\n\nFor more details on SEP settings in the UI and YAML, please refer to the [policy documentation](https://docs.gitlab.com/ee/user/application_security/policies/scan_execution_policies.html).\n\n#### Advantages and disadvantages of scan execution policies\n\n##### Ease of use\nSEPs provide a lightweight, easy-to-use mechanism that enforces security on existing and new CI/CD pipelines across the organization or on a granular level. The UI support makes them a viable tool for all relevant personas.\n\n##### Customization\nSEPs are restricted to predefined scanner jobs, and there is no option to extend this list with custom jobs at this point. This limitation can be restrictive for teams with unique scanning requirements that fall outside the standard options.\n\n##### Enforcement\n\nOnce an SEP is applied to a project (directly or indirectly), Sacha has no way to get rid of that scan job. Though, there may be ways to – intentionally or not – manipulate the scan job's behavior.\n\nJobs injected via SEPs generally are receptive to CI/CD variables and adhere to the general rules of [variable precedence](https://docs.gitlab.com/ee/ci/variables/index.html#cicd-variable-precedence). 
For this injection, Policies incorporate logic that denies changing some predefined variables as described [here](https://docs.gitlab.com/ee/user/application_security/policies/scan_execution_policies.html#cicd-variables) and generally deny the configuration of variables that follow certain patterns such as `_DISABLED` or  `_EXCLUDED_PATHS`.\n\nDespite these security measures, inconsiderate use of policies may still open opportunities for tampering: In my test, I was able to set a project-level CI/CD variable `SECURE_ANALYZERS_PREFIX` to a bad value (a non-existing location) and as you can see [here](https://gitlab.com/gitlab-org/gitlab/-/blob/a2d4b8df0095c1363a105a1fa212daf227eca063/lib/gitlab/ci/templates/Jobs/Secret-Detection.gitlab-ci.yml), the secret detection template uses that to build the location of the scanner image.\n\nWhile the scan job does get included in the pipeline run, it crashes very early and, therefore, provides no scan results. Due to the [`allow_failure: true` configuration](https://gitlab.com/gitlab-org/gitlab/-/blob/a2d4b8df0095c1363a105a1fa212daf227eca063/lib/gitlab/ci/templates/Jobs/Secret-Detection.gitlab-ci.yml#L18), the pipeline will continue to run and eventually execute a deploy job.\n\nBecause SEP variables take the highest variable precedence, there is an easy fix to reduce the attack surface of the policy: Simply hardcode the correct value in your policy YAML or via the UI:\n\n```yaml\n- name: Secret Scanner\n  actions:\n  - scan: secret_detection\n    variables:\n      SECURE_ANALYZERS_PREFIX: registry.gitlab.com/security-products\n```\n\n### Pipeline execution policies\n\nSEPs enable the injection of a set of security-related jobs into any project pipeline. In contrast, PEPs apply entire pipeline configurations to projects, offering a lot more flexibility when it comes to customizing security constraints. \n\nThere are two methods for implementing these policies, known as \"actions\": `inject` and `override`. 
These actions function similarly to the patterns we have seen in the compliance frameworks section and provide flexible ways to enhance and enforce security standards within the development workflow.\n\n#### Injecting pipelines\n\nInjecting pipelines involves adding the jobs and other elements defined in the policy pipeline into the project pipeline. Currently, jobs should only be injected into reserved stages, namely `.pipeline-policy-pre` and `.pipeline-policy-post` to avoid unpredictable results.\n\nGitLab handles name clashes between jobs or variables in policy and project pipelines effectively by building each pipeline in isolation before combining them. This ensures that the integration process is seamless and does not disrupt existing workflows or configurations.\n\n![security scanning - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097984/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097983865.png)\n\nThe above screenshot shows an example of an injected policy pipeline. Project pipeline jobs are prefixed with `prj-` for easier identification.\n\n#### Overriding pipelines\n\nIn the override approach, the project pipeline is completely replaced by the policy pipeline. This method is similar to compliance pipelines that do not include the project's `.gitlab-ci.yml` file. Despite the override, the pipelines run using the project's CI/CD variables, maintaining consistency with project-specific configurations. 
The compliance pipeline we used earlier makes a perfectly fine policy pipeline, too:\n\n```yaml\nstages: [\"build\", \"test\", \"deploy\"]\n\ninclude:\n  - component: $CI_SERVER_FQDN/components/sast/sast@2.0.2\n  - component: $CI_SERVER_FQDN/components/dependency-scanning/cargo@0.2.0\n  - component: $CI_SERVER_FQDN/components/secret-detection/secret-detection@1.1.2\n  - component: $CI_SERVER_FQDN/components/container-scanning/container-scanning@4.1.0\n\nbuild-job:\n  stage: build\n  script: echo \"Building the container image\"\n\ntest-job:\n  stage: test\n  script: echo \"Running unit tests\"\n\ndeploy-job:\n  stage: deploy\n  script: echo \"Deploying app\"\n```\n\nThe image below shows a slightly more complete pipeline than the mock pipeline above:\n\n![More complete pipeline](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097984/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097983866.png)\n\n**Note:** This doesn't currently work with SEPs.\n\nHowever, the existence of a Dockerfile may not always be a valid indicator, as developers might be building without Dockerfiles using Cloud Native Buildpacks, Heroku Buildpacks, Kaniko, or other tools. Managed pipelines do not encounter this challenge, as they are more controlled and centralized.\n\n\u003C!-- TOC ignore:true -->\n### Projects with multiple container images\nFor projects that produce multiple container images, several container scanning jobs would be necessary for proper coverage. This raises similar questions as before: \"How do we know there are multiple?\" and \"Is the source of that information trustworthy?\". 
If we wanted to rely on the existence of `Dockerfile`s a [dynamic approach](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html#dynamic-child-pipelines) would be necessary that includes a container scanning job for each `Dockerfile` detected.\n\n## Get started with security scanning\nIn this article, you've learned about a variety of approaches to adding security scanning to CI/CD pipelines with a close look at ease of use, customizability, and the ability to strictly enforce scanning. You've seen that a pipeline author who is held responsible for project compliance needs to keep a few things in mind during the process to avoid surprises down the line. We recommend building a small testing space on your GitLab instance and then run a few tests to reproduce the main points of this article. Put yourself in the shoes of a malicious Sacha (Sachas aren't generally malicious people, but it's a good exercise) and think about how you could fool that annoying Amy and her security scans.\n\nGitLab provides strong support for all sorts of requirements and all approaches are – at least in our eyes – easy to implement due to the platform's baked-in functionality. You should find ways to bulletproof your scan jobs and, if not, you should open a ticket with our support. 
\n\nHappy pipelining!\n\n> #### Get started with security scanning today!\n> [Sign up for a free 30-day trial of GitLab Ultimate](https://gitlab.com/-/trial_registrations/new?glm_source=about.gitlab.com/blog&glm_content=default-saas-trial) to implement security scanning in your software development lifecycle.\n\n## Read more\n\n- [Meet regulatory standards with GitLab security and compliance](https://about.gitlab.com/blog/meet-regulatory-standards-with-gitlab/)\n- [How to integrate custom security scanners into GitLab](https://about.gitlab.com/blog/how-to-integrate-custom-security-scanners-into-gitlab/)\n- [Integrate external security scanners into your DevSecOps workflow](https://about.gitlab.com/blog/integrate-external-security-scanners-into-your-devsecops-workflow/)\n",[814,9,108],{"slug":2491,"featured":90,"template":684},"how-to-choose-the-right-security-scanning-approach","content:en-us:blog:how-to-choose-the-right-security-scanning-approach.yml","How To Choose The Right Security Scanning Approach","en-us/blog/how-to-choose-the-right-security-scanning-approach.yml","en-us/blog/how-to-choose-the-right-security-scanning-approach",{"_path":2497,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2498,"content":2503,"config":2508,"_id":2510,"_type":13,"title":2511,"_source":15,"_file":2512,"_stem":2513,"_extension":18},"/en-us/blog/how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod",{"title":2499,"description":2500,"ogTitle":2499,"ogDescription":2500,"noIndex":6,"ogImage":2226,"ogUrl":2501,"ogSiteName":669,"ogType":670,"canonicalUrls":2501,"schema":2502},"How to code, build, and deploy from an iPad using GitLab and Gitpod","Senior Developer Evangelist Brendan O'Leary tackles the challenge of doing DevOps from a tablet.","https://about.gitlab.com/blog/how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": 
\"How to code, build, and deploy from an iPad using GitLab and Gitpod\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2022-02-10\",\n      }",{"title":2499,"description":2500,"authors":2504,"heroImage":2226,"date":2505,"body":2506,"category":769,"tags":2507},[1221],"2022-02-10","\n\nAs a software engineer, it can be tough to go all-in on just using an iPad for your daily driver. So when Apple announced the M1 chip-based iPads, I, along with many techies, got excited to see if we'd finally get things like a proper terminal on the iPad. But that still isn't the use case that the iPad solves. I remained determined to be able to *code* from mine. So I hooked up my magic keyboard and fired up Gitpod to code and GitLab to build and deploy an app from scratch... all from my iPad.\n\n## Getting started\n\nLike any of [my projects](/blog/introducing-auto-breakfast-from-gitlab/), the first thing I needed was inspiration. I had promised my colleague [Pj](https://brendan.fyi/pj) for some time that I would review [his blog](https://brendan.fyi/pj-twitter-blog) on how to make a Twitter bot like all of the fantastic ones he built while breaking into tech. Combine the need to learn the Twitter API to provide an excellent review with my love of Elton John's music, and I had it: I'd make a Twitter bot that tweeted every morning at 4:00 am (as an homage to the line in “Someone Saved My Life Tonight”).\n\nArmed with my newfound inspiration, I ran to gitlab.com in Safari (on my iPad, obviously) and created a new, blank GitLab project.\n\n![ipad on desk](https://about.gitlab.com/images/blogimages/brendanipad1.png){: .shadow}\n\n## Coding on the iPad\n\nOnce I had the new project, getting started on Gitpod was as easy as clicking the \"Gitpod\" button on GitLab to open my repository in Gitpod.\n\nGitpod enables you to access an entire development environment from any browser. 
By default, you get a container with many development tools (Node, Ruby, OpenJDK, etc.). But you can also choose [your own container](https://www.gitpod.io/docs/config-docker) as a starting point with a .gitpod.yml… but we'll talk about that later.\n\nThe environment is presented to you as a VS Code interface – where you can open, edit, and add files just as you'd expect. You can also access the terminal just like you would in VS Code and install anything you might need to get your project running.\n\nIn this example, I decided to build the Twitter bot in Node.js, so I initialized a new Node project and installed the packages I'd need with:\n\n```bash\nnpm init -y\nnpm install express twit node-schedule dotenv\n```\n\n## Running your app\n\nOnce I had some code running – just the [Express sample app](https://expressjs.com/en/starter/hello-world.html) that says Hello World – running the app was just as easy as if I was going to run it on my laptop:\n\n```bash\nnpm run dev\n```\nNot only did that run my code to connect to the Twitter API, wait until 4:00 a.m. (UTC), and then tweet to let everyone know it was 4:00 a.m., but it also shows this relative to my Express app:\n\n![Express app](https://about.gitlab.com/images/blogimages/brendanipad3.png){: .shadow}\n\nThat allows me to preview my [website for the app](https://brendan.fyi/4oclock) while I'm coding it. This is a massive benefit because it means I can have two tabs open on the iPad – one with Gitpod and my code and another with the website as I change it. Or I can even use split-screen on the iPad to have them side-by-side like I might if I was at my desk at my \"normal\" setup. 
And there's even a button to make the site available publicly so I could share it with my team and show them what I'm working on (as long as my Gitpod workspace is running).\n\nNow, when it comes to coding the rest of the Twitter bot, I used the previously mentioned [tutorial](https://brendan.fyi/pj-twitter-blog) from my colleague [Pj](https://brendan.fyi/pj). So I won't go into detail on the actual coding of the app – you can find the [code](https://gitlab.com/brendan-demo/4oclock), [website](https://brendan.fyi/4oclock), and [Twitter bot](https://twitter.com/DammitOclock) if you want to learn more about the app itself. But to deploy the website and the bot, I needed something else: [GitLab CI/CD](https://docs.gitlab.com/ee/ci/).\n\n## Deploying the app\n\nCombining GitLab CI/CD and GitLab.com's SaaS offering with Gitpod meant that I could not only code and preview the app from my iPad, but I could also get it deployed to Heroku (or any provider) from the couch. \n\nI created a `.gitlab-ci.yml` file in my project to get started. For deploying to Heroku:\n\n- I like to use a Ruby package called [dpl](https://github.com/travis-ci/dpl) from Travis CI because it makes it a simple one-line command.  Alternatively, I could install the [Heroku CLI](https://devcenter.heroku.com/articles/heroku-cli) and deploy with that if I wanted to. \n\n- I added the `HEROKU_API_KEY` variable to my [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/#add-a-cicd-variable-to-a-project) so that I could authenticate with Heroku for the deployment. \n\n- I then set the `rules:` section to only deploy when commits are impacting the main (default) branch, and I was ready to go! 
\n\nNow, every time I push code from Gitpod to GitLab, GitLab will start the build and deploy it to Heroku:\n\n```yaml\nimage: starefossen/ruby-node:2-10\n\nvariables:\n APP_NAME: four-oclock-in-the-morning\n\ndeploy:\n stage: deploy\n script:\n - gem install dpl -v 1.10.6\n - dpl --provider=heroku --app=$APP_NAME --api-key=$HEROKU_API_KEY\n rules:\n - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH\n```\n\n## Enabling collaboration\n\nThere are two other concepts that this pattern introduces that are worth discussion: the idea of one environment per change and enabling new collaborators to spin up a development environment in seconds.\n\nMost developers are used to having our setup just the way we like it – precisely the correct number of monitors, keys on our keyboard, and all of our favorite tools installed. However, that can lead to issues. We already know we should treat our servers like cattle, not pets, so why do we still treat our laptops like pets? While I love my MacBook and the stickers on it as much as the next person, I can get frustrated when setting up a new one and getting it back to the way I like it.\n\nIn addition, on many projects I've been on in the past, onboarding a new developer can take a lot of effort, including getting the correct libraries installed and ensuring they have access to all the right resources and environments. These things may seem trivial, but I've seen it take up to three days from senior engineers just to get another engineer up and running. All of that time is time that could be much better spent on writing code for the actual business.\n\nGitpod solves both of these issues with a simple YAML file: `.gitpod.yml`. 
This file allows you to specify:\n\n- What image to use as the base for the environment\n- Which other tools to install\n- What commands to run at startup, and even things like which VSCode extensions you should have in the environment\n\nAnd [lots of different settings](https://www.gitpod.io/docs/references/gitpod-yml) that you can find in the [Gitpod docs](https://www.gitpod.io/docs).\n\nSpecifying all of the tools needed lets you have short-lived environments that you can spin up for one task and then discard and get a fresh one for the next task. And it also saves time when onboarding new engineers by guaranteeing they have a running system within just a few seconds of opening the project. Best of all, it is all in a file that's in source control, so as things change or you make improvements to the development environment, all of your developers benefit from it immediately.  \n\nI added a simple [`.gitpod.yml`](https://gitlab.com/brendan-demo/4oclock/-/blob/main/.gitpod.yml) to run `npm run dev` to get started when you create a new environment. That simple example is great for a simple Node app or similar, but what about something more complex? Gitpod works for that, too. GitLab itself has a [`gitpod.yml`](https://gitlab.com/gitlab-org/gitlab/-/blob/master/.gitpod.yml) that lets you get an entire working GitLab development environment – and all that entails – up and running quickly, without the need to install Postgres and Redis and all of the other dependencies GitLab has.\n\nThis makes contributing to GitLab easier than ever. Just go to the [GitLab repository](https://brendan.fyi/gitlab-repo) and click on that Gitpod button to get started. 
I'd love to hear how it works for you!\n",[773,9,749],{"slug":2509,"featured":6,"template":684},"how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod","content:en-us:blog:how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod.yml","How To Code Build And Deploy From An Ipad Using Gitlab And Gitpod","en-us/blog/how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod.yml","en-us/blog/how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod",{"_path":2515,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2516,"content":2522,"config":2529,"_id":2531,"_type":13,"title":2532,"_source":15,"_file":2533,"_stem":2534,"_extension":18},"/en-us/blog/how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab",{"title":2517,"description":2518,"ogTitle":2517,"ogDescription":2518,"noIndex":6,"ogImage":2519,"ogUrl":2520,"ogSiteName":669,"ogType":670,"canonicalUrls":2520,"schema":2521},"CI/CD pipeline: GitLab & Helm for Kubernetes Auto Deploy","One user walks through how he tried GitLab caching and split the job into multiple steps to get better feedback.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664472/Blog/Hero%20Images/gitlabflatlogomap.png","https://about.gitlab.com/blog/how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to create a CI/CD pipeline with Auto Deploy to Kubernetes using GitLab and Helm\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sergey Nuzhdin\"}],\n        \"datePublished\": \"2017-09-21\",\n      }",{"title":2523,"description":2518,"authors":2524,"heroImage":2519,"date":2526,"body":2527,"category":769,"tags":2528},"How to create a CI/CD pipeline with Auto Deploy to Kubernetes using GitLab and Helm",[2525],"Sergey Nuzhdin","2017-09-21","Recently, I started working on a few Golang 
[microservices](/topics/microservices/). I decided to try GitLab’s caching and split the job into multiple steps for better feedback in the UI.\n\n\u003C!-- more -->\n\nSince my previous posts[[1](http://blog.lwolf.org/post/how-to-build-tiny-golang-docker-images-with-gitlab-ci/)][[2](http://blog.lwolf.org/post/continuous-deployment-to-kubernetes-from-gitlab-ci/)] about [CI/CD](/topics/ci-cd/), a lot has changed. I started using Helm charts for packaging applications, and stopped using docker-in-docker in gitlab-runner.\n\nHere are a few of the main changes to my `.gitlab-ci.yml` file since my previous post:\n\n* no docker-in-docker\n* using cache for packages instead of a prebuilt image with dependencies\n* splitting everything into multiple steps\n* autodeploy to staging environment using Helm, a package manager for Kubernetes\n\n### Building Golang image\n\nSince Golang is very strict about the location of the project, we need to make some adjustments to the CI job. This is done in the `before_script` block. Simply create needed directories and link source code in there. Assuming that the official repository of the project is `gitlab.example.com/librerio/libr_files` it should look like this.\n\n```\nvariables:\n  APP_PATH: /go/src/gitlab.example.com/librerio/libr_files\n\nbefore_script:\n  - mkdir -p /go/src/gitlab.example.com/librerio/\n  - ln -s $PWD ${APP_PATH}\n  - mkdir -p ${APP_PATH}/vendor\n  - cd ${APP_PATH}\n```\n\nWith this in place, we can install dependencies and build our binaries. To avoid the download of all packages on each build we need to configure caching. Due to the strange caching rules of GitLab, we need to add vendor directory to both cache and artifacts. 
Cache will give us an ability to use it between build jobs and artifacts will allow us to use it inside the same job.\n\n```\n\ncache:\n  untracked: true\n  key: \"$CI_BUILD_REF_NAME\"\n  paths:\n    - vendor/\n\nsetup:\n  stage: setup\n  image: lwolf/golang-glide:0.12.3\n  script:\n    - glide install -v\n  artifacts:\n    paths:\n     - vendor/\n\n```\n\nBuild step didn’t change, it’s still about building the binary. I add binary to artifacts.\n\n```\nbuild:\n  stage: build\n  image: lwolf/golang-glide:0.12.3\n  script:\n    - cd ${APP_PATH}\n    - GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o release/app -ldflags '-w -s'\n    - cd release\n  artifacts:\n    paths:\n     - release/\n```\n\n###  Test stage\n\nTo run golang tests with coverage reports I’m using the variation of [this shell script](https://github.com/mlafeldt/chef-runner/blob/v0.7.0/script/coverage). It runs all tests in project subdirectories and creates a [coverage report](/blog/publish-code-coverage-report-with-gitlab-pages/). I changed it a bit before putting into a gist. I exclude vendor directory from tests.\n\n* coverage regexp for gitlab-ci: `^total:\\s*\\(statements\\)\\s*(\\d+.\\d+\\%)`\n\n### Deploy stage\n\nI don’t use native GitLab’s integration with Kubernetes.\n\nFirst I thought about creating Kubernetes secrets and mounting it to the gitlab-runner pod. But it’s very complicated. You need to upgrade deployment every time you want to add new Kubernetes cluster configurations. So I’m using GitLab’s CI/CD variables with base64 encoded Kubernetes config. Each project can have any number of configurations. The process is easy – create base64 string from the configuration file and copy it to the clipboard. 
After this, put it into `kube_config` variable (name it whatever you like).\n\n`cat ~/.kube/config | base64 | pbcopy`\n\nIf you do not own a full GitLab installation, consider creating a Kubernetes user with restricted permissions.\n\nThen on the deploy stage, we can decode this variable back into the file and use it with kubectl.\n\n```\nvariables:\n  KUBECONFIG: /etc/deploy/config\n\ndeploy:\n  ...\n  before_script:\n    - mkdir -p /etc/deploy\n    - echo ${kube_config} | base64 -d > ${KUBECONFIG}\n    - kubectl config use-context homekube\n    - helm init --client-only\n    - helm repo add stable https://kubernetes-charts.storage.googleapis.com/\n    - helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/\n    - helm repo update\n```\n\nDeploy stage also covers the case when you have several versions of the same application.\n\nFor example, you have two versions of API: v1.0 and v1.1. All you need to do is set `appVersion` in Chart.yaml file. Build system will check API version and either deploy or upgrade needed release.\n\n```\n- export API_VERSION=\"$(grep \"appVersion\" Chart.yaml | cut -d\" \" -f2)\"\n- export RELEASE_NAME=\"libr-files-v${API_VERSION/./-}\"\n- export DEPLOYS=$(helm ls | grep $RELEASE_NAME | wc -l)\n- if [ ${DEPLOYS}  -eq 0 ]; then helm install --name=${RELEASE_NAME} . --namespace=${STAGING_NAMESPACE}; else helm upgrade ${RELEASE_NAME} . 
--namespace=${STAGING_NAMESPACE}; fi\n```\n\n### tl;dr\n\n```\nHere is complete `.gitlab-ci.yaml` file for reference.\n\ncache:\n  untracked: true\n  key: \"$CI_BUILD_REF_NAME\"\n  paths:\n    - vendor/\n\nbefore_script:\n  - mkdir -p /go/src/gitlab.example.com/librerio/\n  - ln -s $PWD ${APP_PATH}\n  - mkdir -p ${APP_PATH}/vendor\n  - cd ${APP_PATH}\n\nstages:\n  - setup\n  - test\n  - build\n  - release\n  - deploy\n\nvariables:\n  CONTAINER_IMAGE: ${CI_REGISTRY}/${CI_PROJECT_PATH}:${CI_BUILD_REF_NAME}_${CI_BUILD_REF}\n  CONTAINER_IMAGE_LATEST: ${CI_REGISTRY}/${CI_PROJECT_PATH}:latest\n  DOCKER_DRIVER: overlay2\n\n  KUBECONFIG: /etc/deploy/config\n  STAGING_NAMESPACE: app-stage\n  PRODUCTION_NAMESPACE: app-prod\n\n  APP_PATH: /go/src/gitlab.example.com/librerio/libr_files\n  POSTGRES_USER: gorma\n  POSTGRES_DB: test-${CI_BUILD_REF}\n  POSTGRES_PASSWORD: gorma\n\nsetup:\n  stage: setup\n  image: lwolf/golang-glide:0.12.3\n  script:\n    - glide install -v\n  artifacts:\n    paths:\n     - vendor/\n\nbuild:\n  stage: build\n  image: lwolf/golang-glide:0.12.3\n  script:\n    - cd ${APP_PATH}\n    - GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o release/app -ldflags '-w -s'\n    - cd release\n  artifacts:\n    paths:\n     - release/\n\nrelease:\n  stage: release\n  image: docker:latest\n  script:\n    - cd ${APP_PATH}/release\n    - docker login -u gitlab-ci-token -p ${CI_BUILD_TOKEN} ${CI_REGISTRY}\n    - docker build -t ${CONTAINER_IMAGE} .\n    - docker tag ${CONTAINER_IMAGE} ${CONTAINER_IMAGE_LATEST}\n    - docker push ${CONTAINER_IMAGE}\n    - docker push ${CONTAINER_IMAGE_LATEST}\n\ntest:\n  stage: test\n  image: lwolf/golang-glide:0.12.3\n  services:\n    - postgres:9.6\n  script:\n    - cd ${APP_PATH}\n    - curl -o coverage.sh https://gist.githubusercontent.com/lwolf/3764a3b6cd08387e80aa6ca3b9534b8a/raw\n    - sh coverage.sh\n\ndeploy_staging:\n  stage: deploy\n  image: lwolf/helm-kubectl-docker:v152_213\n  before_script:\n    - mkdir -p /etc/deploy\n    
- echo ${kube_config} | base64 -d > ${KUBECONFIG}\n    - kubectl config use-context homekube\n    - helm init --client-only\n    - helm repo add stable https://kubernetes-charts.storage.googleapis.com/\n    - helm repo add incubator https://kubernetes-charts-incubator.storage.googleapis.com/\n    - helm repo update\n  script:\n    - cd deploy/libr-files\n    - helm dep build\n    - export API_VERSION=\"$(grep \"appVersion\" Chart.yaml | cut -d\" \" -f2)\"\n    - export RELEASE_NAME=\"libr-files-v${API_VERSION/./-}\"\n    - export DEPLOYS=$(helm ls | grep $RELEASE_NAME | wc -l)\n    - if [ ${DEPLOYS}  -eq 0 ]; then helm install --name=${RELEASE_NAME} . --namespace=${STAGING_NAMESPACE}; else helm upgrade ${RELEASE_NAME} . --namespace=${STAGING_NAMESPACE}; fi\n  environment:\n    name: staging\n    url: https://librerio.example.com\n  only:\n  - master\n\n```\n\n_[How to create a CI/CD pipeline with Auto Deploy to Kubernetes using GitLab and Helm](http://blog.lwolf.org/post/how-to-create-ci-cd-pipeline-with-autodeploy-k8s-gitlab-helm/) was originally published on Lwolfs Blog._\n\nPhoto by C Chapman on [Unsplash](https://unsplash.com/)",[771,772,9,2255],{"slug":2530,"featured":6,"template":684},"how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab","content:en-us:blog:how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab.yml","How To Create A Ci Cd Pipeline With Auto Deploy To Kubernetes Using 
Gitlab","en-us/blog/how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab.yml","en-us/blog/how-to-create-a-ci-cd-pipeline-with-auto-deploy-to-kubernetes-using-gitlab",{"_path":2536,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2537,"content":2543,"config":2551,"_id":2553,"_type":13,"title":2554,"_source":15,"_file":2555,"_stem":2556,"_extension":18},"/en-us/blog/how-to-create-review-apps-for-android-with-gitlab-fastlane-and-appetize-dot-io",{"title":2538,"description":2539,"ogTitle":2538,"ogDescription":2539,"noIndex":6,"ogImage":2540,"ogUrl":2541,"ogSiteName":669,"ogType":670,"canonicalUrls":2541,"schema":2542},"Review Apps for Android with GitLab, fastlane & Appetize.io","See how GitLab and Appetize.io can bring Review Apps to your Android project","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664102/Blog/Hero%20Images/gitlab-values-cover.png","https://about.gitlab.com/blog/how-to-create-review-apps-for-android-with-gitlab-fastlane-and-appetize-dot-io","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to create Review Apps for Android with GitLab, fastlane, and Appetize.io\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Andrew Fontaine\"}],\n        \"datePublished\": \"2020-05-06\",\n      }",{"title":2544,"description":2539,"authors":2545,"heroImage":2540,"date":2547,"body":2548,"category":2549,"tags":2550},"How to create Review Apps for Android with GitLab, fastlane, and Appetize.io",[2546],"Andrew Fontaine","2020-05-06","\n\n{::options parse_block_html=\"true\" /}\n\n\n\nIn a [previous look at GitLab and _fastlane_], we discussed how _fastlane_ now\nautomatically publishes the Gitter Android app to the Google Play Store, but at\nGitLab, we live on [review apps], and review apps for Android applications didn't\nreally exist... 
until [Appetize.io] came to our attention.\n\nJust a simple extension of our existing `.gitlab-ci.yml`, we can utilize\nAppetize.io to spin up review apps of our Android application.\n\nIf you'd rather just skip to the end, you can see\n[my MR to the Gitter Android project].\n\n## Setting up Fastlane\n\nFortunately for us, _fastlane_ has integrated support for Appetize.io, so all\nthat's needed to hit Appetize is the addition of a new `lane`:\n\n```diff\ndiff --git a/fastlane/Fastfile b/fastlane/Fastfile\nindex eb47819..f013a86 100644\n--- a/fastlane/Fastfile\n+++ b/fastlane/Fastfile\n@@ -32,6 +32,13 @@ platform :android do\n     gradle(task: \"test\")\n   end\n\n+  desc 'Pushes the app to Appetize and updates a review app'\n+  lane :review do\n+    appetize(api_token: ENV['APPETIZE_TOKEN'],\n+             path: 'app/build/outputs/apk/debug/app-debug.apk',\n+             platform: 'android')\n+  end\n+\n   desc \"Submit a new Internal Build to Play Store\"\n   lane :internal do\n     upload_to_play_store(track: 'internal', apk: 'app/build/outputs/apk/release/app-release.apk')\n```\n\n`APPETIZE_TOKEN` is an Appetize.io API token that can be generated on the\n[Appetize API docs] after signing up for an account. Once we add a new job and\nstage to our `.gitlab-ci.yml`, we will be able to deploy our APK to Appetize and\nrun them in the browser!\n\n```diff\ndiff --git a/.gitlab-ci.yml b/.gitlab-ci.yml\nindex d9863d7..e4d0ce3 100644\n--- a/.gitlab-ci.yml\n+++ b/.gitlab-ci.yml\n@@ -5,6 +5,7 @@ stages:\n   - environment\n   - build\n   - test\n+  - review\n   - internal\n   - alpha\n   - beta\n@@ -81,6 +82,16 @@ buildRelease:\n   environment:\n     name: production\n\n+deployReview:\n+  stage: review\n+  image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n+  script:\n+    - bundle exec fastlane review\n+  only:\n+    - branches\n+  except:\n+    - master\n+\n testDebug:\n   image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n   stage: test\n```\n\nGreat! 
Review apps will be deployed when branches other than `master` build.\nUnfortunately, there is no `environment` block, so there's nothing linking these\ndeployed review apps to GitLab. Let's fix that next.\n\n## Dynamic Environment URLs\n\nPreviously, GitLab only liked environment URLs that used pre-existing CI\nvariables (like `$CI_COMMIT_REF_NAME`) in their definition. Since 12.9, however,\na [new way of defining environment urls with alternative variables exists].\n\nBy creating a `dotenv` file and submitting it as an `artifact` in our build, we\ncan define custom variables to use in our environment's URL. As all Appetize.io\napp URLs take the pattern of `https://appetize.io/app/$PUBLIC_KEY`, where\n`$PUBLIC_KEY` is randomly generated when the app is created, we need to get the\npublic key from the Appetize response in our `Fastfile`, and put it in a\n`dotenv` file.\n\n```diff\ndiff --git a/fastlane/Fastfile b/fastlane/Fastfile\nindex 7b5f9d1..ae3867c 100644\n--- a/fastlane/Fastfile\n+++ b/fastlane/Fastfile\n@@ -13,6 +13,13 @@\n # Uncomment the line if you want fastlane to automatically update itself\n # update_fastlane\n\n+\n+def update_deployment_url(pub_key)\n+  File.open('../deploy.env', 'w') do |f|\n+    f.write(\"APPETIZE_PUBLIC_KEY=#{pub_key}\")\n+  end\n+end\n+\n default_platform(:android)\n\n platform :android do\n@@ -37,6 +44,7 @@ platform :android do\n     appetize(api_token: ENV['APPETIZE_TOKEN'],\n              path: 'app/build/outputs/apk/debug/app-debug.apk',\n              platform: 'android')\n+    update_deployment_url(lane_context[SharedValues::APPETIZE_PUBLIC_KEY])\n   end\n\n   desc \"Submit a new Internal Build to Play Store\"\n```\n\nWe also need to add an `environment` block to our `.gitlab-ci.yml` to capture an\nenvironment name and URL.\n\n```diff\ndiff --git a/.gitlab-ci.yml b/.gitlab-ci.yml\nindex f5a8648..c834077 100644\n--- a/.gitlab-ci.yml\n+++ b/.gitlab-ci.yml\n@@ -85,12 +85,18 @@ buildCreateReleaseNotes:\n deployReview:\n   stage: 
review\n   image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n+  environment:\n+    name: review/$CI_COMMIT_REF_NAME\n+    url: https://appetize.io/app/$APPETIZE_PUBLIC_KEY\n   script:\n     - bundle exec fastlane review\n   only:\n     - branches\n   except:\n     - master\n+  artifacts:\n+    reports:\n+      dotenv: deploy.env\n\n testDebug:\n   image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n```\n\nOnce committed, pushed, and a pipeline runs, we should see our environment\ndeployed!\n\n![Our first review environment][first-review-app]\n\n## Optimizing Updates\n\nAfter running with this for a bit, we realized that we were accidentally\ncreating a new app on Appetize.io with every new build! Their docs\n[specify how to update existing apps], so we went about seeing if we could\nsmartly update existing environments.\n\nSpoiler alert: We could.\n\nFirst, we need to save the public key granted to us by Appetize.io somewhere. We\ndecided to put it in a JSON file and save that as an artifact of the build.\nFortunately, the `Fastfile` is just ruby, which allows us to quickly write it\nout to a file with a few lines of code, as well as attempt to fetch the artifact\nfor the last build of the current branch.\n\n```diff\ndiff --git a/fastlane/Fastfile b/fastlane/Fastfile\nindex ae3867c..61e9226 100644\n--- a/fastlane/Fastfile\n+++ b/fastlane/Fastfile\n@@ -13,8 +13,32 @@\n # Uncomment the line if you want fastlane to automatically update itself\n # update_fastlane\n\n+require 'net/http'\n+require 'json'\n+\n+GITLAB_TOKEN = ENV['PRIVATE_TOKEN']\n+PROJECT_ID = ENV['CI_PROJECT_ID']\n+REF = ENV['CI_COMMIT_REF_NAME']\n+JOB = ENV['CI_JOB_NAME']\n+API_ROOT = ENV['CI_API_V4_URL']\n+\n+def public_key\n+  uri = URI(\"#{API_ROOT}/projects/#{PROJECT_ID}/jobs/artifacts/#{REF}/raw/appetize-information.json?job=#{JOB}\")\n+  http = Net::HTTP.new(uri.host, uri.port)\n+  http.use_ssl = true\n+  req = Net::HTTP::Get.new(uri)\n+  req['PRIVATE-TOKEN'] = GITLAB_TOKEN\n+  response = 
http.request(req)\n+  return '' if response.code.equal?('404')\n+\n+  appetize_info = JSON.parse(response.body)\n+  appetize_info['publicKey']\n+end\n\n def update_deployment_url(pub_key)\n+  File.open('../appetize-information.json', 'w') do |f|\n+    f.write(JSON.generate(publicKey: pub_key))\n+  end\n   File.open('../deploy.env', 'w') do |f|\n     f.write(\"APPETIZE_PUBLIC_KEY=#{pub_key}\")\n   end\n@@ -42,6 +66,7 @@ platform :android do\n   desc 'Pushes the app to Appetize and updates a review app'\n   lane :review do\n     appetize(api_token: ENV['APPETIZE_TOKEN'],\n+             public_key: public_key,\n              path: 'app/build/outputs/apk/debug/app-debug.apk',\n              platform: 'android')\n     update_deployment_url(lane_context[SharedValues::APPETIZE_PUBLIC_KEY])\n```\n\nWhen we go to deploy our app to Appetize, we hit the [Jobs API] to see if we\nhave a public key for this branch. If the API returns a `404`, we know we are\nbuilding a fresh branch and return an empty string, else we parse the JSON and\nreturn our public key. The [Fastlane docs] state the `appetize` action can take\na `public_key` to update an existing app. Here, `''` is considered the same as\n_not_ providing a public key, so a new application is still deployed as we expect.\n\n**NOTE:** If you've read the `diff` closely, you'll notice the usage of an\nenvironment variable called `PRIVATE_TOKEN`. This is a GitLab private token\ncreated with the `read_api` scope and injected into our build as an environment\nvariable. 
This is required to authenticate with the GitLab API and fetch\nartifacts.\n\nOnce we update `.gitlab-ci.yml` to save the new `appetize-information.json` file\nas an artifact, later builds on the same branch will be smart and update the\nexisting Appetize app!\n\n```diff\ndiff --git a/.gitlab-ci.yml b/.gitlab-ci.yml\nindex c834077..54cf3f6 100644\n--- a/.gitlab-ci.yml\n+++ b/.gitlab-ci.yml\n@@ -95,6 +95,8 @@ deployReview:\n   except:\n     - master\n   artifacts:\n+    paths:\n+      - appetize-information.json\n     reports:\n       dotenv: deploy.env\n```\n\n## Cleaning up\n\nAll that's left is to delete old apps from Appetize once we don't need them\nanymore. We can do that by leveraging `on_stop` and creating a `stop` job that\nwill delete our app from Appetize.io\n\n```diff\ndiff --git a/.gitlab-ci.yml b/.gitlab-ci.yml\nindex 54cf3f6..f6ecf7e 100644\n--- a/.gitlab-ci.yml\n+++ b/.gitlab-ci.yml\n@@ -10,6 +10,7 @@ stages:\n   - alpha\n   - beta\n   - production\n+  - stop\n\n\n .updateContainerJob:\n@@ -88,6 +89,7 @@ deployReview:\n   environment:\n     name: review/$CI_COMMIT_REF_NAME\n     url: https://appetize.io/app/$APPETIZE_PUBLIC_KEY\n+    on_stop: stopReview\n   script:\n     - bundle exec fastlane review\n   only:\n@@ -100,6 +102,22 @@ deployReview:\n     reports:\n       dotenv: deploy.env\n\n+stopReview:\n+  stage: stop\n+  environment:\n+    name: review/$CI_COMMIT_REF_NAME\n+    action: stop\n+  variables:\n+    GIT_STRATEGY: none\n+  when: manual\n+  only:\n+    - branches\n+  except:\n+    - master\n+  script:\n+    - apt-get -y update && apt-get -y upgrade && apt-get -y install jq curl\n+    - curl --request DELETE https://$APPETIZE_TOKEN@api.appetize.io/v1/apps/`jq -r '.publicKey' \u003C appetize-information.json`\n+\n testDebug:\n   image: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n   stage: test\n```\n\nOnce your MR is merged and your branch is deleted, the `stopReview` job runs,\ncalling the [`DELETE` endpoint of the Appetize.io API] with the 
public key that\nis contained in `appetize-information.json`. We don't need to fetch\n`appetize-information.json` because the artifact is already present in our build\ncontext. This is because the `stop` stage happens _after_ the `review` stage.\n\n![A merge request with a deployed review app][merge-request-with-review-app]\n\n## Conclusion\n\nThanks to some integration with _fastlane_ and the addition of a couple\nenvironment variables, having the ability to create review apps for an Android\nproject was surprisingly simple. GitLab's review apps are not _just_ for web-based\nprojects, even though it may take a little tinkering to get working. Appetize.io\nalso supports iOS applications, so all mobile native applications can be turned\ninto review apps. I would love to see this strategy be applied to a React Native\nproject as well!\n\n[previous look at gitlab and _fastlane_]: /blog/android-publishing-with-gitlab-and-fastlane/\n[my mr to the gitter android project]: https://gitlab.com/gitlab-org/gitter/gitter-android-app/-/merge_requests/167\n[review apps]: https://docs.gitlab.com/ee/ci/review_apps/#review-apps\n[appetize.io]: https://appetize.io\n[appetize api docs]: https://appetize.io/docs#request-api-token\n[new way of defining environment urls with alternative variables exists]: https://docs.gitlab.com/ee/ci/environments/index.html#set-dynamic-environment-urls-after-a-job-finishes\n[first-review-app]: /images/blogimages/how-to-create-review-apps-for-android-with-gitlab-fastlane-and-appetize-dot-io/first-review-app.png\n[specify how to update existing apps]: https://appetize.io/docs#updating-apps\n[jobs api]: https://docs.gitlab.com/ee/api/jobs.html#download-a-single-artifact-file-from-specific-tag-or-branch\n[fastlane docs]: https://docs.fastlane.tools/actions/appetize/\n[`delete` endpoint of the appetize.io api]: https://appetize.io/docs#deleting-apps\n[merge-request-with-review-app]: 
/images/blogimages/how-to-create-review-apps-for-android-with-gitlab-fastlane-and-appetize-dot-io/merge-request-with-review-app.png\n","unfiltered",[108,230,680,9],{"slug":2552,"featured":6,"template":684},"how-to-create-review-apps-for-android-with-gitlab-fastlane-and-appetize-dot-io","content:en-us:blog:how-to-create-review-apps-for-android-with-gitlab-fastlane-and-appetize-dot-io.yml","How To Create Review Apps For Android With Gitlab Fastlane And Appetize Dot Io","en-us/blog/how-to-create-review-apps-for-android-with-gitlab-fastlane-and-appetize-dot-io.yml","en-us/blog/how-to-create-review-apps-for-android-with-gitlab-fastlane-and-appetize-dot-io",{"_path":2558,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2559,"content":2565,"config":2570,"_id":2572,"_type":13,"title":2573,"_source":15,"_file":2574,"_stem":2575,"_extension":18},"/en-us/blog/how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration",{"title":2560,"description":2561,"ogTitle":2560,"ogDescription":2561,"noIndex":6,"ogImage":2562,"ogUrl":2563,"ogSiteName":669,"ogType":670,"canonicalUrls":2563,"schema":2564},"How to deploy a PHP app using GitLab's Cloud Run integration","Are you using PHP and want an easy way to deploy your application to Google Cloud? 
Follow this guide to deploy your app with Google Cloud Run in under 10 minutes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098264/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_519147119_2RafH61mqosMZv8HGAlsUj_1750098264407.jpg","https://about.gitlab.com/blog/how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to deploy a PHP app using GitLab's Cloud Run integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christian Nnachi\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2024-12-10\"\n      }",{"title":2560,"description":2561,"authors":2566,"heroImage":2562,"date":2567,"body":2568,"category":769,"tags":2569},[1347,831],"2024-12-10","Writing PHP application code and ensuring the application is running smoothly in production are often two different skill sets owned by two different engineers. GitLab aims to bridge the gap by enabling the engineer who has written the PHP application code to also deploy it into Google Cloud Platform with little effort. \n\nWhether you own event-driven, long-running services or deploy containerized jobs to process data, Google Cloud Run automatically scales your containers up and down from zero — this means you only pay when your code is running.\n\nIf you are a PHP developer who would like to deploy your application with minimal effort to Google Cloud Platform, this guide will show you how using the GitLab Google Cloud Run integration. 
\n\n# Overview\n\n- Create a new project in GitLab\n- Set up your PHP application\n- Utilizing the Google Cloud integration, create a Service account\n- Utilizing the Google Cloud integration, configure Cloud Run via merge request\n- Try adding another endpoint\n- Clean up\n\n## Prerequisites\n- Owner access on a Google Cloud Platform project\n- Working knowledge of [PHP](https://www.php.net/manual/en/introduction.php), an open-source, general-purpose scripting language\n- Working knowledge of [GitLab CI](https://about.gitlab.com/topics/ci-cd/#what-is-continuous-integration-ci)\n- 10 minutes\n\n## 1. Create a new project in GitLab.\n\nWe decided to call our project `PHP cloud-run` for simplicity.\n\n![PHP cloud- run project](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098287/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098287615.png)\n\nThen, create an index.php app: [https://gitlab.com/demos/templates/php-cloud-run/-/blob/main/index.php](https://gitlab.com/demos/templates/php-cloud-run/-/blob/main/index.php).\n\n```php\n\u003C?php\n\n$name = getenv('NAME', true) ?: 'World';\necho sprintf('Hello %s!', $name);\n```\n\n## 2. Utilizing the Google Cloud integration, create a Service account.\n\nNavigate to **Operate > Google Cloud > Create Service account**. \n\n![Create Service account screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750098287616.png)\n\nThen configure the region you would like the Cloud Run instance deployed to.\n\n![Configure region screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098287618.png)\n\n## 3. 
Utilizing the Google Cloud integration, configure **Cloud Run via merge request**.\n\n![Deployment configuration screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098287620.png)\n\nThis will open a merge request. Immediately merge this merge request.\n\n![Enable Deployments to Cloud run screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098287622.png)\n\n**Note:** `GCP_PROJECT_ID`, `GCP_REGION`,  `GCP_SERVICE_ACCOUNT`, and `GCP_SERVICE_ACCOUNT_KEY` will all be automatically populated from the previous steps.\n\n![Variables screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098287624.png)\n\nCheck your pipeline and you will see you have successfully deployed to Google Cloud Run utilizing GitLab CI.\n\n![merge branch screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098287625.png)\n\n\u003Cbr>\u003C/br>\n\n![Google Cloud Run deployed with GitLab CI](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098287627.png)\n\n## 4. 
Click the **Service URL** to view your newly deployed PHP application.\n\nIn addition, you can navigate to **Operate > Environments** to see a list of deployments for your environments.\n\n![Environments screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750098287628.png)\n\nBy clicking on the environment called **main**, you’ll be able to view a complete list of deployments specific to that environment.\n\n![Main environment](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098288/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098287631.png)\n\n## 5. Add another endpoint\n\nTo get started with developing your PHP application, try adding another endpoint. For example, in your main file, you can add a `/bye` endpoint like this:\n\n```\n\n\u003C?php\n\n$name = getenv('NAME', true) ?: 'World';\n\nif ($_SERVER['REQUEST_URI'] == '/bye') {\n    echo sprintf('Goodbye %s!', $name);\n} else {\n    echo sprintf('Hello %s!', $name);\n}\n\n```\n\nPush the changes to the repo, and watch the `deploy-to-cloud-run` job deploy the updates. Once the job is complete, go back to the Service URL and navigate to the `/bye` endpoint to see the new functionality in action.\n\n### Clean up\n\nTo prevent incurring charges on your Google Cloud account for the resources used in this tutorial, you can either delete the specific resources or delete the entire Google Cloud project. 
For detailed instructions, refer to the [cleanup guide here](https://docs.gitlab.com/ee/tutorials/create_and_deploy_web_service_with_google_cloud_run_component/#clean-up).\n\n> Check out more [easy-to-follow tutorials from our Solutions Architecture team](https://about.gitlab.com/blog/tags/solutions-architecture/).",[1000,9,1248,230],{"slug":2571,"featured":6,"template":684},"how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration","content:en-us:blog:how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration.yml","How To Deploy A Php App Using Gitlabs Cloud Run Integration","en-us/blog/how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration.yml","en-us/blog/how-to-deploy-a-php-app-using-gitlabs-cloud-run-integration",{"_path":2577,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2578,"content":2584,"config":2590,"_id":2592,"_type":13,"title":2593,"_source":15,"_file":2594,"_stem":2595,"_extension":18},"/en-us/blog/how-to-detecting-secrets-in-video",{"title":2579,"description":2580,"ogTitle":2579,"ogDescription":2580,"noIndex":6,"ogImage":2581,"ogUrl":2582,"ogSiteName":669,"ogType":670,"canonicalUrls":2582,"schema":2583},"How-to: Detecting secrets in video content ","GitLab’s Security team identifies and mitigates security risks in video content by searching for API keys or other sensitive tokens. 
Here's how we do it (with an assist from AI) and how you can, too.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099421/Blog/Hero%20Images/Blog/Hero%20Images/security-checklist_security-checklist.png_1750099421443.png","https://about.gitlab.com/blog/how-to-detecting-secrets-in-video-content","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How-to: Detecting secrets in video content \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dennis Appelt\"}],\n        \"datePublished\": \"2024-02-29\"\n      }",{"title":2579,"description":2580,"authors":2585,"heroImage":2581,"date":2587,"body":2588,"category":814,"tags":2589},[2586],"Dennis Appelt","2024-02-29","Today, we are open-sourcing our [solution for detecting secrets in video content]( https://gitlab.com/gitlab-com/gl-security/security-research/video-scanner/youtube-video-scanner). We use it internally to search videos published on our [GitLab Unfiltered YouTube channel](https://www.youtube.com/@GitLabUnfiltered) for [secrets](https://docs.gitlab.com/ee/security/token_overview.html) such as API keys and other sensitive tokens.\n\nWhile there are existing tools for secret detection, we did not find a tool that quite fit the bill for our use case, so we decided to implement a custom scanner. In this blog post, we'll walk through our general approach, some of the challenges we encountered, and our solution. We'll also discuss how GitLab’s new AI assistant, [GitLab Duo Chat](https://about.gitlab.com/gitlab-duo/), helped with the implementation of the scanner.\n\n## Scanning videos, one frame at a time\n\nOur general approach to secret detection in videos is quite simple: Split the video into frames, run optical character recognition (OCR) over each frame, and match the resulting text against known [secret patterns](https://docs.gitlab.com/ee/security/token_overview.html). 
If a secret is found, a [security incident](https://handbook.gitlab.com/handbook/security/security-operations/sirt/engaging-security-on-call/#engage-the-security-engineer-on-call) is kicked off to investigate the leak and revoke exposed secrets.\n\nTo implement this approach, we first experimented using [FFmpeg](https://ffmpeg.org//) for splitting the video into frames and feeding the frames to [Tesseract](https://github.com/tesseract-ocr/tesseract), an open-source engine for OCR. This worked quite well and gave us confidence that the general approach was feasible. However, we decided to switch to [Google Cloud Platform's Video Intelligence API](https://cloud.google.com/video-intelligence/docs/) for the frame splitting and OCR for the simple reason of not having to scale and maintain our own implementation.\n\nFFmpeg and Tesseract are good options if third-party APIs cannot be used or if more control over the process is required. For example, if the secrets are only exposed for a brief moment in the video, using FFmpeg allows you to increase the frame sampling rate to analyze more frames per second and increases the chances of catching the frame that exposes the secret. The Video Intelligence API does not provide a comparable level of control. \n\nThe choice between the Video Intelligence API and FFmpeg + Tesseract also depends on the data set that has to be analyzed. The Video Intelligence API works well on our data set, which makes the additional complexity of a custom implementation based on FFmpeg + Tesseract hard to justify. After settling for the Video Intelligence API, it was a natural choice to host the rest of the scanner on GCP as well. The below diagram gives an overview of the design:\n\n![video content scanners - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099431/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099431138.png)\n\nThe scanner is implemented as a collection of cloud functions running on GCP. 
The cloud function `WebSub API` implements the WebSub [spec](https://www.w3.org/TR/websub/), which is used by YouTube to deliver notifications. Notifications of new videos are published to a PubSub topic, which the cloud function `Video Fetcher` is subscribed to. If a message is received, the video is downloaded and submitted for OCR to the Video Intelligence API. The resulting text extract is checked for secrets by the `Secret Matcher` and alerts are created in case a secret is found.\n\n## Accounting for inaccuracies in OCR\n\nThe described approach sounds simple enough, but as with most things, the devil is in the details. When comparing the video scanner to other secret scanning methods, a notable difference is how the video scanner determines if a given string literal is a secret. Secret detection tools usually determine if the given text contains a secret by matching the text against a list of regular expressions, each defining the format of a secret. If there is a match, a secret is detected.\n\n![video content scanners - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099431/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099431139.png)\n\n\u003Ccenter>\u003Ci>A video frame showing a GitLab access token\u003C/i>\u003C/center>\n\n\u003Cp>\u003C/p>\n\nWhen it comes to video scanning, this approach has limited effectiveness due to the OCR step. In some instances, the recognized text does not quite match the text displayed in the video. For example, the above video frame shows the access token `glpat-HseyLLLE92Ubso2vyVeD` and OCR extracted the text `glpat-HseyLLLE92Ubso2vyVe\\`. The last character of the secret is `D`, but OCR extracted a backslash ( `\\`). 
This error causes the extracted text to no longer match the format of GitLab personal access tokens; therefore, simply matching the text against a regular expression conforming to the token format would have not detected the leaked access token.\n\nTo account for the inaccuracies that are introduced by the OCR step, the video scanner uses approximate regular expression matching where a string is not required to match a regular expression exactly, but small deviations in the strings are allowed. These deviations are expressed as string edit distance and define how many characters in the string need to be inserted, deleted, or substituted to make the string match a given regular expression. For example, the string edit distance for the previous example is 1 because the erroneously detected backslash has to be substituted with an alphanumeric character or a minus sign to make the string match the GitLab personal access token format.\n\nFinding the right value for the maximum edit distance for a string to still be considered to match a regular expression depends on the data set and requires some experimentation. If the value is too low, secrets might be missed and if the value is too high, strings that are not secrets will be matched. To find the right balance for our use case, we collected a data set of videos leaking secrets and ran them through our scanner pipeline. We measured precision (how many matches are not secrets) and recall (how many matches are relevant/true secrets) for different maximum edit distances. We collected this data for the two most relevant types of secrets: GitLab API access tokens and runner registration tokens. The below diagrams show the results.\n\nFor both types of secrets, the optimal maximum cost is 2, at which point a high precision and recall are achieved (97% precision and 91% recall for access tokens; 100% precision and 86% recall for runner registration tokens). 
Decreasing the maximum distance results in worse recall, while increasing the maximum cost leads to either no improvement (runner registration token) or adversely affects precision (access tokens).\n\n![video content scanner - image 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099431/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099431141.png)\n\n\u003Cp>\u003C/p>\n\n![video content scanner - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099431/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099431143.png)\n\n## Implementation made easy using GitLab Duo Chat\n\nAfter some initial experimentation, it was clear to us that approximate regular expression matching can increase the detection accuracy of our video scanner. However, the implementation turned out not to be trivial. While the [algorithms for approximate regular expression matching](https://www.tgries.de/agrep/) have existed since the '90s, there is no widespread library support. [TRE](https://laurikari.net/tre/) is a solid choice but is written in C and thus requires bindings to be called from the scanner’s runtime environment (Node.js).\n\nTo reduce the development effort, we turned to [GitLab Duo Chat](https://about.gitlab.com/gitlab-duo/). After discussing our options with Duo Chat for calling C from Node.js, we settled on [implementing a C++ add-on](https://nodejs.org/docs/latest-v20.x/api/addons.html). We quickly iterated towards a working implementation, which would have undoubtedly taken considerably more time without the help of GitLab Duo Chat. Below is an excerpt from the chat session.\n\n> Me: Are you familiar with the C library TRE? 
Can you help me write a Node.js addon that is a wrapper for TRE?\n\n> Duo: Here is the template for wrapping the TRE library in a Node.js addon using N-API:\n>\n>![video content scanner - image 5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099431/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2024-02-28_at_9.51.14_AM_aHR0cHM6_1750099431144.png)\n>\n> The key steps are:\n> - Include TRE header\n> - Get tre regex_t from parameters\n> - Use tre_regcomp() to compile pattern\n> - Execute regexec()\n> - Handle TRE match results\n> - Return results back to JavaScript\n>\n> Let me know if this helps explain how to integrate TRE regex usage in a Node.js add-on!\n\n> Me: Ok, now let's add code for the tasks that you outlined in the template. I want to call this C++ from JavaScript like this:\n>\n>![video content scanners - image 6](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099431/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2024-02-28_at_9.53.05_AM_aHR0cHM6_1750099431145.png)\n>\n> In this example, the first parameter is the regular expression that should be used to create the parameter regex_t *preg, the second parameter is the string that should be matched against the regular expression, and the third parameter is an integer that represents TRE’s max_cost.\n\n>Duo: Here is example C++ code implementing TRE regex usage to match the provided JavaScript code snippet.\n>\n>![video content scanners - image 7](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099431/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2024-02-28_at_9.54.08_AM_aHR0cHM6_1750099431146.png)\n>\n> Let me know if you need any clarification on this implementation!\n\n> Me: Thank you for the code. 
Can you change the call tre_regexec to tre_regaexec?\n\n> Duo: Here is the code updated to use tre_regaexec instead of tre_regexec:\n>\n>![code updated to use tre_regaexec instead of tre_regexec](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099431/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2024-02-28_at_9.56.29_AM_aHR0cHM6_1750099431146.png)\n>\n> The key changes are:\n> - Using `tre_regaexec` instead of `tre_regexec`\n> - Initializing a `regamatch_t` instead of `regmatch_t`\n> - Passing a regaparams_t struct to configure match parameters\n> \n> Let me know if you need any clarification or have additional questions!\n\nWhile the code generation capabilities are very impressive and save a lot of time, the generated code needs review and sometimes correction. For example, in our session with Duo Chat it is prompted to call [tre_regaexec](https://laurikari.net/tre/documentation/regaexec/), but the generated code is calling the similarly named, but distinct function [tre_regexec](https://laurikari.net/tre/documentation/regexec/). Typically it is enough to make Duo Chat aware of its mistake and it will correct the code in question. In this respect, working with Duo Chat feels like pair programming rather than fully outsourcing a programming task.\n\nThe full add-on code is available [here](https://gitlab.com/gitlab-com/gl-security/security-research/video-scanner/tre-node-bindings/).\n\n## Try our open-source implementation\n\nWe are [making the implementation of the scanner open source](https://gitlab.com/gitlab-com/gl-security/security-research/video-scanner/youtube-video-scanner) under the MIT license. We hope this solution can help you with detecting secrets in your own video content. 
Please [share your feedback in an issue](https://gitlab.com/gitlab-com/gl-security/security-research/video-scanner/youtube-video-scanner/-/issues/new).",[9,727,814],{"slug":2591,"featured":90,"template":684},"how-to-detecting-secrets-in-video-content","content:en-us:blog:how-to-detecting-secrets-in-video.yml","How To Detecting Secrets In Video","en-us/blog/how-to-detecting-secrets-in-video.yml","en-us/blog/how-to-detecting-secrets-in-video",{"_path":2597,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2598,"content":2604,"config":2610,"_id":2612,"_type":13,"title":2613,"_source":15,"_file":2614,"_stem":2615,"_extension":18},"/en-us/blog/how-to-fuzz-rust-code",{"title":2599,"description":2600,"ogTitle":2599,"ogDescription":2600,"noIndex":6,"ogImage":2601,"ogUrl":2602,"ogSiteName":669,"ogType":670,"canonicalUrls":2602,"schema":2603},"How to fuzz Rust code continuously","Learn why you should always fuzz test your Rust code, and the code you'll need to do it.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749681441/Blog/Hero%20Images/rust.jpg","https://about.gitlab.com/blog/how-to-fuzz-rust-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to fuzz Rust code continuously\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Yevgeny Pats\"}],\n        \"datePublished\": \"2020-12-03\"\n      }",{"title":2599,"description":2600,"authors":2605,"heroImage":2601,"date":2607,"body":2608,"category":769,"tags":2609},[2606],"Yevgeny Pats","2020-12-03","\n\nThis blog post was originally published on the GitLab Unfiltered blog. 
It was reviewed and republished on 2020-12-17.\n{: .alert .alert-info .note}\n\n## What is fuzzing?\n\nFuzzing, also called [fuzz testing](/topics/devsecops/what-is-fuzz-testing/), is an automated software testing technique that involves providing semi-random data as an input to the test program in order to uncover bugs and crashes.\n\nIn this short tutorial we will discuss using `cargo-fuzz` for fuzzing Rust code.\n\n## Why fuzz Rust code?\n[Rust](https://www.rust-lang.org/) is a safe language (mostly) and memory corruption issues are a thing of the past so we don’t need to fuzz our code, right? Wrong!\nAny code, and especially where stability, quality, and coverage are important, is worth fuzzing.\nFuzzing can uncover logical bugs and denial-of-service issues in critical components that can lead to security issues as well.\n\nAs a reference to the almost infinite number of bugs found with cargo-fuzz (only the documented ones) you can look at [the list of bugs found by fuzz-testing Rust codebases](https://github.com/rust-fuzz/trophy-case).\n\n## Cargo-fuzz\n\ncargo-fuzz is the current de-facto standard fuzzer for Rust and essentially it is a proxy layer to the well-tested [libFuzzer](https://llvm.org/docs/LibFuzzer.html) engine.\nThis means the algorithm and the interface are all based on libFuzzer, which is a widely-used, coverage-guided fuzzer for C/C++ and some other languages that implemented a proxy layer – just like cargo-fuzz.\n\nlibFuzzer (cargo-fuzz) and coverage-guided fuzzers in general have the following algorithm:\n\n```\n// pseudo code\nInstrument program for code coverage\nfor {\n  Choose random input from corpus\n  Mutate input\n  Execute input and collect coverage\n  If new coverage/paths are hit add it to corpus (corpus - directory with test-cases)\n}\n```\n\n## Building and running the fuzzer\n\nIf you are already familiar with this part you can skip to the Continuous Fuzzing section.\n\nWe will start with 
[rust-fuzzing-example](https://gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/rust-fuzzing-example).\n\nFor the sake of the example, we have a simple [function](https://gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/rust-fuzzing-example/-/blob/master/src/lib.rs) with an off-by-one bug:\n\n```\npub fn parse_complex(data: &[u8]) -> bool{\n\tif data.len() == 5 {\n\t\tif data[0] == b'F' && data[1] == b'U' && data[2] == b'Z' && data[3] == b'Z' && data[4] == b'I' && data[5] == b'T' {\n\t\t\treturn true\n\t\t}\n\t}\n    return true;\n}\n```\n\nOur fuzz [function](https://gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/rust-fuzzing-example/-/blob/master/fuzz/fuzz_targets/fuzz_parse_complex.rs) will look like this and will be called by libFuzzer in an infinite loop with the generated data, according to the coverage-guided algorithm.\n\n```\n#![no_main]\n#[macro_use] extern crate libfuzzer_sys;\nextern crate example_rust;\n\nfuzz_target!(|data: &[u8]| {\n    let _ = example_rust::parse_complex(&data);\n});\n```\n\nTo run the fuzzer we need to build an instrumented version of the code together with the fuzz function.\ncargo-fuzz is doing for us the heavy lifting so it can be done using the following simple steps:\n\n```\n# cargo-fuzz is available in rust nightly\ndocker run -it rustlang/rust:nightly-stretch /bin/bash\ncargo install cargo-fuzz\n\n# Download the example repo, build, and run the fuzzer\ngit clone https://gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/rust-fuzzing-example.git\ncd rust-fuzzing-example\ncargo fuzz run fuzz_parse_complex\n\n## The output should look like this:\n#524288 pulse  cov: 105 ft: 99 corp: 6/26b lim: 517 exec/s: 131072 rss: 93Mb\n#1048576        pulse  cov: 105 ft: 99 corp: 6/26b lim: 1040 exec/s: 116508 rss: 229Mb\n==2208== ERROR: libFuzzer: deadly signal\n    #0 0x5588b8234961  
(/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0x83961)\n    #1 0x5588b8262dc5  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xb1dc5)\n    #2 0x5588b8284734  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xd3734)\n    #3 0x5588b82845e9  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xd35e9)\n    #4 0x5588b826493a  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xb393a)\n    #5 0x7f93737e70df  (/lib/x86_64-linux-gnu/libpthread.so.0+0x110df)\n    #6 0x7f9373252ffe  (/lib/x86_64-linux-gnu/libc.so.6+0x32ffe)\n    #7 0x7f9373254429  (/lib/x86_64-linux-gnu/libc.so.6+0x34429)\n    #8 0x5588b82a4a06  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xf3a06)\n    #9 0x5588b82a1b75  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xf0b75)\n    #10 0x5588b824fa1b  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0x9ea1b)\n    #11 0x5588b82a442b  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xf342b)\n    #12 0x5588b82a3ee1  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xf2ee1)\n    #13 0x5588b82a3dd5  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xf2dd5)\n    #14 0x5588b82b6cd9  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0x105cd9)\n    #15 0x5588b82b6c94  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0x105c94)\n    #16 0x5588b824edda  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0x9ddda)\n    #17 0x5588b81c45b7  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0x135b7)\n    #18 0x5588b824f7e4  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0x9e7e4)\n    #19 0x5588b827da53  
(/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xcca53)\n    #20 0x5588b82a4a18  (/example-rust/fuzz/target/x86_64-unknown-linux-gnu/debug/fuzz_parse_complex+0xf3a18)\n\nNOTE: libFuzzer has rudimentary signal handlers.\n      Combine libFuzzer with AddressSanitizer or similar for better crash reports.\nSUMMARY: libFuzzer: deadly signal\nMS: 2 ShuffleBytes-ChangeByte-; base unit: 89b92cdd9bcb9b861c47c0179eff7b3a9baafcde\n0x46,0x55,0x5a,0x5a,0x49,\nFUZZI\nartifact_prefix='/example-rust/fuzz/artifacts/fuzz_parse_complex/'; Test unit written to /example-rust/fuzz/artifacts/fuzz_parse_complex/crash-df779ced6b712c5fca247e465de2de474d1d23b9\nBase64: RlVaWkk=\n```\n\nThis finds the bug in a few seconds, prints the “FUZZI” string that triggers the vulnerability and saves it to a file.\n\n## Running cargo-fuzz from CI\n\nThe best way to integrate cargo-fuzz fuzzing with GitLab CI/CD is by adding an additional stage and step to your `.gitlab-ci.yml`. It is straightforward and [fully documented](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/#configuration).\n\n```\ninclude:\n  - template: Coverage-Fuzzing.gitlab-ci.yml\n\nmy_fuzz_target:\n  extends: .fuzz_base\n  script:\n    - apt-get update -qq && apt-get install -y -qq git make clang cmake\n    - export CC=`which clang`\n    - export CXX=`which clang++`\n    - cargo install cargo-fuzz\n    - cargo fuzz run fuzz_parse_complex -- -runs=0\n    - ./gitlab-cov-fuzz run --regression=$REGRESSION -- ./fuzz/target/x86_64-unknown-linux-gnu/release/fuzz_parse_complex\n```\n\nFor each fuzz target you will have to create a step which extends `.fuzz_base` that runs the following:\n\n- Builds the fuzz target.\n- Runs the fuzz target via gitlab-cov-fuzz CLI.\n- For `$CI_DEFAULT_BRANCH` (can be overridden by `$COV_FUZZING_BRANCH`) will run fully fledged fuzzing sessions. 
For everything else including MRs will run fuzzing regression with the accumulated corpus and fixed crashes.\n\nThis will run your fuzz tests in a blocking manner inside your pipeline. There is also a possibility to run longer fuzz sessions asynchronously, as described in the [docs](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/#continuous-fuzzing-long-running-async-fuzzing-jobs).\n\nCheck out our [full documentation](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/) and the [example repo](https://gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/rust-fuzzing-example) and try adding fuzz testing to your own repos!\n\nCover image by [Zsolt Palatinus](https://unsplash.com/@sunitalap) on [Unsplash](https://unsplash.com/)\n",[727,9],{"slug":2611,"featured":6,"template":684},"how-to-fuzz-rust-code","content:en-us:blog:how-to-fuzz-rust-code.yml","How To Fuzz Rust Code","en-us/blog/how-to-fuzz-rust-code.yml","en-us/blog/how-to-fuzz-rust-code",{"_path":2617,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2618,"content":2624,"config":2630,"_id":2632,"_type":13,"title":2633,"_source":15,"_file":2634,"_stem":2635,"_extension":18},"/en-us/blog/how-to-gitlab-single-sign-on-with-saml-scim-and-azures-entra-id",{"title":2619,"description":2620,"ogTitle":2619,"ogDescription":2620,"noIndex":6,"ogImage":2621,"ogUrl":2622,"ogSiteName":669,"ogType":670,"canonicalUrls":2622,"schema":2623},"How-to: GitLab Single Sign-on with SAML, SCIM, and Azure’s Entra ID","Follow this detailed walk-through of the configuration steps required to configure GitLab Single Sign-on, using Microsoft Azure’s Entra ID as the identity provider.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098047/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_1097303277_6gTk7M1DNx0tFuovupVFB1_1750098046895.jpg","https://about.gitlab.com/blog/how-to-gitlab-single-sign-on-with-saml-scim-and-azures-entra-id","\n                        {\n        
\"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How-to: GitLab Single Sign-on with SAML, SCIM, and Azure’s Entra ID\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Rob Jackson\"}],\n        \"datePublished\": \"2025-01-23\",\n      }",{"title":2619,"description":2620,"authors":2625,"heroImage":2621,"date":2627,"body":2628,"category":814,"tags":2629},[2626],"Rob Jackson","2025-01-23","As organizations increase in size, it becomes increasingly difficult and critical to ensure that the right team members have access to the right groups and projects within their development platform. GitLab offers some powerful methods to manage user access, especially now with [custom roles](https://about.gitlab.com/blog/how-to-tailor-gitlab-access-with-custom-roles/), but performing this at scale through a point-and-click user interface can be frustrating. However, all is not lost. You can use Security Assertion Markup Language (SAML) and System for Cross-domain Identity Management (SCIM) as a solution. (There are moments where I’m grateful for acronyms.) \n\nI was researching this topic for a particular customer, and walking through the GitLab documentation on the capabilities, but I never felt like I truly understood the integration. As is often the case, especially when dealing with integrating components, the knowledge from experience far outweighs that gained from reading or watching. In that light, I wanted to share my steps along this path and invite you all to join me. All you need is a free trial of Microsoft Azure Entra ID and GitLab Premium with a top-level group on GitLab.com.  \n\n**Note:** This exercise produces a working integration, however, for production environments there may be necessary deviations. For example, the user account email for the identity provider (Entra ID in this case) will likely not match your GitLab account email. 
\n\n## Creating the application in Entra ID\n\nFirst, go to the Entra ID admin center. Within the **Applications** area, select **Enterprise Applications**. We’re going to create a new application, and then create our own application.\n\n![Entra ID application creation flow](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image13_aHR0cHM6_1750098073325.png)\n\n\u003Ccenter>\u003Ci>Figure 1: Entra ID application creation flow\u003C/i>\u003C/center>\u003Cbr>\n\nWith our new application created, we can start configuring the single sign-on (SSO) parameters for our application. For this task, you may want to have side-by-side browser windows. One window on your Entra ID application, and another window on the SAML settings for your GitLab group. Those settings are located under **Settings**, then SAML SSO on the left side of your GitLab window, as shown in Figure 2. If you don’t see this option, you aren’t in the top-level group, don’t have permission to configure SAML, or don’t have GitLab Premium enabled for that group.\n\n![GitLab SAML configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750098073326.png)\n\n\u003Ccenter>\u003Ci>Figure 2: GitLab SAML configuration\u003C/i>\u003C/center>\u003Cbr>\n\nWithin your Entra ID interface, select **Single sign-on** and click the SAML card.\n\n![Entra ID SAML configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image24_aHR0cHM6_1750098073328.png)\n\n\u003Ccenter>\u003Ci>Figure 3: Entra ID SAML configuration\u003C/i>\u003C/center>\u003Cbr>\n\nWith the side-by-side view, the SAML configuration settings are on the left and the GitLab SSO settings on the right. 
\n\n![Side-by-side view of Entra ID and GitLab](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image16_aHR0cHM6_1750098073330.png)\n\n\u003Ccenter>\u003Ci>Figure 4: Side-by-side view of Entra ID and GitLab\u003C/i>\u003C/center>\u003Cbr>\n\nNow we can start copying and pasting parameters. Within the Entra ID interface, select **Edit** within the “Basic SAML Configuration” block. The parameter sources and destination are identified in the following table.\n\n| Source (GitLab) | Destination (Entra ID) |\n| :---------------- | :------: | \n| Identifier        |   Identifier (Entity ID)   | \n| Assertion consumer service URL |   Reply URL (Assertion Consumer Service URL)   | \n| GitLab single sign-on URL    |  Sign on URL (Optional)   | \n\n\u003Cbr>\nOnce completed, your side-by-side view should appear similar to the following (noting the URLs are unique to your environment).\u003Cbr>\n\n![Completed basic SAML SSO configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750098073332.png)\n\n\u003Ccenter>\u003Ci>Figure 5: Completed basic SAML SSO configuration\u003C/i>\u003C/center>\u003Cbr>\n\nClick **Save** within the Entra ID “Basic SAML Configuration” window to save your hard work thus far. Note: You may need to click on the “X” in the upper right of the “Basic SAML Configuration” window if it doesn’t close automatically. \n\nAfter this window closes, you may get a popup to test single sign-on with your application. Select **No, I’ll test later**, because we still have more work to do (there is always more work to do). \n\n## Configuring attributes and claims\n\nWithin the Entra ID user interface, look for the section for “Attributes and Claims,” and click the **Edit** pencil icon. 
The first thing we want to do is modify the Unique User identifier (Name ID) value, so click on that row and set the Source attribute to **user.objectid**. Additionally, the Name identifier format must be updated, and set to **Persistent**.\n\n![Configuring attributes and claims](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image14_aHR0cHM6_1750098073333.png)\n\n\u003Ccenter>\u003Ci>Figure 6: Configuring attributes and claims\u003C/i>\u003C/center>\u003Cbr>\n\nSave that claim configuration. Now we have additional claims to configure, but there are only three that we need here. So, feel free to go wild and delete those default four items under **Additional claims**, or you can edit the existing ones to match the table below. Note that these values (specifically, the Name) are case sensitive. \n\u003Cbr>\n\n| Name | Namespace | Source Attribute |\n| :---------------- | :------: | :------: | \n|emailaddress |http://schemas.microsoft.com/ws/2008/06/identity/claims | user.otheremail |\n| NameID | http://schemas.microsoft.com/ws/2008/06/identity/claims |user.objectid |\n\n\u003Cbr>\n\nThe resulting claims configuration should appear as follows. Note the use of **otheremail** for the “emailaddress” attribute. This was necessary for me as my primary email addresses within Entra ID are not the addresses used on GitLab.com. 
If you recall, when I set up my “user,\" I modified the contact information to include my gitlab.com email address as one of my “Other emails.” \n\n![Configuring the claims](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image21_aHR0cHM6_1750098073335.png)  \n\n\u003Ccenter>\u003Ci>Figure 7: Configuring the claims\u003C/i>\u003C/center>\u003Cbr>\n\nWith your attributes configured, under the Advance settings, enable **Include attribute name format** setting.\n\n![Advanced claims configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098073336.png)\n\n\u003Ccenter>\u003Ci>Figure 8: Advanced claims configuration\u003C/i>\u003C/center>\u003Cbr>\n\nYour \"Attributes and Claims\" window should now look similar to Figure 9 below.\n\n![Configured attributes and claims](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image18_aHR0cHM6_1750098073337.png)\n\n\u003Ccenter>\u003Ci>Figure 9: Configured attributes and claims\u003C/i>\u003C/center>\u003Cbr>\n\nIf you’re happy, or at least relatively content, with your configuration, click the “X” in the top right corner of the \"Attributes and Claims\" window to close it. \n\n## Configuring and assigning users\n\nNow that we have our application configured, we need to ensure that our users have been assigned to that application. I'll assume you’re working with a test instance that does not have the same email address as what is configured within your GitLab.com namespace. 
\n\nSo let’s go to the “Users and groups” within the Entra ID user interface for your configured application.\n\n![Managing application users and groups](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image17_aHR0cHM6_1750098073338.png)\n\n\u003Ccenter>\u003Ci>Figure 10: Managing application users and groups\u003C/i>\u003C/center>\u003Cbr>\n\nSelect **Add user/group**, and under the “Users and groups” where it says “None Selected,” click that text. Now you can select the user(s) to add to your application. These are the users that will be permitted to log into GitLab, authenticating themselves through Entra ID.\n\n![User selection](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image23_aHR0cHM6_1750098073339.png)\n\n\u003Ccenter>\u003Ci>Figure 11: User selection\u003C/i>\u003C/center>\u003Cbr>\n\nOnce selected, at the bottom of that page, click **Select**, and at the bottom of the next, select **Assign**. Now you should have a user assigned to your application.\n\n![User assigned to application](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image12_aHR0cHM6_1750098073340.png)\n\n\u003Ccenter>\u003Ci>Figure 12: User assigned to application\u003C/i>\u003C/center>\u003Cbr>\n\nNext, we need to ensure that the GitLab.com email address for that user is configured correctly. By clicking on the user itself, we can modify or configure some  additional information about that user. We can see below, the User principal name, which is based on an “onmicrosoft” domain. This is not the email address I have associated with my GitLab.com account. If you recall that we set the “Email address” attribute to “otheremail,” this is where we now configure that “other” email address. 
\n\n![User properties](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image20_aHR0cHM6_1750098073341.png)\n\n\u003Ccenter>\u003Ci>Figure 13: User properties\u003C/i>\u003C/center>\u003Cbr>\n\nClick the option to **Edit properties** for the user, and click on the **Contact Information** heading. Here we can add other emails – more specifically, the email address utilized for your GitLab.com account.  \n\n![Configuration of alternate email address](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image15_aHR0cHM6_1750098073342.png)\n\n\u003Ccenter>\u003Ci>Figure 14: Configuration of alternate email address\u003C/i>\u003C/center>\u003Cbr>\n\nThat should complete the configuration parameters that we need in Entra ID, but wait, there’s more. \n\nWithin the GitLab side now, you will need to configure a couple parameters. First, you might as well enable SAML for the group as that’s kind of a key piece here. GitLab offers some additional options to disable password authentication or enforce SSO to reduce the security risks within your application, but we’ll leave those unchecked for now. Similar to the table above, we’ll need a couple things from Entra ID to configure into GitLab. Please refer to the table below. \n\u003Cbr>\n\n| Source (Entra ID) | Destination (GitLab) | \n| :---------------- | :------: | \n|Login URL |Identity provider single sign-on URL |\n| Thumbprint | Certificate fingerprint|\n\n\u003Cbr>\n\n![GitLab SAML configuration from Entra ID](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image25_aHR0cHM6_1750098073343.png)\n\n\u003Ccenter>\u003Ci>Figure 15: GitLab SAML configuration from Entra ID\u003C/i>\u003C/center>\u003Cbr>\n\nLastly, you want to configure the default membership role for users logging in via SAML. 
Note that the access that you set for users here will cascade down to other groups and projects within your top-level group. Therefore, I would strongly recommend NOT setting this role to be “Owner.” Either “Guest” or “Minimal Access” would be acceptable options here, depending on the security posture of your organization. For more information about what these roles can and can not do, refer to the GitLab documentation on [Roles and Permissions](https://docs.gitlab.com/ee/user/permissions.html#roles). Now, save your work on the GitLab interface by clicking that beautiful blue **Save changes** button.\n\nWith your GitLab settings saved, you can now test your setup. I would encourage you to do this both through the “Verify SAML Configuration” on the GitLab system as well as with the Entra ID SSO \"Test\" button.\n\n## Troubleshooting SAML\n\nIn addition to the troubleshooting steps included within [GitLab documentation](https://docs.gitlab.com/ee/user/group/saml_sso/troubleshooting.html), I wanted to include a couple other items that I personally experienced. \n\nIf you get an error stating that the SAML reference did not contain an email address, check the Claim name for your email within the “Attributes and Claims” section within your Entra ID application. With GitLab 16.7, we added support for the “2008” attribute names, and at least for the email address setting, I found the default “xmlsoap” name for the email address claim to be a disappointing failure. \n\nAnother common error is “SAML Name ID and email address do not match your user account.” As you may suspect, this error is caused by a mismatch of the “NameID” and “emailaddress” attributes within the Entra ID application. This could be a misconfiguration of the “Attributes and Claims,” but it could also be that the properties of your test user don’t match your configuration. 
One helpful method to identify exactly what is coming through the SAML exchange is to use a SAML Tracer or SAML Message Decoder plugin with your web browser. \n\n## SCIM\n\nNow that you have SAML configured to enable users to log in via your Entra ID application, let’s make sure that people are assigned to the proper group(s) upon login. This can be incredibly helpful at scale, where instead of manually identifying which groups the particular users belong to, GitLab can learn this information from your identity application, Entra ID in this case. \n\nBecause SCIM utilizes groups to identify group membership, we need to create a group within Entra ID and add the relevant user(s) to the group. For this we’ll need the main administration menu for Entra ID. \n\n![Entra ID Group configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image19_aHR0cHM6_1750098073344.png)\n\n\u003Ccenter>\u003Ci>Figure 16: Entra ID Group configuration\u003C/i>\u003C/center>\u003Cbr>\n\nWe’re going to create a new group and assign our user(s) to that group. So click **New group** and configure a new group, which only requires you to configure a “Group name.” I used the default group type of “Security.” Leave the “Membership type” as “Assigned.” From this window, we can also assign the members.\n\n![Creating a New Entra ID Group](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098073345.png)\n\n\u003Ccenter>\u003Ci>Figure 17: Creating a New Entra ID Group\u003C/i>\u003C/center>\u003Cbr>\n\nOnce you’ve added the member(s), click **Create** in the bottom of that window. With your group created, and the user(s) assigned to the group, we can configure SCIM.\n\nImmediately below the SAML configuration section within the GitLab UI, you’ll see the “SCIM Token” area. 
Here you can generate a new token, and copy the endpoint URL, both of which will be useful for the next steps. Note that if you forget or already have a SCIM token, it can be reset. \n\n![SCIM token and endpoint within GitLab](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750098073345.png)\n\n\u003Ccenter>\u003Ci>Figure 18: SCIM token and endpoint within GitLab\u003C/i>\u003C/center>\u003Cbr>\n\nWith this information saved, return to your Entra ID application configuration. Within the left side menu, you’ll find the following: \n\n![Provisioning SCIM within Entra ID](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098073346.png)\n\n\u003Ccenter>\u003Ci>Figure 19: Provisioning SCIM within Entra ID\u003C/i>\u003C/center>\u003Cbr>\n\nWithin the \"Provisioning\" section, click on **New Configuration**, which opens a new page where that token and URL from GitLab will be used. \n\n![New provisioning configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image22_aHR0cHM6_1750098073348.png)\n\n\u003Ccenter>\u003Ci>Figure 20: New provisioning configuration\u003C/i>\u003C/center>\u003Cbr>\n\nFeel free to test the connection to ensure that you’ve configured the parameters properly. After testing, click on the **Create** button to establish the configuration and work on our mappings and settings. You may need to click the “X” in the top right corner of the panel to return to the overview configuration.\n\nExpand the “Mappings,” which includes two parameters; “Provision Microsoft Entra ID Groups” and “Provision Microsoft Entra ID Users.” SCIM group provisioning isn’t currently supported in GitLab, and although it doesn’t break the integration, keeping group provisioning enabled may cause negligible error messages. 
Therefore, we want to disable “Provision Microsoft Entra ID Groups,” so click that entry and set the “Enabled” field to “No.” \n\n![Provisioning attribute mapping](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098073349.png)\n\n\u003Ccenter>\u003Ci>Figure 21: Provisioning attribute mapping\u003C/i>\u003C/center>\u003Cbr>\n\nSave that configuration and select “Provision Microsoft Entra ID Users.” Validate that all three \"Target Object Actions\" are enabled, and then proceed to the “Attribute Mapping” section. Delete all existing mappings available to delete (I find this easier because attributes can’t be assigned twice), and then configure the Attribute Mappings per the following table:\n\n| customappsso Attribute (Destination) | Microsoft Entra ID Attribute (Source) | Matching Precedence | Mapping Type |\n| :---------------- | :------: | :------: | :------: | \n|externalID|objectId|1|Direct|\n|active|Switch([IsSoftDeleted], , \"False\", \"True\", \"True\", \"False\")| |Expression|\n|userName|mailNickname| |Direct|\n|name.formatted|displayName| |Direct|\n|Emails[type eq “other”].value|userPrincipalName||Direct|\n\n\u003Cbr>\n\n![Editing attributes](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image26_aHR0cHM6_1750098073349.png)\n\n\u003Ccenter>\u003Ci>Figure 22: Editing attributes\u003C/i>\u003C/center>\u003Cbr>\n\nAfter configuring all of the attribute mappings, the result should be similar to that found in Figure 23.\n\n![Completed attribute mapping configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098073350.png)\n\n\u003Ccenter>\u003Ci>Figure 23: Completed attribute mapping configuration\u003C/i>\u003C/center>\u003Cbr>\n\nNote the use of the “other” email within the **customappsso** attribute. 
This relates back to the “other” email we configured for the user back in the Entra ID user properties. In a production situation, the emails for the SSO account and the email address for the account within GitLab should match. \n\nWith your mapping complete (congratulations, Ptolemy), there are some advanced configuration settings necessary. Underneath the \"Attribute Mappings,\" click the box for “Show advanced options.” Once this box is checked, a link called “Edit attribute list for customappsso” is revealed.\n\n![Advanced attribute configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098073351.png)\n\n\u003Ccenter>\u003Ci>Figure 24: Advanced attribute configuration\u003C/i>\u003C/center>\u003Cbr>\n\nClick that link, and ensure that the Name “ID” is both “Primary Key” and “Required,” and that “externalID” is also “Required.” These attributes both refer to a unique user ID generated by Entra ID. However, although the “id” itself is required, it is not consistently provided within the API calls. Therefore, GitLab relies on the “externalID” to ensure the proper connection between the Entra ID and GitLab user accounts. \n\n![Required attribute list](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098073351.png)\n\n\u003Ccenter>\u003Ci>Figure 25: Required attribute list\u003C/i>\u003C/center>\u003Cbr>\n\nSave these settings, and then close the “Attribute Mapping” page with the “X” in the top right of the window. Return to the \"Application Provisioning\" section and click **Start provisioning**. \n\nWithin GitLab, we need to configure the association between the group we configured within Entra ID and the level of access we want those users to have within the GitLab top-level group. 
Note that this association can be configured on each sub-group within GitLab for more extensive provisioning, but within GitLab, permissions flow downhill. Whatever permission you set for a user at a top-level group, or sub-group, will cascade down to all projects and groups contained therein. \n\nWithin the \"Settings\" portion of the GitLab menu, select **SAML Group Links**. Here is where you’ll configure the group name and determine what access level, or role, members of the Entra ID Group will have within this particular GitLab Group.\n\n![GitLab SAML Group link](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image27_aHR0cHM6_1750098073352.png)\n\n\u003Ccenter>\u003Ci>Figure 26: GitLab SAML Group link\u003C/i>\u003C/center>\u003Cbr>\n\nAs shown in Figure 26, I’ve configured my membership to The Academy such that any users within the dev-security group from Entra ID  will be granted Developer access. Note that this is a slight variation of what a typical production environment would look like. In most instances, the user account within the identity provider (Entra ID, in this case) would match the user’s corporate account email (and we wouldn’t require “other” emails). When configured properly, if the user does not already have an account on GitLab, one will be created for them tied to their SSO account. \n\n![GitLab SSO tutorial - image11](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098073/Blog/Content%20Images/Blog/Content%20Images/image11_aHR0cHM6_1750098073352.png)\n\n\u003Ccenter>\u003Ci>Figure 27: SAML Group Links configured\u003C/i>\u003C/center>\u003Cbr>\n\nNow that you’ve completed the configuration, give it a try! From another browser, preferably in private mode to ignore any cookies or other yummy artifacts, paste the link for the GitLab SSO URL found in the GitLab SAML configurations. 
You should be prompted to log in with your Entra ID credentials and gain the proper access to your GitLab group! \n\nCongratulations, you’ve made it! I hope you’ve learned from and appreciate the work here, and we can all rejoice in the fact that the users within the Play-Dough app can now all properly authenticate, with the right permissions, to The Academy!\n\n> Don't have a GitLab account? [Sign up for a free, 60-day trial today](https://about.gitlab.com/free-trial/devsecops/).\n\n## Read more\n- [The ultimate guide to enabling SAML and SSO on GitLab.com](https://about.gitlab.com/blog/the-ultimate-guide-to-enabling-saml/)\n- [SAML SSO for GitLab.com groups documentation](https://docs.gitlab.com/ee/user/group/saml_sso/)",[9,814,478,835,1000],{"slug":2631,"featured":6,"template":684},"how-to-gitlab-single-sign-on-with-saml-scim-and-azures-entra-id","content:en-us:blog:how-to-gitlab-single-sign-on-with-saml-scim-and-azures-entra-id.yml","How To Gitlab Single Sign On With Saml Scim And Azures Entra Id","en-us/blog/how-to-gitlab-single-sign-on-with-saml-scim-and-azures-entra-id.yml","en-us/blog/how-to-gitlab-single-sign-on-with-saml-scim-and-azures-entra-id",{"_path":2637,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2638,"content":2644,"config":2650,"_id":2652,"_type":13,"title":2653,"_source":15,"_file":2654,"_stem":2655,"_extension":18},"/en-us/blog/how-to-harden-your-self-managed-gitlab-instance",{"title":2639,"description":2640,"ogTitle":2639,"ogDescription":2640,"noIndex":6,"ogImage":2641,"ogUrl":2642,"ogSiteName":669,"ogType":670,"canonicalUrls":2642,"schema":2643},"How to harden your self-managed GitLab instance","Learn seven easy steps to ensure your self-managed GitLab instance is as secure as possible.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664923/Blog/Hero%20Images/security-checklist.png","https://about.gitlab.com/blog/how-to-harden-your-self-managed-gitlab-instance","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to harden your self-managed GitLab instance\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ayoub Fandi\"}],\n        \"datePublished\": \"2023-05-23\",\n      }",{"title":2639,"description":2640,"authors":2645,"heroImage":2641,"date":2647,"body":2648,"category":769,"tags":2649},[2646],"Ayoub Fandi","2023-05-23","\n\"You are as secure as your weakest link\" is a well-understood phrase. If attackers find their way in, they will exploit any gaps in your security configurations. Hardening — the process of turning off unused features and making adjustments to settings that have security implications — is critical to limit your attack surface and reduce potential attack vectors.\n\nHardening ensures that your application (in this case GitLab) is as secure as it can be. The goal is simple: minimize risk while still preserving enough functionality for users to remain as productive as possible.\n\n## Guiding principles\nThese principles should guide the way you approach hardening. The security activities in the checklist below will tie back to one or another of these principles.\n\n### Layered security\nThe idea behind this is simple. If there are two ways to implement security, both ways should be implemented instead of just one. You can try to combine as many methods as possible. \n\nFor instance, if you are trying to secure access to your service, you could combine a complex password with hardware tokens and multifactor authentication. This approach is also called defense-in-depth.\n\n### No security through obscurity\nThe idea of hiding things works in many cases, but not so in the information security world. The premise that if something is hidden then it is more secure isn’t a viable approach today. \n\nCurrent scanning capabilities available to attackers shatter through obscurity. It is very easy for anyone to scan for open ports on a system. 
If you’ve swapped the SSH port TCP 22 to a different port, it would be picked up by a network scanning tool such as Nmap. \n\nAs GitLab is committed to transparency and open source, our approach is at odds with security through obscurity. The goal of security is to remove any security through obscurity. Our documentation is available to all and security best practices are clearly labeled and detailed.\n\n### Reducing the attack surface\nGitLab comprises numerous components, services, and dependencies. A critical aspect of security is: The more components you have, the more entryways attackers have.\n\nA good rule to keep in mind is to always disable services that you do not need to run the application. If there are features that aren’t used, disabling the related services will reduce the potential attack surface and make you more secure.\n\n## 7 steps to secure your self-managed instance\nLet’s go through seven easy steps to quickly harden your self-managed instance. These quick wins are great first steps towards securing your installation. Of course, refer to the [documentation](https://docs.gitlab.com/ee/security/) for additional details and further guidance on each section.\n\n### 1. Enable multi-factor authentication\n**Admin > Settings > General > Sign-in restrictions**\n\nEnsure that the checkbox next to _Two-factor authentication_ (2FA) is **checked**. The default setting for _Two-factor grace period_ is 48 hours. Adjust it to a lower value, such as **8 hours**.\n\nEnsure the checkbox next to _Enable admin mode_ is **checked** so that _Admin Mode_ is **active**. Users with Admin access will have to use additional authentication to perform administrative tasks. With 2FA enabled, this will require additional 2FA authentication by the user.\n\nFor more detailed information, refer to the documentation on [sign-in restrictions](https://docs.gitlab.com/ee/administration/settings/sign_in_restrictions.html).\n\n### 2. 
Enforce additional sign-up checks\n**Admin > Settings > General > Sign-up restrictions**\n\nNext to _Sign-up enabled_ ensure the checkbox is **unchecked**.\n\nUnder _Email confirmation settings_ ensure that **Hard** is selected. This will require the user to verify their email address during the sign-up process before their account is allowed access.\n\nThe _Minimum password length (number of characters)_ default setting of 12 characters is fine if additional authentication techniques are enforced. Options available for password complexity include _Require numbers_, _Require uppercase letters_, _Require lowercase letters_, and _Require symbols_. Check these boxes depending on your internal password standard (also check out [NIST SP 800-63B](https://pages.nist.gov/800-63-3/sp800-63b.html)).\n\nIf all users' email addresses are under a single domain (e.g., example.com), consider **adding it** to the _Allowed domains for sign-ups_. This will prevent those with email addresses associated with other domains from signing up.\nFor more detailed information, refer to the documentation on [sign-up restrictions](https://docs.gitlab.com/ee/administration/settings/sign_up_restrictions.html).\n\n### 3. Limit public visibility of your groups and projects\n**Admin > Settings > General > Visibility and access control**\n\nThe _Default project visibility_ and _Default group visibility_ for any newly created project or group should be set to **Private** by default. Only users that are granted specific access to a project or group will be able to access these resources. This can be adjusted later if necessary or when creating a new project or group. This ensures the default mode is secure to prevent accidental disclosure of information.\n\nFor more details on Visibility and access control [refer to the documentation](https://docs.gitlab.com/ee/administration/settings/visibility_and_access_controls.html).\n\n### 4. 
Harden your SSH settings\n**Admin > Settings > General > Visibility and access control**\n\nTypically, under _Enabled Git access protocols_ it will be set to _Both SSH and HTTP(S)_. If one of the Git protocols is not in use by your users, set it to **either** _Only SSH_ or _Only HTTP(S)_ accordingly. This will reduce the attack surface by limiting possibilities of compromise through an unused protocol.\nFor SSH key types, the most recommended algorithms to use are, in order: \n1. ED25519\n1. RSA \n1. ECDSA\n\nWhen configuring default types and lengths for SSH keys, keep in mind the list above.\n\nSpecific details on SSH settings can be found [here](https://docs.gitlab.com/ee/security/ssh_keys_restrictions.html) and [here](https://docs.gitlab.com/ee/administration/settings/visibility_and_access_controls.html#configure-enabled-git-access-protocols) for Git Access protocols.\n\n### 5. Review the account and limit settings\n**Admin > Settings > General > Account and limit settings**\n\nThis section allows you to limit the size of attachments, pushes, exports, imports, or repositories. As the specific size (in MB) will be tailored to your needs, review these settings and **set limits** in line with your internal policies.\n**Session duration for users** (in minutes) and **lifetime of SSH keys and all access tokens** (in days) can also be configured. Ensure the durations are in accordance with your internal policies and security best practices.\n\nReview the [documentation](https://docs.gitlab.com/ee/administration/settings/account_and_limit_settings.html) and apply changes that enforce your own policies.\n\n### 6. Secure your CI secrets\n**Admin > Settings > CI**\n\nPasswords, tokens, keys, and other secrets that require any level of protection should never be stored in plaintext. Instead, some type of **encrypted container technology (Secrets Manager)** should be implemented, such as GCP's Secret Manager, AWS Key Management Service (KMS), or HashiCorp Vault. 
For self-managed and standalone instances, HashiCorp Vault is **recommended**, and many GitLab features can take advantage of Vault and are well described in the [documentation](https://docs.gitlab.com/search/?query=vault).\n\nFor external communications, ensure any connectivity with external hosts in your CI/CD process is using encrypted channels. The use of TLS 1.2 or above is highly recommended and where possible mutual TLS will help things considerably.\nFor details on the use of external secrets for your CI/CD pipeline, check [here](https://docs.gitlab.com/ee/ci/secrets/) for actual examples and configuration guides.\n\n### 7. Protect your pipelines for all branches\n**Admin > Settings > CI**\n\nPipelines are a part of jobs that execute steps in stages to automate tasks on behalf of the users of a project. They are a central component of CI/CD.\nBy default, only the default branch gets a protected pipeline. Configure your other branches with the same level of security by following [these simple steps](https://docs.gitlab.com/ee/user/project/protected_branches.html#configure-a-protected-branch). This considerably hardens your pipelines.\n\nThe security features enabled by default on protected pipelines are listed in our [documentation](https://docs.gitlab.com/ee/ci/pipelines/#pipeline-security-on-protected-branches).\n\nOnce the pipeline has run, the code will be deployed in an environment. To limit interactions with that environment and to protect it from unauthorized users, you can set your key environments as protected.\n\nPrerequisites and full process are available in the [documentation](https://docs.gitlab.com/ee/ci/environments/protected_environments.html).\n\n### Learn more \nThis is a high-level overview of the different areas to focus on when hardening your self-managed GitLab instance. A blog post can’t include every single security recommendation. 
That’s why we maintain detailed [security documentation](https://docs.gitlab.com/ee/security/) on how to secure your installation. \n\nPlease refer to the documentation as the single source of truth on hardening. Hopefully, with the help of the action items highlighted above, you’ll harden your self-managed GitLab instance while preserving agility and speed.\n\nIf you want to learn more about how we do security **at GitLab**, review the [security section](https://about.gitlab.com/handbook/security/) of the handbook.\n",[814,9,835],{"slug":2651,"featured":6,"template":684},"how-to-harden-your-self-managed-gitlab-instance","content:en-us:blog:how-to-harden-your-self-managed-gitlab-instance.yml","How To Harden Your Self Managed Gitlab Instance","en-us/blog/how-to-harden-your-self-managed-gitlab-instance.yml","en-us/blog/how-to-harden-your-self-managed-gitlab-instance",{"_path":2657,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2658,"content":2664,"config":2671,"_id":2673,"_type":13,"title":2674,"_source":15,"_file":2675,"_stem":2676,"_extension":18},"/en-us/blog/how-to-harmonize-agile-sprints-with-product-roadmaps",{"title":2659,"description":2660,"ogTitle":2659,"ogDescription":2660,"noIndex":6,"ogImage":2661,"ogUrl":2662,"ogSiteName":669,"ogType":670,"canonicalUrls":2662,"schema":2663},"How to harmonize Agile sprints with product roadmaps","Apply best practices and GitLab features to your product journey, including creating centralized roadmaps, conducting review sessions, and tracking sprint lifecycles.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097231/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2821%29_2pdp2MNB7SoP4MhhiI1WIa_1750097230664.png","https://about.gitlab.com/blog/how-to-harmonize-agile-sprints-with-product-roadmaps","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to harmonize Agile sprints with product 
roadmaps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Amanda Rueda\"}],\n        \"datePublished\": \"2025-02-04\",\n      }",{"title":2659,"description":2660,"authors":2665,"heroImage":2661,"date":2667,"body":2668,"category":2669,"tags":2670},[2666],"Amanda Rueda","2025-02-04","Picture this: Product and Development teams are working in isolation. Product has created a 12-month roadmap and communicated it to internal stakeholders but didn't review it with their development team. Dev starts building the features planned for the upcoming sprint without considering the broader product roadmap, leading to missed opportunities to optimize timing, like running projects in parallel, accounting for team capacity, or building reusable APIs that could serve multiple initiatives. The lack of coordination results in inefficiencies and delayed value delivery.\n\nBalancing short-term wins with long-term vision isn’t easy; it requires clear communication, aligned priorities, and the right tools. In this guide, you'll learn strategies to help harmonize your Agile sprints with strategic roadmaps, tackle common challenges, and uncover actionable solutions tailored to your teams.\n\n## The importance of a single source of truth\n\nA consistent single source of truth for roadmaps with longer-range goals ensures you and your teams have access to up-to-date information about the bigger picture. 
In practice, this means maintaining a single, regularly updated platform where all roadmap details reside rather than keeping versions of the roadmap across multiple formats, each typically with slightly different information, causing a misaligned understanding of where you're headed.\n\n### Create a centralized roadmap\n\nBy creating a centralized roadmap for your team, you can:\n\n* communicate long-range strategy\n* minimize miscommunication\n* facilitate cross-functional alignment\n* quickly adapt to changes without losing context\n* self-serve information, reducing dependency on a single point of contact who retains the information\n\n***GitLab tip**: Use [epics](https://docs.gitlab.com/ee/user/group/epics/) and [Roadmap view](https://docs.gitlab.com/ee/user/group/roadmap/) to support both product planning and the transparent monitoring of delivery. The Roadmap view allows you to track progress, identify bottlenecks, and ensure alignment between high-level goals and sprint-level execution.*\n\n![Roadmap view for group](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097239/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097239117.png)\n\n## Collaborative roadmap review practices\n\nEstablish a regular review and sign-off process for roadmap updates that include Product, Engineering, and UX as part of the [product trio](https://www.producttalk.org/product-trio/). Collaborative reviews help you maintain alignment and minimize risk. At GitLab, I meet with my engineering manager and UX designer monthly to review and obtain sign-offs on any changes. 
We maintain a running sign-off on the roadmap wiki page itself that holds us accountable for keeping the schedule and provides transparency to the rest of the organization.\n\n#### How to extract value from review sessions\n\nTo make the most of the review session, aim for the following best practices:\n\n* Schedule routine reviews, monthly or quarterly, depending on how frequently the roadmap tends to fluctuate at your organization.\n* Validate alignment between product goals, UX lead time, and technical feasibility by discussing potential risks and dependencies upfront.\n  * Validate that the roadmap reflects current organizational business objectives.\n  * Ensure that design timelines are realistic and consider research or validation needs.\n  * Confirm that the roadmap allocates time for technical preparation, such as technical spikes or investigations, and ensures alignment with broader engineering priorities.\n* Optimize team utilization by considering capacity constraints and ensuring the sequence of work aligns with the team’s skill profile. This includes avoiding periods of underutilization or skill mismatches while effectively planning for situations like staffing level drops during holidays.\n* Right-size scope and set appropriate expectations about what can be achieved. We all want to do it all, but perfection is the enemy of progress so prioritize what truly matters to deliver incremental value efficiently. Seek opportunities to optimize by identifying ways to iterate or increase velocity, such as adjusting the order of work to reduce dependencies or leveraging reusable components to streamline development.\n* Encourage open dialogue about trade-offs and priorities to ensure all perspectives are considered. 
This collaborative approach helps identify creative solutions to challenges and builds consensus on the best path forward.\n\n***GitLab tip**: Use a [GitLab Wiki](https://docs.gitlab.com/ee/user/project/wiki/) page to complement the [Roadmap](https://docs.gitlab.com/ee/user/group/roadmap/) feature. In the wiki, you can include expanded context about your product roadmap, such as business rationale, links to user research, RICE scores, and details about dependencies or risks. Link directly to the roadmap for easy access, and leverage the upcoming discussion threads feature to encourage async collaboration and feedback from your team.*\n\n![PlanFlow product roadmap](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097239/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097239118.png)\n\n## Continuous direction validation and progress measurement\n\nThe goal of a product roadmap isn’t just to stay on track – it’s to deliver real value to your customers. To make space for sharing ongoing user feedback and behavioral data consider incorporating regular touchpoints across your product trio outside of sprint cycles. These sessions can be used to review insights, analyze trends, and ensure that the product roadmap continues to reflect the evolving needs of your users. By grounding roadmap updates using real user insights, you’re not only delivering on outcomes but also adapting to what really matters to your customers.\n\nThe value you ship might come in the form of improved usability, reduced technical debt, or entirely new capabilities. When the product trio is aligned on the roadmap vision, they’re also aligned on the outcomes you’re working to achieve.\n\nTo measure whether you’re on track to deliver those outcomes, you need to closely scope the intended results. Scope creep, like late user story additions, can delay your ability to ship value. 
Additionally, it’s important to identify work that was delivered but doesn’t align with the roadmap and understand why.\n\n### Sprint planning\n\nRemaining aligned with your product roadmap starts with thoughtful sprint planning. Here are some best practices to keep your team on track and focused on delivering value:\n\n* Clearly define, and narrowly scope, desired outcomes to ensure high confidence in delivery.\n* Identify potential late additions or adjustments that could delay delivery, and build in buffers to maintain focus.\n* Align on the sequence of work with your team to optimize for capacity, skill profiles, and reducing dependencies.\n* To maintain focus and improve confidence of delivering on time, avoid planning to 100% of the team’s capacity. Leave room (10%-20%) for unknowns or new discoveries that may surface during the sprint.\n\n### During the sprint\n\nStaying aligned with your roadmap during the sprint requires focus, communication, and constant evaluation. While delivering value is the goal, it’s equally important to ensure the work in progress aligns with the outcomes you’ve scoped and planned.\n\n* Continuously validate the work in progress against roadmap outcomes to ensure every sprint contributes to the bigger picture.\n* Encourage the team to regularly check if they’re still working toward the intended goals and outcomes.\n* Maintain open communication throughout the sprint. Use daily standups or async updates to surface risks, unplanned work, or dependencies early and adjust where necessary.\n* Be ruthless about protecting the sprint. While the urge to solve emerging problems is natural, unplanned work should be carefully evaluated to avoid derailing agreed-upon priorities.\n* Proactively manage scope creep. If new work surfaces mid-sprint, assess whether it aligns with the current roadmap outcome’s narrowly scoped focus. 
While additional ideas or features may align conceptually with the broader outcome, they may not fit into the immediate plan to deliver value as soon as possible. Document these suggestions and evaluate if they should be considered as part of future iterations or as a nice-to-have for the future, rather than introducing them into the current sprint and delaying agreed-upon priorities.\n\n### Sprint retros\n\nIn your sprint retrospectives, take time to reflect with your team on how well you are collectively progressing toward your desired outcomes. Questions to ask:\n\n* Did any unplanned work get introduced during the sprint that delayed your ability to deliver value? Identify why it happened and what adjustments can be made.\n* Did you deliver any work that deviated from the roadmap? Discuss what led to this and what you can learn for future planning.\n\nFrom sprint planning through retrospectives, staying focused on delivering tangible outcomes to users and stakeholders is a team responsibility. By aligning every step of the way, you ensure that your roadmap remains a clear guide for delivering value efficiently and consistently.\n\n***GitLab tip:** Use [burndown charts](https://docs.gitlab.com/ee/user/project/milestones/burndown_and_burnup_charts.html) to visualize progress and detect deviations early, helping your team stay focused on delivering outcomes.*\n\n![Burndown chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097239/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097239120.png)\n\n## Delivering roadmap outcomes with confidence\n\nHarmonizing Agile sprints with strategic roadmaps requires intentionality, team buy-in, and the proper tools. By creating a roadmap single source of truth, fostering collaborative reviews, and measuring progress towards outcomes, you can align execution with vision. 
With GitLab’s robust planning features, teams can turn challenges into opportunities for innovation and growth.\n\nReady to align your sprints with your strategic roadmap? [Start a free trial of GitLab](https://about.gitlab.com/free-trial/) today and explore the tools that can help you deliver outcomes with confidence.\n\n## Learn more\n\n- [Agile planning content hub](https://about.gitlab.com/blog/categories/agile-planning/)\n- [GitLab’s new Planner role for Agile planning teams](https://about.gitlab.com/blog/introducing-gitlabs-new-planner-role-for-agile-planning-teams/)\n- [Get to know the GitLab Wiki for effective knowledge management](https://about.gitlab.com/blog/get-to-know-the-gitlab-wiki-for-effective-knowledge-management/)","agile-planning",[980,9,940,478],{"slug":2672,"featured":90,"template":684},"how-to-harmonize-agile-sprints-with-product-roadmaps","content:en-us:blog:how-to-harmonize-agile-sprints-with-product-roadmaps.yml","How To Harmonize Agile Sprints With Product Roadmaps","en-us/blog/how-to-harmonize-agile-sprints-with-product-roadmaps.yml","en-us/blog/how-to-harmonize-agile-sprints-with-product-roadmaps",{"_path":2678,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2679,"content":2685,"config":2690,"_id":2692,"_type":13,"title":2693,"_source":15,"_file":2694,"_stem":2695,"_extension":18},"/en-us/blog/how-to-integrate-custom-security-scanners-into-gitlab",{"title":2680,"description":2681,"ogTitle":2680,"ogDescription":2681,"noIndex":6,"ogImage":2682,"ogUrl":2683,"ogSiteName":669,"ogType":670,"canonicalUrls":2683,"schema":2684},"How to integrate custom security scanners into GitLab","Learn how to extend the DevSecOps platform by adding custom security scanners to your workflows (includes an easy-to-follow 
tutorial).","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097082/Blog/Hero%20Images/Blog/Hero%20Images/securitycheck_securitycheck.png_1750097081856.png","https://about.gitlab.com/blog/how-to-integrate-custom-security-scanners-into-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to integrate custom security scanners into GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2024-02-27\",\n      }",{"title":2680,"description":2681,"authors":2686,"heroImage":2682,"date":2687,"body":2688,"category":814,"tags":2689},[1767],"2024-02-27","GitLab, the most comprehensive DevSecOps platform, has everything you need to plan, manage, build, deploy, secure, govern, and monitor your applications. However, there are instances where you may want to extend GitLab with third-party or custom tools. For example, you might need to migrate to a DevSecOps platform from separate solutions, evaluate third-party tools, or integrate proprietary or custom-built solutions into GitLab.\n\nHere's what is covered:\n- [GitLab DevSecOps platform extensibility](#gitlab-devsecops-platform-extensibility)\n- [GitLab security scanner integration](#gitlab-security-scanner-integration)\n  - [Merge request security widget](#merge-request-security-widget)\n  - [Pipeline Security section](#pipeline-security-section)\n  - [Vulnerability Report](#vulnerability-report)\n  - [Vulnerability pages](#vulnerability-pages)\n  - [Security dashboard](#security-dashboard)\n  - [Scan Result Policy integration](#scan-result-policy-integration)\n- [Tutorial: Integrating custom security scanners](#tutorial-integrating-custom-security-scanners)\n  - [Creating a custom security scanner](#creating-a-custom-security-scanner)\n  - [Integrating a custom security scanner with GitLab](#integrating-a-custom-security-scanner-with-gitlab)\n\n## GitLab DevSecOps 
platform extensibility\n\nGitLab can be extended in many ways to support enhanced functionality that your organization may require. Some common examples of these integrations include:\n\n- external application integrations such as Jenkins and Slack\n- external issue tracking integrations such as Bugzilla and Jira\n- external authentication provider integrations such as LDAP and SAML\n- external security scanner integrations such as Fortify and Checkmarx\n- ability to respond to leaked secrets such as AWS and GCP access keys\n\nYou can see all the available integrations in the [Integrate with GitLab documentation](https://docs.gitlab.com/ee/integration/). (Note: Not all integrations are listed in the documentation.)\n\n## GitLab security scanner integration\n\n[Third-party security scanners](https://docs.gitlab.com/ee/integration/#security-improvements) or [custom-built security scanners](https://gitlab.com/gitlab-de/tutorials/security-and-governance/custom-scanner-integration) can be integrated into GitLab to populate the merge request widget, Pipeline Security section, Vulnerability Report, vulnerability pages, Security dashboard, and Scan Result Policies. 
Let's review each integration.\n\n### Merge request security widget\n\nA merge request contains a security widget which displays a summary of the newly detected vulnerabilities.\n\n![integrating security scanners - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097089/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097088837.png)\n\n\u003Ccenter>\u003Ci>Merge request security widget\u003C/i>\u003C/center>\n\u003Cp>\u003C/p>\n\nWhen you click on a vulnerability, you will see a popup that contains the following information:\n- status\n- description\n- project\n- file\n- identifiers\n- severity\n- tool\n- scanner provider\n\n![integrating security scanners - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097089/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097088838.png)\n\n\u003Ccenter>\u003Ci>Actionable vulnerabilities with details\u003C/i>\u003C/center>\n\n\u003Cp>\u003C/p>\n\nThese vulnerabilities are also actionable, which means that they can either be dismissed or a confidential issue can be created.\n\nThe results of a custom scanner can be used to populate the security widget. The vulnerability data is populated from the JSON schema the scanner emits.\n\n### Pipeline Security section\n\nAll enabled security analyzers run in the pipeline and output their results as artifacts. These artifacts are processed, including deduplication, and the results are listed on the Pipeline Security tab. From here, you can also download the resulting JSON files.\n\n![integrating security scanners - image 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097089/Blog/Content%20Images/Blog/Content%20Images/image11_aHR0cHM6_1750097088840.png)\n\n\u003Ccenter>\u003Ci>Pipeline Security tab\u003C/i>\u003C/center>\n\u003Cp>\u003C/p>\n\nThe results of a custom scanner can be used to populate the Pipeline Security tab. 
The columns are filled in using the JSON schema the scanner emits.\n\n### Vulnerability Report\n\nThe Vulnerability Report provides information about vulnerabilities from scans of the default branch, including:\n\n- totals of vulnerabilities per severity level\n- filters for common vulnerability attributes\n- details of each vulnerability, presented in tabular layout\n\n![integrating security scanners - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097089/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750097088842.png)\n\n\u003Ccenter>\u003Ci>Vulnerability Report\u003C/i>\u003C/center>\n\u003Cp>\u003C/p>\n\nThe results of a custom scanner on the default branch can be used to populate the Vulnerability Report.\n\n### Vulnerability pages\n\nClicking on a vulnerability present within the Vulnerability Report takes you to its vulnerability page. Each vulnerability in a project has a vulnerability page that provides details such as:\n\n- description\n- when it was detected\n- current status\n- location detected\n- available actions\n- linked issues\n- actions log\n- solutions\n- identifier\n- training\n\nYou can use the data provided in the vulnerability page to triage a detected vulnerability as well as assist in its remediation.\n\n![integrating security scanners - image 5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097089/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097088844.png)\n\n\u003Ccenter>\u003Ci>Vulnerability page for secret detection vulnerability\u003C/i>\u003C/center>\n\u003Cp>\u003C/p>\n\nThe results of a custom scanner can be used to populate the vulnerability page. The vulnerability data is populated from the JSON schema the scanner emits.\n\n### Security dashboard\n\nSecurity dashboards are used to assess the security posture of your applications. 
GitLab provides you with a collection of metrics, ratings, and charts for the vulnerabilities detected by the security scanners run on your project. The security dashboard provides data such as:\n\n- vulnerability trends over a 30-, 60-, or 90-day timeframe for all projects in a group\n- a letter grade rating for each project based on vulnerability severity\n- the total number of vulnerabilities detected within the last 365 days and their severity levels\n\n![integrating security scanners - image 6](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097089/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750097088846.png)\n\n\u003Ccenter>\u003Ci>Group-level Security dashboard\u003C/i>\u003C/center>\n\u003Cp>\u003C/p>\n\nFrom the group-level Security dashboard you can click on a project to access its specific Security dashboard, which provides the 365-day view.\n\n![integrating security scanners - image 7](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097089/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097088847.png)\n\n\u003Ccenter>\u003Ci>Project-level Security dashboard\u003C/i>\u003C/center>\n\u003Cp>\u003C/p>\n\n### Scan Result Policy integration\n\nScan Result Policies are used to require approval based on the findings of one or more security scan jobs. This can prevent insecure code from being merged to production. Scan Result Policies are evaluated after a CI scanning job is fully executed, where policies are evaluated based on the job artifact reports that are published in the completed pipeline.\n\nFor example, you can create a Scan Result Policy that requires approval from project maintainers if a secret detection scanner finds any vulnerabilities. Here's how:\n\n1. On the left sidebar, select **Search or go to** and search for the project you wish to add a policy to.\n2. On the project left sidebar, go to **Secure > Policies**\n3. Select **New policy**\n4. 
In the **Scan result policy** section, select **Select policy**.\n5. Complete the fields:\n- Name: The name of the Policy\n- Description: The description of the Policy\n- Policy status: Whether it is enabled or not\n- Rules: The conditions that must be met for an action (require approval) to take place\n\n![integrating security scanners - image 8](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097089/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097088849.png)\n\u003Ccenter>\u003Ci>Scan Result Policy rules\u003C/i>\u003C/center>\n\u003Cp>\u003C/p>\n\n- Actions: The action to be taken whenever the conditions in the rules (defined vulnerabilities/licenses detected) are met\n\n![integrating security scanners - image 9](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097089/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750097088850.png)\n\n\u003Ccenter>\u003Ci>Scan Result Policy actions\u003C/i>\u003C/center>\n\u003Cp>\u003C/p>\n\n- Override project approval settings: If selected, the following choices will overwrite project settings but only affect the branches selected in the policy\n\n![integrating security scanners - image 11](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097089/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097088851.png)\n\n \u003Ccenter>\u003Ci>Scan Result Policy approval settings\u003C/i>\u003C/center>\n \u003Cp>\u003C/p>\n\n6. Press the \"Configure with a merge request\" button.\n\nOnce the Scan Result Policy has been merged, whenever you create a merge request and the criteria defined in the rules are met, then the defined action will be triggered. 
In this case, at least one approval will be required from a maintainer before the code can be merged.\n\n![integrated security scanner - image 10](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097089/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750097088852.png)\n\n\u003Ccenter>\u003Ci>Blocked merge request due to detected vulnerabilities\u003C/i>\u003C/center>\n\u003Cp>\u003C/p>\n\nThe results of a custom scanner can be fully integrated with Scan Result Policies. If the custom scanner detects a vulnerability, then approval will be required before the code can be merged. The scanner you select in a Scan Result Policy must be leveraging the appropriate JSON schema.\n\n## Tutorial: Integrating custom security scanners\n\nNow let’s get to the fun part – integrating a custom security scanner. In this tutorial, you will learn how to create a custom security scanner, as well as how to integrate it with GitLab. We will be leveraging the following projects:\n\n- [Fern Pattern Scanner](https://gitlab.com/gitlab-de/tutorials/security-and-governance/custom-scanner-integration/fern-pattern-scanner): Scans your files looking for specific patterns such as passwords, private keys, and social security numbers.\n- [Secret list](https://gitlab.com/gitlab-de/tutorials/security-and-governance/custom-scanner-integration/secret-list): Contains a list of user passwords, clients, and keys. This project is used to showcase how a custom security scanner can be integrated into GitLab.\n\nYou can watch the following video to see how the application was created and how it is used in detail:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/timMbl5SP-w?si=R2DKtZ5MmBR1rQFL\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Creating a custom security scanner\n\nNow let’s create a custom scanner that can be integrated into GitLab. 
Before a custom scanner can be fully integrated with GitLab, the scanner must:\n- scan a directory for defined patterns\n- emit a JSON following the appropriate schema\n- be containerized and accessible\n- provide a template to allow it to be run on another project\n\nWhen the [Fern Pattern scanner](https://gitlab.com/gitlab-de/tutorials/security-and-governance/custom-scanner-integration/fern-pattern-scanner) is run on a project using the provided template, it performs the following steps:\n1. Loads a set of rules which define patterns (regex) to detect.\n- Allow rules to be configurable to meet the changing needs of your organization.\n2. Scans files for defined patterns.\n3. Emits a JSON report following the Secret Detection schema.\n- Go templates are used in this project to create a JSON.\n- Depending on what your scanner will look for, make sure you use the appropriate schema.\n\nOnce the JSON report is loaded as an artifact into GitLab, it will populate the merge request widget, Vulnerability Report, vulnerability pages, Scan Result Policies, and Security dashboards as defined above.\n\n### Integrating a custom security scanner with GitLab\n\nOnce you have created your custom scanner that meets all the needs for integration, you can run it on GitLab.\n\nRunning a custom scanner is as easy as adding a template. We can see how the Fern Pattern scanner template is loaded by examining the `.gitlab-ci.yml` in the [Secret List](https://gitlab.com/gitlab-de/tutorials/security-and-governance/custom-scanner-integration/secret-list) project.\n\n1. Create a [.gitlab-ci.yml file](https://docs.gitlab.com/ee/ci/quick_start/#create-a-gitlab-ciyml-file) in the project you want the scanner to run on.\n2. Include the [Custom Scanner template](https://docs.gitlab.com/ee/ci/yaml/includes.html).\n    - You should also be able to configure the template with environment variables.\n3. 
Commit the file to the main branch.\n\nOnce the file has been committed, you can see that the custom scanner will run in your pipeline. Once the pipeline is complete, the scanner will populate all the areas defined above in the [GitLab security scanner integration](#gitlab-security-scanner-integration) section.\n\n## Read more\n\nCheck out these resources to learn more about GitLab and the other ways you can extend your DevSecOps platform:\n\n- [Security Scanner GitLab Integration](https://docs.gitlab.com/ee/development/integrations/secure.html)\n- [GitLab Partner Integrations](https://docs.gitlab.com/ee/integration/)\n- [Custom Security Scanner Projects Group](https://gitlab.com/gitlab-de/tutorials/security-and-governance/custom-scanner-integration)\n- [Automatic Response to a Secret Leak](https://docs.gitlab.com/ee/user/application_security/secret_detection/automatic_response.html)\n",[9,814,1041,478],{"slug":2691,"featured":90,"template":684},"how-to-integrate-custom-security-scanners-into-gitlab","content:en-us:blog:how-to-integrate-custom-security-scanners-into-gitlab.yml","How To Integrate Custom Security Scanners Into Gitlab","en-us/blog/how-to-integrate-custom-security-scanners-into-gitlab.yml","en-us/blog/how-to-integrate-custom-security-scanners-into-gitlab",{"_path":2697,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2698,"content":2704,"config":2710,"_id":2712,"_type":13,"title":2713,"_source":15,"_file":2714,"_stem":2715,"_extension":18},"/en-us/blog/how-to-leverage-gitlab-duo-for-enhanced-security-reporting",{"title":2699,"description":2700,"ogTitle":2699,"ogDescription":2700,"noIndex":6,"ogImage":2701,"ogUrl":2702,"ogSiteName":669,"ogType":670,"canonicalUrls":2702,"schema":2703},"How to leverage GitLab Duo for enhanced security reporting","Learn how GitLab Duo enables efficient, real-world security reporting for development, operations, and security 
teams.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098339/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%285%29_1iy516k40hwBDChKcUJ2zb_1750098339103.png","https://about.gitlab.com/blog/how-to-leverage-gitlab-duo-for-enhanced-security-reporting","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to leverage GitLab Duo for enhanced security reporting\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valentine Mairet\"},{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2024-12-03\",\n      }",{"title":2699,"description":2700,"authors":2705,"heroImage":2701,"date":2707,"body":2708,"category":702,"tags":2709},[2706,2332],"Valentine Mairet","2024-12-03","Good security reporting is crucial to maintain a good security posture because it provides detailed insights into incidents. With this information, organizations can better understand vulnerabilities, improve defenses, and prevent similar threats in the future. At GitLab, the [Security division](https://handbook.gitlab.com/handbook/security/#division-structure) has created use cases for GitLab Duo to improve reporting capabilities and enhance operational efficiency. \n\n## GitLab Duo’s security capabilities\n\nThe GitLab Security division uses GitLab’s built-in [incidents](https://docs.gitlab.com/ee/operations/incident_management/incidents.html) to manage and report on security incidents. Incidents are handled, documented, and resolved in GitLab, enabling the use of AI-driven [GitLab Duo](https://about.gitlab.com/gitlab-duo/) as an assistant when performing security operations like incident response. 
\n\nParticularly in incident analysis and reporting, GitLab Duo is highly efficient and accurate at creating proper documentation and is a great “pair programmer” when solving security incidents.\n\n## GitLab Duo features for security reporting\n\nGitLab Duo offers many features that enhance security reporting:\n\n- **Root Cause Analysis:** GitLab Duo can explain vulnerabilities and understand the context of an incident issue, making it an excellent assistant for performing root cause analyses of security incidents.\n- **Vulnerability Explanation:** Provides detailed insights into identified vulnerabilities, including potential exploitation methods and remediation steps. This feature aids developers and security analysts in understanding and addressing security issues effectively.\n- **Vulnerability Resolution:** Assists in fixing vulnerabilities by generating merge requests that address the identified issues, streamlining the remediation process.\n- **Code Explanation:** Helps users comprehend specific code segments by offering clear explanations, which is particularly useful when dealing with complex or unfamiliar codebases.\n- **Test Generation:** Facilitates early bug detection by generating tests for selected code, ensuring that security vulnerabilities are identified and addressed promptly.\n- **Refactor Code:** Suggests improvements or refactoring for selected code to enhance its quality and maintainability, contributing to a more secure codebase.\n- **Fix Code:** Identifies and rectifies quality issues such as bugs or typos in the selected code, helping maintain a robust and secure codebase.\n\n## Practical use cases\n\nFor the purpose of demonstrating practical use cases, the Security Incident Response Team created a dummy incident with the following limited information:\n\n![Incident report](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098346/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098346297.png)\n\nSeveral comments 
were added as the team would normally proceed:\n\n![Comments added to report](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098346/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098346297.png)\n\n### Incident reporting\n\nGitLab Duo is able to comprehensively keep track of all information inside an incident issue, including the issue description, comments, and labels. When handling security incidents, information often is all over the place and can change over time. It can easily get lost or overlooked. GitLab Duo is excellent at finding relevant information again to create accurate incident reports. \n\nNavigate to your incident issue and open GitLab Duo Chat. You can engineer your prompt so that GitLab Duo takes your exact reporting requirements into account such as what sections you need and how they should be filled out. Here is an example of the prompt we use at GitLab Security:\n\n> Required sections:\n> - Executive Summary - bottom-line-up-front that is adequate for an audience like senior leadership and CISO\n> - Mitigations & Remediations\n> - Scope & Impact (Environments, customers, team members)\n> - Cause\n> - Detection Capabilities\n>\n> Based on the issue and the required section, write the security incident report. Flag any missing information. \n\nBelow is GitLab Duo’s output that we can use to create our extensive incident report:\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175975/Blog/zxiafech08ck9gqjubaz.png\" alt=\"root cause analysis output\" width=\"400\" height=\"1200\">\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752175985/Blog/xjc9coah4oi7rz0daw2b.png\" alt=\"root cause analysis output\" width=\"400\" height=\"1200\">\n\n### Root Cause Analysis\n\nGitLab Duo is able to explain vulnerabilities and can understand the context of an incident issue. 
These capabilities make GitLab Duo an excellent assistant to perform root cause analyses of security incidents. \n\nAs seen above, GitLab Duo can write its own section on the cause of an incident based entirely on the issue contents. We can dig into this further with the assistance of GitLab Duo to identify several candidates for root cause(s).  \n\nOur prompt is built as such:\n\n> “The root cause of the incident was the exposure of a personal access token in CI job logs. The token was leaked in a project that was not included in the current list of projects for token detection, indicating a gap in the security monitoring process.”\n> Name a few potential root causes for a PAT finding itself in CI job logs.\n\nGitLab Duo’s response is the following:\n\n![GitLab Duo Root Cause Analysis response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098346/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098346298.png)\n\nGitLab Duo Root Cause Analysis can help security teams identify avenues worth investigating. It has a pretty good grasp on security concepts and how Root Cause Analysis is led in general, so it can be a great assistant for these situations where the exact root of the problem is unclear. \n\n> Learn more about [GitLab Duo Root Cause Analysis](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/).\n\n### On-demand updates\n\nSecurity incidents move fast and can be confusing, and information is constantly changing and shifting around. Higher severity incidents require a certain degree of thoroughness when it comes to reporting to senior leadership, especially when the audience is at the Director+ level. Engineers who are in the midst of a security incident can find it difficult to condense the information such that it is digestible for executives. \n\nWe saw above that GitLab Duo is capable of delivering a pretty good executive summary. 
When the incident is ongoing, we need to deliver regular updates to senior leadership on the incident status and next steps. GitLab Duo is a great help for that, as well. If information is scattered across the issue in the form of a description or comments, GitLab Duo can help reassemble this information into the “bottom-line-up-front,” or BLUF summary, we need for executive updates. \n\nWe’ve taken the same incident right before token revocation and asked GitLab Duo for a BLUF summary where the audience is the Director of Security Operations. \n\n![Executive Summary - GitLab Duo](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098346/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098346299.png)\n\n## Getting started with GitLab Duo for security\n\nGitLab Security has automated several parts of the reporting process with the help of GitLab Duo. But to get started, all you need is access to GitLab Duo Chat. GitLab Duo Chat can be your well-informed assistant for many security reporting cases and post-mortem analyses.\n\n## What’s next for GitLab Duo?\n\nGitLab is committed to continuously enhancing GitLab Duo’s capabilities. Future developments aim to integrate AI-driven features more deeply into the security workflow, providing proactive detection and resolution of vulnerabilities, streamlined incident management, and comprehensive reporting tools. 
These advancements will further empower security teams to maintain robust security postures and respond effectively to emerging threats.\n\n> [Try GitLab Duo for 60 days for free](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/)!\n",[704,9,814,680,478],{"slug":2711,"featured":6,"template":684},"how-to-leverage-gitlab-duo-for-enhanced-security-reporting","content:en-us:blog:how-to-leverage-gitlab-duo-for-enhanced-security-reporting.yml","How To Leverage Gitlab Duo For Enhanced Security Reporting","en-us/blog/how-to-leverage-gitlab-duo-for-enhanced-security-reporting.yml","en-us/blog/how-to-leverage-gitlab-duo-for-enhanced-security-reporting",{"_path":2717,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2718,"content":2724,"config":2731,"_id":2733,"_type":13,"title":2734,"_source":15,"_file":2735,"_stem":2736,"_extension":18},"/en-us/blog/how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two",{"title":2719,"description":2720,"ogTitle":2719,"ogDescription":2720,"noIndex":6,"ogImage":2721,"ogUrl":2722,"ogSiteName":669,"ogType":670,"canonicalUrls":2722,"schema":2723},"Bamboo Server to GitLab CI migration: Advanced techniques","A real-world look at how a migrated CI/CD infrastructure will work in GitLab CI.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679081/Blog/Hero%20Images/jenkins-migration.jpg","https://about.gitlab.com/blog/how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to migrate Atlassian's Bamboo server's CI/CD infrastructure to GitLab CI, part two\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ivan Lychev\"}],\n        \"datePublished\": \"2022-07-11\",\n      }",{"title":2725,"description":2720,"authors":2726,"heroImage":2721,"date":2728,"body":2729,"category":769,"tags":2730},"How to migrate Atlassian's Bamboo 
server's CI/CD infrastructure to GitLab CI, part two",[2727],"Ivan Lychev","2022-07-11","\nIn [part one of our series](/blog/migration-from-atlassian-bamboo-server-to-gitlab-ci/), I showed you how to migrate from Atlassian’s Bamboo Server to GitLab CI/CD. In this blog post we’re going to take a deep dive into how it works from a user’s perspective.\n\n## Get started\n\nYou’ve deployed the demo so it’s time to play with it to understand how it works.\n\nLet's imagine that one of the members of our project is John Doe. He is a software engineer responsible for developing some components (app1, app2, and app3) of the entire product, and he and his team would like to test those components in several combinations in myriad preview environments. So, what does that look like?\n\nFirst of all, let’s make some commits to the app1, app2, and app3 source code and get successful builds upon those commits.\n\nAfter that, we should create releases for those apps to be able to deploy them (as the deployment part of the apps CI config only shows when being triggered by a Git tag, i.e., a GitLab release). A release can be created by launching the last step (`manual-create-release`) in a commit pipeline. That would give us a new release with the ugly name containing the date and commit SHA in the patch part (in accord to `semver` scheme):\n\n\n\n![app_gitlab_release](https://about.gitlab.com/images/blogimages/app_gitlab_release.png)\n\nOn the `Tags` tab for the same app you now can see a deployment part of the pipeline has been triggered by the just created GitLab release but no actual environments to deploy are displayed (the `_` item in the `Deploy-nonprod` stage is not an env):\n\n\n![absent_envs](https://about.gitlab.com/images/blogimages/absent_envs.png)\n\n\n## Create an environment\n\nBut before that we have to briefly switch to another team who is responsible for preparing infrastructure IaC templates. 
Navigate to the `infra/environment-blueprints` project and pretend you are a member of that team doing their job. Namely, imagine you have just created some initial set of IaC files (they are already kindly prepared by me and present in the repository). You’ve tested them and now you feel that they are ready to be used by the other members of the project. You indicate such a readiness of a particular version of the IaC files by giving it a GitTag. Let’s put a tag like `v1.0.0` onto the HEAD version.\n\nYou will see how the tags are going to be used immediately. But first let's make some changes to the IaC files (e.g., add a new resource for some of the apps) and create a second Git tag, let's say `v1.1.0`. So, at this moment we have two versions of IaC templates (or `blueprints`) for our infrastructure - `v1.0.0` and `v1.1.0`.\n\n## Deploy an app into the environment\n\nNow we can return back to John and his team. We assume John is somehow informed that the version of the IaC templates he should use is `v1.0.0`. He wants to create a new preview environment out of the IaC templates of that version and put app1 and app2 into that env. \n\n(Here starts a description of how a user interoperates with the `infrastructure-set` Git repo. Notice that though the eventual idea is that it should be a Merge Request workflow – where you first get a Terraform plan within a Merge Request and can apply such a plan by merging the MR – which is widely advocated by GitLab but for the sake of simplicity here the MR workflow is not implemented and instead direct push commits into a branch are made).\n\nJohn wants the env to be named `preview-for-johns-team`. He creates a new branch in the `infrastructure-set` repo with that name and puts two files into it: a `version.txt` containing text `v1.0.0` and `apps.txt` with text `app1 app2` inside (the files format and its content is utterly simplified). 
\n\nThe `infrastructure-set` pipeline is triggered by the new branch and first generates a Terraform plan using the set of the Terraform files indicated by the tag specified in `version.txt`. John reviews the plan and wants to proceed with creating the environment by starting the `Terraform-apply` stage:\n\n\n![new_env_pipeline](https://about.gitlab.com/images/blogimages/new_env_pipeline.png)\n\n\n(To store the Terraform plan as artifact and Terraform state the embedded features of GitLab are leveraged - [Package Registry](https://docs.gitlab.com/ee/user/packages/package_registry/) and [Terraform HTTP back-end by GitLab](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform_state.html).)\n\nNow return to the `app1` project and rerun the pipeline for the app1 release we created previously to make it regenerate a list of environments to deploy. You should see that the `preview-for-johns-team` item has appeared in the list of the environments:\n\n\n![new_env_in_the_deploy_pipeline](https://about.gitlab.com/images/blogimages/new_env_in_the_deploy_pipeline.png)\n\n\nClick the arrow button to deploy. Then refer to the `Deployments/Environments` section of the `app1` project to ensure a new env with the app1 release deployed into it is displayed.\n\nWe have successfully created a new environment and deployed one of the apps into it!\n\nNotice that although the above describes how users manually deploy the applications into an environment after it has been created which doesn’t look really convenient, in a real life scenario we most likely would have some additional step in the `infrastructure-set` pipeline that runs after Terraform successfully finishes creating an environment and triggers deployment pipelines for all the applications specified in the `apps.txt`. 
In that situation, we would need to establish which versions of the applications should be deployed in such an automated manner - for example, those might be the latest versions available for each app or the versions currently deployed to production, etc.\n\n## Update an environment's infrastructure\n\nJohn got notified that a new version of the infrastructure templates is available (you remember that `v1.1.0` tag in the `environment-blueprints` repo?). His team wants to assess how app1 would work within the new conditions. They decide to update an existing env, namely `preview-for-johns-team`, for that purpose. \n\nJohn walks to the `preview-for-johns-team` branch of the `environment-set` repo and changes `version.txt`'s content from `v1.0.0` to `v1.1.0`. The branch pipeline gets triggered and first shows John a Terraform plan for a diff comparing the current state of the environment. After reviewing and accepting that diff, John proceeds with actual updating the environment by launching `Terraform-apply` stage. That's it!\n\n## Advantages and disadvantages\n\n### Virtues\n\nGiven that this case assumes migrating from some existing CI/CD infrastructure based on Atlassian Bamboo with a lot of users who are familiar with it, the proposed solution leverages the native capabilities of GitLab so that it mostly keeps the concepts and workflows used with Bamboo. This strategy makes the process of migration more smooth for the users.\n\nThe solution sticks to the GitOps tenets and empowers a project with all the virtues provided by Git. For example, it's usually easy to track any changes in the infrastructure back to Git repos. 
(It may not be so easy for the `environment-set` project where we do not have the infrastructure changes captured in Git commits, but in that case a task of finding differences between two states of a particular environment can be accomplished by fetching the two versions of the `environment-blueprints` repo corresponding to those states denoted in the `version.txt` and figuring out the differences by using any apt tool.)\n\nThe solution tends to support user self-service where most of the tasks of changing the infrastructure can be performed only by those familiar with the basics of Git and Terraform. As a result, it offloads the DevOps team from some part of the work and removes dependence on the Ops department which comes in really handy, especially for large-scale projects.\n\n### Shortcomings\n\nBesides the mentioned deficits which stem from the necessity to utterly simplify all the aspects of this demo to make it comprehensible and possible to prepare in a sensible amount of time, this solution possesses some shortcomings that have to be resolved by using external tools to make this solution appropriate for a real life usage.\n\nFor example, there is no way to have a central dashboard with an aggregated view of all the environments with all the apps and their versions deployed into the envs. 
This would require creating some custom SPA web app which would gather information from GitLab via API.\n",[108,773,9],{"slug":2732,"featured":6,"template":684},"how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two","content:en-us:blog:how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two.yml","How To Migrate Atlassians Bamboo Servers Ci Cd Infrastructure To Gitlab Ci Part Two","en-us/blog/how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two.yml","en-us/blog/how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two",{"_path":2738,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2739,"content":2745,"config":2750,"_id":2752,"_type":13,"title":2753,"_source":15,"_file":2754,"_stem":2755,"_extension":18},"/en-us/blog/how-to-protect-gitlab-connected-ssh-key-with-yubikey",{"title":2740,"description":2741,"ogTitle":2740,"ogDescription":2741,"noIndex":6,"ogImage":2742,"ogUrl":2743,"ogSiteName":669,"ogType":670,"canonicalUrls":2743,"schema":2744},"How to protect GitLab-connected SSH key with Yubikey","Add a layer of security to SSH keys by restricting physical access to YubiKey.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667202/Blog/Hero%20Images/gitlabultimatesecurity.jpg","https://about.gitlab.com/blog/how-to-protect-gitlab-connected-ssh-key-with-yubikey","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to protect GitLab-connected SSH key with Yubikey\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Brendan O'Leary\"}],\n        \"datePublished\": \"2022-03-03\",\n      }",{"title":2740,"description":2741,"authors":2746,"heroImage":2742,"date":2747,"body":2748,"category":769,"tags":2749},[1221],"2022-03-03","\n[Two-factor authentication](https://docs.gitlab.com/ee/security/two_factor_authentication.html) is one of the best defenses we 
have as individuals for protecting our accounts and credentials. But not all 2FA methods are created equal. For example, SMS is vulnerable to [SIM-swapping](https://www.ic3.gov/Media/Y2022/PSA220208) attacks and thus doesn't always provide the extra security we would like.\n\nIdeally, everything I  want to connect to would use 2FA with dedicated 2FA hardware. With GitLab 14.8, you can now use 2FA hardware to protect your SSH keys, as I explain below.  \n\n## 2FA and SSH keys\n\nState-of-the-art 2FA uses a physical hardware device – often FIDO/U2F hardware – to verify your presence at the time of authentication. This provides two distinct factors as a means of authentication: something you know (your username and password, for instance) with something you have (the physical device). I have two [YubiKey](https://www.yubico.com/works-with-yubikey/catalog/gitlab/) devices that I use for this purpose – one that is always in a safe in my house and one that I generally keep with me and the computer I'm using to do work. And I have everything I can secure using this method, including my GitLab account.\n\nAnd that does a great job of securing my access to GitLab, the application front end, and the ability to create and modify API keys. But there is another way to authenticate to a git server: SSH keys. In this case, there's only one factor of authorization because the SSH key is on my computer. So you can imagine how excited I was to hear that GitLab added support for `ecdsa-sk` and `ed25519-sk` key types in [GitLab 14.8](/releases/2022/02/22/gitlab-14-8-released/#support-for-ecdsa-sk-and-ed25519-sk-ssh-keys).\n\n### What are `ecdsa-sk` and `ed25519-sk`?\n\nThese two new keys are close to the existing `ecdsa` (Elliptic Curve Digital Signature Algorithm) and `ed25519` (Edwards Curve Digital Signature Algorithm) keys already supported. But that `-sk` at the end adds the ability to verify the key with a FIDO/U2F device. \"SK\" here stands for \"security key\". 
[OpenSSH 8.2](https://www.openssh.com/txt/release-8.2) added this key type to the supported keys it can generate, interacting with the hardware device to authenticate user presence before allowing the key to be used.\n\nHowever, I still had a few things to do to be ready to use the new keys.\n\n## Updating OpenSSH \nMy daily driver computer is a 2021 iMac running macOS Big Sur version 11.6. When I ran to it to generate this new key, I encountered a problem. Supposedly my version of SSH didn't support `-sk` keys!\n\nNow, your mileage may vary here, but I was able to update the version of SSH my Mac uses by default by first running `brew install openssh`, which successfully installed OpenSSH 8.8. But when I ran `ssh -V` it still showed version 8.1. So how could I get the system to use the newly installed OpenSSH instead?\n\nThe easiest way I could think of to do that was to put the Homebrew version first in the $PATH variable. But where is that path? Luckily, I was able to find that (`/opt/homebrew/opt/openssh`) by running this command:\n\n`brew --prefix openssh`\n\nOnce I updated my $PATH variable to have that at the front, I got the desired outcome:\n\n```bash\n$  which ssh\n/opt/homebrew/opt/openssh/bin/ssh\n\n$ ssh -V\nOpenSSH_8.8p1, OpenSSL 1.1.1m  14 Dec 2021\n```\n\n## Generating the key\nNow that I was using the correct version of SSH, I was able to create my `ecdsa-sk` key by running: \n\n```bash\nssh-keygen -t ecdsa-sk -f ~/.ssh/id_ecdsa_sk\n```\n\nNow, the specific device I have only supports ECDSA and not EdDSA, which is why I went with `ecdsa-sk`. 
There also is an option to have the key reside ON the device itself (if supported by your hardware) with the `-O resident` flag like this:\n\n```bash\n$ ssh-keygen -t ecdsa-sk -O resident -f ~/.ssh/id_ecdsa_sk\n\nEnter PIN for authenticator:\nYou may need to touch your authenticator (again) to authorize key generation.\nEnter passphrase (empty for no passphrase):\nEnter same passphrase again:\nYour identification has been saved in /Users/brendan/.ssh/id_ecdsa_sk\nYour public key has been saved in /Users/brendan/.ssh/id_ecdsa_sk.pub\n```\n\nGenerating a resident key will make sharing this key with a new computer if and when that happens much easier. If you have a YubiKey like me, you can set the FIDO2 PIN using the [YubiKey Manager](https://www.yubico.com/support/download/yubikey-manager/) software.\n\n## Adding the key to GitLab\nNow that I had the complex parts covered, all that was left was to add the key to GitLab. I went to my [SSH settings](https://gitlab.com/-/profile/keys) on GitLab.com and (bravely) deleted my old SSH key and added the `.pub` public part of my key to my profile.\n\nAnd it was that simple! 
Now every time I go to interact with GitLab.com, I'm prompted to confirm my presence by touching the YubiKey device attached to my computer:\n\n```bash\ngit clone git@gitlab.com:brendan/website.git\nCloning into 'website'...\nConfirm user presence for key ECDSA-SK SHA256:OZSZGwbnnbc...\n\ngit add .\ngit commit -m \"A new commit\"\ngit push\nConfirm user presence for key ECDSA-SK SHA256:OZSZGwbnnbc...\n```\n\nThat small but essential change gives me peace of mind that even if someone could somehow get my private SSH key, I would still be protected by having physical access restricted to my YubiKey.\n\n",[773,814,9],{"slug":2751,"featured":6,"template":684},"how-to-protect-gitlab-connected-ssh-key-with-yubikey","content:en-us:blog:how-to-protect-gitlab-connected-ssh-key-with-yubikey.yml","How To Protect Gitlab Connected Ssh Key With Yubikey","en-us/blog/how-to-protect-gitlab-connected-ssh-key-with-yubikey.yml","en-us/blog/how-to-protect-gitlab-connected-ssh-key-with-yubikey",{"_path":2757,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2758,"content":2763,"config":2769,"_id":2771,"_type":13,"title":2772,"_source":15,"_file":2773,"_stem":2774,"_extension":18},"/en-us/blog/how-to-read-open-source-finding-middleman-callbacks",{"title":2759,"description":2760,"ogTitle":2759,"ogDescription":2760,"noIndex":6,"ogImage":2540,"ogUrl":2761,"ogSiteName":669,"ogType":670,"canonicalUrls":2761,"schema":2762},"How to Read Open Source: Finding Middleman Callbacks","Open source empowers you to learn beyond existing documentation. Getting started can be confusing. 
This is a demonstration finding unlisted Middleman callbacks.","https://about.gitlab.com/blog/how-to-read-open-source-finding-middleman-callbacks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to Read Open Source: Finding Middleman Callbacks\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tyler Williams\"}],\n        \"datePublished\": \"2021-01-20\",\n      }",{"title":2759,"description":2760,"authors":2764,"heroImage":2540,"date":2766,"body":2767,"category":2549,"tags":2768},[2765],"Tyler Williams","2021-01-20","        \n\n{::options parse_block_html=\"true\" /}\n\n\n\n## Why read open source? \n\nWhen folks write about open source, I think there is a strong emphasis on contributing to open source projects, which makes sense. Many software professionals are excited to give back to the community. \n\nBut beyond adding to your favorite project, open source philosophies have a number of other benefits in our daily lives. In particular, I love open source because it allows me to learn more about my tools when the documentation is out of date, incomplete, or leaves me with additional questions from my own curiosity. \n\nThis happened to me recently when I was working on [https://about.gitlab.com](https://about.gitlab.com), a static site built with [Middleman](https://middlemanapp.com/). I needed to find a more comprehensive list of available [callbacks](https://middlemanapp.com/advanced/custom-extensions/#callbacks) in the Middleman lifecycle.\n\nI hope this blog post is helpful if you're looking for existing Middleman callbacks, or if you're getting started reading through the source code of your favorite open source tools. \n\n## The task at hand\n\nIf you're getting started reading open source, I find it helps to have a specific task. Any unfamiliar codebase can be challenging to navigate. Having a goal in mind narrows your focus. 
Here was my task for Middleman:\n\nI recently created a merge request to [add Webpack devServer to the local development environment](https://gitlab.com/gitlab-com/www-gitlab-com/-/merge_requests/71845). I had to modify some existing behavior of our Middleman preview server and wanted to use one of the lifecycle callbacks to modify the preview server's log output. \n\nHowever, the Middleman documentation does not currently list all available callbacks, nor where they happen in the lifecycle. The [extension docs](https://middlemanapp.com/advanced/custom-extensions/) say: \n\n> Middleman extensions are Ruby classes which can hook into various points of the Middleman system, add new features and manipulate content. This guide explains some of what's available, but you should read the Middleman source and the source of plugins like middleman-blog to discover all the hooks and extension points.\n\nI took them up on their advice and read through the [Middleman source code](https://github.com/middleman/middleman) to find the available callbacks. Here's what I found, and how I found them.\n\n## Callbacks available in Middleman Core\n\n1. `initialized`: called before config is parsed, and before extensions are registered\n1. `configure`: called to run any `configure` blocks (once for current environment, again for the current mode)\n1. `before_extensions`: called before the `ExtensionManager` is instantiated\n1. `before_instance_block`: called before any blocks are passed to the configuration context\n1. `before_sitemap`: called before the `SiteMap::Store` is instantiated, which initializes the sitemap\n1. `before_configuration`: called before configuration is parsed, mostly used for extensions\n1. `after_configuration`: called after extensions have worked\n1. `after_configuration_eval`: called after the configuration is parsed, before the pre-extension callback\n1. `ready`: called when everything is stable\n1. `before_build`: called before the site build process runs\n1. 
`after_build`: called after the builder is complete\n1. `before_shutdown`: called in the `shutdown!` method, which lets users know the application is shutting down\n1. `before`: called before Rack requests\n1. `before_server`: called before the `PreviewServer` is created\n1. `reload`: called before the new application is initialized on a reload event\n\n## How to find Middleman Callbacks\n\n1. [Clone](https://docs.gitlab.com/ee/gitlab-basics/start-using-git.html) the [Middleman repository](https://github.com/middleman/middleman) to your local machine. \n1. Open the Middleman directory in a text editor, IDE, or any tool that allows you to easily search through a folder's files for specific strings. \n1. Start with the [existing documentation](https://middlemanapp.com/advanced/custom-extensions/#callbacks) if it exists. Middleman lists the names of a few callbacks. You can search the directory for the string `after_configuration`.\n1. In this instance, you should be able to find that string used like so: `execute_callbacks(:after_configuration)`.\n1. You may also find it listed with other similar symbols in `middleman-core/lib/middleman-core/application.rb`. \n1. Read through the related blocks of code around these search results, you'll get some additional context for how they work, and you may find additional search terms that will be helpful.\n1. In the case of Middleman callbacks, you can continue to search for combinations of the `execute_callbacks` method with any callback listed in `middleman-core/lib/middleman-core/application.rb` to find where and when specific callbacks are used.\n\n## Contribute!\n\nWith this in-depth knowledge of your tool, you can be more productive at your day-to-day work, and you can contribute back to open source more easily. 
\n\nIn my case, I was able to identify the best callback for my usecase, and I now plan to check in with the Middleman team and ask if they would accept a contribution to their documentation with this information so it's easier for other folks to find in the future.\n",[727,9,940],{"slug":2770,"featured":6,"template":684},"how-to-read-open-source-finding-middleman-callbacks","content:en-us:blog:how-to-read-open-source-finding-middleman-callbacks.yml","How To Read Open Source Finding Middleman Callbacks","en-us/blog/how-to-read-open-source-finding-middleman-callbacks.yml","en-us/blog/how-to-read-open-source-finding-middleman-callbacks",{"_path":2776,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2777,"content":2783,"config":2788,"_id":2790,"_type":13,"title":2791,"_source":15,"_file":2792,"_stem":2793,"_extension":18},"/en-us/blog/how-to-scan-a-full-commit-history-to-detect-sensitive-secrets",{"title":2778,"description":2779,"ogTitle":2778,"ogDescription":2779,"noIndex":6,"ogImage":2780,"ogUrl":2781,"ogSiteName":669,"ogType":670,"canonicalUrls":2781,"schema":2782},"How to scan a full commit history to detect sensitive secrets","Use GitLab Secret Detection to scan a repository's commit history, including branches. 
View results within the GitLab UI with just a few lines of code added to a pipeline file.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097948/Blog/Hero%20Images/Blog/Hero%20Images/REFERENCE%20-%20display%20preview%20for%20blog%20images%20%281%29_2XDPsbkjQ3o6tcdom6IGxI_1750097948673.png","https://about.gitlab.com/blog/how-to-scan-a-full-commit-history-to-detect-sensitive-secrets","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to scan a full commit history to detect sensitive secrets\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Noah Ing\"},{\"@type\":\"Person\",\"name\":\"Jerez Solis\"}],\n        \"datePublished\": \"2025-02-06\",\n      }",{"title":2778,"description":2779,"authors":2784,"heroImage":2780,"date":2785,"body":2786,"category":814,"tags":2787},[831,1861],"2025-02-06","Secrets left exposed in outdated repositories pose significant risk for data breaches. For example, a still-active secret key can be exposed, leaving it vulnerable to exploitation. Secrets include access keys, API tokens, private keys, and other sensitive values. \n\nIn this article, you'll learn how to use GitLab Secret Detection to scan a repository’s full commit history, including all branches, to detect sensitive secrets. In addition, you will discover how to view the results directly within the GitLab UI without the need for any integration. All it takes is just a couple of lines of code in your `.gitlab-ci.yml` pipeline file. \n\n## Scan every corner of your repository\n\nWe will use the sample repository shown in the screenshot below as an example. To keep things simple, there is only a `README.md` file present in the default branch of this repository. 
\n\n![Sample repository to scan](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097956/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097955851.png)\n\nAt first glance, it may seem like the repository is empty and that there are probably no sensitive secrets in this repository. But what we are looking at is only the state of the default branch, which is the main branch in this example. There could be feature branches in this repository created weeks, months, or years ago with sensitive secrets. It is also possible that a file with a secret was accidentally pushed to the repo and then deleted right after. However, it likely was not deleted correctly and is still in the commit history.\n\nWe are going to enable GitLab Secret Detection scanner and set the `SECRET_DETECTION_HISTORIC_SCAN` variable to **true** so that the content of all branches in the repository is scanned.\n\n![Enable GitLab Secret Detection variable to true](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097956/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097955853.png)\n\n```\ninclude:\n  - template: Jobs/Secret-Detection.gitlab-ci.yml\nsecret_detection:\n  variables:\n    SECRET_DETECTION_HISTORIC_SCAN: \"true\"\n```\n\nBy setting the `SECRET_DETECTION_HISTORIC_SCAN` variable to **true**, GitLab Secret Detection looks into every branch and commit of your repository. It ensures that no sensitive information — whether from a feature branch or an old commit — is left unchecked.\n\n## Results of the scan\n\nTwo sensitive secrets were identified in the repository. One is a password in a `.env` file that was deleted from the repository, but the commit containing it was not removed from the git history. The other is an AWS Access Token found in a feature branch. These exposed secrets could compromise the organization’s security. 
\n\n![AWS Access Token screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097956/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097955855.png)\n\nYou can click on the AWS Access Token result to see more details, including the file location. You can also create a GitLab issue to triage the vulnerability with one click. If you’re using the Jira integration, you can create a Jira ticket directly from the vulnerability page as well.\n\n## Why scanning for secrets matters\n\nAnyone with access to the repository can misuse the secret to gain unauthorized access to private resources and sensitive data. \n\nIn addition to scanning a repository’s full commit history across all branches, GitLab Secret Detection also helps you take a multilayered approach to detecting secrets:\n\n* [Secret push protection](https://docs.gitlab.com/ee/user/application_security/secret_detection/secret_push_protection/index.html) - scans commits for secrets during a push and blocks it if secrets are detected, unless skipped, reducing the risk of leaks.  \n* [Pipeline secret detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/pipeline/index.html) - scans files after they’ve been committed and pushed to a GitLab repository.\n* [Client-side secret detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/client/index.html) - scans comments and descriptions in issues and merge requests for secrets before they're saved to GitLab.  * [Automatic response to leaked secrets](https://docs.gitlab.com/ee/user/application_security/secret_detection/automatic_response.html) - automatically revokes certain types of leaked secrets and notifies the partner that issued the secret. \n\nYou can adjust pipeline secret detection to suit your needs by modifying, extending, or replacing the default ruleset. 
For instance, you can define [custom rules](https://docs.gitlab.com/ee/user/application_security/secret_detection/pipeline/index.html#customize-analyzer-rulesets) using regex patterns to detect sensitive data like credit card numbers, phone numbers, or other information specific to your organization.\n\n## Try GitLab Secret Detection\n\n1. [Enable](https://docs.gitlab.com/ee/user/application_security/secret_detection/pipeline/#enable-the-analyzer) Secret Detection in your GitLab pipeline.  \n2. Set `SECRET_DETECTION_HISTORIC_SCAN: true`.  \n3. Push and trigger a pipeline to scan all branches and commits.\n\nGitLab makes securing your code simple and comprehensive. Don’t let an old branch or commit compromise your security — give historical scans a try today!\n\n> #### [Sign up for a free 60-day trial of GitLab Ultimate](https://about.gitlab.com/free-trial/) to get started with security scanners like Secret Detection.",[1000,9,478,680],{"slug":2789,"featured":6,"template":684},"how-to-scan-a-full-commit-history-to-detect-sensitive-secrets","content:en-us:blog:how-to-scan-a-full-commit-history-to-detect-sensitive-secrets.yml","How To Scan A Full Commit History To Detect Sensitive Secrets","en-us/blog/how-to-scan-a-full-commit-history-to-detect-sensitive-secrets.yml","en-us/blog/how-to-scan-a-full-commit-history-to-detect-sensitive-secrets",{"_path":2795,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2796,"content":2802,"config":2807,"_id":2809,"_type":13,"title":2810,"_source":15,"_file":2811,"_stem":2812,"_extension":18},"/en-us/blog/how-to-secure-cloud-run-deployment-with-auto-devops",{"title":2797,"description":2798,"ogTitle":2797,"ogDescription":2798,"noIndex":6,"ogImage":2799,"ogUrl":2800,"ogSiteName":669,"ogType":670,"canonicalUrls":2800,"schema":2801},"How to secure Google Cloud Run deployment with GitLab Auto DevOps","This tutorial will help teams speed development, improve security, and harness the power of serverless 
technology.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682898/Blog/Hero%20Images/cloud-security.png","https://about.gitlab.com/blog/how-to-secure-cloud-run-deployment-with-auto-devops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to secure Google Cloud Run deployment with GitLab Auto DevOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Regnard Raquedan\"}],\n        \"datePublished\": \"2023-08-21\",\n      }",{"title":2797,"description":2798,"authors":2803,"heroImage":2799,"date":2804,"body":2805,"category":769,"tags":2806},[1841],"2023-08-21","\nTeams looking for efficiency often look to GitLab and serverless platforms to minimize management overhead and speed deployment times. GitLab's tight integration with [Google Cloud Run](https://cloud.google.com/run) means that teams can take advantage of the industry-leading DevSecOps platform to deliver container-based applications securely and efficiently.\n\nThis tutorial will show you how to deploy applications to Cloud Run using GitLab [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/), a feature that lets developers quickly use CI/CD pipelines via pre-built templates. 
This approach can accelerate testing and deployment because stages and jobs are already pre-configured.\n\n## Prerequisites\nBefore you begin, make sure you have the following:\n- a Google Cloud project with Cloud Run and Cloud Build APIs enabled\n- a Google Cloud service account with Cloud Run Admin, Cloud Build Service Agent, Service Account User, and Project Viewer permissions\n- a GitLab project containing your application code\n\n### Demo walkthrough\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/hIFagDyo3f8\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share\" allowfullscreen>\u003C/iframe>\n\n\n**Step 1:** Configure Google Cloud credentials\n\nTo start, use the Google Cloud service account with the necessary permissions. Once you have the service account, export its key to a JSON file and encode it using base64.\n\n**Step 2:** Add Auto DevOps to your GitLab project\n\nNavigate to your GitLab project and create a new file at the root called \"gitlab-ci.yml.\" Add the following lines of code to include the Auto DevOps template, which automatically configures your pipeline based on project settings and configuration:\n\n```\ninclude:\n  - template: Auto-DevOps.gitlab-ci.yml\n```\n\nCommit the changes to your project.\n\n**Step 3:** Configure environment variables\n\nAdd the following environment variables to your GitLab project:\n\n* `BASE64_GOOGLE_CLOUD_CREDENTIALS`: The base64-encoded JSON file containing your service account key. Make sure to mask this variable.\n* `PROJECT_ID`: The Google Cloud project ID.\n* `SERVICE_ID`: The service ID that will be used for Cloud Run. 
For this tutorial, we'll use \"nodejs\" as our service ID.\n\n**Step 4:** Configure the CI/CD pipeline\n\nModify the \"gitlab-ci.yml\" file to add Google Cloud SDK, gcloud commands, Docker, and the necessary configurations for deploying your application to Cloud Run. \n\n```\nimage: google/cloud-sdk:latest\n```\n\nAdditionally, use Google Cloud Build to generate the container image required for deployment. Commit the changes to your project.\n\n```\ndeploy:\n  stage: deploy\n  script:\n    - export GOOGLE_CLOUD_CREDENTIALS=$(echo $BASE64_GOOGLE_CLOUD_CREDENTIALS | base64 -d)\n    - echo $GOOGLE_CLOUD_CREDENTIALS > service-account-key.json \n    - gcloud auth activate-service-account --key-file service-account-key.json \n    - gcloud config set project $PROJECT_ID \n    - gcloud auth configure-docker\n    - gcloud builds submit --tag gcr.io/$PROJECT_ID/$SERVICE_ID\n    - gcloud run deploy $SERVICE_ID --image gcr.io/$PROJECT_ID/$SERVICE_ID --region=us-central1 --platform managed --allow-unauthenticated \n```\n\n**Step 5:** Finalize the DAST stage\n\nOnce your application has been deployed to Cloud Run, complete the dynamic application security testing ([DAST](https://docs.gitlab.com/ee/user/application_security/dast/)) stage in the CI/CD pipeline to ensure your application is more secure. Add the Cloud Run URL to your \"gitlab-ci.yml\" file and enable full_scan and browser_scan options. Commit the changes to your project.\n\n```\nvariables:\n  DAST_WEBSITE: \u003Cproject URL>\n  DAST_FULL_SCAN_ENABLED: \"true\"\n  DAST_BROWSER_SCAN: \"true\" \n```\n\nIn this tutorial, we successfully deployed a Cloud Run application using GitLab's Auto DevOps. 
By following these steps, you can enjoy faster development and improved security, and harness the power of serverless technology.\n",[9,1865,814],{"slug":2808,"featured":6,"template":684},"how-to-secure-cloud-run-deployment-with-auto-devops","content:en-us:blog:how-to-secure-cloud-run-deployment-with-auto-devops.yml","How To Secure Cloud Run Deployment With Auto Devops","en-us/blog/how-to-secure-cloud-run-deployment-with-auto-devops.yml","en-us/blog/how-to-secure-cloud-run-deployment-with-auto-devops",{"_path":2814,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2815,"content":2821,"config":2827,"_id":2829,"_type":13,"title":2830,"_source":15,"_file":2831,"_stem":2832,"_extension":18},"/en-us/blog/how-to-stream-logs-through-the-gitlab-dashboard-for-kubernetes",{"title":2816,"description":2817,"ogTitle":2816,"ogDescription":2817,"noIndex":6,"ogImage":2818,"ogUrl":2819,"ogSiteName":669,"ogType":670,"canonicalUrls":2819,"schema":2820},"How to stream logs through the GitLab Dashboard for Kubernetes","In GitLab 17.2, users can now view Kubernetes pod and container logs directly via the GitLab UI. 
This tutorial shows how to use this new feature to simplify monitoring Kubernetes infrastructure.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662245/Blog/Hero%20Images/blog-image-template-1800x945__16_.png","https://about.gitlab.com/blog/how-to-stream-logs-through-the-gitlab-dashboard-for-kubernetes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to stream logs through the GitLab Dashboard for Kubernetes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Daniel Helfand\"}],\n        \"datePublished\": \"2024-08-19\",\n      }",{"title":2816,"description":2817,"authors":2822,"heroImage":2818,"date":2824,"body":2825,"category":769,"tags":2826},[2823],"Daniel Helfand","2024-08-19","Developers are context-switching more frequently, needing to understand and use multiple tools to accomplish complex tasks. These tools all have different user experiences and often do not present all the information needed to successfully develop, troubleshoot, and ship critical features. It is challenging enough to release and monitor software changes without also needing to understand so many tools.\n\nWith the addition of [pod log streaming through the GitLab Dashboard for Kubernetes in v17.2](https://about.gitlab.com/releases/2024/07/18/gitlab-17-2-released/#log-streaming-for-kubernetes-pods-and-containers), developers can go straight from a merge request review to watching a deployment rolled out to Kubernetes. This new feature will:\n- allow developers to avoid switching tooling\n- ease the process of troubleshooting and monitoring deployments and post-deployment application health\n- strengthen [GitOps workflows](https://docs.gitlab.com/ee/user/clusters/agent/gitops.html) to easily manage application and infrastructure changes\n\nThe new feature allows GitLab users to view the logs of pods and containers directly via the GitLab UI. 
In previous versions of GitLab, users could configure a GitLab project to view pods deployed to certain namespaces on an associated cluster. This new feature allows users to further monitor workloads running on Kubernetes without needing to switch to another tool.\n\nIn the sections below, you will learn how to use this new feature by adding a Kubernetes cluster to a GitLab project, deploying a sample workload to a cluster, and viewing the logs of this workload running on a cluster. \n\n> Need to know the basics of Kubernetes? [Read this quick introductory blog](https://about.gitlab.com/blog/kubernetes-the-container-orchestration-solution/).\n\n## Configure a GitLab project to view Kubernetes resources\n\nBefore proceeding with this section, the following prerequisites are required:\n* a remote Kubernetes cluster (i.e., not running locally on your machine)\n* access to a GitLab v17.2 account\n* [this repository](https://gitlab.com/gitlab-da/tutorials/cloud-native/gitlab-k8s-log-streaming-example) forked to a GitLab group to which you have access\n* Helm CLI\n* kubectl CLI\n\nOnce you have satisfied the prerequisites involved, add an agent configuration file to the GitLab project you forked. The configuration file allows users to control permissions around how GitLab users may interact with the associated Kubernetes cluster.\n\nYou can use the configuration file included in this GitLab project by changing the following file: `.gitlab/agents/k8s-agent/config.yaml`. Replace the `\u003CGitLab group>` in the id property shown below with the group where you have forked the example project. 
This config file will allow [GitLab to access your cluster via an agent](https://docs.gitlab.com/ee/user/clusters/agent/user_access.html) that can be installed on your cluster.\n\n```yaml\nuser_access:\n  access_as:\n    agent: {}\n  projects:\n    - id: \u003CGitLab group>/gitlab-k8s-log-streaming-example\n```\n\nOnce the above file is edited, you can commit and push these changes to the main branch of the project. \n\n## Add GitLab Kubernetes agent to cluster\n\nWith the agent configuration file added, now add the cluster to GitLab by installing an agent on your cluster. In the GitLab UI, go to your project and, on the left side of the screen, select **Operate > Kubernetes clusters**. Once on this page, select the **Connect a cluster** button on the right side of the screen. From the dropdown menu, you can then select the agent, which should be `k8s-agent`. Click **Register** to get instructions for how to install the agent on your cluster.\n\nThe instructions presented to you after registering the agent will be to run a helm command that will install the GitLab agent on your cluster. Before running the command locally, you will want to ensure your Kubernetes context is targeting the cluster you want to work with. Once you have verified you are using the correct kubeconfig locally, you can run the helm command to install the agent on your cluster.\n\nOnce both pods are running, GitLab should be able to connect to the agent. Run the following command to wait for the pods to start up:\n\n```shell\nkubectl get pods -n gitlab-agent-k8s-agent -w\n```\n\n## Deploy sample application to your cluster\n\nBefore you can view logs of a workload through GitLab, you first need to have something running on your cluster. To do this, you can run the following kubectl command locally. 
\n\n```shell\nkubectl apply -f https://gitlab.com/gitlab-da/tutorials/cloud-native/gitlab-k8s-log-streaming-example/-/raw/main/k8s-manifests/k8s.yaml\n```\n\nAfter the command runs successfully, you are now ready to complete the final step to set up a Kubernetes dashboard via GitLab.\n\n## View pod logs through the GitLab UI\n\nTo add the Kubernetes dashboard via the GitLab UI, go to your project and, on the left side of the screen, select **Operate > Environments**. On the top right side of the screen, select the **Create an environment**.\n\nNext, you can give your environment a name, select the GitLab agent (i.e. `k8s-agent`), and pick a namespace for the Kubernetes dashboard to focus on. Since the application is running in the `gitlab-k8s-log-streaming-example-dev` namespace, select this option from the namespace dropdown. After naming the environment and selecting the agent and namespace, click **Save**.\n\nAfter creating the environment, you should now see information about the application’s pods displayed via the GitLab UI.\n\n![Kubernetes logs - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676402/Blog/Content%20Images/Screenshot_2024-08-20_at_12.15.08_PM.png)\n\nGo to the right side of the screen and click **View Logs** to see logs for one of the pods associated with the application. \n\n![Kubernetes dashboard - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676402/Blog/Content%20Images/Screenshot_2024-08-20_at_12.16.56_PM.png)\n\n## Try it out and share feedback\n\nThe introduction of pod log streaming in GitLab v17.2 will help GitLab users get one step closer to managing complex deployments to Kubernetes, as well as monitoring and troubleshooting issues post deployment via a common user experience. We are excited to hear more about users’ experiences with this new enhancement and how it helps improve DevOps workflows around Kubernetes. 
To share your experience with us, you can open an issue to the [project associated with this tutorial](https://gitlab.com/gitlab-da/tutorials/cloud-native/gitlab-k8s-log-streaming-example). Or, [comment directly in the Kubernetes log streaming feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/478379) to report information to the GitLab engineering team.\n\nMore information on getting started with the GitLab Dashboard for Kubernetes can be found in the documentation [here](https://docs.gitlab.com/ee/ci/environments/kubernetes_dashboard.html).\n\n> To explore the GitLab Dashboard for Kubernetes as well as other more advanced features of GitLab, sign up for [our free 30-day trial of GitLab Ultimate](https://about.gitlab.com/free-trial/).\n",[680,533,1225,9],{"slug":2828,"featured":90,"template":684},"how-to-stream-logs-through-the-gitlab-dashboard-for-kubernetes","content:en-us:blog:how-to-stream-logs-through-the-gitlab-dashboard-for-kubernetes.yml","How To Stream Logs Through The Gitlab Dashboard For Kubernetes","en-us/blog/how-to-stream-logs-through-the-gitlab-dashboard-for-kubernetes.yml","en-us/blog/how-to-stream-logs-through-the-gitlab-dashboard-for-kubernetes",{"_path":2834,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2835,"content":2841,"config":2846,"_id":2848,"_type":13,"title":2849,"_source":15,"_file":2850,"_stem":2851,"_extension":18},"/en-us/blog/how-to-translate-bamboo-agent-capabilities-to-gitlab-runner-tags",{"title":2836,"description":2837,"ogTitle":2836,"ogDescription":2837,"noIndex":6,"ogImage":2838,"ogUrl":2839,"ogSiteName":669,"ogType":670,"canonicalUrls":2839,"schema":2840},"How to translate Bamboo agent capabilities to GitLab Runner tags  ","This tutorial demonstrates how to use tags to organize GitLab Runners when building complex CI/CD 
pipelines.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663019/Blog/Hero%20Images/AdobeStock_519147119.jpg","https://about.gitlab.com/blog/how-to-translate-bamboo-agent-capabilities-to-gitlab-runner-tags","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to translate Bamboo agent capabilities to GitLab Runner tags  \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2024-02-22\",\n      }",{"title":2836,"description":2837,"authors":2842,"heroImage":2838,"date":2843,"body":2844,"category":769,"tags":2845},[1570],"2024-02-22","CI pipelines often start simple – a single job building a binary and pushing it to an artifact repository or to some production environment. Ever-changing software requirements introduce more complexities, such as adding more jobs to perform certain checks and reviewing the output before the final build job is executed.  \n\nThese complexities increase exponentially when builds are expected to target varying systems with different system architectures or resource needs. This is evident in projects like operating systems, mobile apps, or software distributions that support multiple deployment platforms. To account for the varying needs of builds in these types of environments, having multiple runners that match needed requirements is key, and that's where [GitLab Runner](https://docs.gitlab.com/runner/) tags come in. If you are coming from Atlassian's Bamboo, they are called \"agent capabilities.\"\n\nRunner tags allow organizing runners by a tag that signifies a specific use case they support; these tags are then used to make sure CI jobs run on a runner that meets their requirements. 
A job can require GPU resources that are only available on a handful of runners; tagging the job to the tags of the runner allows it to be scheduled on the runner with GPUs.\n\nAgent capabilities on Bamboo are used to achieve the same functionality by specifying binaries or custom identifiers that must be matched or available for a job to run on a Bamboo agent. In this blog post, we will be looking at how to translate Bamboo agent capabilities to GitLab Runner tags. \n\nBamboo has varying agent capabilities:\n- Executable capability specifies executables that are available on an agent.\n- JDK capability specifies that the Java Development Kit is installed and available for builds.\n- Version Control capability lets Bamboo know the version control systems set up on an agent and where the client application is located.\n- Docker capability is used to define the agents where Docker is installed for Docker tasks\n- Custom capability uses key/value identifiers to identify a unique functionality an agent provides.\n\nGitLab makes the process easier by using tags to identify Runners, some of which can be assigned multiple tags to denote the varying functionalities they can provide to jobs. Let's look at how you can use Runner tags in GitLab.\n\n## Adding tags to GitLab Runner\n\nWhen [registering a runner](https://docs.gitlab.com/runner/register/index.html) after installation, one of the steps requires providing a list of comma-separated tags that can be used. If none are provided at this stage, you can always edit the `/etc/gitlab-runner/config.toml` file and add any missing tags.\n\nYou can also manage the tags of a runner in GitLab by accessing the runner's edit page and updating the `Tags` field. You have the option for the runner to be exclusive to jobs that are tagged appropriately, or when there are no tagged jobs to run, it should run untagged jobs, too. 
Checking `Run untagged jobs` enables this behavior.\n\n## Using tags in .gitlab-ci.yaml file\n\nTo run a job on a specific runner, add the relevant tags to the job's configuration, as shown below:\n\n```yaml\nbuild_ios:\n  image: macos-13-xcode-14\n  stage: build\n  script:\n    - bundle check --path vendor/bundle || bundle install --path vendor/bundle --jobs $(nproc)\n    - bundle exec fastlane build\n  tags: \n    - saas-macos-medium-m1\n```\nIn the example above, the job builds an iOS application only on runners operating on a macOS device with an M1 chip and tagged `saas-macos-medium-m1`.\n\n## Using multiple tags\n\nA job can specify multiple tags to target a diverse range of runners, especially in organizations that run several fleets of runners as part of their software development lifecycle. A job will only run if a runner is found that has all the tags the job has been tagged with. For example, if a job has `[linux, android, fastlane]` tags, a runner with `[ android, fastlane]` or `[ linux, android]` will not execute the job because the full set of tags does not match the runner.\n\n## Dynamic jobs with tags and variables\n\nYou can use variables to determine the values of tags and thus dynamically influence which runners pick up the jobs. For example:\n\n```\nvariables:\n  KUBERNETES_RUNNER: kubernetes\n\n  job:\n    tags:\n      - docker\n      - $KUBERNETES_RUNNER\n    script:\n      - echo \"Hello runner selector feature\"\n\n``` \n\nIn this example, only runners tagged with `kubernetes` will execute the job. You can take this further in more complex pipelines with [`parallel: matrix`](https://docs.gitlab.com/ee/ci/yaml/index.html#parallelmatrix). 
Here is an example:\n\n```\ndeploystacks:\n  stage: deploy\n  parallel:\n    matrix:\n      - PROVIDER: aws\n        STACK: [monitoring, app1]\n      - PROVIDER: gcp\n        STACK: [data]\n  tags:\n    - ${PROVIDER}-${STACK}\n  environment: $PROVIDER/$STACK\n\n```\n\nThis example ends up with three parallel jobs with three different tags for each: `aws-monitoring`, `aws-app1` and `gcp-data`, thus targeting possibly three different runners.\n\nUsing tags in your GitLab CI configuration gives you the flexibility to determine where and how your applications are built, to use resources more efficiently as scarce resources can be limited to certain runners, and to determine how jobs are allocated to those runners.\n\n> Learn more about [how to make the move from Atlassian to GitLab](https://about.gitlab.com/move-to-gitlab-from-atlassian/).\n",[108,771,9],{"slug":2847,"featured":90,"template":684},"how-to-translate-bamboo-agent-capabilities-to-gitlab-runner-tags","content:en-us:blog:how-to-translate-bamboo-agent-capabilities-to-gitlab-runner-tags.yml","How To Translate Bamboo Agent Capabilities To Gitlab Runner Tags","en-us/blog/how-to-translate-bamboo-agent-capabilities-to-gitlab-runner-tags.yml","en-us/blog/how-to-translate-bamboo-agent-capabilities-to-gitlab-runner-tags",{"_path":2853,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2854,"content":2860,"config":2865,"_id":2867,"_type":13,"title":2868,"_source":15,"_file":2869,"_stem":2870,"_extension":18},"/en-us/blog/how-to-use-gitlabs-custom-compliance-frameworks-in-your-devsecops",{"title":2855,"description":2856,"ogTitle":2855,"ogDescription":2856,"noIndex":6,"ogImage":2857,"ogUrl":2858,"ogSiteName":669,"ogType":670,"canonicalUrls":2858,"schema":2859},"How to use GitLab's Custom Compliance Frameworks in your DevSecOps environment","Explore how new frameworks, along with more than 50 out-of-the-box controls, transform regulatory requirements from burdensome checkboxes to integrated, automated workflow 
components.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097104/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%284%29_3LZkiDjHLjhqEkvOvBsVKp_1750097104092.png","https://about.gitlab.com/blog/how-to-use-gitlabs-custom-compliance-frameworks-in-your-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab's Custom Compliance Frameworks in your DevSecOps environment\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2025-04-30\",\n      }",{"title":2855,"description":2856,"authors":2861,"heroImage":2857,"date":2862,"body":2863,"category":814,"tags":2864},[1767],"2025-04-30","Compliance isn't just a checkbox — it's a critical business function that affects everything from operational risk to customer trust. For development teams, balancing compliance requirements with velocity can be particularly challenging. GitLab's [Custom Compliance Frameworks](https://about.gitlab.com/blog/introducing-custom-compliance-frameworks-in-gitlab/) offer a powerful way to integrate compliance verification directly into your development workflow. In this article you'll learn what they are and how to use them for maximum efficiecy.\n\n## What are GitLab Custom Compliance Frameworks?\n\nGitLab Custom Compliance Frameworks allow organizations to define, implement, and enforce compliance standards directly within their GitLab instance. 
This feature extends GitLab's built-in compliance capabilities by enabling teams to create customized frameworks that align with specific regulatory requirements, internal policies, or industry standards.\n\nCustom Compliance Frameworks have the following benefits:\n* Reduce manual tracking  \n* Accelerate audit readiness  \n* Enforce compliance controls natively\n\n![Compliance center screenshot with frameworks listed](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097114/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097114254.png)\n\nWith this release, more than 50 out-of-the-box (OOTB) controls are provided (with more coming soon) that can be tailored to your organization's unique compliance needs, including HIPAA in healthcare, GDPR for data privacy, SOC2 for service organizations, or industry-specific regulations. Some examples of OOTB controls include:\n\n* Separation of duties (e.g., at least two approvers and author approved merge request)  \n* Security scanners running (e.g., [SAST](https://docs.gitlab.com/user/application_security/sast/) running and [Dependency Scanning](https://docs.gitlab.com/user/application_security/dependency_scanning/) running)  \n* Authentication/authorization (e.g., project visibility not public and AuthSSO required)  \n* Application configuration (e.g., status checks required and Terraform required)\n\nAdditionally, you can configure external environmental controls using the GitLab API to check the status and details of an external environment.\n\n## Creating a Custom Compliance Framework from scratch\n\nNow that we understand the value, let's explore how to implement Custom Compliance Frameworks in your GitLab environment. We will use this demo application and you can follow along in this video. 
\n\n**Note:** A GitLab Ultimate subscription is required.\n\n\u003C!-- TODO: EMBED_YT_VIDEO -->\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/bSwwv5XeMdQ?si=unDwCltF4vTHT4mB\" title=\"Adhering to compliance requirements with built-in compliance controls\n\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n**Step 1: Define your compliance requirements**\n\nBefore building your custom framework, you need to clearly define your compliance requirements:\n\n1. **Identify applicable regulations:** Determine which regulations and standards apply to your organization (e.g., GDPR, PCI DSS, and HIPAA). \n2. **Map requirements to controls:** Break down each regulation into specific, actionable controls.  \n3. **Prioritize requirements:** Focus on high-risk areas and requirements with the greatest impact.\n\n**Step 2: Create your Custom Compliance Framework**\n\nTo create a custom compliance framework in GitLab:\n\n1. Navigate to your GitLab group's **Secure > Compliance Center** section.  \n2. Press the **New framework** button.  \n3. Select **Create blank framework**.\n\n![Create a custom compliance framework screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097114/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750097114255.png)\n\n4. Provide a name, description, and color for your framework.\n\n![New compliance framework screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097114/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097114257.png)\n\n5. Add a requirement to the framework:  \n   a. Scroll down to the **Requirements** tab.\n\n   b. Press the **New requirement** button.\n\n   c. Provide a name and description.  \n   d. Under the **Controls** section, select **Choose a GitLab control**.  \n   e. Select a control from the list (e.g., at least two approvals, SAST running).  
\n   f. Press the **Create requirement** button.\n\n![Create new requirement button](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097114/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097114258.png)\n\n6. Press the **Create framework** button.\n\nThe framework will be created as specified and will now be available to add to projects. Additionally, compliance frameworks can be [imported](http://TODO) using a JSON with the appropriate schema.\n\n**Step 3: Apply the framework to projects**\n\nOnce your framework is created:\n1. From the Compliance Center, select the **Projects** tab.  \n2. Use the search bar to **Search** or **Filter** results.  \n3. Select the project(s) you wish to apply your framework to.  \n4. Press the **Choose one bulk action** button.  \n5. Select **Apply frameworks to selected projects**.  \n6. Press the **Select frameworks** button.  \n7. Select your framework(s) from the list.  \n8. Press the **Apply** button.\n\n![Compliance center screen with SOC 2 framework dropdown](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097114/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097114260.png)\n\nThe framework will now be applied to the project, making its requirements visible and trackable.\n\n**Step 4: Monitor and report on compliance**\n\nWith your framework in place, you can now:\n\n1. Use the **Compliance Center** to track compliance status across projects including details and suggested fixes for failed controls.\n2. Generate **compliance reports** for audits and stakeholder reviews.  \n3. Set up **compliance alerts** to notify stakeholders of potential compliance issues. \n4. 
Review **audit events** to overview action taken on compliance settings.\n\n![Compliance Center screen showing SOC2 test framework](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097114/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097114263.png)\n\n## Real-world example: Implement a SOC2 compliance framework\n\nSystem and Organization Controls 2, better known as SOC2, is a rigorous auditing standard developed by the American Institute of Certified Public Accountants that assesses a service organization's controls related to security, availability, processing integrity, confidentiality, and privacy. You can read my [Guide to fulfilling SOC 2 security requirements with GitLab](https://about.gitlab.com/blog/guide-to-fulfilling-soc-2-security-requirements-with-gitlab/) to learn more.\n\nNow, let's review a practical example of using a Custom Compliance Framework to verify SOC2 security compliance, which requires:\n\n* implementation of controls to protect against unauthorized access  \n* establishment of procedures for identifying and mitigating risks  \n* setting up systems for detecting and addressing security incidents\n\n**Disclaimer:** This is only an example showcasing some of the controls possible for adhering to SOC2. 
Be sure to consult with your security/compliance team before moving any implementation to production.\n\nA Custom Compliance Framework for SOC2 will look as follows using some GitLab OOTB controls:\n\n* **Name:** SOC2 Security Requirements  \n* **Description:** Adds the security requirements for SOC2 framework compliance  \n* **Requirements:**  \n  * **Implement controls to protect against unauthorized access**  \n    * Auth SSO enabled  \n    * CI/CD job token scope enabled  \n    * Require MFA at org level  \n  * **Establish procedures for identifying and mitigating risks**  \n    * At least two approvals  \n    * Author approved merge request  \n    * Committers approved merge request  \n    * Default branch protected  \n  * **Setting up systems for detecting and addressing security incidents**  \n    * Dependency Scanning running  \n    * SAST running  \n    * DAST running\n\nWhen applied to your project(s), this framework allows you to oversee if/and when they fall out of compliance and what can be done to bring them back into compliance. Note that you can create and apply multiple compliance frameworks to a project(s). For example, you can have one for SOC2 process integrity requirements.\n\n## Implement security policies to ensure compliance requirements are met\n\nAlthough not required, security policies can be applied to projects containing a Custom Compliance Framework. This allows you to assure that certain compliance criteria will be enforced via security policies. For example, you can force security scanners to run on projects that contain a Custom Compliance Framework requiring security scanning. \n\nGitLab provides various different security policies:\n\n* [Scan execution policy](https://docs.gitlab.com/user/application_security/policies/scan_execution_policies/): Enforces security scans, either as part of the pipeline or on a specified schedule.  
\n* [Merge request approval policy](https://docs.gitlab.com/user/application_security/policies/merge_request_approval_policies/): Enforces project-level settings and approval rules based on scan results.  \n* [Pipeline execution policy](https://docs.gitlab.com/user/application_security/policies/pipeline_execution_policies/): Enforces CI/CD jobs as part of project pipelines. \n* [Vulnerability management policy](https://docs.gitlab.com/user/application_security/policies/vulnerability_management_policy/): Automatically resolves vulnerabilities that are no longer detected in the default branch.\n\nLet’s go ahead and force a SAST scanner to run in order to automatically adhere to any requirements that require SAST scanning. To create a security policy and apply it to a project with a particular framework:\n\n1. Navigate to a project that has a Custom Compliance Framework requiring **SAST scanning**. \n2. In the sidebar, select **Secure > Policies**.  \n3. Press the **New policy** button.  \n4. Under **Scan execution policy**, press the **Select policy** button. \n5. Fill in the **Name** and **Description**. \n6. Under **Actions**, select **SAST** as the scan to run.\n\n![Actions screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097114/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750097114264.png)\n\n7. Under **Conditions**, select the pipeline to be triggered when a pipeline runs for all branches.\n\n![Conditions screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097114/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097114265.png)\n\n8. Press the **Configure with a merge request** button.  \n9. An MR is now created in a separate project containing all the security policies scoped to this project.\n10. Press the **Merge** button.\n\nNow SAST will run for every branch, assuring you are compliant in that area. 
Be sure to review all the different types of security policies and see how they can suit your requirements.\n\n## 5 best practices to follow\n\nTo maximize the value of Custom Compliance Frameworks:\n\n1. **Start small:** Begin with one critical regulation or standard before expanding.  \n2. **Involve key stakeholders:** Include compliance, security, and development teams in framework creation.  \n3. **Automate where possible:** Use GitLab CI/CD to automate compliance checks.  \n4. **Document thoroughly:** Maintain clear documentation of how your framework maps to regulatory requirements.  \n5. **Review regularly:** Update your frameworks as regulations evolve or new requirements emerge.\n\n## Get started today\n\nGitLab Custom Compliance Frameworks represent a significant advancement in DevSecOps by bringing compliance directly into the development workflow. By implementing custom frameworks, organizations can reduce compliance overhead, improve risk management, and accelerate development cycles while maintaining robust compliance with regulatory requirements.\n\nThe ability to define and enforce Custom Compliance Frameworks gives teams the flexibility they need to address their specific regulatory landscape while providing the structure necessary to ensure consistent compliance practices across the organization.\n\nAs regulatory requirements continue to grow in complexity, tools like GitLab Custom Compliance Frameworks will become increasingly essential for organizations looking to balance compliance requirements with development velocity in a sustainable way.\n\n> To try Custom Compliance Frameworks today, sign up for your [free, 60-day trial of GitLab Ultimate](https://about.gitlab.com/free-trial/).\n\n## Learn more\n\nVisit these resources to learn more about Custom Compliance Frameworks and how they can benefit your organization:\n\n* [Custom Compliance Frameworks 
documentation](https://docs.gitlab.com/user/compliance/compliance_center/compliance_status_report/)  \n* [Custom Compliance Frameworks epic](https://gitlab.com/groups/gitlab-org/-/epics/13295)  \n* [Security Policies documentation](https://docs.gitlab.com/user/application_security/policies/)  \n* [GitLab Security and Compliance solutions](https://about.gitlab.com/solutions/security-compliance/)",[814,9,478,680,678],{"slug":2866,"featured":90,"template":684},"how-to-use-gitlabs-custom-compliance-frameworks-in-your-devsecops","content:en-us:blog:how-to-use-gitlabs-custom-compliance-frameworks-in-your-devsecops.yml","How To Use Gitlabs Custom Compliance Frameworks In Your Devsecops","en-us/blog/how-to-use-gitlabs-custom-compliance-frameworks-in-your-devsecops.yml","en-us/blog/how-to-use-gitlabs-custom-compliance-frameworks-in-your-devsecops",{"_path":2872,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2873,"content":2879,"config":2884,"_id":2886,"_type":13,"title":2887,"_source":15,"_file":2888,"_stem":2889,"_extension":18},"/en-us/blog/how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery",{"title":2874,"description":2875,"ogTitle":2874,"ogDescription":2875,"noIndex":6,"ogImage":2876,"ogUrl":2877,"ogSiteName":669,"ogType":670,"canonicalUrls":2877,"schema":2878},"How to use OCI images as the source of truth for continuous delivery","Discover the benefits of using Open Container Initiative images as part of GitOps workflows and the many features GitLab offers to simplify deployments to Kubernetes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097601/Blog/Hero%20Images/Blog/Hero%20Images/REFERENCE%20-%20Use%20this%20page%20as%20a%20reference%20for%20thumbnail%20sizes_76Tn5jFmEHY5LFj8RdDjNY_1750097600692.png","https://about.gitlab.com/blog/how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        
\"headline\": \"How to use OCI images as the source of truth for continuous delivery\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Daniel Helfand\"}],\n        \"datePublished\": \"2025-02-19\",\n      }",{"title":2874,"description":2875,"authors":2880,"heroImage":2876,"date":2881,"body":2882,"category":724,"tags":2883},[2823],"2025-02-19","Is [GitOps](https://about.gitlab.com/topics/gitops/) still GitOps if you are not using a git repository as your deployment artifact? While git remains central to GitOps workflows, storing infrastructure definitions as Open Container Initiative (OCI) artifacts in container registries has seen a rise in adoption as the source for GitOps deployments. In this article, we will dive deeper into the ideas behind this trend and how GitLab features support this enhancement to GitOps workflows.\n\n## What is GitOps?\n\nThe [OpenGitOps](https://opengitops.dev/) project has defined [four principles](https://opengitops.dev/#principles) for the practice of GitOps:\n- A [system managed by GitOps](https://github.com/open-gitops/documents/blob/v1.0.0/GLOSSARY.md#software-system) must have its [desired state expressed declaratively](https://github.com/open-gitops/documents/blob/v1.0.0/GLOSSARY.md#declarative-description).\n- Desired state is stored in a way that enforces immutability and versioning, and retains a complete version history.\n- Software agents automatically pull the desired state declarations from the source.\n- Software agents [continuously](https://github.com/open-gitops/documents/blob/v1.0.0/GLOSSARY.md#continuous) observe actual system state and [attempt to apply the desired state](https://github.com/open-gitops/documents/blob/v1.0.0/GLOSSARY.md#reconciliation).\n\nAn example of GitOps is storing the Kubernetes manifests for a microservice in a GitLab project. 
Those Kubernetes resources are then continuously reconciled by a [controller](https://kubernetes.io/docs/concepts/architecture/controller/) running on the Kubernetes cluster where the microservice is deployed to. This allows engineers to manage infrastructure using the same workflows as working with regular code, such as opening merge requests to make and review changes and versioning changes. GitOps also has operational benefits such as [preventing configuration drift](https://about.gitlab.com/topics/gitops/#cicd) and helps engineers audit what changes led to certain outcomes with deployments.\n\n## Benefits and limitations of git in GitOps workflows\n\nWhile git is an essential piece of GitOps workflows, git repositories were not designed to be deployed by GitOps controllers. Git does provide the ability for engineers to collaborate on infrastructure changes and audit these changes later on, but controllers do not need to download an entire git repository for a successful deployment. GitOps controllers simply need the infrastructure defined for a particular environment.\n\nAdditionally, an important piece of the deployment process is to [sign and verify deployments](https://docs.sigstore.dev/about/overview/#why-cryptographic-signing) to assure deployment changes to an environment are coming from a trusted source. While git commits can be signed and verified by GitOps controllers, commits may also capture other details not related to the deployment itself (e.g., documentation changes, updates to other environments, and git repository restructuring) or not enough of the deployment picture as a deployment may consist of multiple commits. This again feels like a case this git feature wasn’t designed for.\n\nAnother challenging aspect of git in GitOps workflows is that it can sometimes lead to more automation than expected. Soon after merging a change to the watched branch, it will be deployed. There are no controls in the process outside of git. 
How can you make sure that nothing gets deployed on a Friday late afternoon? What if teams responsible for deployment do not have permissions to merge changes in certain GitLab projects? Using OCI images adds a pipeline into the process, including all the delivery control features, like [approvals or deploy freezes](https://docs.gitlab.com/ee/ci/environments/protected_environments.html).\n\n## OCI images\n\nThe [Open Container Initiative](https://opencontainers.org/) has helped to define standards around container formats. While most engineers are familiar with building Dockerfiles into container images, many may not be as familiar with storing Kubernetes manifests in a container registry. Because [GitLab’s Container Registry](https://docs.gitlab.com/ee/user/packages/container_registry/) is OCI compliant, it allows for users to push Kubernetes manifests for a particular environment to a container registry. GitOps controllers, such as [Flux CD](https://about.gitlab.com/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab/), can use the manifests stored in this OCI artifact instead of needing to clone an entire git repository.\n\nOften in GitOps workflows, a git repository can include the infrastructure definitions for all environments that a microservice will be deployed to. By packaging the Kubernetes manifests for only a specific environment, Flux CD can download the minimum files needed to carry out a deployment to a specific environment.\n\n### Security benefits of using OCI artifacts\n\nAs mentioned previously, signing and verifying the artifacts to be deployed to an environment adds an additional layer of security for software projects. After Kubernetes manifests are pushed to a container registry, a tool like [Sigstore Cosign](https://docs.sigstore.dev/quickstart/quickstart-cosign/) can be used to sign the OCI image with a private key that can be securely stored in a GitLab project as a [CI/CD variable](https://docs.gitlab.com/ee/ci/variables/). 
Flux CD can then use a public key stored on a Kubernetes cluster to verify that a deployment is coming from a trusted source.\n\n## Using GitLab to push and sign OCI images\n\nGitLab offers many features that help simplify the process of packaging, signing, and deploying OCI images. A common way to structure GitLab projects with GitOps workflows is to have separate GitLab projects for microservices’ code and a single infrastructure repository for all microservices. If an application is composed of `n` microservices, this would require having `n +1` GitLab projects for an application.\n\nThe artifact produced by a code project is usually a container image that will be used to package the application. The infrastructure or delivery project will contain the Kubernetes manifests defining all the resources required to scale and serve traffic to each microservice. The artifact produced by this project is usually an OCI image used to deploy the application and other manifests to Kubernetes.\n\nIn this setup, separation of environments is handled by defining Kubernetes manifests in separate folders. These folders represent environments (e.g., development, staging, and production) that will host the application. When changes are made to the code project and a new container image is pushed, all that needs to be done to deploy these changes via GitLab’s integration with Flux CD is to edit the manifests under the environment folder to include the new image reference and open a merge request. Once that merge request is reviewed, approved, and merged, the delivery project’s CI/CD job will push a new OCI image that Flux CD will pick up and deploy to the new environment.\n\n![OCI images - flow chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097611/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097611046.png)\n\nSigning an OCI image is as simple as including Cosign in your project’s CI/CD job. 
You can simply generate a new public and private key with Cosign by running the commands below locally. Just make sure to log in to your GitLab instance with the [glab CLI](https://gitlab.com/gitlab-org/cli/#installation) and replace the [`PROJECT_ID`] for the Cosign command with your [delivery project’s ID](https://docs.gitlab.com/ee/user/project/working_with_projects.html#access-a-project-by-using-the-project-id).\n\n```\nglab auth login\ncosign generate-key-pair gitlab://[PROJECT_ID]\n```\n\nOnce the cosign command runs successfully, you can see the Cosign keys added to your project under the CI/CD variables section under the key names `COSIGN_PUBLIC_KEY` and `COSIGN_PRIVATE_KEY`.\n\n### Example CI/CD job\n\nA GitLab CI/CD job for pushing an OCI image will look something like the following:\n\n```yaml\nfrontend-deploy:\n  rules:\n  - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    changes:\n      paths:\n      - manifests/dev/frontend-dev.yaml\n  trigger:\n    include:\n      - component: gitlab.com/components/fluxcd/oci-artifact@0.3.1\n        inputs:\n          version: 0.3.1\n          kubernetes_agent_reference: gitlab-da/projects/tanuki-bank/flux-config:dev\n          registry_image_url: \"oci://$CI_REGISTRY_IMAGE/frontend\"\n          image_tag: dev\n          manifest_path: ./manifests/dev/frontend-dev.yaml\n          flux_oci_repo_name: frontend\n          flux_oci_namespace_name: frontend-dev\n          signing_private_key: \"$COSIGN_PRIVATE_KEY\"\n```\n\nThe [GitLab CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/) offers a GitLab-maintained [CI/CD component for working with OCI artifacts and Flux CD](https://gitlab.com/explore/catalog/components/fluxcd). 
This component allows development teams to push Kubernetes manifests as OCI images to GitLab’s Container Registry or an external container registry, sign the OCI image using Cosign, and immediately reconcile the newly pushed image via Flux CD.\n\nIn the example above, the Flux CD `component` is included in a `.gitlab-ci.yml` file of a GitLab project. Using the component’s `inputs`, users can define what registry to push the image to (i.e., `registry_image_url` and `image tag`), the file path to Kubernetes manifests that will be pushed (i.e., `manifest_path`), the cosign private key used to sign images (i.e., `signing_private_key`), and the Kubernetes namespace and Flux CD [OCIRepository](https://fluxcd.io/flux/components/source/ocirepositories/) name needed to sync updates to an environment (i.e., `flux_oci_namespace_name` and `flux_oci_repo_name`).\n\nThe `kubernetes_agent_reference` allows GitLab CI/CD jobs to inherit the `kubeconfig` needed to access a Kubernetes cluster without needing to store a `kubeconfig` CI/CD variable in each GitLab project. By setting up the [GitLab agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/), you can configure all GitLab projects’ CI/CD jobs in a [GitLab group](https://docs.gitlab.com/ee/user/group/) to inherit permissions to deploy to the Kubernetes cluster.\n\nThe agent for Kubernetes context is typically configured wherever you configure the GitLab Agent for Kubernetes in your GitLab group. It is typically recommended that this be done in the project where Flux CD is managed. More information on configuring the agent for CI/CD access can be found in our [CI/CD workflow documentation](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html).\n\nThe variables `$COSIGN_PRIVATE_KEY`, `$FLUX_OCI_REPO_NAME`, and `$FRONTEND_DEV_NAMESPACE` are values stored as CI/CD variables to easily access and mask these sensitive pieces of data in CI/CD logs. 
The `$CI_REGISTRY_IMAGE` is a variable that GitLab jobs have available by default that specifies the GitLab project’s container registry.\n\n### Deploy OCI images\n\nUsing [Flux CD with your GitLab projects](https://docs.gitlab.com/ee/user/clusters/agent/gitops/flux_tutorial.html), you can automate deployments and signing verification for your microservice’s environments. Once Flux CD is configured to sync from a GitLab project, you could add the following Kubernetes [custom resource definitions](https://kubernetes.io/docs/concepts/extend-kubernetes/api-extension/custom-resources/) to your project to sync your pushed OCI image.\n\n```yaml\napiVersion: v1\nkind: Namespace\nmetadata:\n  name: frontend-dev\n  labels:\n    name: frontend-dev\n---\napiVersion: bitnami.com/v1alpha1\nkind: SealedSecret\nmetadata:\n  name: cosign-public-key\n  namespace: frontend-dev\nspec:\n  encryptedData:\n    cosign.pub: AgAKgLf4VbVzJOmr6++k81LlFayx88AELaUQFNOaXmBF4G+fBfBYeABl0skNvMAa1UrPVNSfMIHgFoYHoO96g576a+epk6V6glOI+++XvYbfsygof3GGxe0nL5Qh2b3ge0fNpyd0kTPSjTj0YUhRhKtMGMRSRw1jrwhNcGxCHK+Byibs52v8Np49KsIkeZKbzLdgYABkrv+k0j7hQM+jR180NpG+2UiRvaXpPuogxkbj61FEqWGrJHk8IVyfl3eh+YhoXxOHGDqko6SUC+bUZPDBlU6yKegO0/8Zq3hwulrSEsEjzRZNK+RFVMOLWWuC6h+WGpYhAMcsZPwjjJ/y29KLNa/YeqkN/cdk488QyEFc6ehCxzhH67HxIn2PDa+KkEOTv2TuycGF+Q00jKIizXF+IwLx/oRb3pTCF0AoAY8D8N3Ey+KfkOjsBON7gGID8GbQiJqX2IgIZxFMk0JRzxbRKOEqn+guLd5Shj7CD1a1Mkk0DxBdbqrGv2XNYUaFPI7xd3rZXUJZlnv+fsmwswsiGWRuXwim45HScWzQnfgLAe7tv3spVEGeaO5apl6d89uN21PBQnfE/zyugB//7ZW9tSp6+CSMyc5HynxI8diafqiwKPgvzLmVWRnkvxJijoXicRr3sCo5RudZPSlnjfd7CKdhwEVvLl7dRR4e/XBMdxCzk1p52Pl+3/kJR+LJii5+iwOpYrpVltSZdzc/3qRd19yMpc9PWpXYi7HxTb24EOQ25i21eDJY1ceplDN6bRtop2quzkjlwVeE2i4cEsX/YG8QBtQbop/3fjiAjKaED3QH3Ul0PECS9ARTScSkcOL3I00Xpp8DyD+xH0/i9wCBRDmH3yKX18C8VrMq02ALSnlP7WCVVjCPzubqKx2LPZRxK9EG0fylwv/vWQzTUUwfbPQZsd4c75bSTsTvxqp/UcFaXA==\n  template:\n    metadata:\n      name: cosign-public-key\n      namespace: frontend-dev\n---\napiVersion: 
source.toolkit.fluxcd.io/v1beta2\nkind: OCIRepository\nmetadata:\n    name: frontend\n    namespace: frontend-dev\nspec:\n    interval: 1m\n    url: oci://registry.gitlab.com/gitlab-da/projects/tanuki-bank/tanuki-bank-delivery/frontend\n    ref:\n        tag: dev\n    verify:\n      provider: cosign\n      secretRef:\n        name: cosign-public-key\n---\napiVersion: kustomize.toolkit.fluxcd.io/v1\nkind: Kustomization\nmetadata:\n    name: frontend\n    namespace: frontend-dev\nspec:\n    interval: 1m\n    targetNamespace: frontend-dev\n    path: \".\"\n    sourceRef:\n        kind: OCIRepository\n        name: frontend\n    prune: true\n```\n\nThe [`Kustomization`](https://fluxcd.io/flux/components/kustomize/kustomizations/) resource allows for further customization of Kubernetes manifests and also specifies which namespace to deploy resources to. The `OCIRepository` resource for Flux CD allows users to specify the OCI image repository reference and tag to regularly sync from. Additionally, you will notice the `verify.provider` and `verify.secretRef` properties. These fields allow you to verify that the OCI image deployed to the cluster was signed by the corresponding Cosign private key used in the earlier CI/CD job.\n\nThe public key needs to be stored in a [Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/) that will need to be present in the same namespace as the `OCIRepository` resource. To have this secret managed by Flux CD and not store the secret in plain text, you can consider using [SealedSecrets](https://fluxcd.io/flux/guides/sealed-secrets/) to encrypt the value and have it be decrypted cluster side by a controller.\n\nFor a simpler approach not requiring SealedSecrets, you can [deploy the secret via a GitLab CI/CD](https://docs.gitlab.com/ee/user/clusters/agent/getting_started_deployments.html) job using the [`kubectl CLI`](https://kubernetes.io/docs/reference/kubectl/). 
In the non-sealed secret approach, you would simply remove the SealedSecret included above and run the job to deploy the public key secret before running the job to push the new OCI image. This will make sure the secret is stored securely in GitLab and make sure the secret can be accessed on the cluster by the OCIRepository. While this approach is a bit simpler, just note this is not a suitable approach for managing secrets in production.\n\n## The benefits of OCI, GitLab, and GitOps\n\nOCI artifacts allow for GitOps teams to take deployments even further with added security benefits and allowing for deployments to be minimal. Users still gain all the benefits offered by git as far as having a source of truth for infrastructure and collaborating on projects. OCI images add a packaging approach that improves the deployment aspect of GitOps.\n\nGitLab continues to learn from our customers and the cloud native community on building experiences that help simplify GitOps workflows. To get started using some of the features mentioned in this blog, you can sign up for a [60-day free trial of GitLab Ultimate](https://about.gitlab.com/free-trial/). 
We would also love to hear from users about their experiences with these tools, and you can provide feedback in the [community forum](https://forum.gitlab.com/t/oci-images-as-source-of-truth-for-gitops-with-gitlab/120965).\n",[108,727,1225,533,726,9],{"slug":2885,"featured":6,"template":684},"how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery","content:en-us:blog:how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery.yml","How To Use Oci Images As The Source Of Truth For Continuous Delivery","en-us/blog/how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery.yml","en-us/blog/how-to-use-oci-images-as-the-source-of-truth-for-continuous-delivery",{"_path":2891,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2892,"content":2897,"config":2903,"_id":2905,"_type":13,"title":2906,"_source":15,"_file":2907,"_stem":2908,"_extension":18},"/en-us/blog/how-we-diagnosed-and-resolved-redis-latency-spikes",{"title":2893,"description":2894,"ogTitle":2893,"ogDescription":2894,"noIndex":6,"ogImage":1342,"ogUrl":2895,"ogSiteName":669,"ogType":670,"canonicalUrls":2895,"schema":2896},"How we diagnosed and resolved Redis latency spikes with BPF and other tools","How we uncovered a three-phase cycle involving two distinct saturation points and a simple fix to break that cycle.","https://about.gitlab.com/blog/how-we-diagnosed-and-resolved-redis-latency-spikes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How we diagnosed and resolved Redis latency spikes with BPF and other tools\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Matt Smiley\"}],\n        \"datePublished\": \"2022-11-28\",\n      }",{"title":2893,"description":2894,"authors":2898,"heroImage":1342,"date":2900,"body":2901,"category":769,"tags":2902},[2899],"Matt Smiley","2022-11-28","\n\nIf you enjoy performance engineering and peeling back abstraction layers to ask underlying 
subsystems to explain themselves, this article’s for you. The context is a chronic Redis latency problem, and you are about to tour a practical example of using BPF and profiling tools in concert with standard metrics to reveal unintuitive behaviors of a complex system.\n\nBeyond the tools and techniques, we also use an iterative hypothesis-testing approach to compose a behavior model of the system dynamics. This model tells us what factors influence the problem's severity and triggering conditions.\n\nUltimately, we find the root cause, and its remedy is delightfully boring and effective. We uncover a three-phase cycle involving two distinct saturation points and a simple fix to break that cycle. Along the way, we inspect aspects of the system’s behavior using stack sampling profiles, heat maps and flamegraphs, experimental tuning, source and binary analysis, instruction-level BPF instrumentation, and targeted latency injection under specific entry and exit conditions.\n\nIf you are short on time, the takeaways are summarized at the end. But the journey is the fun part, so let's dig in!\n\n## Introducing the problem: Chronic latency \n\nGitLab makes extensive use of Redis, and, on GitLab.com SaaS, we use [separate Redis clusters](/handbook/engineering/infrastructure/production/architecture/#redis-architecture) for certain functions. This tale concerns a Redis instance acting exclusively as a least recently used (LRU) cache.\n\nThis cache had a chronic latency problem that started occurring intermittently over two years ago and in recent months had become significantly worse: Every few minutes, it suffered from bursts of very high latency and corresponding throughput drop, eating into its Service Level Objective (SLO). 
These latency spikes impacted user-facing response times and [burned error budgets](https://gitlab.com/gitlab-org/gitlab/-/issues/360578#note_966597336) for dependent features, and this is what we aimed to solve.\n\n**Graph:** Spikes in the rate of extremely slow (1 second) Redis requests, each corresponding to an eviction burst\n\n![Graph showing spikes in the slow request rate every few minutes](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/00_redis_slow_request_rate_spikes_during_each_eviction_burst.png)\n\nIn prior work, we had already completed several mitigating optimizations. These sufficed for a while, but organic growth had resurfaced this as an important [long-term scaling problem](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#why-is-it-important-to-get-to-the-root-of-the-latency-spikes). We had also already ruled out externally triggered causes, such as request floods, connection rate spikes, host-level resource contention, etc. These latency spikes were consistently associated with memory usage reaching the eviction threshold (`maxmemory`), not by changes in client traffic patterns or other processes competing with Redis for CPU time, memory bandwidth, or network I/O.\n\nWe [initially thought](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1567) that Redis 6.2’s new [eviction throttling mechanism](https://github.com/redis/redis/pull/7653) might alleviate our eviction burst overhead. It did not. That mechanism solves a different problem: It prevents a stall condition where a single call to `performEvictions` could run arbitrarily long. 
In contrast, during this analysis we [discovered](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_977816216) that our problem (both before and after upgrading Redis) was related to numerous calls collectively reducing Redis throughput, rather than a few extremely slow calls causing a complete stall.\n\nTo discover our bottleneck and its potential solutions, we needed to investigate Redis’s behavior during our workload’s eviction bursts.\n\n## A little background on Redis evictions\n\nAt the time, our cache was oversubscribed, trying to hold more cache keys than the [configured `maxmemory` threshold](https://redis.io/docs/reference/eviction/) could hold, so evictions from the LRU cache were expected. But the dense concentration of that eviction overhead was surprising and troubling.\n\nRedis is essentially single-threaded. With a few exceptions, the “main” thread does almost all tasks serially, including handling client requests and evictions, among other things. Spending more time on X means there is less remaining time to do Y, so think about queuing behavior as the story unfolds.\n\nWhenever Redis reaches its `maxmemory` threshold, it frees memory by evicting some keys, aiming to do just enough evictions to get back under `maxmemory`. However, contrary to expectation, the metrics for memory usage and eviction rate (shown below) indicated that instead of a continuous steady eviction rate, there were abrupt burst events that freed much more memory than expected. 
After each eviction burst, no evictions occurred until memory usage climbed back up to the `maxmemory` threshold again.\n\n**Graph:** Redis memory usage drops by 300-500 MB during each eviction burst:\n\n![Memory usage repeatedly rises gradually to 64 GB and then abruptly drops](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/01_redis_memory_usage_dips_during_eviction_bursts.png)\n\n**Graph:** Key eviction spikes match the timing and size of the memory usage dips shown above\n\n![Eviction counter shows a large spike each time the previous graph showed a large memory usage drop](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/02_redis_eviction_bursts.png)\n\nThis apparent excess of evictions became the central mystery. Initially, we thought answering that question might reveal a way to smooth the eviction rate, spreading out the overhead and avoiding the latency spikes. Instead, we discovered that these bursts are an interaction effect that we need to avoid, but more on that later.\n\n## Eviction bursts cause CPU saturation\n\nAs shown above, we had found that these latency spikes correlated perfectly with large spikes in the cache’s eviction rate, but we did not yet understand why the evictions were concentrated into bursts that last a few seconds and occur every few minutes.\n\nAs a first step, we wanted to verify a causal relationship between eviction bursts and latency spikes.\n\nTo test this, we used [`perf`](https://www.brendangregg.com/perf.html) to run a CPU sampling profile on the Redis main thread. Then we applied a filter to split that profile, isolating the samples where it was calling the [`performEvictions` function](https://github.com/redis/redis/blob/6.2.6/src/evict.c#L512). 
Using [`flamescope`](https://github.com/Netflix/flamescope), we can visualize the profile’s CPU usage as a [subsecond offset heat map](https://www.brendangregg.com/HeatMaps/subsecondoffset.html), where each second on the X axis is folded into a column of 20 msec buckets along the Y axis. This visualization style highlights sub-second activity patterns. Comparing these two heat maps confirmed that during an eviction burst, `performEvictions` is starving all other main thread code paths for CPU time.\n\n**Graph:** Redis main thread CPU time, excluding calls to `performEvictions`\n\n![Heat map shows one large gap and two small gaps in an otherwise uniform pattern of 70 percent to 80 percent CPU usage](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/03_heat_map_of_redis_main_thread_during_eviction_burst__excluding_performEvictions.png)\n\n**Graph:** Remainder of the same profile, showing only the calls to `performEvictions`\n\n![This heat map shows the gaps in the previous heap map were CPU time spent performing evictions](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/04_heat_map_of_redis_main_thread_during_eviction_burst__only_performEvictions.png)\n\nThese results confirm that eviction bursts are causing CPU starvation on the main thread, which acts as a throughput bottleneck and increases Redis’s response time latency.  These CPU utilization bursts typically lasted a few seconds, so they were too short-lived to trigger alerts but were still user impacting.\n\nFor context, the following flamegraph shows where `performEvictions` spends its CPU time. There are a few interesting things here, but the most important takeaways are:\n* It gets called synchronously by `processCommand` (which handles all client requests).\n* It handles many of its own deletes. 
Despite its name, the `dbAsyncDelete` function only delegates deletes to a helper thread under certain conditions which turn out to be rare for this workload.\n\n![Flamegraph of calls to function performEvictions, as described above](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/05_flamegraph_of_redis_main_thread_during_eviction_burst__only_performEvictions.png)\n\nFor more details on this analysis, see the [walkthrough and methodology](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_854745083).\n\n## How fast are individual calls to `performEvictions`?\n\nEach incoming request to Redis is handled by a call to `processCommand`, and it always concludes by calling the `performEvictions` function. That call to `performEvictions` is frequently a no-op, returning immediately after checking that the `maxmemory` threshold has not been breached. But when the threshold is exceeded, it will continue evicting keys until it either reaches its `mem_tofree` goal or exceeds its configured time limit per call.\n\nThe CPU heat maps shown earlier proved that `performEvictions` calls were collectively consuming a large majority of CPU time for up to several seconds.\n\nTo complement that, we also measured the wall clock time of individual calls.\n\nUsing the `funclatency` CLI tool (part of the [BCC suite of BPF tools](https://github.com/iovisor/bcc)), we measured call duration by instrumenting entry and exit from the `performEvictions` function and aggregated those measurements into a [histogram](https://en.wikipedia.org/wiki/Histogram) at 1-second intervals. When no evictions were occurring, the calls were consistently low latency (4-7 usecs/call). This is the no-op case described above (including 2.5 usecs/call of instrumentation overhead). 
But during an eviction burst, the results shift to a bimodal distribution, including a combination of the fast no-op calls along with much slower calls that are actively performing evictions:\n\n```\n$ sudo funclatency-bpfcc --microseconds --timestamp --interval 1 --duration 600 --pid $( pgrep -o redis-server ) '/opt/gitlab/embedded/bin/redis-server:performEvictions'\n...\n23:54:03\n     usecs               : count     distribution\n         0 -> 1          : 0        |                                        |\n         2 -> 3          : 576      |************                            |\n         4 -> 7          : 1896     |****************************************|\n         8 -> 15         : 392      |********                                |\n        16 -> 31         : 84       |*                                       |\n        32 -> 63         : 62       |*                                       |\n        64 -> 127        : 94       |*                                       |\n       128 -> 255        : 182      |***                                     |\n       256 -> 511        : 826      |*****************                       |\n       512 -> 1023       : 750      |***************                         |\n```\n\nThis measurement also directly confirmed and quantified the throughput drop in Redis requests handled per second: The call rate to `performEvictions` (and hence to `processCommand`) dropped to 20% of its norm from before the evictions began, from 25K to 5K calls per second.\n\nThis has a huge impact on clients: New requests are arriving at 5x the rate they are being completed. And crucially, we will see soon that this asymmetry is what drives the eviction burst.\n\nFor more details on this analysis, see the [safety check](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_857869826) for instrumentation overhead and the [results walkthrough](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_857907521). 
And for more general reference, the BPF instrumentation overhead estimate is based on these [benchmark results](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1383).\n\n## Experiment: Can tuning mitigate eviction-driven CPU saturation?\n\nThe analyses so far had shown that evictions were severely starving the Redis main thread for CPU time. There were still important unanswered questions (which we will return to shortly), but this was already enough info to [suggest some experiments](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_859236777) to test potential mitigations:\n* Can we spread out the eviction overhead so it takes longer to reach its goal but consumes a smaller percentage of the main thread’s time?\n* Are evictions freeing more memory than expected due to scheduling a lot of keys to be asynchronously deleted by the [lazyfree mechanism](https://github.com/redis/redis/blob/6.2.6/redis.conf#L1079)? Lazyfree is an optional feature that lets the Redis main thread [delegate to an async helper thread](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_859236777) the expensive task of deleting keys that have more than 64 elements. These async evictions do not count immediately towards the eviction loop’s memory goal, so if many keys qualify for lazyfree, this could potentially drive many extra iterations of the eviction loop.\n\nThe [answers](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/7172#note_971197943) to both turned out to be no:\n* Reducing `maxmemory-eviction-tenacity` to its minimum setting still did not make `performEvictions` cheap enough to avoid accumulating a request backlog. It did increase response rate, but arrival rate still far exceeded it, so this was not an effective mitigation.\n* Disabling `lazyfree-lazy-eviction` did not prevent the eviction burst from dropping memory usage far below `maxmemory`. Those lazyfrees represent a small percentage of reclaimed memory. 
This rules out one of the potential explanations for the mystery of excessive memory being freed.\n\nHaving ruled out two potential mitigations and one candidate hypothesis, at this point we return to the pivotal question: Why are several hundred extra megabytes of memory being freed by the end of each eviction burst?\n\n## Why do evictions occur in bursts and free too much memory?\n\nEach round of eviction aims to free just barely enough memory to get back under the `maxmemory` threshold.\n\nWith a steady rate of demand for new memory allocations, the eviction rate should be similarly steady. The rate of arriving cache writes does appear to be steady. So why are evictions happening in dense bursts, rather than smoothly? And why do they reduce memory usage on a scale of hundreds of megabytes rather than hundreds of bytes?\n\nSome potential explanations to explore:\n* Do evictions only end when a large key gets evicted, spontaneously freeing enough memory to skip evictions for a while? No, the memory usage drop is far bigger than the largest keys in the dataset.\n* Do deferred lazyfree evictions cause the eviction loop to overshoot its goal, freeing more memory than intended? No, the above experiment disproved this hypothesis.\n* Is something causing the eviction loop to sometimes calculate an unexpectedly large value for its `mem_tofree` goal? We explore this next. The answer is no, but checking it led to a new insight.\n* Is a feedback loop causing evictions to become somehow self-amplifying? If so, what conditions lead to entering and leaving this state? 
This turned out to be correct.\n\nThese were all plausible and testable hypotheses, and each would point towards a different solution to the eviction-driven latency problem.\n\nThe first two hypotheses we have already eliminated.\n\nTo test the next two, we built custom BPF instrumentation to peek at the calculation of `mem_tofree` at the start of each call to `performEvictions`.\n\n## Observing the `mem_tofree` calculation with `bpftrace`\n\nThis part of the investigation was a personal favorite and led to a critical realization about the nature of the problem.\n\nAs noted above, our two remaining hypotheses were:\n* an unexpectedly large `mem_tofree` goal\n* a self-amplifying feedback loop\n\nTo differentiate between them, we used [`bpftrace`](https://github.com/iovisor/bpftrace) to instrument the calculation of `mem_tofree`, looking at its input variables and results.\n\nThis set of measurements directly tests the following:\n* Does each call to `performEvictions` aim to free a small amount of memory -- perhaps roughly the size of an average cache entry? If `mem_tofree` ever approaches hundreds of megabytes, that would confirm the first hypothesis and reveal what part of the calculation was causing that large value. Otherwise, it rules out the first hypothesis and makes the feedback loop hypothesis more likely.\n* Does the replication buffer size significantly influence `mem_tofree` as a feedback mechanism? Each eviction adds to this buffer, just like normal writes do. If this buffer grows large (possibly partly due to evictions) and then abruptly shrinks (due to the peer consuming it), that would cause a spontaneous large drop in memory usage, ending evictions and instantly reducing memory usage. 
This is one potential way for evictions to drive a feedback loop.\n\nTo peek at the values of the `mem_tofree` calculation ([script](https://gitlab.com/gitlab-com/gl-infra/scalability/uploads/cab2cd03231f8dd4819f77b44d768cb9/redis_snoop.getMaxmemoryState.sha_25a228b839a93a1395907a03f83e1eee448b0f14.production_thresholds.bt)), we needed to isolate the [correct call from `performEvictions`](https://github.com/redis/redis/blob/6.2.6/src/evict.c#L523) to the [`getMaxmemoryState`](https://github.com/redis/redis/blob/6.2.6/src/evict.c#L374-L407) function and reverse engineer its assembly to find the right instruction and register to instrument for each of the source code level variables that we wanted to capture. From that data we generate histograms for each of the following variables:\n\n```\nmem_reported = zmalloc_used_memory()        // All used memory tracked by jemalloc\noverhead = freeMemoryGetNotCountedMemory()  // Replication output buffers + AOF buffer\nmem_used = mem_reported - overhead          // Non-exempt used memory\nmem_tofree = mem_used - maxmemory           // Eviction goal\n```\n\n_Caveat:_ Our [custom BPF instrumentation](https://gitlab.com/gitlab-com/gl-infra/scalability/uploads/cab2cd03231f8dd4819f77b44d768cb9/redis_snoop.getMaxmemoryState.sha_25a228b839a93a1395907a03f83e1eee448b0f14.production_thresholds.bt) is specific to this particular build of the `redis-server` binary, since it attaches to virtual addresses that are likely to change the next time Redis is compiled. But the approach is able to be generalized. Treat this as a concrete example of using BPF to inspect source code variables in the middle of a function call without having to rebuild the binary. Because we are peeking at the function’s intermediate state and because the compiler inlined this function call, we needed to do binary analysis to find the correct instrumentation points. 
In general, peeking at a function’s arguments or return value is easier and more portable, but in this case it would not suffice.\n\nThe results:\n* Ruled out the first hypothesis: Each call to `performEvictions` had a small target value (`mem_tofree` \u003C 2 MB). This means each call to `performEvictions` did a small amount of work. Redis’s mysterious rapid drop in memory usage cannot have been caused by an abnormally large `mem_tofree` target evicting a big batch of keys all at once. Instead, there must be many calls collectively driving down memory usage.\n* The replication output buffers remained consistently small, ruling out one of the potential feedback loop mechanisms.\n* Surprisingly, `mem_tofree` was usually 16 KB to 64 KB, which is larger than a typical cache entry. This size discrepancy hints that cache keys may not be the main source of the memory pressure perpetuating the eviction burst once it begins.\n\nAll of the above results were consistent with the feedback loop hypothesis.\n\nIn addition to answering the initial questions, we got a bonus outcome: Concurrently measuring both `mem_tofree` and `mem_used` revealed a crucial new fact – _the memory reclaim is a completely distinct phase from the eviction burst_.\n\nReframing the pathology as exhibiting separate phases for evictions versus memory reclaim led to a series of realizations, described in the next section. 
From that emerged a coherent hypothesis explaining all the observed properties of the pathology.\n\nFor more details on this analysis, see [methodology notes](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_982498636), [build notes](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_982499538) supporting the disassembly of the Redis binary, and [initial interpretations](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_977994182).\n\n## Three-phase cycle\n\nWith the above results indicating a distinct separation between the evictions and the memory reclaim, we can now concisely characterize [three phases](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_982623949) in the cycle of eviction-driven latency spikes.\n\n**Graph:** Diagram (not to scale) comparing memory and CPU usage to request and response rates during each of the three phases\n\n![Diagram summarizes the text that follows, showing CPU and memory saturate in Phase 2 until request rate drops to match response rate, after which they recover](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/06_3_phase_cycle_of_eviction_bursts.png)\n\nPhase 1: Not saturated (7-15 minutes)\n* Memory usage is below `maxmemory`. No evictions occur during this phase.\n* Memory usage grows organically until reaching `maxmemory`, which starts the next phase.\n\nPhase 2: Saturated memory and CPU (6-8 seconds)\n* When memory usage reaches `maxmemory`, evictions begin.\n* Evictions occur only during this phase, and they occur intermittently and frequently.\n* Demand for memory frequently exceeds free capacity, repeatedly pushing memory usage above `maxmemory`. 
Throughout this phase, memory usage oscillates close to the `maxmemory` threshold, evicting a small amount of memory at a time, just enough to get back under `maxmemory`.\n\nPhase 3: Rapid memory reclaim (30-60 seconds)\n* No evictions occur during this phase.\n* During this phase, something that had been holding a lot of memory starts quickly and steadily releasing it.\n* Without the overhead of running evictions, CPU time is again spent mostly on handling requests (starting with the backlog that accumulated during Phase 2).\n* Memory usage drops rapidly and steadily. By the time this phase ends, hundreds of megabytes have been freed. Afterwards, the cycle restarts with Phase 1.\n\nAt the transition between Phase 2 and Phase 3, evictions abruptly ended because memory usage stays below the `maxmemory` threshold.\n\nReaching that transition point where memory pressure becomes negative signals that whatever was driving the memory demand in Phase 2 has started releasing memory faster than it is consuming it, shrinking the footprint it had accumulated during the previous phase.\n\nWhat is this **mystery memory consumer** that bloats its demand during Phase 2 and frees it during Phase 3?\n\n## The mystery revealed\n\n[Modeling the phase transitions](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_982651298) gave us some useful constraints that a viable hypothesis must satisfy. 
The mystery memory consumer must:\n* quickly bloat its footprint to hundreds of megabytes on a timescale of less than 10 seconds (the duration of Phase 2), under conditions triggered by the start of an eviction burst\n* quickly release its accumulated excess on a timescale of just tens of seconds (the duration of Phase 3), under the conditions immediately following an eviction burst\n\n**The answer:** The client input/output buffers meet those constraints to be the mystery memory consumer.\n\nHere is how that hypothesis plays out:\n* During Phase 1 (healthy state), the Redis main thread’s CPU usage is already fairly high. At the start of Phase 2, when evictions begin, the eviction overhead saturates the main thread’s CPU capacity, quickly dropping response rate below the incoming request rate.\n* This throughput mismatch between arrivals versus responses **is itself the amplifier** that takes over driving the eviction burst. As the size of that rate gap increases, the proportion of time spent doing evictions also increases.\n* Accumulating a backlog of requests requires memory, and that backlog continues to grow until enough clients are stalled that the arrival rate drops to match the response rate. As clients stall, the arrival rate falls, and with it the memory pressure, eviction rate, and CPU overhead begin to reduce.\n* At the equilibrium point when arrival rate falls to match response rate, memory demand is satisfied and evictions stop (ending Phase 2). Without the eviction overhead, more CPU time is available to process the backlog, so response rate increases above request arrival rate. This recovery phase steadily consumes the request backlog, incrementally freeing memory as it goes (Phase 3).\n* Once the backlog is resolved, the arrival and response rates match again. 
CPU usage is back to its Phase 1 norm, and memory usage has temporarily dropped in proportion to the max size of Phase 2’s request backlog.\n\nWe confirmed this hypothesis via a [latency injection experiment](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_987049036) showing that queuing alone explains the pathology. This outcome supports the conclusion that the extra memory demand originates from response rate falling below request arrival rate.\n\n## Remedies: How to avoid entering the eviction burst cycle\n\nNow that we understand the dynamics of the pathology, we can draw confident conclusions about viable solutions.\n\nRedis evictions are only self-amplifying when all of the following conditions are present:\n* **Memory saturation:** Memory usage reaches the `maxmemory` limit, causing evictions to start.\n* **CPU saturation:** The baseline CPU usage by the Redis main thread’s normal workload is close enough to a whole core that the eviction overhead pushes it to saturation. This reduces the response rate below request arrival rate, inducing self-amplification via increased memory demand for request buffering.\n* **Many active clients:** The saturation only lasts as long as request arrival rate exceeds response rate. 
Stalled clients no longer contribute to that arrival rate, so the saturation lasts longer and has a greater impact if Redis has many active clients still sending requests.\n\nViable remedies include:\n* Avoid memory saturation by any combination of the following to make peak memory usage less than the `maxmemory` limit:\n  * Reduce cache time to live (TTL)\n  * Increase `maxmemory` (and host memory if needed, but watch out for [`numa_balancing` CPU overhead](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1889) on hosts with multiple NUMA nodes)\n  * Adjust client behavior to avoid writing unnecessary cache entries\n  * Split the cache among multiple instances (sharding or functional partitioning, helps avoid both memory and CPU saturation)\n* Avoid CPU saturation by any combination of the following to make peak CPU usage for the workload plus eviction overhead be less than 1 CPU core:\n  * Use the fastest processor available for single-threaded instructions per second\n  * Isolate the redis-server process (particularly its main thread) from any other competing CPU-intensive processes (dedicated host, taskset, cpuset)\n  * Adjust client behavior to avoid unnecessary cache lookups or writes\n  * Split the cache among multiple instances (sharding or functional partitioning, helps avoid both memory and CPU saturation)\n  * Offload work from the Redis main thread (io-threads, lazyfree)\n  * Reduce eviction tenacity (only gives a minor benefit in our experiments)\n\nMore exotic potential remedies could include a new Redis feature. One idea is to exempt ephemeral allocations like client buffers from counting towards the `maxmemory` limit, instead applying that limit only to key storage. 
Alternatively, we could limit evictions to only consume at most a configurable percentage of the main thread’s time, so that most of its time is still spent on request throughput rather than eviction overhead.\n\nUnfortunately, either of those features would trade one failure mode for another, reducing the risk of eviction-driven CPU saturation while increasing the risk of unbounded memory growth at the process level, which could potentially saturate the host or cgroup and lead to an OOM, or out of memory, kill. That trade-off may not be worthwhile, and in any case it is not currently an option.\n\n## Our solution\n\nWe had already exhausted the low-hanging fruit for CPU efficiency, so we focused our attention on avoiding memory saturation.\n\nTo improve the cache’s memory efficiency, we [evaluated](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_990891708) which types of cache keys were using the most space and how much [`IDLETIME`](https://redis.io/commands/object-idletime/) they had accrued since last access. This memory usage profile identified some rarely used cache entries (which waste space), helped inform the TTL, or time to live, tuning by first focusing on keys with a high idle time, and highlighted some useful potential cutpoints for functionally partitioning the cache.\n\nWe [decided](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_1014582669) to concurrently pursue several cache efficiency improvements and opened an [epic](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/764) for it. 
The goal was to avoid chronic memory saturation, and the main action items were:\n* Iteratively reduce the cache’s [default TTL](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1854) from 2 weeks to 8 hours (helped a lot!)\n* Switch to [client-side caching](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_1026821730) for certain cache keys (efficiently avoids spending shared cache space on non-shared cache entries)\n* [Partition](https://gitlab.com/groups/gitlab-com/gl-infra/-/epics/762) a set of cache keys to a separate Redis instance\n\nThe TTL reduction was the simplest solution and turned out to be a big win. One of our main concerns with TTL reduction was that the additional cache misses could potentially increase workload on other parts of the infrastructure. Some cache misses are more expensive than others, and our metrics are not granular enough to quantify the cost of cache misses per type of cache entry. This concern is why we applied the TTL adjustment incrementally and monitored for SLO violations. Fortunately, our inference was correct: Reducing TTL did not significantly reduce the cache hit rate, and the additional cache misses did not cause noticeable impact to downstream subsystems.\n\nThe TTL reduction turned out to be sufficient to drop memory usage consistently a little below its saturation point.\n\nIncreasing `maxmemory` had initially not been feasible because the original peak memory demand (prior to the efficiency improvements) was expected to be larger than the max size of the VMs we use for Redis. 
However, once we dropped memory demand below saturation, then we could confidently [provision headroom](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1868) for future growth and re-enable [saturation alerting](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1883).\n\n## Results\n\nThe following graph shows Redis memory usage transitioning out of its chronically saturated state, with annotations describing the milestones when latency spikes ended and when saturation margin became wide enough to be considered safe:\n\n![Redis memory usage stops showing a flat top saturation](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/07_epic_results__memory_saturation_avoided_by_TTL_reductions.png)\n\nZooming into the days when we rolled out the TTL adjustments, we can see the harmful eviction-driven latency spikes vanish as we drop memory usage below its saturation point, exactly as predicted:\n\n![Redis memory usage starts as a flat line and then falls below that saturation line](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/08_results__redis_memory_usage_stops_saturating.png)\n\n![Redis response time spikes stop occurring at the exact point when memory stops being saturated](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/09_results__redis_latency_spikes_end.png)\n\nThese eviction-driven latency spikes had been the biggest cause of slowness in Redis cache.\n\nSolving this source of slowness significantly improved the user experience. This 1-year lookback shows only the long-tail portion of the improvement, not even the full benefit.  
Each weekday had roughly 2 million Redis requests slower than 1 second, until our fix in mid-August:\n\n![Graph of the daily count of Redis cache requests slower than 1 second, showing roughly 2 million slow requests per day on weekdays until mid-August, when the TTL adjustments were applied](https://about.gitlab.com/images/blogimages/2022-11-28-diagnosing-redis-latency-spikes-with-bpf-and-friends/10_results__1_year_retrospective_of_slow_redis_requests_per_day.png)\n\n## Conclusions\n\nWe solved a long-standing latency problem that had been worsening as the workload grew, and we learned a lot along the way. This article focuses mostly on the Redis discoveries, since those are general behaviors that some of you may encounter in your travels. We also developed some novel tools and analytical methods and uncovered several useful environment-specific facts about our workload, infrastructure, and observability, leading to several additional improvements and proposals not mentioned above.\n\nOverall, we made several efficiency improvements and broke the cycle that was driving the pathology. Memory demand now stays well below the saturation point, eliminating the latency spikes that were burning error budgets for the development teams and causing intermittent slowness for users. All stakeholders are happy, and we came away with deeper domain knowledge and sharper skills!\n\n## Key insights summary\n\nThe following notes summarize what we learned about Redis eviction behavior (current as of version 6.2):\n* The same memory budget (`maxmemory`) is shared by key storage and client connection buffers. A spike in demand for client connection buffers counts towards the `maxmemory` limit, in the same way that a spike in key inserts or key size would.\n* Redis performs evictions in the foreground on its main thread. All time spent in `performEvictions` is time not spent handling client requests. 
Consequently, during an eviction burst, Redis has a lower throughput ceiling.\n* If eviction overhead saturates the main thread’s CPU, then response rate falls below request arrival rate. Redis accumulates a request backlog (which consumes memory), and clients experience this as slowness.\n* The memory used for pending requests requires more evictions, driving the eviction burst until enough clients are stalled that arrival rate falls back below response rate. At that equilibrium point, evictions stop, eviction overhead vanishes, Redis rapidly handles its request backlog, and that backlog’s memory gets freed.\n* Triggering this cycle requires all of the following:\n  * Redis is configured with a `maxmemory` limit, and its memory demand exceeds that size. This memory saturation causes evictions to begin.\n  * Redis main thread’s CPU utilization is high enough under its normal workload that having to also perform evictions drives it to CPU saturation. This reduces response rate below request rate, causing a growing request backlog and high latency.\n  * Many active clients are connected. The duration of the eviction burst and the size of memory spent on client connection buffers increases proportionally to the number of active clients.\n* Prevent this cycle by avoiding either memory or CPU saturation. In our case, avoiding memory saturation was easier (mainly by reducing cache TTL).\n\n## Further reading\n\nThe following lists summarize the analytical tools and methods cited in this article. These tools are all highly versatile and any of them can provide a massive level-up when working on performance engineering problems.\n\nTools:\n* [perf](https://www.brendangregg.com/perf.html) - A Linux performance analysis multitool. 
In this article, we used `perf` as a sampling profiler, capturing periodic stack traces of the `redis-server` process's main thread when it is actively running on a CPU.\n* [Flamescope](https://github.com/Netflix/flamescope) - A visualization tool for rendering a `perf` profile (and other formats) into an interactive subsecond heat map. This tool invites the user to explore the timeline for microbursts of activity or inactivity and render flamegraphs of those interesting timespans to explore what code paths were active.\n* [BCC](https://github.com/iovisor/bcc) - BCC is a framework for building BPF tools, and it ships with many useful tools out of the box. In this article, we used `funclatency` to measure the call durations of a specific Redis function and render the results as a histogram.\n* [bpftrace](https://github.com/iovisor/bpftrace) - Another BPF framework, ideal for answering ad-hoc questions about your system's behavior. It uses an `awk`-like syntax and is [quick to learn](https://github.com/iovisor/bpftrace#readme). In this article, we wrote a [custom `bpftrace` script](https://gitlab.com/gitlab-com/gl-infra/scalability/uploads/cab2cd03231f8dd4819f77b44d768cb9/redis_snoop.getMaxmemoryState.sha_25a228b839a93a1395907a03f83e1eee448b0f14.production_thresholds.bt) for observing the variables used in computing how much memory to free during each round of evictions. 
This script's instrumentation points are specific to our particular build of `redis-server`, but the [approach is able to be generalized](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_982498636) and illustrates how versatile this tool can be.\n\nUsage examples:\n* [Example](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_854745083) - Walkthrough of using `perf` and `flamescope` to capture, filter, and visualize the stack sampling CPU profiles of the Redis main thread.\n* [Example](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_857869826) - Walkthrough (including safety check) of using `funclatency` to measure the durations of the frequent calls to function `performEvictions`.\n* [Example](https://gitlab.com/gitlab-com/gl-infra/production/-/issues/7172#note_971197943) - Experiment for adjusting Redis settings `lazyfree-lazy-eviction` and `maxmemory-eviction-tenacity` and observing the results using `perf`, `funclatency`, `funcslower`, and the Redis metrics for eviction count and memory usage.\n* [Example](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_982498636) - This is a working example (script included) of using `bpftrace` to observe the values of a function's variables. In this case we inspected the `mem_tofree` calculation at the start of `performEvictions`. Also, these [companion notes](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_982499538) discuss some build-specific considerations.\n* [Example](https://gitlab.com/gitlab-com/gl-infra/scalability/-/issues/1601#note_987049036) - Describes the latency injection experiment (the first of the three ideas). This experiment confirmed that memory demand increases at the predicted rate when we slow response rate to below request arrival rate, in the same way evictions do. 
This result confirmed the request queuing itself is the source of the memory pressure that amplifies the eviction burst once it begins.\n",[728,9,773],{"slug":2904,"featured":6,"template":684},"how-we-diagnosed-and-resolved-redis-latency-spikes","content:en-us:blog:how-we-diagnosed-and-resolved-redis-latency-spikes.yml","How We Diagnosed And Resolved Redis Latency Spikes","en-us/blog/how-we-diagnosed-and-resolved-redis-latency-spikes.yml","en-us/blog/how-we-diagnosed-and-resolved-redis-latency-spikes",{"_path":2910,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2911,"content":2917,"config":2923,"_id":2925,"_type":13,"title":2926,"_source":15,"_file":2927,"_stem":2928,"_extension":18},"/en-us/blog/icymi-key-ai-and-security-insights-from-our-developer-community",{"title":2912,"description":2913,"ogTitle":2912,"ogDescription":2913,"noIndex":6,"ogImage":2914,"ogUrl":2915,"ogSiteName":669,"ogType":670,"canonicalUrls":2915,"schema":2916},"ICYMI: Key AI and security insights from our developer community","Our latest LinkedIn Live highlights the hottest trends in AI, security, DevSecOps, and more. 
Also get a taste of the GitLab community contributions that are making an impact.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098331/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%286%29_55zMmdJIUpfh5qaPW9dtVA_1750098331584.png","https://about.gitlab.com/blog/icymi-key-ai-and-security-insights-from-our-developer-community","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"ICYMI: Key AI and security insights from our developer community\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fatima Sarah Khalid\"}],\n        \"datePublished\": \"2024-12-05\",\n      }",{"title":2912,"description":2913,"authors":2918,"heroImage":2914,"date":2919,"body":2920,"category":1103,"tags":2921},[1201],"2024-12-05","In our [November LinkedIn Live broadcast](https://www.linkedin.com/feed/update/urn:li:activity:7265408726696697857), we brought together field CTOs, developer advocates, and community leaders to discuss industry trends and showcase features making a difference in developer workflows.\n\nHere are 5 key highlights:\n\n### 1. AI adoption trends from the field\nOur field CTOs shared insights on how organizations are embracing AI across their development workflows. 
For instance, Field CTO Cherry Han highlighted how financial organizations are thinking beyond individual developer tools.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1035388263?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Ai Adoption Trends from the Field\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n\u003Cbr>\u003C/br>\nAndrew Hasker, Field CTO for Asia Pacific and Japan, offered valuable perspective on AI adoption.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1035388277?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"From Field CTOs\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 2. Security coverage that makes a difference\n\nStaff Developer Advocate Fernando Diaz demonstrated how GitLab's security scanners cover the complete application lifecycle, showing how easy it is to implement [comprehensive security scanning](https://about.gitlab.com/solutions/security-compliance/) with just a few lines of code.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1035388297?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Security Coverage\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 3. 
AI-powered language migration made simple\nIn an impressive demonstration, Senior Technical Marketing Manager Cesar Saavedra showed how GitLab Duo can assist in migrating applications between programming languages.\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1036170482?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"AI-Powered Language Migration Made Simple\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 4. Making DevSecOps work smarter\n\nDeveloper Advocate Abubakar Siddiq Ango showcased how GitLab's triage features can automate routine tasks.\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1035388290?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Making DEvOps Work Smarter\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n### 5. 
Community contributions making an impact\n\nDirector of Contributor Success Nick Veenhof shared how community contributions are shaping GitLab's development:\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1035395211?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Community Contributions Making an Impact\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## Watch on-demand\n\n[Watch the complete broadcast recording](https://www.linkedin.com/feed/update/urn:li:activity:7265408726696697857) for step-by-step demonstrations and insights from our experts. Also, be sure to [follow GitLab on LinkedIn](https://www.linkedin.com/company/gitlab-com) to stay up to date on our monthly broadcasts and get insights into our platform, DevSecOps, and software development.\n",[704,814,266,9,2922],"webcast",{"slug":2924,"featured":6,"template":684},"icymi-key-ai-and-security-insights-from-our-developer-community","content:en-us:blog:icymi-key-ai-and-security-insights-from-our-developer-community.yml","Icymi Key Ai And Security Insights From Our Developer Community","en-us/blog/icymi-key-ai-and-security-insights-from-our-developer-community.yml","en-us/blog/icymi-key-ai-and-security-insights-from-our-developer-community",{"_path":2930,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2931,"content":2937,"config":2941,"_id":2943,"_type":13,"title":2944,"_source":15,"_file":2945,"_stem":2946,"_extension":18},"/en-us/blog/improve-security-auditing-with-gitlab-operational-container-scanning",{"title":2932,"description":2933,"ogTitle":2932,"ogDescription":2933,"noIndex":6,"ogImage":2934,"ogUrl":2935,"ogSiteName":669,"ogType":670,"canonicalUrls":2935,"schema":2936},"Improve security auditing with GitLab 
Operational Container Scanning","Learn how to conduct container vulnerability scans post-deployment to raise awareness of existing threats and to track resolution of vulnerabilities.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664654/Blog/Hero%20Images/AdobeStock_1172300481.jpg","https://about.gitlab.com/blog/improve-security-auditing-with-gitlab-operational-container-scanning","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Improve security auditing with GitLab Operational Container Scanning\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Daniel Helfand\"}],\n        \"datePublished\": \"2025-01-29\",\n      }",{"title":2932,"description":2933,"authors":2938,"heroImage":2934,"date":1018,"body":2939,"category":814,"tags":2940},[2823],"Conducting security scans is a regular part of any software development process. Whether scanning source code (e.g., Java, Python, or other languages), configuration files (e.g., YAML files), or [container images](https://cloudnativenow.com/kubecon-cnc-na-2024/unlocking-the-full-potential-of-container-vulnerability-scans/), these scanning tools help development teams be proactive about understanding and addressing security threats. \n\nTraditionally, developers run these [security scans as part of CI/CD pipelines](https://docs.gitlab.com/ee/user/application_security/container_scanning/). By including these scans in CI/CD, every change to a project will be reviewed to see if any vulnerabilities are introduced. 
Understanding security concerns during development helps to assure that changes are addressed before they are deployed to a live environment, but there are many additional benefits to conducting container vulnerability scans post deployment as well.\n\n[GitLab's Operational Container Scanning](https://docs.gitlab.com/ee/user/clusters/agent/vulnerabilities.html) feature allows DevSecOps practitioners to run container vulnerability scans against containers running in a Kubernetes environment. The benefits of conducting a vulnerability scan on deployed containers include regularly scanning the images for new vulnerabilities that are discovered, tracking which environments certain vulnerabilities are deployed to, and also tracking the progress of resolving these vulnerabilities. \n\nThe scans can be configured to run on a regular cadence and on containers in specific namespaces on a Kubernetes cluster. The results of these scans are then sent back to GitLab projects to be viewed via the GitLab UI. To show exactly how the feature works, the next steps in this article will demonstrate how to apply the Operational Container Scanning feature using a GitLab project, sample application, and a Kubernetes cluster. \n\n## Prerequisites\n\nTo get started, you will need the following:   \n* [GitLab Ultimate account](https://about.gitlab.com/free-trial/)   \n* Kubernetes cluster that meets [GitLab’s Kubernetes version requirements](https://docs.gitlab.com/ee/user/clusters/agent/#supported-kubernetes-versions-for-gitlab-features)  \n* [kubectl CLI](https://kubernetes.io/docs/tasks/tools/#kubectl)\n* [helm CLI](https://helm.sh/docs/intro/install/)\n\nAdditionally, the walkthrough below will use a [GitLab project](https://gitlab.com/gitlab-da/tutorials/cloud-native/operational-container-scanning-tutorial) that can be forked into a [GitLab group](https://docs.gitlab.com/ee/user/group/) where you have appropriate permissions to carry out the steps that follow. 
\n\n## Deploy a sample application\n\nThe first action we will carry out is to deploy a sample application to the Kubernetes cluster you will use in this tutorial. Before running the `kubectl` command to deploy a sample application, take a moment to make sure your `KUBECONFIG` is set to the cluster you would like to use. Once you are set up to use your cluster, run the following command:\n\n```bash  \n$ kubectl apply -f\nhttps://gitlab.com/gitlab-da/tutorials/cloud-native/go-web-server/-/raw/main/manifests/go-web-server-manifests.yaml\n\nnamespace/go-web-server-dev created  \ndeployment.apps/go-web-server created  \nservice/go-web-server created  \n```\n\nWait for all the pods to be running in the `go-web-server-dev` namespace by running the command below:\n\n```bash  \n$ kubectl get pods -n go-web-server-dev -w  \n```\n\nYou should see output similar to what is shown below:\n\n```  \nNAME                            READY   STATUS    RESTARTS   AGE  \ngo-web-server-f6b8767dc-57269   1/1     Running   0          18m  \ngo-web-server-f6b8767dc-fkct2   1/1     Running   0          18m  \ngo-web-server-f6b8767dc-j4qwg   1/1     Running   0          18m  \n```\n\nOnce everything is running, you can set up your forked GitLab project to connect to your Kubernetes cluster and configure the Operational Container Scanning properties. \n\n## Connect Kubernetes cluster\n\nIn this section, you will learn how to connect a Kubernetes cluster to your GitLab project via the [GitLab Agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/). By configuring and installing the agent on your Kubernetes cluster, you will be able to also configure Operational Container Scanning. 
\n\n### Change the id property for GitLab’s Kubernetes agent\n\nIn the forked GitLab project you are using, change the [`id` property in the config.yaml file](https://gitlab.com/gitlab-da/tutorials/cloud-native/operational-container-scanning-tutorial/-/blob/main/.gitlab/agents/k8s-agent/config.yaml?ref\\_type=heads\\#L5) to match the group where you have forked the project. By doing this, you will configure the GitLab Agent for Kubernetes to pass information about your cluster back to your GitLab project. Make sure to commit and push this change back to the main branch of the forked project.\n\n### Navigate to Kubernetes clusters page of the project\n\nIn the GitLab UI, select the **Operate > Kubernetes clusters** tab of the forked project. Click the **Connect a cluster (agent)** button. Add the name of the agent to the input box under `Option 2: Create and register an agent with the UI` and then click **Create and register**. In this case, the name of the agent is `k8s-agent` since the folder under agents with the `config.yaml` file is named `k8s-agent`. Note that this folder can have any name that follows [Kubernetes naming restrictions](https://docs.gitlab.com/ee/user/clusters/agent/install/#create-an-agent-configuration-file) and that `k8s-agent` is just being used for simplicity.\n\n### Install the GitLab Kubernetes agent\n\nAfter registering the agent, you will be asked to run a helm command shown in the GitLab UI from your command line against your Kubernetes cluster. Before running the command, make sure your `KUBECONFIG` is still connected to the same cluster where you deployed the sample application. \n\nAfter running the helm command successfully, wait for all pods to be running in the `gitlab-agent-k8s-agent` namespace on your cluster. 
You can wait for everything to be running using the following command: \n\n```bash  \n$ kubectl get pods -n gitlab-agent-k8s-agent -w  \n``` \n\nYou should see similar output to what is shown below:\n\n```  \nNAME                                         READY   STATUS    RESTARTS   AGE  \nk8s-agent-gitlab-agent-v2-6bb676b6bf-v4qml   1/1     Running   0          10m  \nk8s-agent-gitlab-agent-v2-6bb676b6bf-xt7xh   1/1     Running   0          10m  \n```\n\nOnce the pods are running, your GitLab project should be connected to your Kubernetes cluster and ready to use the Operational Container Scanning feature. Before proceeding, continue running the `kubectl get pods -n gitlab-agent-k8s-agent -w` command to help explain concepts in the next section.\n\n## Operational Container Scanning\n\nIn addition to the pods for the GitLab agent running in the `gitlab-agent-k8s-agent` namespace, there should eventually be another pod named `trivy-scan-go-web-server-dev`. This pod will start and run on a regular cadence and conduct a container vulnerability scan using a tool named [trivy](https://trivy.dev/latest/) against the `go-web-server-dev` namespace where the sample application deployed earlier is running. \n\nThe Operational Container Scanning properties are defined in the [`config.yaml` file](https://gitlab.com/gitlab-da/tutorials/cloud-native/operational-container-scanning-tutorial/-/blob/main/.gitlab/agents/k8s-agent/config.yaml?ref_type=heads#L6-L10) used to set up the GitLab agent for Kubernetes on your cluster. \n\nThe two main properties to define are `cadence`, which specifies how frequently to run the container vulnerability scan, and also the `namespaces` property nested under `vulnerability_report`, which defines one or more namespaces to conduct the scan on. 
You can see how this looks in `config.yaml` below:\n\n```yaml  \ncontainer_scanning:  \n  cadence: '*/5 * * * *'  \n  vulnerability_report:  \n    namespaces:  \n      - go-web-server-dev  \n```\n\nThe cadence follows a cron format. In this case, `*/5 * * * *` means the scan will be run every five minutes, but this can be changed to any amount of time (e.g., every 24 hours).  \n\nThe vulnerabilities revealed by the scan for containers running in the `go-web-server-dev` namespace are sent back to your GitLab project. To see the results, go to the GitLab UI and select your forked project. Select the **Secure > Vulnerability report** option for the project and then select the **Operational vulnerabilities** tab to view scan results. \n\nThe scan results will include information on the severity of the common vulnerabilities and exposures (CVEs), along with the name of the image. By using the tag of the image to include the version of the deployed software along with what environment it is deployed to, you can begin to audit what known vulnerabilities exist in your Kubernetes environments and keep track of how they are being addressed by engineering teams.\n\nWatch this demo for more information:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/2FVQec2J-Ew?si=T6kwPMnPAGwKlkfP\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Share your feedback\n\nAdding GitLab’s Operational Container Scanning to your Kubernetes environments can help development, security, and infrastructure teams have a consistent picture of container security in Kubernetes environments across an organization. 
In addition to GitLab’s CI container scanning capabilities and the ability to [scan containers pushed to GitLab’s container registry](https://www.youtube.com/watch?v=Zuk7Axs-CRw), GitLab has solutions at every phase of the software development lifecycle to address container security concerns.\n\nYou can share your feedback on Operational Container Scanning in this [forum post](https://forum.gitlab.com/t/operational-container-scanning-feedback/119479), which we will share with our product and engineering teams supporting this feature. You can get started with Operational Container Scanning by reading the [documentation on the feature](https://docs.gitlab.com/ee/user/clusters/agent/vulnerabilities.html) and [starting a 60-day free trial of GitLab Ultimate](https://about.gitlab.com/free-trial/).",[680,835,9,814,678],{"slug":2942,"featured":6,"template":684},"improve-security-auditing-with-gitlab-operational-container-scanning","content:en-us:blog:improve-security-auditing-with-gitlab-operational-container-scanning.yml","Improve Security Auditing With Gitlab Operational Container Scanning","en-us/blog/improve-security-auditing-with-gitlab-operational-container-scanning.yml","en-us/blog/improve-security-auditing-with-gitlab-operational-container-scanning",{"_path":2948,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2949,"content":2955,"config":2961,"_id":2963,"_type":13,"title":2964,"_source":15,"_file":2965,"_stem":2966,"_extension":18},"/en-us/blog/inside-the-improved-ci-logs-management-experience-for-multi-line-commands",{"title":2950,"description":2951,"ogTitle":2950,"ogDescription":2951,"noIndex":6,"ogImage":2952,"ogUrl":2953,"ogSiteName":669,"ogType":670,"canonicalUrls":2953,"schema":2954},"Inside the improved CI logs management experience for multi-line commands","Reviewing log output for CI/CD jobs with multi-line commands is now easier than ever. 
Find out why, how to configure your pipelines, and what's ahead.\n\n","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099499/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_639935439_3oqldo5Yt5wPonEJYZOLTM_1750099498739.jpg","https://about.gitlab.com/blog/inside-the-improved-ci-logs-management-experience-for-multi-line-commands","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Inside the improved CI logs management experience for multi-line commands\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Romuald Atchadé\"}],\n        \"datePublished\": \"2024-01-25\"\n      }",{"title":2950,"description":2951,"authors":2956,"heroImage":2952,"date":2958,"body":2959,"category":769,"tags":2960},[2957],"Romuald Atchadé","2024-01-25","Improving the GitLab CI/CD log experience for jobs with multi-line commands has been a long-requested feature. With the latest release of GitLab and GitLab Runner, it's now easier to work with the log section for jobs with multi-line commands. In this post, we will describe the experience with the new feature, show you how to enable the new log output in your pipelines, and discuss key points regarding CI/CD script execution and log output in various shells, such as Bash and PowerShell.\n\n## Overview of multi-line commands\n\nFirst, it’s helpful to describe what we mean by a CI job with multi-line commands. In GitLab CI the script keyword is used to specify commands to execute for a CI job. In the example below, the build-job has a single command, a basic echo statement, to execute in the script block. 
\n\n```\n## A pipeline with a single line command in the script block for the build-job\n\nbuild-job:\n  stage: build\n  script:\n    - echo \"this is the script to run for the build job\"\n\n```\n\nIf you were to run this pipeline, then the log output in the UI would display as follows:\nLine 17 - GitLab CI automatically generates a log entry for the command that you specify in the script block.\nLine 18 - This is the output of the command that was executed.\n\n![Ci log management - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099524/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750099524655.png)\n\nNow as you can imagine, the script that you define in the script block will likely be more complex than the example provided and could very well span multiple lines in the CI/CD pipeline file. \n\n```\n## A pipeline with a multi-line command in the script block for the build-job\n\nbuild-job:\n  stage: build\n  script:\n       - |\n         echo \"this is a multi-line command\"  # a simple echo statement\n         ls  \n\n```\n\nIf you were to run this pipeline, then the log output in the UI would display as follows:\n\nLine 17 - As in the previous example, GitLab CI automatically generates a log entry for the command that you specify in the script block. You will notice that line 17 only includes the first command in the script block. This makes it more difficult to debug an issue with script execution as you will need to refer back to the source pipeline file to see exactly what script was executed.\n\n![CI log management - image 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099525/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750099524656.png)\n\n## So what’s new?\n\nStarting in GitLab 16.7 and GitLab Runner 16.7, you can now enable a feature flag titled FF_SCRIPT_SECTIONS, which will add a collapsible output section to the CI job log for multi-line command script blocks. 
This feature flag changes the log output for CI jobs that execute within the Bash shell.\n\n![CI log management - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099525/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099524658.png)\n\nLine 17: Unlike the previous examples, the first thing you will notice in the screenshot above is that the log entry for the multi-line command is collapsed by default.\n\nSingle-line commands do not display in a collapsible element.\n\nFor multi-line scripts the multi-line command is now a collapsible element, so now, when you uncollapse the log entry for line 17, then the log will display all of the commands that were executed in the script block.\n\n![CI log management - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099525/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099524659.png)\n\nThere is also the [`custom collapsible section`](https://docs.gitlab.com/ee/ci/jobs/#custom-collapsible-sections) feature, which in combination with this new multi-command output capability does provide you additional flexibility for displaying log output in the UI. Here is how you can use the two features to change the log output. 
\n\n```\n## A pipeline with a multi-line command in the script block for the build-job\n\nvariables:\n  FF_PRINT_POD_EVENTS: \"true\"\n  FF_USE_POWERSHELL_PATH_RESOLVER: \"true\"\n  FF_SCRIPT_SECTIONS: \"true\"\n\ncollapsible_job_multiple:\n  stage: build\n  script:\n    - |\n      echo \"{\n        'test': 'data',\n        'test2': 'data2',\n      }\"\n    - |\n      echo \"{\n        'test': 'data',\n        'test2': 'data2',\n      }\"\n    - echo -e \"\\033[0Ksection_start:`date +%s`:my_first_section\\r\\033[0KHeader of the 1st collapsible section\"\n    - echo 'this line should be hidden when collapsed'\n    - |\n      echo \"{\n        'test': 'data',\n        'test2': 'data2',\n      }\"\n    - echo -e \"\\033[0Ksection_start:`date +%s`:second_section\\r\\033[0KHeader of the 2nd collapsible section\"\n    - echo 'this line should be hidden when collapsed'\n    - echo -e \"\\033[0Ksection_end:`date +%s`:second_section\\r\\033[0K\"\n    - echo -e \"\\033[0Ksection_end:`date +%s`:my_first_section\\r\\033[0K\"\n\n```\n\nIf you were to run this pipeline with the FF_SCRIPT_SECTIONS feature flag set to false, then the log output would be as depicted in the following screenshot.\n\n![CI log management - image 5](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099524/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099524661.png)\n\nBut, if you were to run this pipeline with the FF_SCRIPT_SECTIONS feature flag set to true, then the log output would be as depicted in the following screenshot.\n\n![CI log management - image 6](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099525/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099524663.png)\n\n## What about other shells?\n\nAs of the 16.7 release, the collapsible output section in the CI job log for multi-line command script blocks is only visible for CI/CD jobs that are executed with the Bash shell. 
CI/CD jobs executed with PowerShell are not currently supported. We plan to add this [capability](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4494) in a future release. \n\n## What are our future plans?\n\nA few features are still needed to improve the CI/CD job log output, and the `timestamp` for each log line is one of them. This addition will add missing features such as command/section duration.\n\n> To learn more about GitLab CI/CD features, refer to the official [CI/CD documentation](https://docs.gitlab.com/ee/ci/index.html). \n\n_Disclaimer: This blog contains information related to upcoming products, features, and functionality. It is important to note that the information in this blog post is for informational purposes only. Please do not rely on this information for purchasing or planning purposes. As with all projects, the items mentioned in this blog and linked pages are subject to change or delay. The development, release, and timing of any products, features, or functionality remain at the sole discretion of GitLab._\n",[771,772,108,9],{"slug":2962,"featured":90,"template":684},"inside-the-improved-ci-logs-management-experience-for-multi-line-commands","content:en-us:blog:inside-the-improved-ci-logs-management-experience-for-multi-line-commands.yml","Inside The Improved Ci Logs Management Experience For Multi Line 
Commands","en-us/blog/inside-the-improved-ci-logs-management-experience-for-multi-line-commands.yml","en-us/blog/inside-the-improved-ci-logs-management-experience-for-multi-line-commands",{"_path":2968,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2969,"content":2975,"config":2980,"_id":2982,"_type":13,"title":2983,"_source":15,"_file":2984,"_stem":2985,"_extension":18},"/en-us/blog/integrate-external-security-scanners-into-your-devsecops-workflow",{"title":2970,"description":2971,"ogTitle":2970,"ogDescription":2971,"noIndex":6,"ogImage":2972,"ogUrl":2973,"ogSiteName":669,"ogType":670,"canonicalUrls":2973,"schema":2974},"Integrate external security scanners into your DevSecOps workflow","Learn how to bring Snyk scan results into the merge request widget by parsing JSON artifacts and leveraging the SARIF file format.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098768/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%282%29_1khno1AUtxuL6zzmEmjK7v_1750098768560.png","https://about.gitlab.com/blog/integrate-external-security-scanners-into-your-devsecops-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Integrate external security scanners into your DevSecOps workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sam Morris\"}],\n        \"datePublished\": \"2024-04-08\",\n      }",{"title":2970,"description":2971,"authors":2976,"heroImage":2972,"date":2977,"body":2978,"category":814,"tags":2979},[830],"2024-04-08","Each day you build software there is another opportunity for security vulnerabilities to creep into production. 
So it is becoming more important than ever to shift security left and put security tests and the vulnerabilities they detect at the forefront of your software development lifecycle.\n\nWhile GitLab offers a wide range of different security scanners, our AI-powered DevSecOps platform provides full visibility into the security of your software. We seek to allow you to not only run scans, but also to view results, bake in approval processes via merge request policies, and display current vulnerabilities in your default branch for future triage in our Vulnerability Report.\n\n## How do security scans run?\n\nGitLab Ultimate displays your vulnerabilities directly in the merge request widget and it updates on every commit. These scans typically run via jobs in a pipeline, whether in the project’s `.gitlab-ci.yml` pipeline or in a separately-controlled [compliance pipeline](https://docs.gitlab.com/ee/user/group/compliance_pipelines.html), [security policy](https://docs.gitlab.com/ee/user/application_security/policies/scan-execution-policies.html), or [included pipeline configuration](https://docs.gitlab.com/ee/ci/yaml/includes.html) from a separate .yml file. You can run GitLab’s native security scanners or you can run an external scanner. For this blog post, I took running Snyk scans for a spin to see how I could feed the dependency scan results as vulnerability records back into GitLab. Additionally, I utilized a Static Analysis Results Interchange Format (SARIF) converter to read SAST results directly from Snyk without custom scripting. \n\n## Using external scanners\n\nGitLab is highly extensible, and the platform allows for you to integrate myriad tools. You can use one of our built-in security scanners, or use an external scanner via a job in a pipeline or policy. 
GitLab serves as a single platform for governance and enforcement, allowing you to bring your own scanners and see the results early in the DevSecOps lifecycle.\n\nAll you have to do to get started is run a security job, and from there you can obtain the results in the merge request and the vulnerability report.\n\n## Run an external scan from GitLab CI\n\nIn this example pipeline, I run a Snyk scan externally in the test stage in a job I overrode called `gemnasium-maven-dependency_scanning`. First, I install the required packages (npm, Maven, Python3, and Snyk) and then I authorize with my SNYK_TOKEN variable saved in the variables section of my project. Finally, I run a `snyk test` command with the Snyk CLI and output the results to the JSON. This saves my results to the snyk_data_file.json, which I will parse in a script detailed in the next section and save to the required artifact file `gl-dependency-scanning-report.json`.\n\n```\nstages:\n  - test\n\nvariables:\n\ninclude:\n  - template: Jobs/Dependency-Scanning.gitlab-ci.yml  \n\ngemnasium-maven-dependency_scanning:\n  image: node:latest\n  stage: test\n  services:\n  - openjdk:11-jre-slim-buster\n  before_script:\n    - apt-get update\n    - apt-get install default-jdk -y\n  script:\n    # Install npm, snyk, and maven\n    - npm install -g npm@latest\n    - npm install -g snyk\n    - npm install maven\n    - npm install python3\n    # Run snyk auth, snyk monitor, snyk test to break build and out report\n    - snyk auth $SNYK_TOKEN\n    - chmod +x mvnw\n    - snyk test --all-projects --json-file-output=snyk_data_file.json || true\n    - python3 convert-snyk-to-gitlab.py\n\n  # Save report to artifacts\n  artifacts:\n    when: always\n    paths: \n      - gl-dependency-scanning-report.json\n\n```\n\n### Parse the JSON\n\nYou can see scan results in the merge request widget from any external scanner as long as the artifact of the successful security job is named appropriately, for example, 
`gl-dependency-scanning-report.json`.  \n\nHere is an example script that converts the Snyk JSON output to the GitLab JSON output. In this example, I open the Snyk data file and load the vulnerability data. I create a new list of dependency files and a new list of vulnerabilities that contain data GitLab needs to display in the vulnerability records, such as the identifier, severity, category, description, and location. I added a few placeholder sections for required fields that I did not need to display in my record. Finally, I saved the contents I parsed out to a new JSON file called `gl-dependency-scanning-report.json`, which is the required name for the file to be read by GitLab and have its contents displayed in the widget.\n\n```\nimport json\nfrom types import SimpleNamespace\n\nwith open(\"snyk_data_file.json\") as snyk_data_file:\n    snyk_data = json.load(snyk_data_file, object_hook=lambda d: SimpleNamespace(**d))\n\ngitlab_vulns = []\ndependency_files = []\nfor i in snyk_data:\n    dependency_files.append({\"path\": i.path, \"package_manager\": i.packageManager, \"dependencies\": []})\n    for v in i.vulnerabilities:\n        gitlab_identifiers = []\n        for vuln_type, vuln_names in v.identifiers.__dict__.items():\n            if vuln_names: \n                for vuln_name in vuln_names:\n                    gitlab_identifiers.append({\"type\": vuln_type, \"name\": vuln_name, \"value\": vuln_name.partition(\"-\")[2]})\n                gitlab_vulns.append({\"id\": v.id, \"category\": \"dependency_scanning\", \"severity\": v.severity.capitalize(), \"identifiers\": gitlab_identifiers, \"description\": v.description, \"location\": {\"file\": i.displayTargetFile, \"dependency\": {\"package\": {\"name\": \"PLACEHOLDER\"}, \"version\": \"PLACEHOLDER\"}}})\n\n# Dummy data for scan and dependency files \nfull_json = {\"version\": \"15.0.6\", \"dependency_files\": dependency_files, \"scan\": {\"analyzer\": {\"id\": \"snyk\", \"name\": \"Snyk\", \"vendor\": 
{\"name\": \"Snyk\"}, \"version\": \"1.0.2\"}, \"scanner\": {\"id\": \"my-snyk-scanner\", \"name\": \"My Snyk Scanner\", \"version\": \"1.0.2\", \"vendor\": {\"name\": \"Snyk\"}}, \"end_time\": \"2022-01-28T03:26:02\", \"start_time\": \"2020-01-28T03:26:02\", \"status\": \"success\", \"type\": \"dependency_scanning\"}, \"vulnerabilities\": gitlab_vulns}\n\nwith open(\"gl-dependency-scanning-report.json\", \"w\") as gitlab_file:\n    json.dump(full_json, gitlab_file, default=vars)\n\n```\n\nNow, the vulnerability findings are visible in the merge request widget.\n\n![security scanning detection](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098776/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098776479.png)\n\n## What are SARIF and the SARIF converter?\n\nSARIF is a file format for the output of static analysis tools. It is incredibly useful when leveraging different security scanners, as all of their output is formatted the same way. This allows for a generic, repeatable, and scalable approach to application security.\n\nThere is a community-maintained [SARIF converter](https://gitlab.com/ignis-build/sarif-converter), which takes SARIF files and converts them into ingestible reports. It supports many scanners, including Snyk. This converter works for both SAST and code quality findings. We are going to focus on SAST for this blog.\n\n### Use a SARIF converter to get SAST results\n\nTo leverage the SARIF results, first I trigger a Snyk scan as we did in the previous example, but I save the output to a SARIF file. 
After this, I use the aforementioned converter to create a new JSON file that I save as a report.\n\n```\nsnyk:\n  image: node:latest\n  stage: test\n  services:\n  - openjdk:11-jre-slim-buster\n  before_script:\n    - apt-get update\n    - apt-get install default-jdk -y\n    - wget -O sarif-converter https://gitlab.com/ignis-build/sarif-converter/-/releases/permalink/latest/downloads/bin/sarif-converter-linux\n    - chmod +x sarif-converter\n  script:\n    # Install npm, snyk, and maven\n    - npm install -g npm@latest\n    - npm install -g snyk\n    - npm install maven\n    # Run snyk auth, snyk monitor, snyk test to break build and out report\n    - snyk auth $SNYK_TOKEN\n    - chmod +x mvnw\n    - snyk test --all-projects --sarif-file-output=snyk.sarif  || true\n    - ./sarif-converter --type sast snyk.sarif snyk.json\n\n  artifacts:\n    reports:\n      sast: snyk.json\n\n```\n\nAfter saving the JSON as an artifact, the results are visible in the merge request widget.\n\n![security scanning - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098776/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098776479.png)\n\n## Get started\n\nIn this blog post, you learned how to use both custom scripting and a SARIF converter to view external scanner vulnerabilities in the GitLab merge request widget. These operations can be completed from the pipeline as shown, but also from compliance pipelines and pipeline execution policies, which allow for the enforcement of external scanners. With GitLab Ultimate. 
you have access to a full DevSecOps platform that allows you to use our scanners or bring your own, but build a shift-left workflow that empowers developers to remediate vulnerabilities before they hit production.\n\n> [Trial GitLab Ultimate today](https://gitlab.com/-/trials/) to begin merging external scanners.\n\n## More security scanning resources\n\n* [Security scanner integration documentation](https://docs.gitlab.com/ee/development/integrations/secure.html)\n* [How to integrate custom security scanners into GitLab](https://about.gitlab.com/blog/how-to-integrate-custom-security-scanners-into-gitlab/)\n* [GitLab Trust Center](https://about.gitlab.com/security/)\n",[814,9,1041],{"slug":2981,"featured":6,"template":684},"integrate-external-security-scanners-into-your-devsecops-workflow","content:en-us:blog:integrate-external-security-scanners-into-your-devsecops-workflow.yml","Integrate External Security Scanners Into Your Devsecops Workflow","en-us/blog/integrate-external-security-scanners-into-your-devsecops-workflow.yml","en-us/blog/integrate-external-security-scanners-into-your-devsecops-workflow",{"_path":2987,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":2988,"content":2994,"config":2999,"_id":3001,"_type":13,"title":3002,"_source":15,"_file":3003,"_stem":3004,"_extension":18},"/en-us/blog/integrating-azure-devops-scm-and-gitlab",{"title":2989,"description":2990,"ogTitle":2989,"ogDescription":2990,"noIndex":6,"ogImage":2991,"ogUrl":2992,"ogSiteName":669,"ogType":670,"canonicalUrls":2992,"schema":2993},"How to integrate Azure DevOps repositories with GitLab","How to keep your code in an Azure DevOps repository and run CI/CD with GitLab pipelines.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749664363/Blog/Hero%20Images/aleksey-kuprikov.jpg","https://about.gitlab.com/blog/integrating-azure-devops-scm-and-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        
\"headline\": \"How to integrate Azure DevOps repositories with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2020-07-09\"\n      }",{"title":2989,"description":2990,"authors":2995,"heroImage":2991,"date":2996,"body":2997,"category":769,"tags":2998},[1080],"2020-07-09","\n\nRecently we’ve been asked by several people if it is possible to integrate between Azure DevOps/VSTS (Visual Studio Team Services) source code management and GitLab. They are looking for a modern [CI/CD solution](/topics/ci-cd/) like GitLab, but as part of a gradual transition they still need to keep managing their code in Azure DevOps/VSTS. \n\n## Does Azure DevOps integrate with GitLab?\n\nYes, Azure DevOps Services does integrate with GitLab.\n\nAlthough we of course recommend using GitLab CI/CD together with our built-in GitLab SCM, this integration of Azure DevOps source code management and GitLab makes it possible to migrate slowly from Azure DevOps by leaving your code in the Azure [DevOps](/topics/devops/) repository while you adopt GitLab CI/CD. This integration is possible with both the self-managed and SaaS versions of GitLab. The integration works only with Azure DevOps/VSTS git version control. TFVC (Team Foundation Version Control) isn’t supported. \n\n### In GitLab, there are two features that enable this integration:  \n\n[GitLab CI/CD for external repositories](https://docs.gitlab.com/ee/ci/ci_cd_for_external_repos/) \n\n[Remote repository mirroring](https://docs.gitlab.com/ee/user/project/repository/repository_mirroring.html)\n\n## What is a repository in DevOps?\n\nCode repositories in tools like GitLab and Azure exist to house all source code. Sometimes these repositories are referenced as a DevOps “repo” or a source repository. Whatever the title, code repositories provide a place where developers can work to ensure high code quality. 
\nGitLab uses a [git-based repository](/solutions/source-code-management/) for source code management with version control. It lets GitLab users perform code reviews and easily solve developer issues.\n\n## What is the difference between GitLab and Azure DevOps?\n\nAzure DevOps has a range of services for managing the development lifecycle. Some of its main features include agile planning boards, private git repos for source code management, and Azure pipelines.\n\nGitLab is a single platform for the entire DevSecOps lifecycle and includes the following:\n\n- Planning and collaboration\n- Source code management\n- Code reviews\n- CI/CD pipelines\n- Constant security scanning and monitoring\n- Advanced deployments\n- Vulnerability management\n\nGitLab can help manage the entire DevSecOps lifecycle to deliver software quickly and efficiently while bolstering security and compliance.\n\n## How do I connect to Azure from GitLab?\n\nIt may take some time to fully move over from Azure to GitLab for source code management. To smooth the transition, there are simple steps to connect to the Azure integration from GitLab.\n\n1. Create a new project in GitLab by clicking the New Project button  ![Create new project ](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado1.png){: .large.center}\n\n2. Choose the ‘CI/CD for external repo’ tab, and click on Repo by URL.  ![CI/CD for external repo](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado2.png){: .large.center}\n\n3. Open your repository in Azure DevOps and click Clone  ![Getting clone url ](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado3.png){: .large.center}\n\n4. Copy the URL. If your repository is private, you will need to generate Git credentials – just click this button and copy the username and password.  ![Credentials](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado4.png){: .large.center}\n\n5. 
Paste the URL in GitLab under the Git repository URL, give it a name, set the visibility level, and click create project. Add the username and password in case your Azure DevOps repository is private. Note: The repository must be accessible over http://, https:// or git://. When using the http:// or https:// protocols, please provide the exact URL to the repository. HTTP redirects will not be followed.  ![Create project form](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado5.png){: .large.center}\n\n6. Your project is now successfully Mirrored to GitLab. Now branches, tags, and commits will be synced automatically to GitLab. \n\n7. To configure a CI/CD pipeline there are two options:\n\nBefore pushing your first commit, open the CI/CD settings in GitLab and enable Auto DevOps.  It will set the CI/CD configuration, so each commit in Azure Repos will trigger a CI/CD  pipeline in GitLab which will build, test, and deploy your app.  ![Auto DevOps settings](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado6.png){: .shadow.large.center}\n \nAlternatively, in case you want to define the pipeline configuration yourself instead of using the Auto DevOps, add [.gitlab-ci.yml](https://docs.gitlab.com/ee/ci/yaml/) file to  your repository root directory. The Yaml code should include your [CI/CD definitions](/blog/guide-to-ci-cd-pipelines/). Once this file is included in the root directory a CI/CD pipeline will be triggered for each commit. If you are not familiar with .gitlab-ci.yml, start by creating a file with the name .gitlab-ci.yml and paste the below code to it. This code includes build and test stages, and a job that displays text to the console in each stage. Later on you can add additional scripts to each job, and also add additional jobs and stages. 
To create more complex pipelines, you can [use the pipeline templates](https://docs.gitlab.com/ee/ci/yaml/#includetemplate) that are [shipped with GitLab](https://gitlab.com/gitlab-org/gitlab/tree/master/lib/gitlab/ci/templates) instead of starting it from scratch.\n\n```\nstages:\n  - build\n  - test \n  \nbuild:\n  stage: build\n  script:\n    - echo \"Build job\"\n\ntest:\n  stage: test\n  script:\n    - echo \"Test job\"\n```\n\nThat’s it, you are all set! \n\n## Suggested development flow \n\n![Development flow diagram](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado_7_2.png){: .shadow.large.center}\n\n1. CODE (Developer IDE of choice) Developer uses the favorite IDE to develop code, clones the repo to the workstation and creates a branch.  ![Visual Studio Code](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado8.png){: .shadow.large.center}\n\n2. COMMIT (GIT) After the feature is developed/the bug is fixed, the developer pushes the work to the Azure Repository server.  ![Azure DevOps Repos](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado9.png){: .shadow.large.center}\n\n3. BUILD (GitLab) The branch with the commit history will be mirrored to GitLab. The CI/CD pipeline will be triggered. The pipeline will build the code.  ![GitLab pipeline graph](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado10.png){: .shadow.large.center}\n\n    Artifacts will be created, and be available for download.  ![Artifacts](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado11.png){: .shadow.large.center}\n\n    If Auto DevOps is enabled, a container image will be created and be pushed to the built-in Container Registry.  ![GitLab Container Registry](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado12.png){: .shadow.large.center}\n\n    In case a package registry is enabled in the project, packages will be published to the designated package manager.  
![GitLab Package Registry](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado13.png){: .shadow.large.center}\n\n4. TEST (GitLab) Security scans, license scans, and other tests are executed as part of the CI pipeline.  ![GitLab scans](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado14.png){: .shadow.large.center}\n\n5. REVIEW & PULL REQUEST (GitLab & Azure DevOps repos) Review pipeline results in GitLab and if the pipeline passed without errors, and the new change hasn’t introduced new vulnerabilities, the developer creates a pull request in Azure DevOps. A code review is started and the developer might need to make a few changes before merging to master. Each commit will trigger a CI/CD pipeline in GitLab.  ![Azure DevOps pull request](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado15.png){: .shadow.large.center}\n\n6. MERGE (Azure DevOps Repos and GitLab) The Azure DevOps pull request is approved and the branch will be merged to the master branch in the Azure DevOps Repository.\n\nDepending on your pipeline configuration, this merge to the master branch will trigger the CI/CD pipeline in GitLab to validate the merge results, build new packages and container images, and then deploy them.  ![GitLab CI/CD pipeline graph](https://about.gitlab.com/images/blogimages/ado_and_gitlab/ado16.png){: .shadow.large.center}\n\n## Development workflow demonstration \n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube.com/embed/HfpP2pEmkoM\" frameborder=\"0\" allow=\"accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n## A solution worth trying \n\nGitLab offers a leading source code management and CI/CD solution in one application which many [GitLab customers](/customers/) use together because of the power of this combination. 
However, we know that sometimes there are constraints that do not allow teams to migrate their repository to GitLab SCM, at least not right away. For these situations, even if it is only temporary, we offer the capability of GitLab CI/CD for external repositories illustrated here. \n\n\n**Read more about GitLab CI/CD:**\n\n[Forrester report compares between leading CI/CD tools](https://about.gitlab.com/analysts/forrester-cloudci19/)\n\n[Autoscale GitLab CI with AWS Fargate](/blog/introducing-autoscaling-gitlab-runners-on-aws-fargate/)\n\n[Case Study - how Goldman Sachs improved from 1 build every two weeks to over a thousand per day](https://about.gitlab.com/customers/goldman-sachs/)\n\nCover image by [Aleksey Kuprikov](https://unsplash.com/@alekskuprfilmz) on [Unsplash](https://unsplash.com/)\n{: .note}\n\n\n",[108,980,9],{"slug":3000,"featured":6,"template":684},"integrating-azure-devops-scm-and-gitlab","content:en-us:blog:integrating-azure-devops-scm-and-gitlab.yml","Integrating Azure Devops Scm And Gitlab","en-us/blog/integrating-azure-devops-scm-and-gitlab.yml","en-us/blog/integrating-azure-devops-scm-and-gitlab",{"_path":3006,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3007,"content":3013,"config":3019,"_id":3021,"_type":13,"title":3022,"_source":15,"_file":3023,"_stem":3024,"_extension":18},"/en-us/blog/introducing-ci-components",{"title":3008,"description":3009,"ogTitle":3008,"ogDescription":3009,"noIndex":6,"ogImage":3010,"ogUrl":3011,"ogSiteName":669,"ogType":670,"canonicalUrls":3011,"schema":3012},"Introducing CI/CD components and how to use them in GitLab","Learn the main benefits for using CI/CD components in your CI/CD pipelines and how to achieve them.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667676/Blog/Hero%20Images/buildingblocks.jpg","https://about.gitlab.com/blog/introducing-ci-components","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        
\"headline\": \"Introducing CI/CD components and how to use them in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2023-07-10\"\n      }",{"title":3008,"description":3009,"authors":3014,"heroImage":3010,"date":3016,"body":3017,"category":769,"tags":3018},[3015],"Dov Hershkovitch","2023-07-10","\nWelcome to the third blog in our series on GitLab's CI/CD components! If you haven't already, we encourage you to read \"[How to build reusable CI/CD templates](https://about.gitlab.com/blog/how-to-build-reusable-ci-templates/)\" and \"[Use inputs in includable files](https://about.gitlab.com/blog/use-inputs-in-includable-files/)\" to gain a comprehensive understanding of these exciting new capabilities. In this blog post, we'll dive in and explore the power of GitLab's CI/CD components in revolutionizing CI/CD workflows. We'll also provide a glimpse into the future of GitLab's CI/CD ecosystem, including the upcoming release of the [CI/CD catalog](https://docs.gitlab.com/ee/architecture/blueprints/ci_pipeline_components/), a framework containing a collection of these components. With these moves, GitLab is taking a significant step towards streamlining pipeline configurations and enhancing reusability.\n\n### CI/CD components\nIn [GitLab 16.1](https://about.gitlab.com/releases/2023/06/22/gitlab-16-1-released/), an exciting experimental feature called CI/CD components was introduced. CI/CD components are reusable, single-purpose building blocks that abstract away pipeline configuration units.\n\nBy leveraging the power of CI/CD components, users can unlock several key benefits:\n1. **Reusability and abstraction.** CI/CD components allow pipelines to be assembled using abstractions instead of defining all the details in one place. With components encapsulating implementation details, developers can focus on composing pipelines using pre-built, reusable blocks. 
This approach promotes modularity, code reusability, and simplifies pipeline maintenance.\n2. **Flexibility with input.** Components support input parameters, enabling customization based on pipeline contexts, making them adaptable and reusable across various pipeline stages. Developers gain the ability to build a dynamic CI/CD catalog that is tagged and versioned, providing better control and compatibility. Developers can reference specific component versions, ensuring stability and reproducibility. By leveraging version tags, teams can maintain consistency in their pipelines while easily upgrading to newer versions when desired.\n4. **High-quality standards through testing.** Testing components as part of the development workflow to ensure quality maintains high standards is strongly recommended. By incorporating testing into the CI/CD process, developers can verify the reliability and functionality of components, identify and fix issues early on, and deliver more robust and dependable pipelines.\n5. **The CI/CD catalog.** A centralized repository of components, the CI/CD catalog is set to be released soon, and will act as a treasure trove of components curated to cover a wide range of use cases. This centralized repository offers developers a one-stop shop for discovering, integrating, and sharing components. 
Teams can benefit from a growing catalog of pre-built, quality-tested components, saving time and effort in configuring their pipelines.\n\nIn the previous blog posts, we discussed the main benefits for the first two points (which are also available with CI/CD templates), but now let's dig deeper into components and how they could revolutionize the way you construct your pipelines.\n\n### Testing a CI/CD component\nAs software development continues to evolve, ensuring the reliability and quality of code components becomes increasingly vital.\n\nOne of the main benefits of using components is the ability to thoroughly test components before software is officially released, enabling a more robust and streamlined development process. In our context, a released component is versioned and will follow a structured syntax, allowing for seamless integration within pipelines. \n\n```yaml\ninclude:\n  - components: /path/to/project@\u003Cversion> \n```\nOne of the unique benefits of our CI/CD components is the flexibility they offer. DevSecOps teams can opt in for an \"unofficial\" release by appending `@commit_SHA`, allowing them to experiment and iterate on their code before making it an official release.\n\n```yaml\ninclude:\n  - components: /path/to/project@\u003Ccommit_SHA> \n```\nTo make a component an official release, users must tag it, essentially creating a versioned snapshot. The tagged release will then be made available in our comprehensive CI/CD catalog (launching soon), providing users with easy access to a range of thoroughly tested and approved components. To ensure the stability and reliability of your CI/CD components, it is crucial to thoroughly test them. DevSecOps teams can leverage the power of our pipeline by utilizing the commit_SHA identifier to run comprehensive tests. 
If the pipeline successfully passes all tests, they can proceed to tag the component, signifying its readiness for release.\n\nBy configuring a release job based on the tagged version, DevSecOps teams can confidently incorporate the official component into their projects, knowing that it has undergone testing and validation. To learn more about how to test components, you can check out our [documentation](https://docs.gitlab.com/ee/ci/components/#test-a-component) or watch this walkthrough video:\n\n\u003Cfigure class=\"video_container\">\n\u003Ciframe width=\"1870\" height=\"937\" src=\"https://www.youtube.com/embed/Vw8-ce8LNBs\" title=\"\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\u003C/figure>\n\n### Versioning and tagging\nAs mentioned in the previous section, DevSecOps teams can leverage the `@version` or the `@commit_SHA` to refer to a component in their pipeline. Another option to refer to a component is by leveraging the `@latest`.\n\n```yaml\ninclude:\n  - components: /path/to/project@latest\n```\nThis will use the latest official (tagged) available components. When used in a pipeline in combination with reliable tests, you can guarantee that your components used in a pipeline will always be tested and verified.\n\n### On the horizon: CI/CD catalog\nOne of the biggest benefits of using components is yet to be seen and will be available with the launch of our CI/CD catalog. The catalog will allow users to search, find, and understand how to use components that are available across their organization, setting a framework for them to collaborate on pipeline constructs so that they can be evolved and improved over time. Stay tuned!\n\n### Dogfooding components \nAt GitLab, we believe in [dogfooding our own product](https://handbook.gitlab.com/handbook/values/#dogfooding). 
To demonstrate the power and practicality of CI/CD components, we have converted some of our GitLab templates into components and asked our internal team to use them and provide additional feedback. By doing so, we are actively using and testing components in real-world scenarios, uncovering insights, and continuously improving their functionality. In this [group](https://gitlab.com/gitlab-components), we’ve converted Code Quality, Container Scanning and SAST templates into CI/CD components and asked internal teams to use them.\n\nThrough this dogfooding process, we are not only validating the effectiveness of CI/CD components but also gaining invaluable experience and feedback to refine and enhance our offering. It's a testament to our commitment to providing practical and reliable solutions for our users. You can view the ongoing discussions between the internal teams in this [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/390656).\n\n### Call for action\nThe CI/CD component catalog is currently in an experimental phase, so we advise against using it in a production environment at this time. There is a high probability of changes being made to it. We are currently working on reorganizing the folder structure of the components to prepare for the launch of the CI/CD catalog. You can stay updated on our progress by following our [epic](https://gitlab.com/groups/gitlab-org/-/epics/10728), or let us know what you think in this dedicated [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/407556).\n\n### What's next\nGitLab's CI/CD component catalog and its accompanying CI/CD components feature are ushering in a new era of streamlined pipeline configurations. By embracing reusability, abstraction, input flexibility, versioning, and a centralized catalog, developers can build efficient, adaptable, and maintainable CI/CD workflows. 
The CI/CD component catalog empowers teams to accelerate their software delivery, collaborate effectively, and leverage the full potential of GitLab's CI/CD capabilities.\n\nStay tuned for the launch of the CI/CD catalog, where you'll gain access to an extensive collection of components, unlocking new possibilities for your pipelines. GitLab remains committed to empowering developers with cutting-edge tools, driving innovation, and simplifying the complexities of modern software development.\n\n> Learn more about the CI/CD Catalog and components:\n>  \n> - [CI/CD Catalog goes GA: No more building pipelines from scratch](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/)\n> \n> - [A CI/CD component builder's journey](https://about.gitlab.com/blog/a-ci-component-builders-journey/)\n>\n> - [FAQ: GitLab CI/CD Catalog](https://about.gitlab.com/blog/faq-gitlab-ci-cd-catalog/)\n>\n> - [Documentation: CI/CD components and CI/CD Catalog](https://docs.gitlab.com/ee/ci/components/)\n> \n\nCover image by [Alexander Grey](https://www.pexels.com/photo/assorted-color-bricks-1148496/) on [Pexels](https://www.pexels.com).\n{: .note}\n",[9,108,680],{"slug":3020,"featured":6,"template":684},"introducing-ci-components","content:en-us:blog:introducing-ci-components.yml","Introducing Ci Components","en-us/blog/introducing-ci-components.yml","en-us/blog/introducing-ci-components",{"_path":3026,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3027,"content":3033,"config":3038,"_id":3040,"_type":13,"title":3041,"_source":15,"_file":3042,"_stem":3043,"_extension":18},"/en-us/blog/introducing-the-gitlab-ci-cd-catalog-beta",{"title":3028,"description":3029,"ogTitle":3028,"ogDescription":3029,"noIndex":6,"ogImage":3030,"ogUrl":3031,"ogSiteName":669,"ogType":670,"canonicalUrls":3031,"schema":3032},"Introducing the GitLab CI/CD Catalog Beta","Discover, reuse, and contribute CI/CD components effortlessly, enhancing collaboration and efficiency when 
creating pipeline configurations.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099399/Blog/Hero%20Images/Blog/Hero%20Images/security-pipelines_4UHVIJlePT8rEzjvYkGYvi_1750099398604.jpg","https://about.gitlab.com/blog/introducing-the-gitlab-ci-cd-catalog-beta","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Introducing the GitLab CI/CD Catalog Beta\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-12-21\"\n      }",{"title":3028,"description":3029,"authors":3034,"heroImage":3030,"date":3035,"body":3036,"category":769,"tags":3037},[1080],"2023-12-21","DevSecOps is all about speed – achieving rapid progress in software development. To succeed in DevSecOps, organizations require a well-functioning CI/CD pipeline that teams can utilize to automate their development workflows.\n\nHowever, crafting pipeline configurations with YAML can be intricate and challenging because YAML isn't a programming language. Developers may find themselves reinventing the wheel each time they try to create new configurations because they don't have visibility into existing configurations and work that others may have already done, resulting in inefficiency.\n\n[GitLab 16.7](https://about.gitlab.com/releases/2023/12/21/gitlab-16-7-released/) introduces the [CI/CD Catalog](https://docs.gitlab.com/ee/ci/components/#cicd-catalog) (Beta), with the goal of enhancing developer efficiency by addressing three main questions developers encounter when creating pipeline configurations:\n\n* Discoverability: Has someone already created a configuration for my task, and where can I find it?\n* Reusability: Once I find a suitable pipeline, how do I use it effectively?\n* Ease of contribution: I've created a useful configuration; how can I easily share it with the GitLab community?\n\n## What is the GitLab CI/CD Catalog?\n\nThe CI/CD Catalog 
serves as a centralized hub for developers and organizations to share pre-existing [CI/CD components](https://docs.gitlab.com/ee/ci/components/) and to discover reusable configurations that others may have already developed. Every component published by users will be part of a public catalog accessible to all users, regardless of their organization or project. \n\nThis approach promotes cross-organization collaboration, allowing the entire GitLab community to benefit from the wealth of CI components available. It's a powerful step forward in sharing knowledge among GitLab users, enabling developers to harness the collective expertise of the platform.\n\n## Easy component creation and publishing\n\nIn addition to reusing components, developers can contribute to the GitLab CI/CD community by creating their own components and publishing them in the catalog. This ensures that others can benefit from their expertise and encourages collaboration across the platform.\n\n## How to discover and use components\n\n**1. Opening the CI/CD Component Catalog**\n\nClick on “Search or go to...”\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099406962.png)\n\nOpen the catalog by navigating to “Explore > CI/CD Catalog” or visit this [catalog page](https://gitlab.com/explore/catalog).\n\nUpon accessing the catalog, you'll find a list of CI/CD components projects contributed by your team, organization, or the wider GitLab community.\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099406963.png)\n\n**2. Browsing components**\n\nNavigate through the list of components in the CI/CD Catalog or use the Search bar to find components related to a specific topic.\n\nEach component project contains one or multiple components. 
Opening a component project will display its documentation, providing details on all available components. This includes insights into how to use each component and understanding the expected input parameters.\n\n**3. Include the selected components in your .gitlab-ci.yml**\n\nNow that you've explored the catalog and selected the desired CI/CD components, integrate them into your project's CI/CD pipeline.\n\nFollow these steps to update your .gitlab-ci.yml file:\n\n1. Open the .gitlab-ci.yml file in your project for editing.\n2. Use the include keyword to add the selected components to your CI configuration. \n3. Ensure that the paths to the component YAML files are correct and specify the appropriate version for each component.\n4. In case the components have input parameters, review the component’s documentation to understand which inputs are required, and add them to your CI configuration.\n5. Save and commit your changes to the .gitlab-ci.yml file.\n\nHere is an example of YAML code that demonstrates how to include a few components and use them with input parameters.\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_1.15.48_PM_aHR0cHM6_1750099406965.png)\n\n## How to create and publish components\n\nHave you crafted a valuable configuration that you'd like to share and contribute to your team or the GitLab community? Here are the six steps to make it happen:\n\n**Step 1: Create a new project and set it as a component project**\n\n1. On the left sidebar, select **Search or go to** and find your project.\n2. On the left sidebar, select Settings > General.\n3. Expand Visibility, project features, permissions.\n4. Scroll down to CI/CD Catalog resource and select the toggle to set the project as a CI/CD Catalog resource.\n5. 
Ensure that your project description is filled out; this information will be showcased in the catalog, providing users with insights into the purpose and functionality of your components.\n6. Create a .gitlab-ci.yml file in the root of the repository. You will need this file to test and release the components as described in steps 4 and 5 below. Note: This step only needs to be done once for any project that contains components.\n\n**Step 2: Create the components**\n\n1. Create a /templates folder in the root directory of the project.\n2. In this templates directory, create one YAML template file (ending in .yml) for each component.   \n3. The template can optionally include a description of input arguments using the `spec` keyword if the component requires input parameters, and the definition of jobs, that may include references to values using the interpolation format $[[ inputs.input-name ]]. Ensure you use three dash lines between the spec header, and job definitions.\n\nHere is an example of a `deploy.yml` template that gets input parameters:\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_11.34.20_AM_aHR0cHM6_1750099406966.png)\n\nIn this template, we've defined two input parameters, `stage` and `environment`, both with default values. In the content section, a job is defined that interpolates these input arguments.\n\n**Step 3: Create components documentation** \n\nCreate a README.md file in the root of the project, including information about the components. Explain the component's functionality, detail input parameters, and provide illustrative examples. 
This ensures clarity for component consumers on how to use them.\n\nThis is an example of component documentation:\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099406967.png)\n\nAdditional information can be found in our [CI/CD components](https://docs.gitlab.com/ee/ci/components/index.html#components-repository) documentation. \n\n**Step 4: Add tests to the components (recommended)**\n\nDeveloping a component follows a standard software development cycle with stages like build, test, and deploy. It's highly recommended to test your components before publishing them. Check out this example test, which queries the GitLab REST API to check whether a component job has been added to the pipeline. Feel free to use it, and consider adding more tests to ensure your components work as expected.\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_12.32.53_PM_aHR0cHM6_1750099406968.png)\n\nInclude all your test jobs in the **.gitlab-ci.yml** file in your Catalog project.\n\n**Step 5: Prepare your CI/CD configuration for publishing**\n\n1. Create a release job in the **.gitlab-ci.yml** file in the component project using the `Release` keyword.  See the job example:\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_12.34.27_PM_aHR0cHM6_1750099406969.png)\n\n__Note:__ Do not \"create release\" from GitLab UI since this soon won't be supported for a Component Catalog.\n\n2. 
We recommend adding this rule in the Release job; this will automatically trigger the Release job only when creating a git tag starts with digits in the project, following semantic release conventions (1.0.0 for example).\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_1.21.30_PM_aHR0cHM6_1750099406970.png)\n\n3. So this is how we recommend your job to look: \n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_12.37.09_PM_aHR0cHM6_1750099406970.png)\n\n4. To manually release components, add manual rule as below, so when the pipeline is triggered, someone will need to manually run the release job. \n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_12.38.18_PM_aHR0cHM6_1750099406971.png)\n\nHere is the release job with the `when:manual` rule:\n\n![component catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099407/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2023-12-19_at_12.41.00_PM_aHR0cHM6_1750099406972.png)\n\n**Step 6: Publish your components**\n\nOnce you are satisfied with your components, and all tests have passed successfully, it's time to publish a new version by creating a git tag, so they will be available in the CI/CD Catalog.\n\n1. Create a Git tag using the semantic versioning format \"MAJOR.MINOR.PATCH\". \n\n2. You can create tags through the UI by navigating to Code -> Tags -> New Tag, or via the CLI using `git tag`. \n\n3. Creating the tag will trigger a pipeline that runs the Release job if all tests pass successfully. 
The component project will then be assigned the version you defined in the tag, and it will appear in the catalog.\n\n### Example projects\n\n* [GitLab official components](https://gitlab.com/components)\n\n### Documentation \n\nFor more details on using components from the CI/CD Catalog and maximizing their potential within your projects, refer to the official [CI/CD Catalog documentation](https://docs.gitlab.com/ee/ci/components/#cicd-catalog). This documentation provides in-depth insights into the functionality.\n\n> [Take a tour](https://gitlab.navattic.com/cicd-catalog) of the GitLab CI/CD Catalog.\n\n_A special thank you to [Dov Hershkovitch](https://about.gitlab.com/company/team/#dhershkovitch) and [Fabio Pitino](https://gitlab.com/fabiopitino) for their invaluable content reviews and contributions to this blog post._",[108,875,773,9],{"slug":3039,"featured":6,"template":684},"introducing-the-gitlab-ci-cd-catalog-beta","content:en-us:blog:introducing-the-gitlab-ci-cd-catalog-beta.yml","Introducing The Gitlab Ci Cd Catalog Beta","en-us/blog/introducing-the-gitlab-ci-cd-catalog-beta.yml","en-us/blog/introducing-the-gitlab-ci-cd-catalog-beta",{"_path":3045,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3046,"content":3052,"config":3060,"_id":3062,"_type":13,"title":3063,"_source":15,"_file":3064,"_stem":3065,"_extension":18},"/en-us/blog/introduction-of-oidc-modules-for-integration-between-google-cloud-and-gitlab-ci",{"title":3047,"description":3048,"ogTitle":3047,"ogDescription":3048,"noIndex":6,"ogImage":3049,"ogUrl":3050,"ogSiteName":669,"ogType":670,"canonicalUrls":3050,"schema":3051},"OIDC simplifies GitLab CI/CD authentication with Google Cloud","OpenID Connect can sometimes be complex, but it's the safer and recommended way to authenticate your GitLab pipeline with Google Cloud. 
This tutorial shows you how.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669863/Blog/Hero%20Images/security-pipelines.jpg","https://about.gitlab.com/blog/introduction-of-oidc-modules-for-integration-between-google-cloud-and-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How OIDC can simplify authentication of GitLab CI/CD pipelines with Google Cloud\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Hiroki Suezawa\"},{\"@type\":\"Person\",\"name\":\"Dhruv Jain\"}],\n        \"datePublished\": \"2023-06-28\"\n      }",{"title":3053,"description":3048,"authors":3054,"heroImage":3049,"date":3057,"body":3058,"category":814,"tags":3059},"How OIDC can simplify authentication of GitLab CI/CD pipelines with Google Cloud",[3055,3056],"Hiroki Suezawa","Dhruv Jain","2023-06-28","\n\nIn recent years, the [integration of cloud services and GitLab through GitOps](https://about.gitlab.com/blog/the-ultimate-guide-to-gitops-with-gitlab/) has become very common. Applications are now continuously tested and deployed through [continuous integration and delivery (CI/CD)](https://about.gitlab.com/topics/ci-cd/); cloud environments are managed in code through Infrastructure as Code (IaC) using tools like Terraform; and GitLab CI is used as a core tool to perform these GitOps processes.\n\nAt the same time, [software supply chain attacks](https://about.gitlab.com/blog/the-ultimate-guide-to-software-supply-chain-security/) have increased. 
To reduce the risk of an attack, the use of OpenID Connect ([OIDC](https://docs.gitlab.com/ee/integration/openid_connect_provider.html#introduction-to-openid-connect)) auth is recommended, and GitLab 15.7 introduced [ID tokens](https://docs.gitlab.com/ee/ci/secrets/id_token_authentication.html), a mechanism for secure OIDC integration.\n\nHowever, OIDC integration can be complex for beginners and can be difficult to configure properly. Therefore, GitLab's Infrastructure Security Team has created a Terraform module for configuring Google Cloud and a CI template for GitLab CI so GitLab CI and Google Cloud can be securely integrated.\n\nThis tutorial explains how to use [these OIDC modules](https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules).\n\n## Why OIDC?\nThe integration between Google Cloud and GitLab CI has often been done by adding a static key of the service account in Google Cloud to the environment variables of CI. However, this method has the following problems:\n\n- The risk of compromise is high because the same key can be used to manipulate the cloud environment over time.\n- Because static keys are portable, there is no link between the key and the environment in which it is used, making it difficult to identify where the key is being used.\n\nOIDC authentication can solve the above problems by providing the following benefits:\n- No need to issue static keys, eliminating the need for long-term key management.\n  - It also eliminates the compliance need of rotating the secrets every few months.\n- Low risk of leakage due to temporary tokens issued.\n- Because the CI used is tied to the Google Cloud environment, it is possible to properly manage where the service account is used.\nIn addition, other settings such as CI and CD isolation can be configured using [the claims provided by GitLab CI](https://docs.gitlab.com/ee/ci/secrets/id_token_authentication.html).\n\n## OIDC authentication with Google 
Cloud\nThe OIDC integration between Google Cloud and GitLab CI works as follows:\n\n- Preparation (areas to configure in Terraform in OIDC models)\n  1. Create a service account in Google Cloud for CI integration and set up the appropriate roles.\n  1. Create a Google Cloud Workload Identity pool and provider, and configure integration with GitLab CI.\n  1. Assign the Workload Identity User role to the service account.\n\n\n\n\n![Simplified diagram](https://about.gitlab.com/images/blogimages/2023-06-30-introduction-of-oidc-modules-for-integration-between-google-cloud-and-gitlab-ci/oidc-auth-diagram.png){: .shadow}\n\nGitLab CI in action (simplified by the GitLab CI template in OIDC modules)\n{: .note .text-center}\n\nGoogle Cloud authenticates using an ID token issued on GitLab CI, so there is no need to issue a Google Cloud service account key.\n\n## How to use a Terraform module\nThe process of configuring a Terraform module to establish a connection between Google Cloud and GitLab using OIDC is fairly simple. This module takes care of the following steps:\n1. Create the Google Cloud Workload Identity Pool.\n1. Create a Workload Identity Provider.\n1. Grant permissions for service account impersonation.\n\nNote: Your account must have at least the Workload Identity Pool Admin permission on the Google Cloud project.\n\n```terraform\n# terraform\nmodule \"gl_oidc\" {\n source = \"gitlab.com/gitlab-com/gcp-oidc/google\"\n version = \"3.0.0\"\n google_project_id = GOOGLE_PROJECT_ID\n gitlab_project_id = GITLAB_PROJECT_ID\n oidc_service_account = {\n   \"sa\" = {\n     sa_email  = \"SERVICE_ACCOUNT_EMAIL\"\n     attribute = \"attribute.project_id/GITLAB_PROJECT_ID\"\n   }\n }\n}\n```\n\nThe above sample module can be used to configure OIDC. 
There are some additional parameters that can be used to configure this module further (a detailed list and description of those parameters can be found [here](https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/tree/main/#configure-gitlab-for-oidc-integration-using-terraform-module)).\n \nBy default, all branches of the project are authenticated to Google Cloud, but you can specify more granular conditions, such as the branch name of the commit that triggered the CI, or authenticating only with a specific tag.\n\nFurther settings can be made by changing the following attribute settings in accordance with the ID token claim:\n\n```\n  oidc_service_account = {\n    \"sa\" = {\n      sa_email  = \"SERVICE_ACCOUNT_EMAIL\"\n      attribute = \"attribute.project_id/GITLAB_PROJECT_ID\"\n    }\n```\n\nCode files for this module are available [here](https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/tree/main/terraform-modules/gcp-oidc).\n\n## How to use the CI template\n[The CI template](https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/blob/main/templates/gcp_auth.yaml) makes GitLab CI very easy for Google Cloud OIDC authentication. This CI template supports [Application Default Credentials](https://cloud.google.com/docs/authentication/application-default-credentials) and can be used from IaC such as Terraform, CLI such as gcloud, and SDKs in Python and Go.\n\nFor example, if you want to use the CI template for Terraform, you can write:\n\n```\n# You should upgrade to the latest version. 
You can find the latest version at https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/releases\ninclude:\n  - remote: 'https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/raw/3.0.0/templates/gcp_auth.yaml'\n\nterraform:\n  image:\n    name: hashicorp/terraform:1.5.3\n    entrypoint:\n      - /usr/bin/env\n      - \"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\"\n  extends: .google-oidc:auth\n  variables:\n    WI_POOL_PROVIDER: //iam.googleapis.com/projects/GOOGLE_PROJECT_ID/locations/global/workloadIdentityPools/WORKLOAD_IDENTITY_POOL/providers/WORKLOAD_IDENTITY_POOL_PROVIDER\n    SERVICE_ACCOUNT: SERVICE_ACCOUNT_EMAIL\n  script:\n    - terraform init\n    - terraform plan\n```\n\n### Required variables\n- WI_POOL_PROVIDER(under .google-oidc:) - Full canonical resource name of the workload identity pool provider. This value must be written under .google-oidc: like this.\n- SERVICE_ACCOUNT - Service Account email address\n\nA detailed list and description of those parameters can be found [here](https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/blob/main/README.md#using-oidc-in-pipelines).\n\nAs a note, you cannot use `before_script` in the job that uses this template because the way GitLab CI works will result in OIDC code being overwritten. CI template uses `before_script` to perform the initial configuration of OIDC.\n\nCode samples for this module are available [here](https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/tree/main/samples/ci/gcp).\n\n## Next steps\nThis article has introduced OIDC modules for OIDC integration and secure authentication between Google Cloud and GitLab CI. In short, we are doing the following steps:\n\n1. Setting up a service account\n1. Granting permissions to the service account\n1. 
Running the Terraform module\n1. Setting up CI pipeline\n\nYou can find the relevant sample for the above steps [here](https://gitlab.com/gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules/-/tree/main/samples).\n\nAlso, GitLab is currently developing a [CI Catalog and CI Components](https://about.gitlab.com/blog/use-inputs-in-includable-files/). We plan to support them.\n\nThe GitLab Infrastructure Security Team will continue to improve the modules as we receive feedback, and we hope to consider and release components that maintain a high level of security and usability for both internal and external use. \n\n## Read more\n- [Configure OIDC with GCP Workload Identity Federation](https://docs.gitlab.com/ee/ci/cloud_services/google_cloud/)\n- [Workload Identity Federation on Google Cloud](https://cloud.google.com/iam/docs/workload-identity-federation)\n- [Terraform for google_iam_workload_identity_pool_provider](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/iam_workload_identity_pool_provider)\n- [OIDC Authentication using ID tokens](https://docs.gitlab.com/ee/ci/secrets/id_token_authentication.html)\n",[9,108,814,1865],{"slug":3061,"featured":6,"template":684},"introduction-of-oidc-modules-for-integration-between-google-cloud-and-gitlab-ci","content:en-us:blog:introduction-of-oidc-modules-for-integration-between-google-cloud-and-gitlab-ci.yml","Introduction Of Oidc Modules For Integration Between Google Cloud And Gitlab 
Ci","en-us/blog/introduction-of-oidc-modules-for-integration-between-google-cloud-and-gitlab-ci.yml","en-us/blog/introduction-of-oidc-modules-for-integration-between-google-cloud-and-gitlab-ci",{"_path":3067,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3068,"content":3074,"config":3079,"_id":3081,"_type":13,"title":3082,"_source":15,"_file":3083,"_stem":3084,"_extension":18},"/en-us/blog/ios-cicd-with-gitlab",{"title":3069,"description":3070,"ogTitle":3069,"ogDescription":3070,"noIndex":6,"ogImage":3071,"ogUrl":3072,"ogSiteName":669,"ogType":670,"canonicalUrls":3072,"schema":3073},"Tutorial: iOS CI/CD with GitLab","Learn how to create an automated CI/CD pipeline using GitLab and fastlane.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669340/Blog/Hero%20Images/john-cameron-DgRb7aAGK4k-unsplash.jpg","https://about.gitlab.com/blog/ios-cicd-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: iOS CI/CD with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darby Frey\"}],\n        \"datePublished\": \"2023-06-07\",\n      }",{"title":3069,"description":3070,"authors":3075,"heroImage":3071,"date":3076,"body":3077,"category":769,"tags":3078},[894],"2023-06-07","\n\nCreating an automated [CI/CD](https://docs.gitlab.com/ee/ci/) pipeline for an Apple iOS application can be challenging. 
Configuring build environments and managing code signing can be very time-consuming and error-prone, and when you get that all working, you still need a way to send your app to Apple.\n\nGitLab makes this much easier with [GitLab Mobile DevOps](https://docs.gitlab.com/ee/ci/mobile_devops.html).\n\nGitLab Mobile DevOps is a collection of features built right into GitLab to solve the biggest challenges mobile teams face in establishing a DevOps practice.\n\nIn this blog post, I’ll demonstrate how to set up an automated CI/CD pipeline using GitLab and [fastlane](https://fastlane.tools/).\n\n## Prerequisites\nTo get started, there are a few prerequisites you’ll need:\n\n* An Apple Developer account - [https://developer.apple.com/](https://developer.apple.com/)\n* Ruby and XCode command line tools installed on your local machine [https://docs.fastlane.tools/getting-started/ios/setup](https://docs.fastlane.tools/getting-started/ios/setup/) \n\n> Try out our [Android CI/CD with GitLab tutorial](/blog/android-cicd-with-gitlab/).\n\n## Reference project\nFor this walkthrough, we’ll use the iOS demo project for reference: [https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo)\n\n## Install fastlane\nIf you haven’t done so yet, the first step will be to install fastlane. Do this by creating a file in the root of your project called Gemfile. Give it the following contents:\n\n```\nsource \"https://rubygems.org\"\n\ngem \"fastlane\"\n```\n\nThen, from the terminal in your project, run:\n\n```\nbundle install\n```\n\nThis command will install fastlane, and all of its related dependencies.\n\n## Initialize fastlane\nNow that fastlane is installed, we can set it up for our project. Run the following command from the terminal in your project and choose Option No. 
2 since we will be targeting Test Flight in this tutorial:\n\n```\nbundle exec fastlane init\n```\n\nRunning this command will create a new folder in your project called `fastlane`. This folder will contain two files `Appfile` and `Fastfile`. \n\n![Initialize Fastlane](https://about.gitlab.com/images/blogimages/2023-04-15-ios-cicd-with-gitlab/fastlane-init.png)\n\nThe Appfile contains the configuration information for the app, and the Fastfile has some sample code that we will replace later. See the fastlane docs for more information about the configuration details in the Appfile [https://docs.fastlane.tools/advanced/Appfile/](https://docs.fastlane.tools/advanced/Appfile/)\n\n## Initialize fastlane match\nThe next step will be to set up fastlane Match, which is the part of fastlane that handles code signing. For more information on fastlane match, see the docs [https://docs.fastlane.tools/actions/match/](https://docs.fastlane.tools/actions/match/ )\n\nWe’ll start by running the following command from the terminal in your project:\n\n```\nbundle exec fastlane match init\n```\n\nThis command will prompt you to choose which storage backend you want to use (select gitlab_secure_files) and to input your project path (for example: gitlab-org/gitlab). It will then generate a fastlane Matchfile configured to use your project as the storage backend for fastlane Match.\n\n![Initialize fastlane Match](https://about.gitlab.com/images/blogimages/2023-04-15-ios-cicd-with-gitlab/match-init.png)\n\n## Generate a project access token\nNext, you'll need a GitLab Access Token to use fastlane Match from your local machine. To create a project access token, visit the Access Tokens section under Settings in your GitLab project. 
Create a new token with maintainer access to the “api” scope.\n\nThen run the following command from the terminal in your project replacing “YOUR_NEW_TOKEN” with the access token you just generated:\n\n```\nexport PRIVATE_TOKEN=YOUR_NEW_TOKEN\n```\n\nThis will configure fastlane to use this access token when making fastlane Match requests to your project.\n\n## Generate signing certificates\nNow that fastlane Match is configured, we can use it to generate the signing certificates and provisioning profiles for our app and upload them to GitLab.\n\nNOTE: If you already have these files for your app, see the instructions in this blog post on how to use fastlane to import your existing code signing files [/blog/mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane/](/blog/mobile-devops-with-gitlab-part-3-code-signing-for-ios-with-gitlab-and-fastlane/).\n\nRun the following command from the terminal in your project to generate development code signing files and upload them to GitLab.\n\n```\nbundle exec fastlane match development\n```\n\nWhen this command completes, go to the CI/CD settings page in your project and scroll down to the Secure Files section to see the files that were just generated and added to your project.\n\nWhile we’re here, we can go ahead and do that same thing for the appstore code signing files. Run the following command to generate the appstore code signing files and upload them to GitLab.\n\n```\nbundle exec fastlane match appstore\n```\n\n## Update Xcode configuration\nWith the code signing files ready to go, we have one small change to make in Xcode. In your project in Xcode, go to the Signing & Capabilities section and disable automatically managing code signing. Then, select the appropriate provisioning profile and signing certificate from the list based on your build target. 
The certificates we just generated will show up in that list.\n\n![Configure Xcode Provisioning Profiles](https://about.gitlab.com/images/blogimages/2023-04-15-ios-cicd-with-gitlab/xcode.png)\n\nWith all of our code signing configuration in place, we can now move on to setting up the integration with the Apple App Store.\n\n## Apple App Store integration\nThe final bit of configuration is the Apple App Store integration. To do this, we’ll need to create an API key in App Store Connect. See the instructions here to create and download the key file to your location machine. This key should have the role of App Manager. [https://developer.apple.com/documentation/appstoreconnectapi/creating_api_keys_for_app_store_connect_api](https://developer.apple.com/documentation/appstoreconnectapi/creating_api_keys_for_app_store_connect_api)\n\nOnce the key is generated, go to Settings, Integrations in your project, and click on the integration for Apple App Store Connect. You’ll be asked to supply the issuer ID and key ID from App Store Connect, along with the key file you just downloaded. With all of that configuration in place, click the Test Settings button to ensure everything works. If it gives you an error, double check your settings and try again. Once it’s working, click Save Changes to save and activate the integration. 
\n\nWith the integration activated, the following CI variables are added to all pipelines on protected branches and tags:\n\n* `APP_STORE_CONNECT_API_KEY_ISSUER_ID`\n* `APP_STORE_CONNECT_API_KEY_KEY_ID`\n* `APP_STORE_CONNECT_API_KEY_KEY`\n\nThese CI variables can be used by fastlane or any custom tooling to interact with the Apple App Store to upload builds, or perform other API enabled tasks.\n\n## Fastfile\nWith all of our configuration in place, we can now drop in a sample Fastfile to show how to perform the build, sign, and release actions.\n\nFrom the [sample project](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo), copy the contents of the fastlane/Fastfile and paste it into the Fastfile in your project, replacing the existing content. \n\n[https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo/-/blob/main/fastlane/Fastfile](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo/-/blob/main/fastlane/Fastfile)\n\nThis sample Fastfile contains two lanes, which are actions fastlane can execute. The lanes in this file are `build` and `beta`. \n\n### Build\nThe build lane will perform just a couple of actions to `setup_ci`, `match`, and `build_app`. This will use the development certificate we generated with fastlane Match earlier to build and sign the app for development. \n\n### Beta\nThe beta lane takes a few more steps to `setup_ci`, `match`, `app_store_connect_api_key`, `increment_build_number`, `build_app`, and `upload_to_testflight`. This lane will use the appstore certificates we generated with faslane Match earlier to build and sign the app for an appstore release. This lane also uses the App Store Connect integration to connect to the app store to determine the next build number to use, and to upload the final build to Test Flight. \n\n### .gitlab-ci.yml\nWith the fastlane configuration ready to go, the last step is to hook it up to GitLab CI. 
\n\nFrom the [sample project](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo), copy the contents of the `.gitlab-ci.yml` file and paste it into the project. \n\n[https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo/-/blob/main/.gitlab-ci.yml](https://gitlab.com/gitlab-org/incubation-engineering/mobile-devops/demo-projects/ios-demo/-/blob/main/.gitlab-ci.yml )\n\nThis is a simplified CI configuration that created two CI jobs to run each of the lanes in fastlane on the GitLab macOS shared runners. The build job will run for all CI pipelines and the beta job will only be run on CI pipelines on the master branch. The beta job is also manually triggered, so you can control when the beta release is pushed to Test Flight. \n\nWith all of this in place, commit all of these changes and push them up to your project. The CI pipeline will kick off, and you can see these jobs in action. \n\nCover image by \u003Ca href=\"https://unsplash.com/@john_cameron?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">John Cameron\u003C/a> on \u003Ca href=\"https://unsplash.com/photos/DgRb7aAGK4k?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>\n{: .note}\n",[773,108,9],{"slug":3080,"featured":6,"template":684},"ios-cicd-with-gitlab","content:en-us:blog:ios-cicd-with-gitlab.yml","Ios Cicd With Gitlab","en-us/blog/ios-cicd-with-gitlab.yml","en-us/blog/ios-cicd-with-gitlab",{"_path":3086,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3087,"content":3093,"config":3098,"_id":3100,"_type":13,"title":3101,"_source":15,"_file":3102,"_stem":3103,"_extension":18},"/en-us/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment",{"title":3088,"description":3089,"ogTitle":3088,"ogDescription":3089,"noIndex":6,"ogImage":3090,"ogUrl":3091,"ogSiteName":669,"ogType":670,"canonicalUrls":3091,"schema":3092},"Jenkins to GitLab: The ultimate guide to 
modernizing your CI/CD environment","Learn how to migrate from Jenkins to the integrated CI/CD of the GitLab DevSecOps Platform to deliver high-quality software rapidly.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663000/Blog/Hero%20Images/tanukilifecycle.png","https://about.gitlab.com/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Jenkins to GitLab: The ultimate guide to modernizing your CI/CD environment\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-11-01\",\n      }",{"title":3088,"description":3089,"authors":3094,"heroImage":3090,"date":3095,"body":3096,"category":1103,"tags":3097},[1080],"2023-11-01","\nIn today's dynamic landscape of software development, certain requirements have become paramount for delivering high-quality software rapidly. These requirements include the need for cloud compatibility, faster development cycles, improved collaboration, containerization, enhanced development experiences, and the integration of AI-driven capabilities for better efficiency and speed. Jenkins, a longstanding and respected continuous integration (CI) tool, has admirably played a role in many teams' software development for years. However, as more teams adopt DevOps/DevSecOps strategies for their software delivery, leveraging the integrated CI that is available in a DevSecOps platform like GitLab can provide benefits that Jenkins does not. \n\nSome organizations find themselves hesitating to migrate, not because they doubt the benefits of a top-tier [CI/CD](https://about.gitlab.com/topics/ci-cd/) solution such as GitLab, but due to the complexities of their existing Jenkins implementations. It's understandable that such a transition can seem daunting. 
\n\nIn this blog, you'll find several migration strategies to help transition from Jenkins to GitLab and make the process smoother and more manageable.\n\n## Migrating to GitLab\nIt's become evident that for organizations seeking a CI/CD solution that can seamlessly support their evolving demands, GitLab emerges as a powerful game-changer. Let's explore why transitioning to this advanced platform is transformative for Jenkins users.\n\n### Why migrate to GitLab \nBefore we delve into the migration approaches, let's take a moment to understand GitLab CI and what makes it a compelling choice for modern CI/CD needs.\n\n> Try GitLab CI/CD today with [a free trial of Ultimate](https://gitlab.com/-/trials/new).\n\n### GitLab CI overview\nGitLab CI is an integral part of the GitLab [AI-powered](https://about.gitlab.com/gitlab-duo/) DevSecOps Platform, which offers a comprehensive and unified solution for DevSecOps and CI/CD. GitLab's design revolves around streamlining development workflows, fostering collaboration, enhancing security, and ensuring scalability.\n\n### Key features of GitLab CI\nThese are the key features of GitLab CI:\n- **Unified platform:** GitLab CI is more than just a CI/CD tool; it's part of a broader ecosystem that includes source code management, project management, security features, analytics and more. This unified platform streamlines workflows and enhances collaboration among development teams.\n- **Containerization and orchestration:** GitLab CI/CD is designed with containerization in mind, offering native support for Docker and Kubernetes. 
This enables seamless integration of container technologies into your CI/CD pipelines.\n- **Security by design:** Security is a top priority, and GitLab CI incorporates features such as static code analysis and vulnerability scanning to help teams identify and address security issues early in the development process.\n- **GitOps principles:** GitLab CI aligns with [GitOps principles](https://about.gitlab.com/blog/the-ultimate-guide-to-gitops-with-gitlab/), emphasizing version-controlled, declarative configurations for infrastructure and application deployments. This approach enhances the reliability and repeatability of deployments.\n\nGet familiar with GitLab CI with this tutorial:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/WKR-7clknsA?si=T21Fe10Oa0rQ0SGB\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWith that understanding of GitLab CI's capabilities, let's explore the migration steps and strategies for Jenkins users looking to leverage the benefits of GitLab CI.\n\n## A recommended step-by-step Jenkins-to-GitLab CI migration\nWhen considering a migration from Jenkins to GitLab CI, we strongly recommend following a well-structured, step-by-step approach to ensure a seamless transition. Here's our recommended process:\n1. **Pipeline assessment:** Start by conducting a comprehensive inventory of all your existing pipelines in Jenkins. This initial step will help you gain a clear understanding of the scope and complexity of the migration.\n2. **Parallel migration:** Begin the migration process by selecting individual pipelines and moving them to GitLab CI one at a time. Continue to maintain the use of Jenkins for your ongoing work during this transition to minimize disruptions.\n3. **Code verification:** We advise beginning with verification checks in CI. Run both the Jenkins and GitLab CI pipelines in parallel. 
This dual approach allows you to directly compare the two workflows and identify any issues in the new GitLab workflows. During this phase, keep the GitLab workflow as an optional choice while Jenkins remains required.\n4. **Continuous validation:** After running both pipelines in parallel for a full iteration, thoroughly evaluate the outcomes from each pipeline. This evaluation should consider various factors, including status codes, logs, and performance. \n5. **GitLab CI transition:** As you gain confidence in the reliability and effectiveness of GitLab CI through the parallel runs, make the transition to the GitLab CI workflow as the required standard while Jenkins continues to operate in the background.\n6. **Jenkins phaseout:** After a second iteration, when you are confident in the performance and stability of GitLab CI, you can begin to remove the Jenkins job from your code verification pipeline. This successful transition will enable you to retire Jenkins from this particular aspect of your CI/CD process.\n\nThis recommended approach ensures that your migration is a gradual evolution, allowing you to identify and address any issues or discrepancies before fully committing to GitLab CI. Running Jenkins and GitLab CI pipelines in parallel provides valuable insights and ensures the effective streamlining of your CI/CD processes.\n\n## Preparing for migration: Training and communication\nTo ensure a smooth and successful migration from Jenkins to GitLab CI, follow these essential steps:\n- **Stakeholder communication:** Start by announcing your migration plans and timelines to all relevant stakeholders. This includes DevOps teams, developers, and QA engineers. 
Transparency in communication is crucial to ensure that everyone understands the objectives and expectations of the migration.\n- **Knowledge-level training:** Conduct knowledge-level training sessions for your teams to promote GitLab CI adoption.\nCover topics such as using GitLab CI, understanding the YAML syntax, and how to create a basic pipeline.\nProvide team members with the knowledge and skills necessary to navigate the new GitLab CI environment effectively.\n- **Hands-on learning:** Encourage hands-on learning by pairing up developers.\nCreate opportunities for them to learn from each other's experiences throughout the migration process.\n\nBy following these instructions for training and communication, you'll build a strong foundation for a successful migration, empowering your teams to adapt and thrive in the new environment.\n\n## 3 Jenkins-to-GitLab CI migration strategies\nThere are different strategies to consider. These three strategies offer flexibility, allowing organizations to choose the path that best aligns with their specific needs and resources. Let's explore these strategies in detail to help you make an informed decision about which one suits your organization best.\n\n### Migration Strategy 1: Using GitLab CI for new projects\nThe first migration strategy involves a gradual transition. While you maintain your existing Jenkins infrastructure for ongoing projects, you introduce GitLab CI for new projects. This approach allows you to harness the modern features of GitLab CI without disrupting your current work.\n\n#### Benefits of Migration Strategy 1\nThe benefits of this approach include the following:\n- New projects can leverage GitLab CI's advanced features right from the start. 
\n- This strategy minimizes the risk of disrupting existing workflows, as your existing Jenkins setup remains intact.\n- Your team can gradually adapt to GitLab CI, building confidence and expertise without the pressure of an immediate full-scale migration.\n\n#### Challenges of Migration Strategy 1\nThe challenges of this approach include the following:\n- Operating two CI/CD platforms simultaneously can introduce complexity, especially in terms of integration and team collaboration.\n- Managing projects on different platforms may require careful coordination to ensure consistency in processes and security practices.\n\nThis strategy offers a smooth and manageable transition by allowing you to harness GitLab CI's strengths for new projects, while your existing Jenkins infrastructure continues to support ongoing work.\n\n### Migration Strategy 2: Migrating only strategic projects\nIn this strategy, you identify specific projects within your organization that stand to benefit the most from the capabilities of GitLab CI. 
Instead of preparing for a wholesale migration, you start by focusing your efforts on migrating these strategically selected projects first.\n\n#### Benefits of Migration Strategy 2\nThe benefits of this approach include the following:\n- By concentrating on key projects, you can realize significant improvements in those areas where GitLab CI aligns with specific needs.\n- This approach reduces the complexity of migrating everything at once, minimizing the potential for disruptions.\n- You can gradually build confidence with GitLab CI and its benefits before considering further migrations.\n\n#### Challenges of Migration Strategy 2\nThe challenges of this approach include the following:\n- Even though you're not migrating all projects, the chosen projects' migration can still be intricate and require careful planning.\n- Ensuring seamless collaboration between projects on different platforms may require additional attention.\n\nThis strategy allows you to maximize the impact of GitLab CI by focusing on strategic areas, minimizing risk, and gradually gaining experience with the new tool.\n\n### Migration Strategy 3: Migrating everything\nThe third strategy is a comprehensive migration where you commit to moving all your CI/CD processes, projects, and workflows to GitLab CI. This approach aims for uniformity and simplification of CI/CD across all projects. This strategy can benefit from taking an iterative approach. Consider starting with new projects, followed by migrating strategic projects, and then leverage your growing knowledge and experience with GitLab CI to complete the migration of remaining projects. 
\n\n#### Benefits of Migration Strategy 3\nThe benefits of this approach include the following:\n- Uniform CI/CD processes across all projects can streamline administration and maintenance, reducing complexity.\n- You can take full advantage of GitLab CI's modern capabilities, from Infrastructure as Code to enhanced security features.\n- As your projects grow, GitLab CI is designed to handle increased demands, ensuring long-term scalability.\n\n#### Challenges of Migration Strategy 3\nThe challenges of this approach include the following:\n- A full-scale migration can be intricate, requiring meticulous planning and implementation.\n- The transition may disrupt ongoing projects and require a significant time investment.\n- Investment in training and potential tool migration expenses should be considered.\n\nOpt for this approach if uniformity and consolidation of CI/CD processes are a high priority, and you have the resources to execute a full migration.\n\nThe migration strategy you select should align with your organization's specific needs and circumstances. In all cases, the ultimate goal is to enhance your development process with modern CI/CD tools like GitLab CI, which offers scalability, infrastructure automation, security, and collaboration features that align with today's development needs.\n\n## Technical insights: How the migration works\nMoving your CI/CD workflows from Jenkins to GitLab CI is a transformative journey, and understanding how it works is vital for a successful transition.\n\n### Understanding the configurations: Jenkinsfile vs. .gitlab-ci.yml\nThe heart of your CI/CD pipeline lies in the configurations defined in your Jenkinsfile (for Jenkins) and .gitlab-ci.yml (for GitLab CI). 
While there are some similarities between these configuration files, there are notable differences as well.\n\n#### Similarities\n- Both files define the stages, jobs, and steps of your CI/CD process.\n- You specify the desired build, test, and deployment steps in both files.\n- Environment variables and settings can be configured in either file.\n\n#### Differences\n- Jenkinsfile uses Groovy for scripting, while .gitlab-ci.yml uses YAML. This change in language affects the way you write and structure your configurations.\n- The process of defining pipelines is more intuitive in .gitlab-ci.yml, with a cleaner, more human-readable syntax.\n- GitLab CI provides a wide range of built-in templates and predefined jobs, simplifying configuration and reducing the need for custom scripting.\n\n### Manually converting the pipeline configuration\nCurrently, migrating your existing Jenkins pipelines to GitLab CI is typically done manually. This means analyzing your Jenkinsfile and re-creating the equivalent configurations in .gitlab-ci.yml. While there are similarities in the concepts and structure, the differences in syntax and the specific capabilities of each platform require careful consideration during the migration.\n\n## Strategic planning for a smooth transition\nMigrating from Jenkins to GitLab CI requires meticulous planning to ensure a seamless transition. It's crucial to assess the disparities between the two systems and evaluate their impact on your workflow, considering aspects like security, cost, time, and capacity.\n\nOnce you've identified these differences and devised your migration strategy, break down the migration into key steps. These include setting up GitLab CI pipelines, securely transferring data from Jenkins to GitLab CI, and integrating GitLab CI into your existing tools and processes. 
\n\n## Case study: A seamless transition for Lockheed Martin\nLet's look at a real-world case study to illustrate the effectiveness of the \"Migrate Everything\" strategy. [Lockheed Martin](https://about.gitlab.com/customers/lockheed-martin/), the world’s largest defense contractor, had been using Jenkins for several years. As their project portfolio expanded, they realized that their Jenkins implementation with a wide variety of DevOps tools was becoming increasingly complex to manage. They were also eager to adopt modern CI/CD capabilities that Jenkins struggled to provide.\n\nIn collaboration with GitLab, Lockheed Martin decided to undertake a comprehensive migration to GitLab CI. Their goals included achieving consistency in their CI/CD processes, simplifying administration and maintenance, and taking full advantage of The GitLab Platform’s robust features.\n\nThe comprehensive migration strategy proved to be a resounding success for Lockheed Martin. With GitLab CI, they not only streamlined their CI/CD processes but achieved remarkable results. **They managed to run CI pipeline builds a staggering 80 times faster, retired thousands of Jenkins servers, and reduced the time spent on system maintenance by a staggering 90%. This monumental shift resulted in a significant increase in efficiency and productivity for Lockheed Martin.**\n\nThis case study showcases how a comprehensive migration strategy can be effective for organizations looking to leverage GitLab capabilities across all their projects.\n\nFor more in-depth insights into Lockheed Martin's successful transition to GitLab and how it streamlined their software development processes, check out [the detailed case study](https://about.gitlab.com/customers/lockheed-martin/).\n\n## GitLab documentation and support\nFor those embarking on this migration journey, GitLab offers documentation to guide you through the process. 
You can find valuable resources in GitLab's [official documentation](https://docs.gitlab.com/ee/ci/migration/jenkins.html).\n\nIn addition to documentation, GitLab's Professional Services team is available to assist organizations in their migrations. They bring expertise and experience to ensure a smooth transition. Whether it's understanding the nuances of Jenkinsfile to .gitlab-ci.yml conversion or optimizing your CI/CD workflows, their support can be invaluable.\n\n> Try GitLab CI/CD today with [a free trial of Ultimate](https://gitlab.com/-/trials/new).\n",[9,276,704,230,835,773],{"slug":3099,"featured":6,"template":684},"jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment","content:en-us:blog:jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment.yml","Jenkins Gitlab Ultimate Guide To Modernizing Cicd Environment","en-us/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment.yml","en-us/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment",{"_path":3105,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3106,"content":3112,"config":3118,"_id":3120,"_type":13,"title":3121,"_source":15,"_file":3122,"_stem":3123,"_extension":18},"/en-us/blog/keep-git-history-clean-with-interactive-rebase",{"title":3107,"description":3108,"ogTitle":3107,"ogDescription":3108,"noIndex":6,"ogImage":3109,"ogUrl":3110,"ogSiteName":669,"ogType":670,"canonicalUrls":3110,"schema":3111},"How to keep your Git history clean with interactive rebase","Interactive rebase is one of Git’s most versatile tools. 
Here's how to use it to correct commit messages, fix mistakes, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662593/Blog/Hero%20Images/title-image.png","https://about.gitlab.com/blog/keep-git-history-clean-with-interactive-rebase","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to keep your Git history clean with interactive rebase\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tobias Günther\"}],\n        \"datePublished\": \"2020-11-23\",\n      }",{"title":3107,"description":3108,"authors":3113,"heroImage":3109,"date":3115,"body":3116,"category":769,"tags":3117},[3114],"Tobias Günther","2020-11-23","\n## What is interactive rebase? \n\nInteractive [rebase](/solutions/source-code-management/), or Git rebase interactive, is sometimes called the \"Swiss Army Knife\" of Git – because it contains so many different tools, for so many different use cases! However, there's one main, overarching use case: _cleaning up your local commit history_.\n\nMind the word \"local\": it should only be used for cleaning up your own, local commit history, for example before integrating one of your feature branches into a team branch. In contrast, it should NOT be used on commit history that has already been pushed and shared on a remote repository. Interactive rebase is one of those tools that \"rewrite\" Git history – and you shouldn't do this on commits that have already been shared with others.\n\nWith this little warning message out of the way, let's look at some practical examples! 
\n\nNote: for easier visualization of the scenarios and workflows in this post, I’ve been using the [\"Tower\" Git desktop GUI](https://www.git-tower.com/?utm_source=gitlab&utm_medium=guestpost&utm_campaign=interactive-rebase) in some of my screenshots.\n{: .note}\n\n## Correcting an old commit message with Git rebase interactive\n\nSometimes you notice a typo in an **old commit message** – or you've forgotten to mention something in the description that is noteworthy. If we were talking about the _very last_ commit, we could have simply used the `--amend` option of the `git commit` command. But for older commits you will have to use interactive rebase to change them after the fact.\n\nHere's an example of a commit message gone horribly wrong that we want to correct:\n\n![A bad commit message that needs correction](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/bad-commit-message@2x.png){: .shadow.medium.center}\nA bad commit message that needs correction\n{: .note.text-center}\n\nThe first step in _any_ Git interactive rebase session is to **determine what part of commit history you want to manipulate**. To again take the above example: in order to change this bad commit we have to start the session at its _parent_ commit.\n\n![Starting our interactive rebase session](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/start-at-parent-commit@2x.png){: .shadow.medium.center}\nStarting our interactive rebase session\n{: .note.text-center}\n\nWe can now feed this starting commit's hash to the Git rebase interactive command:\n\n```\n$ git rebase -i 0023cddd\n```\n\nAn editor window will now open, containing a list of the commits that you just selected for manipulation. 
And don't be surprised because they are in _reverse order_: in an interactive rebase session, Git will reapply the old commits, item after item – which means that reversing the order is correct from Git's perspective.\n\n![Editor window with the selected commits](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/editor-window-start-ir@2x.png){: .shadow.medium.center}\nEditor window with the selected commits\n{: .note.text-center}\n\nOne other important thing to note about this editor window: _you don't perform the actual manipulations here_! Or, in this concrete example, you do NOT go ahead and change the commit message here. Instead, you only mark the commit you want to change with an action keyword. In our case, because we want to change a commit’s message, we mark the line with \"reword\". If you then save and close this editor window, a new one will open, containing the old commit’s message. Now is the time to finally make your changes:\n\n![Finally, we can make our changes](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/correct-commit-message.gif){: .shadow.medium.center}\nFinally, we can make our changes\n{: .note.text-center}\n\nAfter saving and closing once more, the interactive rebase session is complete and our old commit message has been corrected!\n\n## Combining multiple commits into one using interactive rebase\n\nAnother use case for interactive rebase is when you want to **combine multiple old comments into one**. Although, of course, the golden rule of version control applies: in most situations, it's beneficial to create more and smaller commits instead of a few big ones. 
However, as with everything, we might find that we have overdone this and now want to meld two or more old commits into a single one.\n\nTo make a concrete example, let's say we want to combine the following selected commits into a single one:\n\n![Let's combine multiple commits into one](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/squash-selected-commits@2x.png){: .shadow.medium.center}\nLet's combine multiple commits into one\n{: .note.text-center}\n\nJust like in our first case, we begin by starting the interactive rebase session at least at the parent commit of the one we want to manipulate.\n\n```\n$ git rebase -i 2b504bee\n```\n\nAgain, an editor window will open, listing that part of our commit history that we want to manipulate:\n\n![Marking lines with \"squash\"](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/squash-mark-commit@2x.png){: .shadow.medium.center}\nMarking lines with \"squash\"\n{: .note.text-center}\n\nThe action keyword we are going to use here is called \"squash.\" And there's only one important piece of information you need to know about squash in order to use it: _the line we mark up with the \"squash\" keyword will be combined with the line directly above_. 
That’s why, as you can see in my screenshot above, I’ve marked line #2 with \"squash\" in order to combine it with line #1.\n\nWe can now save and close the editor window and again watch and a new window appear: we are now asked to provide a commit message for the new commit that is created when combining those two old ones.\n\n![Entering a new message for the new, squashed commit](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/squash-enter-new-message@2x.png){: .shadow.medium.center}\nEntering a new message for the new, squashed commit\n{: .note.text-center}\n\nAfter saving and closing this editor window, you will see that a new commit was created that contains the changesets of both old commits. Voila!\n\n## Fixing a mistake with interactive rebase\n\nAnother use case for interactive rebase is when you found a mistake in one of your earlier commits. And it doesn't matter what exactly you messed up: you could have forgotten to add a certain change, should have deleted a file, or simply introduced a typo...\n\nThe natural tendency, in such a situation, is to simply create a new commit that corrects the mistake. But on the other hand, this will mess up our commit history: making an original commit, and then adding a \"band-aid\" commit just to fix some mistakes… that’s a messy way of working. Your commit history will soon become hard to understand, because it's littered with all those little \"quick fix commits\"!\n\nThis is where \"fixup,\" one of the tools that come with interactive rebase, comes in very handy. 
Fixup takes this \"quick fix\" commit, applies its changes to the original commit (thereby correcting it), and then gets rid of the band-aid commit:\n\n![How \"fixup\" works](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/diagram-fixup.png){: .medium.center}\nHow \"fixup\" works\n{: .note.text-center}\n\nAfter we're done, it looks as if there had never been a problem with our original commit! So let's walk through this using a practical example. \n\nThe first step is to do whatever is necessary to fix the problem: this could mean adding a new file, making changes to existing ones, deleting obsolete files... you \"just\" need to produce the changes that correct the mistake.\n\nThe next step is to commit these changes to the repository – but with a little extra: when making the commit, we are going to use the `--fixup` flag and tell Git the commit hash of our bad commit:\n\n```\n$ git add corrections.txt\n$ git commit --fixup 2b504bee\n```\n\nWhen you now take a look at the commit history, you will see that a pretty ordinarily looking commit has been created – probably not the magic and fireworks you would have expected. But if you take a closer look, you will see that something’s going on: the new commit has automatically been prepended with \"fixup !\" and the commit subject of our bad commit.\n\n![The original commit and the fix commit](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/fixup_create-fix-commit@2x.png){: .shadow.medium.center}\nThe original commit and the fix commit\n{: .note.text-center}\n\nThe third step now is to start the interactive rebase session. Again, we choose the parent of our bad commit as the starting point...\n\n```\n$ git rebase -i 0023cddd --autosquash\n```\n\n... and as the second part of the secret sauce, we are using the `--autosquash` flag. 
This option makes sure that we don't have to do _anything_ in the editor window that is now open. Take a close look at the situation:\n\n![Our fix commit is marked \"fixup\" and sorted to the right position](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/fixup_editor@2x.png){: .shadow.medium.center}\nOur fix commit is marked \"fixup\" and sorted to the right position\n{: .note.text-center}\n\nYou will see that Git automatically did two things for us:\n1. It marked our band-aid commit as \"fixup.\"\n2. It re-ordered the lines so that our band-aid commit appears directly below our bad commit. This is because fixup works exactly like squash in that it _combines with the line above_.\n\nIn other words: there's nothing left to do for us but save and close the editor window.\n\nLet's take another look at the commit history:\n\n![A happy ending!](https://about.gitlab.com/images/blogimages/how-to-keep-your-git-history-clean-with-interactive-rebase/fixup_final-corrected@2x.png){: .shadow.medium.center}\nA happy ending!\n{: .note.text-center}\n\nNot only does our originally bad commit now contain the changes from our band-aid commit. But on top of that, the ugly band-aid commit has disappeared from the commit history! Everything is nice and clean, just as if there had never been a problem!\n\n## Discover the power of Git rebase interactive\n\nThere are lots of use cases for interactive rebase – and most of them in the department of “fixing mistakes”. For an overview of other useful things you can do, I recommend the _free_ [\"First Aid Kit for Git\"](https://www.git-tower.com/learn/git/first-aid-kit?utm_source=gitlab&utm_medium=guestpost&utm_campaign=interactive-rebase): it’s a collection of short videos (2-3 min per episode) that help you learn to undo mistakes using interactive rebase and other Git tools.\n\nEditor's note: I had to use interactive rebase when reviewing this very post! 
One of my commits included an image that was greater than 1MB which is against the rules for GitLab website project. I had to go back and fix that commit to include a correctly sized image instead. Thanks for the lesson, universe! 😁\n{: .note}\n\n## More Git tips and tricks\n\n- [15 Git tips to improve your workflow](/blog/15-git-tips-improve-workflow/)\n- [How Git Partial Clone lets you fetch only the large file you need](/blog/partial-clone-for-massive-repositories/)\n- [Git happens! 6 Common Git mistakes and how to fix them](/blog/git-happens/)\n\n### About the guest author\n\n_[Tobias Günther](https://twitter.com/gntr) is the CEO of [Tower](https://www.git-tower.com/?utm_source=gitlab&utm_medium=guestpost&utm_campaign=interactive-rebase), the popular Git desktop client that helps more than 100,000 developers around the world to be more productive with Git._\n\nCover image by [David Taljat](https://www.pexels.com/@david-taljat-3748658) on [Pexels](https://www.pexels.com/photo/yellow-and-blue-line-on-gray-asphalt-road-5690623/)\n{: .note}\n",[726,9],{"slug":3119,"featured":6,"template":684},"keep-git-history-clean-with-interactive-rebase","content:en-us:blog:keep-git-history-clean-with-interactive-rebase.yml","Keep Git History Clean With Interactive Rebase","en-us/blog/keep-git-history-clean-with-interactive-rebase.yml","en-us/blog/keep-git-history-clean-with-interactive-rebase",{"_path":3125,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3126,"content":3132,"config":3138,"_id":3140,"_type":13,"title":3141,"_source":15,"_file":3142,"_stem":3143,"_extension":18},"/en-us/blog/kubernetes-overview-operate-cluster-data-on-the-frontend",{"title":3127,"description":3128,"ogTitle":3127,"ogDescription":3128,"noIndex":6,"ogImage":3129,"ogUrl":3130,"ogSiteName":669,"ogType":670,"canonicalUrls":3130,"schema":3131},"Kubernetes overview: Operate cluster data on the frontend","GitLab offers a built-in solution for monitoring your Kubernetes cluster health. 
Learn more about the technical design and functionality with this detailed guide.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099045/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2816%29_3L7ZP4GxJrShu6qImuS4Wo_1750099045397.png","https://about.gitlab.com/blog/kubernetes-overview-operate-cluster-data-on-the-frontend","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Kubernetes overview: Operate cluster data on the frontend\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Anna Vovchenko\"}],\n        \"datePublished\": \"2024-06-20\",\n      }",{"title":3127,"description":3128,"authors":3133,"heroImage":3129,"date":3135,"body":3136,"category":769,"tags":3137},[3134],"Anna Vovchenko","2024-06-20","Accessing real-time cluster information is crucial for verifying successful software deployments and initiating troubleshooting processes. In this article, you'll learn about GitLab's enhanced Kubernetes integration, including how to leverage the Watch API for real-time insights into deployment statuses and streamlined troubleshooting capabilities. \n\n## What are GitLab's Kubernetes resources?\n\nGitLab offers a dedicated [dashboard for Kubernetes](https://gitlab.com/groups/gitlab-org/-/epics/2493 \"Visualize the cluster state in GitLab\") to understand the status of connected clusters with an intuitive visual interface. It is integrated into the Environment Details page and shows resources relevant to the environment. Currently, three types of Kubernetes resources are available:\n\n- pods filtered by the Kubernetes namespace\n- services\n- Flux resource ([HelmRelease](https://fluxcd.io/flux/components/helm/helmreleases/) or [Kustomization](https://fluxcd.io/flux/components/kustomize/kustomizations/))\n\nFor these resources, we provide general information, such as name, status, namespace, age, etc. 
It is represented similarly to what the [kubectl](https://kubernetes.io/docs/reference/kubectl/) command would show when run from the Kubernetes cluster. More details can be found when clicking each resource: The side drawer shows the list of labels, annotations, and detailed status and spec information presented as read-only YAML code blocks.\n\nThe information provided helps to visualize the cluster state, spot any issues, and debug problematic deployments right away.\n\n## Frontend to cluster communication: The GitLab solution\n\nWe have developed a range of tools and solutions to enable a seamless connection and management of Kubernetes clusters within GitLab. One of the core components of this system is the [GitLab agent for Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/install/). This powerful tool provides a secure bidirectional connection between a GitLab instance and a Kubernetes cluster. It is composed of two main components: **agentk** and **KAS** (Kubernetes agent server).\n\n![Kubernetes flow chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099055/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099055229.png)\n\nagentk is a lightweight cluster-side component. It is responsible for establishing a connection to a KAS instance and waiting for requests to process. It is proxying requests from KAS to Kubernetes API. It may also actively send information about cluster events to KAS.\n\nWhile agentk is actively communicating with the cluster, KAS represents a GitLab server-side component. It is responsible for:\n\n- accepting requests from agentk\n- authenticating agentk requests by querying GitLab backend\n- fetching the agent's configuration from a corresponding Git repository using Gitaly\n- polling manifest repositories for GitOps support\n\nWe implemented the agent access rights feature to provide access from the GitLab frontend to the cluster in a secure and reliable way. 
To enable the feature, the user should update the agent’s configuration file by adding the [user_access](https://docs.gitlab.com/ee/user/clusters/agent/user_access.html) section with the following parameters: `projects`, `groups`, and `access_as` to specify which projects can access cluster information via the agent and how it should authenticate.\n\nOnce this is done, the frontend can connect to the cluster by sending a request to the Rails controller, which should set a `gitlab_kas cookie`. This cookie is then added to the request sent to KAS together with the agent ID and Cross-Site Request Forgery (CSRF) token. Upon receiving the request, KAS checks the user’s authorization and forwards it to agentk, which makes an actual request to the Kubernetes API. Then the response goes all the way back from the agentk to KAS and finally to the GitLab client.\n\n![Kubernetes overview - how it works](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099055/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750099055229.png)\n\nTo integrate this logic on the GitLab frontend and use it within the Vue app, we developed a JavaScript library: [@gitlab/cluster-client](https://gitlab.com/gitlab-org/cluster-integration/javascript-client). It is generated from the Kubernetes OpenAPI specification using the typescript-fetch generator. It provides all the Kubernetes APIs in a way that can be used in a web browser.\n\n## Introducing the Watch API\n\nThe most challenging task is to provide **real-time updates** for the Kubernetes dashboard. Kubernetes introduces the concept of watches as an extension of GET requests, exposing the body contents as a [readable stream](https://developer.mozilla.org/en-US/docs/Web/API/Streams_API/Using_readable_streams). Once connected to the stream, the Kubernetes API pushes cluster state updates similarly to how the `kubectl get \u003Cresource> --watch` command works. 
The watch mechanism allows a client to fetch the current state of the resource (or resources list) and then subscribe to subsequent changes, without missing any events. Each event contains a type of modification (one of three types: added, modified, or deleted) and the affected object.\n\nWithin the `WatchApi` class of the `@gitlab/cluster-client` library, we've developed a systematic approach for interacting with the Kubernetes API. This involves fetching a continuous stream of data, processing it line by line, and managing events based on their types. Let's explore the key components and functionalities of this approach:\n\n1. Extending the Kubernetes API: Within the WatchApi class, we extend the base Kubernetes API functionality to fetch a continuous stream of data with a specified path and query parameters. This extension enables efficient handling of large datasets, as the stream is processed line by line.\n  2. Decoding and event categorization: Upon receiving the stream, each line, typically representing a JSON object, is decoded. This process extracts relevant information and categorizes events based on their types.\n3. Internal data management: The `WatchApi` class maintains an internal data array to represent the current state of the streamed data, updating it accordingly as new data arrives or changes occur. \n4. The `WatchApi` class implements methods for registering event listeners, such as `onData`, `onError`, `onTimeout`, and `onTerminate`. These methods allow developers to customize their application's response to events like data updates, errors, and timeouts. \n\nThe code also handles scenarios such as invalid content types, timeouts, and errors from the server, emitting corresponding events for clients to handle appropriately. 
**With this straightforward, event-driven approach, the `WatchApi` class allows developers to create responsive real-time applications efficiently.**\n\n![Kubernetes overview - flow chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099055/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099055231.png)\n\n## How is the Kubernetes overview integrated with the GitLab frontend?\n\nCurrently, we have two Kubernetes integrations within the product: the Kubernetes overview section for the Environments and the full Kubernetes dashboard as a separate view. The latter is a major effort of representing all the available Kubernetes resources with filtering and sorting capabilities and a detailed view with the full information on the metadata, spec, and status of the resource. This initiative is now on hold while we are searching for the most useful ways of representing the Kubernetes resources related to an environment.\n\n[The Kubernetes overview](https://docs.gitlab.com/ee/ci/environments/kubernetes_dashboard.html) on the Environments page is a detailed view of the Kubernetes resources related to a specific environment. To access the cluster state view, the user should select an agent installed in the cluster with the appropriate access rights, provide a namespace (optionally), and select a related Flux resource.\n\nThe view renders a list of Kubernetes pods and services filtered by the namespace representing their statuses as well as the Flux sync status. Clicking each resource opens a detailed view with more information for easy issue spotting and high-level debugging. \n\n![Kubernetes overview - list of Kubernetes pods and services](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099055/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750099055233.png)\n\nWe need to set up a correct configuration object that will be used for all the API requests. 
In the configuration, we need to specify the URL provided by the KAS, that proxies the Kubernetes APIs; the GitLab agent ID to connect with; and the CSRF token. We need to include cookies so that the `kas_cookie` gets picked up and sent within the request.\n\n```javascript\ncreateK8sAccessConfig({ kasTunnelUrl, gitlabAgentId }) {\n  return {\n    basePath: kasTunnelUrl,\n    headers: {\n      'GitLab-Agent-Id': gitlabAgentId,\n      ...csrf.headers,\n    },\n    credentials: 'include',\n  };\n}\n```\n\nAll the API requests are implemented as GraphQl client queries for efficiency, flexibility, and ease of development. The query structure enables clients to fetch data from various sources in one request. With clear schema definitions, GraphQL minimizes errors and enhances developer efficiency.\n\nWhen first rendering the Kubernetes overview, the frontend requests static lists of pods, services, and Flux resource (either HelmRelease or Kustomization). The fetch request is needed to render the empty view correctly. If the frontend tried to subscribe to the Watch API stream and one of the resource lists was empty, we would wait for the updates forever and never show the actual result – 0 resources. In the case of pods and services, after the initial request, we subscribe to the stream even if an empty list was received to reflect any cluster state changes. For the Flux resource, the changes that the user would expect the resource to appear after the initial request are low. We use the empty response here as an opportunity to provide more information about the feature and its setup. \n\n![Kubernetes overview - flux sync status unavailable](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099055/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099055235.png)\n\nAfter rendering the initial result, the frontend makes additional requests to the Kubernetes API with the `?watch=true` query parameter in the URL. 
We create separate watchers for each event type – data, error, or timeout. When receiving the data, we follow three steps:\n\n- transform the data\n- update the Apollo cache\n- run a mutation to update the connection status\n\n```javascript\nwatcher.on(EVENT_DATA, (data) => {\n  result = data.map(mapWorkloadItem);\n  client.writeQuery({\n    query,\n    variables: { configuration, namespace },\n    data: { [queryField]: result },\n  });\n\n  updateConnectionStatus(client, {\n    configuration,\n    namespace,\n    resourceType: queryField,\n    status: connectionStatus.connected,\n  });\n});\n```\n\nAs we show the detailed information for each resource, we rely on having the status, spec, and metadata fields with the annotations and labels included. The Kubernetes API wouldn’t always send this information, which could break the UI and throw errors from the GraphQl client. We transform the received data first to avoid these issues. We also add the `__typename` so that we can better define the data types and simplify the queries by reusing the shared fragments.\n\nAfter data stabilization, we update the Apollo cache so that the frontend re-renders the views accordingly to reflect cluster state changes. Interestingly, we can visualize exactly what happens in the cluster – for example, when deleting the pods, Kubernetes first creates the new ones in the pending state, and only then removes the old pods. Thus, for a moment we can see double the amount of pods. We can also verify how the pods proceed from one state to another in real-time. 
This is done with the combination of added, deleted, and modified events received from the Kubernetes APIs and processed in the `WatchApi` class of the `@gitlab/cluster-client` library.\n\n![Kubernetes overview - states of connection status](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099055/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099055236.gif)\n\nBy default, with a single Watch request, we get a stream of events for five minutes, and then it hits the timeout. We need to properly reflect this on the frontend so that the user is aware of any outdated information. To achieve this, we introduced a `k8sConnection` query together with `reconnectToCluster` mutation. We have a UI element – a badge with a tooltip to indicate the connection status. It has three states: connecting, connected, and disconnected. The state gets updated within every step of the UX flow. First, we set it to `connecting` once the Watch client gets created. Then we update it to `connected` with the first received piece of data. Last, we trigger the mutation for `disconnected` state when an error or timeout event occurs. This way, we can let the user refresh the view and reconnect to the stream without the need of refreshing the browser tab. Relying on the user action to reconnect to the stream helps us save resources and only request the necessary data while ensuring the accurate cluster state is available for the user at any time.\n\n## What’s next?\n\nLeveraging the Kubernetes built-in functionality for watching the Readable stream helped us to build the functionality quickly and provide the Kubernetes UI solution to our customers, getting early feedback and adjusting the product direction. This approach, however, presented technical challenges, such as the inability to utilize the GraphQl subscriptions and the need for reconnecting to the stream.\n\nWe are planning our next iterations to enhance the Kubernetes overview within GitLab UI. 
One of the planned iterations for the feature, [Frontend-friendly Kubernetes Watch API](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent/-/issues/541), is an updated mechanism of batch-watching the cluster data and moving from the fetch Readable stream to WebSockets. We are going to create a new API in KAS to expose the Kubernetes watch capability via WebSocket. This should reduce the complexity of the JavaScript code, resolve the timeout issue, and improve the compatibility of the Kubernetes APIs within GitLab frontend integrations.\n\n> Curious to learn more or want to try out this functionality? Visit our [Kubernetes Dashboard documentation](https://docs.gitlab.com/ee/ci/environments/kubernetes_dashboard.html) for more details and configuration tips.\n",[1225,680,9],{"slug":3139,"featured":90,"template":684},"kubernetes-overview-operate-cluster-data-on-the-frontend","content:en-us:blog:kubernetes-overview-operate-cluster-data-on-the-frontend.yml","Kubernetes Overview Operate Cluster Data On The Frontend","en-us/blog/kubernetes-overview-operate-cluster-data-on-the-frontend.yml","en-us/blog/kubernetes-overview-operate-cluster-data-on-the-frontend",{"_path":3145,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3146,"content":3152,"config":3157,"_id":3159,"_type":13,"title":3160,"_source":15,"_file":3161,"_stem":3162,"_extension":18},"/en-us/blog/learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions",{"title":3147,"description":3148,"ogTitle":3147,"ogDescription":3148,"noIndex":6,"ogImage":3149,"ogUrl":3150,"ogSiteName":669,"ogType":670,"canonicalUrls":3150,"schema":3151},"Learn advanced Rust programming with a little help from AI","Use this guided tutorial, along with AI-powered GitLab Duo Code Suggestions, to continue learning advanced Rust 
programming.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662439/Blog/Hero%20Images/codewithheart.png","https://about.gitlab.com/blog/learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Learn advanced Rust programming with a little help from AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2023-10-12\",\n      }",{"title":3147,"description":3148,"authors":3153,"heroImage":3149,"date":3154,"body":3155,"category":702,"tags":3156},[1612],"2023-10-12","When I started learning a new programming language more than 20 years ago, we had access to the Visual Studio 6 MSDN library, installed from 6 CD-ROMs. Algorithms with pen and paper, design pattern books, and MSDN queries to figure out the correct type were often time-consuming. Learning a new programming language changed fundamentally in the era of remote collaboration and artificial intelligence (AI). Now you can spin up a [remote development workspace](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/), share your screen, and engage in a group programming session. With the help of [GitLab Duo Code Suggestions](/gitlab-duo/), you always have an intelligent partner at your fingertips. Code Suggestions can learn from your programming style and experience. 
They only need input and context to provide you with the most efficient suggestions.\n\nIn this tutorial, we build on the [getting started blog post](/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/) and design and create a simple feed reader application.\n\n- [Preparations](#preparations)\n    - [Code Suggestions](#code-suggestions)\n- [Continue learning Rust](#continue-learning-rust)\n    - [Hello, Reader App](#hello-reader-app)\n    - [Initialize project](#initialize-project)\n    - [Define RSS feed URLs](#define-rss-feed-urls)\n- [Modules](#modules)\n    - [Call the module function in main()](#call-the-module-function-in-main)\n- [Crates](#crates)\n    - [feed-rs: parse XML feed](#feed-rs-parse-xml-feed)\n- [Runtime configuration: Program arguments](#runtime-configuration-program-arguments)\n    - [User input error handling](#user-input-error-handling)\n- [Persistence and data storage](#persistence-and-data-storage)\n- [Optimization](#optimization)\n    - [Asynchronous execution](#asynchronous-execution)\n    - [Spawning threads](#spawning-threads)\n    - [Function scopes, threads, and closures](#function-scopes-threads-and-closures)\n- [Parse feed XML into objects](#parse-feed-xml-into-object-types)\n    - [Map generic feed data types](#map-generic-feed-data-types)\n    - [Error handling with Option::unwrap()](#error-handling-with-option-unwrap)\n- [Benchmarks](#benchmarks)\n    - [Sequential vs. 
Parallel execution benchmark](#sequential-vs-parallel-execution-benchmark)\n    - [CI/CD with Rust caching](#cicd-with-rust-caching)\n- [What is next](#what-is-next)\n    - [Async learning exercises](#async-learning-exercises)\n    - [Share your feedback](#share-your-feedback)\n\n## Preparations\nBefore diving into the source code, make sure to set up [VS Code](/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/#vs-code) and [your development environment with Rust](/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/#development-environment-for-rust).\n\n### Code Suggestions\nFamiliarize yourself with suggestions before actually verifying the suggestions. GitLab Duo Code Suggestions are provided as you type, so you do not need to use specific keyboard shortcuts. To accept a code suggestion, press the `tab` key. Also note that writing new code works more reliably than refactoring existing code. AI is non-deterministic, which means that the same suggestion may not be repeated after deleting the code suggestion. While Code Suggestions is in Beta, we are working on improving the accuracy of generated content overall. Please review the [known limitations](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#known-limitations), as this could affect your learning experience.\n\n**Tip:** The latest release of Code Suggestions supports multi-line instructions. You can refine the specifications to your needs to get better suggestions.\n\n```rust\n    // Create a function that iterates over the source array\n    // and fetches the data using HTTP from the RSS feed items.\n    // Store the results in a new hash map.\n    // Print the hash map to the terminal.\n```\n\nThe VS Code extension overlay is shown when offering a suggestion. You can use the `tab` key to accept the suggested line(s), or `cmd cursor right` to accept one word. 
Additionally, the three dots menu allows you to always show the toolbar.\n\n![VS Code GitLab Duo Code Suggestions overlay with instructions](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_code_suggestions_options_overlay_keep_toolbar.png){: .shadow}\n\n## Continue learning Rust\nNow, let us continue learning Rust, which is one of the [supported languages in Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#supported-languages). [Rust by Example](https://doc.rust-lang.org/rust-by-example/) provides an excellent tutorial for beginners, together with the official [Rust book](https://doc.rust-lang.org/book/). Both resources are referenced throughout this blog post.\n\n### Hello, Reader App\nThere are many ways to create an application and learn Rust. Some of them involve using existing Rust libraries - so-called `Crates`. We will use them a bit further into the blog post. For example, you could create a command-line app that processes images and writes the result to a file. Solving a classic maze or writing a Sudoku solver can also be a fun challenge. Game development is another option. The book [Hands-on Rust](https://hands-on-rust.com/) provides a thorough learning path by creating a dungeon crawler game. My colleague Fatima Sarah Khalid started the [Dragon Realm in C++ with a little help from AI](/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions/) -- check it out, too.\n\nHere is a real use case that helps solve an actual problem: Collecting important information from different sources into RSS feeds for (security) releases, blog posts, and social discussion forums like Hacker News. Often, we want to filter for specific keywords or versions mentioned in the updates. These requirements allow us to formulate a requirements list for our application:\n\n1. Fetch data from different sources (HTTP websites, REST API, RSS feeds). 
RSS feeds in the first iteration.\n1. Parse the data.\n1. Present the data to the user, or write it to disk.\n1. Optimize performance.\n\nThe following example application output will be available after the learning steps in this blog post:\n\n![VS Code Terminal, cargo run with formatted feed entries output](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_cargo_run_formatted_output_final.png)\n\nThe application should be modular and build the foundation to add more data types, filters, and hooks to trigger actions at a later point.\n\n### Initialize project\nReminder: `cargo init` in the project root creates the file structure, including the `main()` entrypoint. Therefore, we will learn how to create and use Rust modules in the next step.\n\nCreate a new directory called `learn-rust-ai-app-reader`, change into it and run `cargo init`. This command implicitly runs `git init` to initialize a new Git repository locally. The remaining step is to configure the Git remote repository path, for example, `https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai-app-reader`. Please adjust the path for your namespace. Pushing the Git repository [automatically creates a new private project in GitLab](https://docs.gitlab.com/ee/user/project/#create-a-new-project-with-git-push).\n\n```shell\nmkdir learn-rust-ai-app-reader\ncd learn-rust-ai-app-reader\n\ncargo init\n\ngit remote add origin https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai-app-reader.git\ngit push --set-upstream origin main\n```\n\nOpen VS Code from the newly created directory. The `code` CLI will spawn a new VS Code window on macOS.\n\n```shell\ncode .\n```\n\n### Define RSS feed URLs\nAdd a new hashmap to store the RSS feed URLs inside the `src/main.rs` file in the `main()` function. 
You can instruct GitLab Duo Code Suggestions with a multi-line comment to create a [`HashMap`](https://doc.rust-lang.org/stable/std/collections/struct.HashMap.html) object, and initialize it with default values for Hacker News, and TechCrunch. Note: Verify that the URLs are correct when you get suggestions.\n\n```rust\nfn main() {\n    // Define RSS feed URLs in the variable rss_feeds\n    // Use a HashMap\n    // Add Hacker News and TechCrunch\n    // Ensure to use String as type\n\n}\n```\n\nNote that the code comment provides instructions for:\n\n1. The variable name `rss_feeds`.\n2. The `HashMap` type.\n3. Initial seed key/value pairs.\n4. String as type (can be seen with `to_string()` calls).\n\nOne possible suggested path can be as follows:\n\n```rust\nuse std::collections::HashMap;\n\nfn main() {\n    // Define RSS feed URLs in the variable rss_feeds\n    // Use a HashMap\n    // Add Hacker News and TechCrunch\n    // Ensure to use String as type\n    let rss_feeds = HashMap::from([\n        (\"Hacker News\".to_string(), \"https://news.ycombinator.com/rss\".to_string()),\n        (\"TechCrunch\".to_string(), \"https://techcrunch.com/feed/\".to_string()),\n    ]);\n\n}\n```\n\n![VS Code with Code Suggestions for RSS feed URLs for Hacker News and TechCrunch](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_main_array_rss_feed_urls_suggested.png)\n\nOpen a new terminal in VS Code (cmd shift p - search for `terminal`), and run `cargo build` to build the changes. The error message instructs you to add the `use std::collections::HashMap;` import.\n\nThe next step is to do something with the RSS feed URLs. [The previous blog post](/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/) taught us to split code into functions. 
We want to organize the code more modularly for our reader application, and use Rust modules.\n\n## Modules\n[Modules](https://doc.rust-lang.org/rust-by-example/mod.html) help with organizing code. They can also be used to hide functions into the module scope, limiting access to them from the main() scope. In our reader application, we want to fetch the RSS feed content, and parse the XML response. The `main()` caller should only be able to access the `get_feeds()` function, while other functionality is only available in the module.\n\nCreate a new file `feed_reader.rs` in the `src/` directory. Instruct Code Suggestions to create a public module named `feed_reader`, and a public function `get_feeds()` with a String HashMap as input. Important: The file and module names need to be the same, following the [Rust module structure](https://doc.rust-lang.org/book/ch07-02-defining-modules-to-control-scope-and-privacy.html).\n\n![Code Suggestions: Create public module, with function and input types](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_public_module_function_input.png){: .shadow}\n\nInstructing Code Suggestions with the input variable name and type will also import the required `std::collections::HashMap` module. Tip: Experiment with the comments, and refine the variable types to land the best results. 
Passing function parameters as object references is considered best practice in Rust, for example.\n\n```rust\n// Create public module feed_reader\n// Define get_feeds() function which takes rss_feeds as String HashMap reference as input\npub mod feed_reader {\n    use std::collections::HashMap;\n\n    pub fn get_feeds(rss_feeds: &HashMap\u003CString, String>) {\n        // Do something with the RSS feeds\n    }\n}\n```\n\n![Code Suggestions: Public module with `get_feeds()` function, and suggested input variable](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_public_module_function_input.png){: .shadow}\n\nInside the function, continue to instruct Code Suggestions with the following steps:\n\n1. `// Iterate over the RSS feed URLs`\n2. `// Fetch URL content`\n3. `// Parse XML body`\n4. `// Print the result`\n\n![Code Suggestions: Public module with `get_feeds()` function, step 1: Iterate](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_module_function_01_iterate.png){: .shadow}\n\n![Code Suggestions: Public module with `get_feeds()` function, step 2: Fetch URL content](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_module_function_02_fetch_content.png){: .shadow}\n\n![Code Suggestions: Public module with `get_feeds()` function, step 3: Parse XML body](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_module_function_03_parse_body.png){: .shadow}\n\n![Code Suggestions: Public module with `get_feeds()` function, step 4: Print the results](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_module_function_04_print_result.png){: .shadow}\n\nThe following code can be suggested:\n\n```rust\n// Create public 
module feed_reader\n// Define get_feeds() function which takes rss_feeds as String HashMap reference as input\npub mod feed_reader {\n    use std::collections::HashMap;\n\n    pub fn get_feeds(rss_feeds: &HashMap\u003CString, String>) {\n        // Iterate over the RSS feed URLs\n        for (name, url) in rss_feeds {\n            println!(\"{}: {}\", name, url);\n\n            // Fetch URL content\n            let body = reqwest::blocking::get(url).unwrap().text().unwrap();\n\n            // Parse XML body\n            let parsed_body = roxmltree::Document::parse(&body).unwrap();\n\n            // Print the result\n            println!(\"{:#?}\", parsed_body);\n        }\n    }\n}\n```\n\nYou see a new keyword here: [`unwrap()`](https://doc.rust-lang.org/rust-by-example/error/option_unwrap.html). Rust does not support `null` values, and uses the [`Option` type](https://doc.rust-lang.org/rust-by-example/std/option.html) for any value. If you are certain to use a specific wrapped type, for example, `Text` or `String`, you can call the `unwrap()` method to get the value. The `unwrap()` method will panic if the value is `None`.\n\n**Note** Code Suggestions referred to the `reqwest::blocking::get` function for the `// Fetch URL content` comment instruction. The [`reqwest` crate](https://docs.rs/reqwest/latest/reqwest/) name is intentional and not a typo. It provides a convenient, higher-level HTTP client for async and blocking requests.\n\nParsing the XML body is tricky - you might get different results, and the schema is not the same for every RSS feed URL. Let us try to call the `get_feeds()` function, and then work on improving the code.\n\n### Call the module function in main()\n\nThe main() function does not know about the `get_feeds()` function yet, so we need to import its module. In other programming languages, you might have seen the keywords `include` or `import`. The Rust module system is different.\n\nModules are organized in path directories. 
In our example, both source files exist on the same directory level. `feed_reader.rs` is interpreted as crate, containing one module called `feed_reader`, which defines the function `get_feeds()`.\n\n```\nsrc/\n  main.rs\n  feed_reader.rs\n```\n\nIn order to access `get_feeds()` from the `feed_reader.rs` file, we need to [bring module path](https://doc.rust-lang.org/book/ch07-04-bringing-paths-into-scope-with-the-use-keyword.html) into the `main.rs` scope first, and then call the full function path.\n\n```rust\nmod feed_reader;\n\nfn main() {\n\n    feed_reader::feed_reader::get_feeds(&rss_feeds);\n\n```\n\nAlternatively, we can import the full function path with the `use` keyword, and later use the short function name.\n\n```rust\nmod feed_reader;\nuse feed_reader::feed_reader::get_feeds;\n\nfn main() {\n\n    get_feeds(&rss_feeds);\n\n```\n\n**Tip:** I highly recommend reading the [Clear explanation of the Rust module system blog post](https://www.sheshbabu.com/posts/rust-module-system/) to get a better visual understanding.\n\n```diff\n\nfn main() {\n    // ...\n\n    // Print feed_reader get_feeds() output\n    println!(\"{}\", feed_reader::get_feeds(&rss_feeds));\n```\n\n```rust\nuse std::collections::HashMap;\n\nmod feed_reader;\n// Alternative: Import full function path\n//use feed_reader::feed_reader::get_feeds;\n\nfn main() {\n    // Define RSS feed URLs in the variable rss_feeds\n    // Use a HashMap\n    // Add Hacker News and TechCrunch\n    // Ensure to use String as type\n    let rss_feeds = HashMap::from([\n        (\"Hacker News\".to_string(), \"https://news.ycombinator.com/rss\".to_string()),\n        (\"TechCrunch\".to_string(), \"https://techcrunch.com/feed/\".to_string()),\n    ]);\n\n    // Call get_feeds() from feed_reader module\n    feed_reader::feed_reader::get_feeds(&rss_feeds);\n    // Alternative: Imported full path, use short path here.\n    //get_feeds(&rss_feeds);\n}\n```\n\nRun `cargo build` in the terminal again to build the 
code.\n\n```shell\ncargo build\n```\n\nPotential build errors when Code Suggestions refer to common code and libraries for HTTP requests, and XML parsing:\n\n1. Error: `could not find blocking in reqwest`. Solution: Enable the `blocking` feature for the crate in `Cargo.toml`: `reqwest = { version = \"0.11.20\", features = [\"blocking\"] }`.\n2. Error: `failed to resolve: use of undeclared crate or module reqwest`. Solution: Add the `reqwest` crate.\n3. Error: `failed to resolve: use of undeclared crate or module roxmltree`. Solution: Add the `roxmltree` crate.\n\n```shell\nvim Cargo.toml\n\nreqwest = { version = \"0.11.20\", features = [\"blocking\"] }\n```\n\n**Tip:** Copy the error message string, with a leading `Rust \u003Cerror message>` into your preferred browser to check whether a missing crate is available. Usually this search leads to a result on crates.io and you can add the missing dependencies.\n\nWhen the build is successful, run the code with `cargo run` and inspect the Hacker News RSS feed output.\n\n![VS Code terminal, cargo run to fetch Hacker News XML feed](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_fetch_rss_feed_output_hacker_news.png){: .shadow}\n\nWhat is next with parsing the XML body into human-readable format? In the next section, we will learn about existing solutions and how Rust crates come into play.\n\n## Crates\nRSS feeds share a common set of protocols and specifications. It feels like reinventing the wheel to parse XML items and understand the lower object structure. 
Recommendation for these types of tasks: Look whether someone else had the same problem already and might have created code to solve the problem.\n\nReusable library code in Rust is organized in so-called [`Crates`](https://doc.rust-lang.org/rust-by-example/crates.html), and made available in packages, and the package registry on crates.io. You can add these dependencies to your project by editing the `Cargo.toml` in the `[dependencies]` section, or using `cargo add \u003Cname>`.\n\nFor the reader app, we want to use the [feed-rs crate](https://crates.io/crates/feed-rs). Open a new terminal, and run the following command:\n\n```shell\ncargo add feed-rs\n```\n\n![VS Code Terminal: Add crate, verify in Cargo.toml](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_rust_crate_add_feed-rs_explained.png)\n\n### feed-rs: parse XML feed\nNavigate into `src/feed_reader.rs` and modify the part where we parse the XML body. Code Suggestions understands how to call the `feed-rs` crate `parser::parse` function -- there is only one specialty here: `feed-rs` [expects string input as raw bytes](https://docs.rs/feed-rs/latest/feed_rs/parser/fn.parse_with_uri.html) to determine the encoding itself. 
We can provide instructions in the comment to get the expected result though.\n\n```rust\n            // Parse XML body with feed_rs parser, input in bytes\n            let parsed_body = feed_rs::parser::parse(body.as_bytes()).unwrap();\n```\n\n![Code Suggestions: Public module with `get_feeds()` function, step 5: Modify XML parser to feed-rs](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_module_function_05_use_feed_rs_to_parse.png){: .shadow}\n\nThe benefit of using `feed-rs` is not immediately visible until you see the printed output with `cargo run`: All keys and values are mapped to their respective Rust object types, and can be used for further operations.\n\n![VS Code terminal, cargo run to fetch Hacker News XML feed](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_fetch_rss_feed_output_hacker_news_feed_rs.png){: .shadow}\n\n## Runtime configuration: Program arguments\nUntil now, we have run the program with hard-coded RSS feed values compiled into the binary. The next step is allowing to configure the RSS feeds at runtime.\n\nRust provides [program arguments](https://doc.rust-lang.org/rust-by-example/std_misc/arg.html) in the standard misc library. [Parsing the arguments](https://doc.rust-lang.org/rust-by-example/std_misc/arg/matching.html) provides a better and faster learning experience than aiming for advanced program argument parsers (for example, the [clap](https://docs.rs/clap/latest/clap/) crate), or moving the program parameters into a configuration file and format ([TOML](https://toml.io/en/), YAML). You are reading these lines after I tried and failed with different routes for the best learning experience. 
This should not stop you from taking the challenge to configure RSS feeds in alternative ways.\n\nAs a boring solution, the command parameters can be passed as `\"name,url\"` string value pairs, and then are split by the `,` character to extract the name and URL values. The comment instructs Code Suggestions to perform these operations and extend the `rss_feeds` HashMap with the new values. Note that the variable might not be mutable, and, therefore, needs to be modified to `let mut rss_feeds`.\n\nNavigate into `src/main.rs` and add the following code to the `main()` function after the `rss_feeds` variable. Start with a comment to define the program arguments, and check the suggested code snippets.\n\n```rust\n    // Program args, format \"name,url\"\n    // Split value by , into name, url and add to rss_feeds\n```\n\n![Code suggestions for program arguments, and splitting name,URL values for the rss_feeds variable](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_program_args_boring_solution.png){: .shadow}\n\nThe full code example can look like the following:\n\n```rust\nfn main() {\n    // Define RSS feed URLs in the variable rss_feeds\n    // Use a HashMap\n    // Add Hacker News and TechCrunch\n    // Ensure to use String as type\n    let mut rss_feeds = HashMap::from([\n        (\"Hacker News\".to_string(), \"https://news.ycombinator.com/rss\".to_string()),\n        (\"TechCrunch\".to_string(), \"https://techcrunch.com/feed/\".to_string()),\n    ]);\n\n    // Program args, format \"name,url\"\n    // Split value by , into name, url and add to rss_feeds\n    for arg in std::env::args().skip(1) {\n        let mut split = arg.split(\",\");\n        let name = split.next().unwrap();\n        let url = split.next().unwrap();\n        rss_feeds.insert(name.to_string(), url.to_string());\n    }\n\n    // Call get_feeds() from feed_reader module\n    
feed_reader::feed_reader::get_feeds(&rss_feeds);\n    // Alternative: Imported full path, use short path here.\n    //get_feeds(&rss_feeds);\n}\n```\n\nYou can pass program arguments directly to the `cargo run` command, preceding the arguments with `--`. Enclose all arguments with double quotes, put the name followed by a comma and the RSS feed URL as argument. Separate all arguments with whitespaces.\n\n```\ncargo build\n\ncargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\" \"CNCF,https://www.cncf.io/feed/\"\n```\n\n![VS Code terminal, RSS feed output example for the GitLab blog](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_gitlab_blog_rss_feed_example.png){: .shadow}\n\n### User input error handling\nIf the provided user input does not match the program expectation, we need to [throw an error](https://doc.rust-lang.org/rust-by-example/error.html) and help the caller to fix the program arguments. For example, passing a malformed URL format should be treated as a runtime error. Instruct Code Suggestions with a code comment to throw an error if the URL is not valid.\n\n```rust\n    // Ensure that URL contains a valid format, otherwise throw an error\n```\n\nOne possible solution is to check if the `url` variable starts with `http://` or `https://`. If not, throw an error using the [panic! macro](https://doc.rust-lang.org/rust-by-example/std/panic.html). 
The full code example looks like the following:\n\n```rust\n    // Program args, format \"name,url\"\n    // Split value by , into name, url and add to rss_feeds\n    for arg in std::env::args().skip(1) {\n        let mut split = arg.split(\",\");\n        let name = split.next().unwrap();\n        let url = split.next().unwrap();\n\n        // Ensure that URL contains a valid format, otherwise throw an error\n        if !url.starts_with(\"http://\") && !url.starts_with(\"https://\") {\n            panic!(\"Invalid URL format: {}\", url);\n        }\n\n        rss_feeds.insert(name.to_string(), url.to_string());\n    }\n```\n\nTest the error handling with removing a `:` in one of the URL strings. Add the `RUST_BACKTRACE=full` environment variable to get more verbose output when the `panic()` call happens.\n\n```\nRUST_BACKTRACE=full cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\" \"CNCF,https//www.cncf.io/feed/\"\n```\n\n![VS Code Terminal with wrong URL format, panic error backtrace](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_url_format_error_panic_backtrace.png){: .shadow}\n\n## Persistence and data storage\nThe boring solution for storing the feed data is to dump the parsed body into a new file. 
Instruct Code Suggestions to use a pattern that includes the RSS feed name, and the current ISO date.\n\n```rust\n    // Parse XML body with feed_rs parser, input in bytes\n    let parsed_body = feed_rs::parser::parse(body.as_bytes()).unwrap();\n\n    // Print the result\n    println!(\"{:#?}\", parsed_body);\n\n    // Dump the parsed body to a file, as name-current-iso-date.xml\n    let now = chrono::offset::Local::now();\n    let filename = format!(\"{}-{}.xml\", name, now.format(\"%Y-%m-%d\"));\n    let mut file = std::fs::File::create(filename).unwrap();\n    file.write_all(body.as_bytes()).unwrap();\n```\n\nA possible suggestion will include using the [chrono crate](https://crates.io/crates/chrono). Add it using `cargo add chrono` and then invoke `cargo build` and `cargo run` again.\n\nThe files are written into the same directory where `cargo run` was executed. If you are executing the binary directly in the `target/debug/` directory, all files will be dumped there.\n\n![VS Code with CNCF RSS feed content file, saved on disk](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_cncf_rss_feed_saved_on_disk.png)\n\n## Optimization\nThe entries in the `rss_feeds` variable are executed sequentially. Imagine having a list of 100+ URLs configured - this could take a long time to fetch and process. What if we could execute multiple fetch requests in parallel?\n\n### Asynchronous execution\nRust provides [threads](https://doc.rust-lang.org/book/ch16-01-threads.html) for asynchronous execution.\n\nThe simplest solution will be spawning a thread for each RSS feed URL. We will discuss optimization strategies later. 
Before you continue with parallel execution, measure the sequential code execution time by preceding the `cargo run` command with `time`.\n\n```\ntime cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\" \"CNCF,https://www.cncf.io/feed/\"\n\n0.21s user 0.08s system 10% cpu 2.898 total\n```\n\nNote that this exercise could require more manual code work. It is recommended to persist the sequential working state in a new Git commit and branch `sequential-exec`, to better compare the impact of parallel execution.\n\n```shell\ngit commit -avm \"Sequential execution working\"\ngit checkout -b sequential-exec\ngit push -u origin sequential-exec\n\ngit checkout main\n```\n\n### Spawning threads\nOpen `src/feed_reader.rs` and refactor the `get_feeds()` function. Start with a Git commit for the current state, and then delete the contents of the function scope. Add the following code comments with instructions for Code Suggestions:\n\n1. `// Store threads in vector`: Store thread handles in a vector, so we can wait for them to finish at the end of the function call.\n2. `// Loop over rss_feeds and spawn threads`: Create boilerplate code for iterating over all RSS feeds, and spawn a new thread.\n\nAdd the following `use` statements to work with the `thread` and `time` modules.\n\n```rust\n    use std::thread;\n    use std::time::Duration;\n```\n\nContinue writing the code, and close the for loop. 
Code Suggestions will automatically propose adding the thread handle in the `threads` vector variable, and offer to join the threads at the end of the function.\n\n```rust\n    pub fn get_feeds(rss_feeds: &HashMap\u003CString, String>) {\n\n        // Store threads in vector\n        let mut threads: Vec\u003Cthread::JoinHandle\u003C()>> = Vec::new();\n\n        // Loop over rss_feeds and spawn threads\n        for (name, url) in rss_feeds {\n            let thread_name = name.clone();\n            let thread_url = url.clone();\n            let thread = thread::spawn(move || {\n\n            });\n            threads.push(thread);\n        }\n\n        // Join threads\n        for thread in threads {\n            thread.join().unwrap();\n        }\n    }\n```\n\nAdd the `thread` crate, build and run the code again.\n\n```shell\ncargo add thread\n\ncargo build\n\ncargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\" \"CNCF,https://www.cncf.io/feed/\"\n```\n\nAt this stage, no data is processed or printed. Before we continue re-adding the functionality, let us learn about the newly introduced keywords here.\n\n### Function scopes, threads, and closures\nThe suggested code brings new keywords and design patterns to learn. The thread handle is of the type `thread::JoinHandle`, indicating that we can use it to wait for the threads to finish ([join()](https://doc.rust-lang.org/book/ch16-01-threads.html#waiting-for-all-threads-to-finish-using-join-handles)).\n\n`thread::spawn()` spawns a new thread, where we can pass a function object. In this case, a [closure](https://doc.rust-lang.org/book/ch13-01-closures.html) expression is passed as anonymous function. Closure inputs are passed using the `||` syntax. You will recognize the [`move` Closure](https://doc.rust-lang.org/book/ch16-01-threads.html#using-move-closures-with-threads), which moves the function scoped variables into the thread scope. 
This avoids manually specifying which variables need to be passed into the new function/closure scope.\n\nThere is a limitation though: `rss_feeds` is a reference `&`, passed as parameter by the `get_feeds()` function caller. The variable is only valid in the function scope. Use the following code snippet to provoke this error:\n\n```rust\npub fn get_feeds(rss_feeds: &HashMap\u003CString, String>) {\n\n    // Store threads in vector\n    let mut threads: Vec\u003Cthread::JoinHandle\u003C()>> = Vec::new();\n\n    // Loop over rss_feeds and spawn threads\n    for (key, value) in rss_feeds {\n        let thread = thread::spawn(move || {\n            println!(\"{}\", key);\n        });\n    }\n}\n```\n\n![VS Code Terminal, variable scope error with references and thread move closure](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_cargo_build_error_function_threads_variable_scopes.png){: .shadow}\n\nAlthough the `key` variable was created in the function scope, it references the `rss_feeds` variable, and therefore, it cannot be moved into the thread scope. 
Any values accessed from the function parameter `rss_feeds` hash map will require a local copy with `clone()`.\n\n![VS Code Terminal, thread spawn with clone](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_rust_thread_spawn_clone.png){: .shadow}\n\n```rust\npub fn get_feeds(rss_feeds: &HashMap\u003CString, String>) {\n\n    // Store threads in vector\n    let mut threads: Vec\u003Cthread::JoinHandle\u003C()>> = Vec::new();\n\n    // Loop over rss_feeds and spawn threads\n    for (name, url) in rss_feeds {\n        let thread_name = name.clone();\n        let thread_url = url.clone();\n        let thread = thread::spawn(move || {\n            // Use thread_name and thread_url as values, see next chapter for instructions.\n```\n\n## Parse feed XML into object types\nThe next step is to repeat the RSS feed parsing steps in the thread closure. Add the following code comments with instructions for Code Suggestions:\n\n1. `// Parse XML body with feed_rs parser, input in bytes` to tell Code Suggestions that we want to fetch the RSS feed URL content, and parse it with the `feed_rs` crate functions.\n2. `// Check feed_type attribute feed_rs::model::FeedType::RSS2 or Atom and print its name`: Extract the feed type by comparing the `feed_type` attribute with the [`feed_rs::model::FeedType`](https://docs.rs/feed-rs/latest/feed_rs/model/enum.FeedType.html). 
This needs more direct instructions for Code Suggestions telling it about the exact Enum values to match against.\n\n![Instruct Code Suggestions to match against specific feed types](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_feed_rs_type_condition.png){: .shadow}\n\n```rust\n            // Parse XML body with feed_rs parser, input in bytes\n            let body = reqwest::blocking::get(thread_url).unwrap().bytes().unwrap();\n            let feed = feed_rs::parser::parse(body.as_ref()).unwrap();\n\n            // Check feed_type attribute feed_rs::model::FeedType::RSS2 or Atom and print its name\n            if feed.feed_type == feed_rs::model::FeedType::RSS2 {\n                println!(\"{} is an RSS2 feed\", thread_name);\n            } else if feed.feed_type == feed_rs::model::FeedType::Atom {\n                println!(\"{} is an Atom feed\", thread_name);\n            }\n```\n\nBuild and run the program again, and verify its output.\n\n```\ntime cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\" \"CNCF,https://www.cncf.io/feed/\"\n\nCNCF is an RSS2 feed\nTechCrunch is an RSS2 feed\nGitLab Blog is an Atom feed\nHacker News is an RSS2 feed\n```\n\nLet us verify this output by opening the feed URLs in the browser, or inspecting the previously downloaded files.\n\nHacker News supports RSS version 2.0, with `channel(title,link,description,item(title,link,pubDate,comments))`. 
TechCrunch and the CNCF blog follow a similar structure.\n```xml\n\u003Crss version=\"2.0\">\u003Cchannel>\u003Ctitle>Hacker News\u003C/title>\u003Clink>https://news.ycombinator.com/\u003C/link>\u003Cdescription>Links for the intellectually curious, ranked by readers.\u003C/description>\u003Citem>\u003Ctitle>Writing a debugger from scratch: Breakpoints\u003C/title>\u003Clink>https://www.timdbg.com/posts/writing-a-debugger-from-scratch-part-5/\u003C/link>\u003CpubDate>Wed, 27 Sep 2023 06:31:25 +0000\u003C/pubDate>\u003Ccomments>https://news.ycombinator.com/item?id=37670938\u003C/comments>\u003Cdescription>\u003C![CDATA[\u003Ca href=\"https://news.ycombinator.com/item?id=37670938\">Comments\u003C/a>]]>\u003C/description>\u003C/item>\u003Citem>\n```\n\nThe GitLab blog uses the [Atom](https://datatracker.ietf.org/doc/html/rfc4287) feed format similar to RSS, but still requires different parsing logic.\n```xml\n\u003C?xml version='1.0' encoding='utf-8' ?>\n\u003Cfeed xmlns='http://www.w3.org/2005/Atom'>\n\u003C!-- / Get release posts -->\n\u003C!-- / Get blog posts -->\n\u003Ctitle>GitLab\u003C/title>\n\u003Cid>https://about.gitlab.com/blog\u003C/id>\n\u003Clink href='https://about.gitlab.com/blog/' />\n\u003Cupdated>2023-09-26T00:00:00+00:00\u003C/updated>\n\u003Cauthor>\n\u003Cname>The GitLab Team\u003C/name>\n\u003C/author>\n\u003Centry>\n\u003Ctitle>Atlassian Server ending: Goodbye disjointed toolchain, hello DevSecOps platform\u003C/title>\n\u003Clink href='https://about.gitlab.com/blog/atlassian-server-ending-move-to-a-single-devsecops-platform/' rel='alternate' />\n\u003Cid>https://about.gitlab.com/blog/atlassian-server-ending-move-to-a-single-devsecops-platform/\u003C/id>\n\u003Cpublished>2023-09-26T00:00:00+00:00\u003C/published>\n\u003Cupdated>2023-09-26T00:00:00+00:00\u003C/updated>\n\u003Cauthor>\n\u003Cname>Dave Steer, Justin Farris\u003C/name>\n\u003C/author>\n```\n\n### Map generic feed data types\nUsing 
[`roxmltree::Document::parse`](https://docs.rs/roxmltree/latest/roxmltree/struct.Document.html) would require us to understand the XML node tree and its specific tag names. Fortunately, [feed_rs::model::Feed](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Feed.html) provides a combined model for RSS and Atom feeds, therefore let us continue using the `feed_rs` crate.\n\n1. Atom: Feed->Feed, Entry->Entry\n2. RSS: Channel->Feed, Item->Entry\n\nIn addition to the mapping above, we need to extract the required attributes, and map their data types. It is helpful to open the [feed_rs::model documentation](https://docs.rs/feed-rs/latest/feed_rs/model/index.html) to understand the structs and their fields and implementations. Otherwise, some suggestions would result in type conversion errors and compilation failures, that are specific to the `feed_rs` implementation.\n\nA [`Feed`](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Feed.html) struct provides the `title`, type `Option\u003CText>` (either a value is set, or nothing). An [`Entry`](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Entry.html) struct provides:\n\n1. `title`: `Option\u003CText>`with [`Text`](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Text.html) and the `content` field as `String`.\n2. `updated`: `Option\u003CDateTime\u003CUtc>>` with [`DateTime`](https://docs.rs/chrono/latest/chrono/struct.DateTime.html) with the [`format()` method](https://docs.rs/chrono/latest/chrono/struct.DateTime.html#method.format).\n3. `summary`: `Option\u003CText>` [`Text`](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Text.html) and the `content` field as `String`.\n4. `links`: `Vec\u003CLink>`, vector with [`Link`](https://docs.rs/feed-rs/latest/feed_rs/model/struct.Link.html) items. The `href` attribute provides the raw URL string.\n\nUse this knowledge to extract the required data from the feed entries. 
Reminder that all `Option` types need to call `unwrap()`, which requires more raw instructions for Code Suggestions.\n\n```rust\n                // https://docs.rs/feed-rs/latest/feed_rs/model/struct.Feed.html\n                // https://docs.rs/feed-rs/latest/feed_rs/model/struct.Entry.html\n                // Loop over all entries, and print\n                // title.unwrap().content\n                // published.unwrap().format\n                // summary.unwrap().content\n                // links href as joined string\n                for entry in feed.entries {\n                    println!(\"Title: {}\", entry.title.unwrap().content);\n                    println!(\"Published: {}\", entry.published.unwrap().format(\"%Y-%m-%d %H:%M:%S\"));\n                    println!(\"Summary: {}\", entry.summary.unwrap().content);\n                    println!(\"Links: {:?}\", entry.links.iter().map(|link| link.href.clone()).collect::\u003CVec\u003CString>>().join(\", \"));\n                    println!();\n                }\n```\n\n![Code suggestions to print feed entry types, with specific requirements](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_print_feed_entries_fields_with_rust_type_specifics.png){: .shadow}\n\n### Error handling with Option unwrap()\nContinue iterating on the multi-line instructions after building and running the program again. Spoiler: `unwrap()` will call the `panic!` macro and crash the program when it encounters empty values. 
This can happen if a field like `summary` is not set in the feed data.\n\n```shell\nGitLab Blog is an Atom feed\nTitle: How the Colmena project uses GitLab to support citizen journalists\nPublished: 2023-09-27 00:00:00\nthread '\u003Cunnamed>' panicked at 'called `Option::unwrap()` on a `None` value', src/feed_reader.rs:40:59\n```\n\nA potential solution is to use [`std::Option::unwrap_or_else`](https://doc.rust-lang.org/std/option/enum.Option.html#method.unwrap_or_else) and set an empty string as default value. The syntax requires a closure that returns an empty `Text` struct instantiation.\n\nSolving the problem required many attempts to find the correct initialization, passing just an empty string did not work with the custom types. I will show you all my endeavors, including the research paths.\n\n```rust\n// Problem: The `summary` attribute is not always initialized. unwrap() will panic! then.\n// Requires use mime; and use feed_rs::model::Text;\n/*\n// 1st attempt: Use unwrap() to extraxt Text from Option\u003CText> type.\nprintln!(\"Summary: {}\", entry.summary.unwrap().content);\n// 2nd attempt. Learned about unwrap_or_else, passing an empty string.\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| \"\").content);\n// 3rd attempt. summary is of the Text type, pass a new struct instantiation.\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{}).content);\n// 4th attempt. Struct instantiation requires 3 field values.\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{\"\", \"\", \"\"}).content);\n// 5th attempt. Struct instantation with public fields requires key: value syntax\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{content_type: \"\", src: \"\", content: \"\"}).content);\n// 6th attempt. 
Reviewed expected Text types in https://docs.rs/feed-rs/latest/feed_rs/model/struct.Text.html and created Mime and String objects\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{content_type: mime::TEXT_PLAIN, src: String::new(), content: String::new()}).content);\n// 7th attempt: String and Option\u003CString> cannot be casted automagically. Compiler suggested using `Option::Some()`.\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{content_type: mime::TEXT_PLAIN, src: Option::Some(), content: String::new()}).content);\n*/\n\n// xth attempt: Solution. Option::Some() requires a new String object.\nprintln!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{content_type: mime::TEXT_PLAIN, src: Option::Some(String::new()), content: String::new()}).content);\n```\n\nThis approach did not feel satisfying, since the code line is complicated to read, and required manual work without help from Code Suggestions. Taking a step back, I reviewed what brought me there - if `Option` is `none`, `unwrap()` will throw an error. Maybe there is an easier way to handle this? I asked Code Suggestions in a new comment:\n\n```\n                // xth attempt: Solution. Option::Some() requires a new String object.\n                println!(\"Summary: {}\", entry.summary.unwrap_or_else(|| Text{content_type: mime::TEXT_PLAIN, src: Option::Some(String::new()), content: String::new()}).content);\n\n                // Alternatively, use Option.is_none()\n```\n\n![Code suggestions asked for alternative with Options.is_none](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/code_suggestions_after_complex_unwrap_or_else_ask_for_alternative_option.png){: .shadow}\n\nIncreased readability, less CPU cycles wasted on `unwrap()`, and a great learning curve from solving a complex problem to using a boring solution. 
Win-win.\n\nBefore we forget: Re-add storing the XML data on disk to complete the reader app again.\n\n```rust\n                // Dump the parsed body to a file, as name-current-iso-date.xml\n                let file_name = format!(\"{}-{}.xml\", thread_name, chrono::Local::now().format(\"%Y-%m-%d-%H-%M-%S\"));\n                let mut file = std::fs::File::create(file_name).unwrap();\n                file.write_all(body.as_ref()).unwrap();\n```\n\nBuild and run the program to verify the output.\n\n```shell\ncargo build\n\ntime cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\" \"CNCF,https://www.cncf.io/feed/\"\n```\n\n![VS Code Terminal, cargo run with formatted feed entries output](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/vs_code_terminal_cargo_run_formatted_output_final.png)\n\n## Benchmarks\n\n### Sequential vs. Parallel execution benchmark\nCompare the execution time benchmarks by creating five samples each.\n\n1. Sequential execution. [Example source code MR](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai-app-reader/-/merge_requests/1)\n2. Parallel exeuction. 
[Example source code MR](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai-app-reader/-/merge_requests/3)\n\n```shell\n# Sequential\ngit checkout sequential-exec\n\ntime cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\" \"CNCF,https://www.cncf.io/feed/\"\n\n0.21s user 0.08s system 10% cpu 2.898 total\n0.21s user 0.08s system 11% cpu 2.585 total\n0.21s user 0.09s system 10% cpu 2.946 total\n0.19s user 0.08s system 10% cpu 2.714 total\n0.20s user 0.10s system 10% cpu 2.808 total\n```\n\n```shell\n# Parallel\ngit checkout parallel-exec\n\ntime cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\" \"CNCF,https://www.cncf.io/feed/\"\n\n0.19s user 0.08s system 17% cpu 1.515 total\n0.18s user 0.08s system 16% cpu 1.561 total\n0.18s user 0.07s system 17% cpu 1.414 total\n0.19s user 0.08s system 18% cpu 1.447 total\n0.17s user 0.08s system 16% cpu 1.453 total\n```\n\nThe CPU usage increased for parallel execution of four RSS feed threads, but it nearly halved the total time compared to sequential execution. With that in mind, we can continue learning Rust and optimize the code and functionality.\n\nNote that we are running the debug build through Cargo, and not the optimized released builds yet. There are caveats with parallel execution though: Some HTTP endpoints put rate limits in place, where parallelism could hit these thresholds easier.\n\nThe system executing multiple threads in parallel might get overloaded too – threads require context switching in the Kernel, assigning resources to each thread. While one thread gets computing resources, other threads are put to sleep. If there are too many threads spawned, this might slow down the system, rather than speeding up the operations. 
Solutions include design patterns such as [work queues](https://docs.rs/work-queue/latest/work_queue/), where the caller adds a task into a queue, and a defined number of worker threads pick up the tasks for asynchronous execution.\n\nRust also provides data synchronisation between threads, so-called [channels](https://doc.rust-lang.org/rust-by-example/std_misc/channels.html). To ensure concurrent data access, [mutexes](https://doc.rust-lang.org/std/sync/struct.Mutex.html) are available to provide safe locks.\n\n### CI/CD with Rust caching\nAdd the following CI/CD configuration into the `.gitlab-ci.yml` file. The `run-latest` job calls `cargo run` with RSS feed URL examples, and measures the execution time continuously.\n\n```\nstages:\n  - build\n  - test\n  - run\n\ndefault:\n  image: rust:latest\n  cache:\n    key: ${CI_COMMIT_REF_SLUG}\n    paths:\n      - .cargo/bin\n      - .cargo/registry/index\n      - .cargo/registry/cache\n      - target/debug/deps\n      - target/debug/build\n    policy: pull-push\n\n# Cargo data needs to be in the project directory for being cached.\nvariables:\n  CARGO_HOME: ${CI_PROJECT_DIR}/.cargo\n\nbuild-latest:\n  stage: build\n  script:\n    - cargo build --verbose\n\ntest-latest:\n  stage: build\n  script:\n    - cargo test --verbose\n\nrun-latest:\n  stage: run\n  script:\n    - time cargo run -- \"GitLab Blog,https://about.gitlab.com/atom.xml\" \"CNCF,https://www.cncf.io/feed/\"\n```\n\n![GitLab CI/CD pipelines for Rust, cargo run output](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-advanced-programming/gitlab_cicd_pipeline_rust_cargo_run_output.png){: .shadow}\n\n## What is next\nThis blog post was challenging to create, with both learning advanced Rust programming techniques myself, and finding a good learning curve with Code Suggestions. 
The latter greatly helps with quickly generating code, not just boilerplate snippets – it understands the local context, and better understands the purpose and scope of the algorithm, the more code you write. After reading this blog post, you know of a few challenges and turnarounds. The example solution code for the reader app is available in [the learn-rust-ai-app-reader project](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai-app-reader).\n\nParsing RSS feeds is challenging since it involves data structures, with external HTTP requests and parallel optimizations. As an experienced Rust user, you might have wondered: `Why not use the std::rss crate?` -- It is optimized for advanced asynchronous execution, and does not allow to show and explain the different Rust functionalities, explained in this blog post. As an async exercise, try to rewrite the code using the [`rss` crate](https://docs.rs/rss/latest/rss/).\n\n### Async learning exercises\nThe lessons learned in this blog post also lay the foundation for future exploration with persistent storage and presenting the data. Here are a few ideas where you can continue learning Rust and optimize the reader app:\n\n1. Data storage: Use a database like sqlite, and RSS feed update tracking.\n2. Notifications: Spawn child processes to trigger notifications into Telegram, etc.\n3. Functionality: Extend the reader types to REST APIs\n4. Configuration: Add support for configuration files for RSS feeds, APIs, etc.\n5. Efficiency: Add support for filters, and subscribed tags.\n6. Deployments: Use a webserver, collect Prometheus metrics, and deploy to Kubernetes.\n\nIn a future blog post, we will discuss some of these ideas, and how to implement them. 
Dive into existing RSS feed implementations, and learn how you can refactor the existing code into leveraging more Rust libraries (`crates`).\n\n### Share your feedback\nWhen you use [GitLab Duo](/gitlab-duo/) Code Suggestions, please [share your thoughts in the feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/405152).\n",[835,1105,9,940,704],{"slug":3158,"featured":6,"template":684},"learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions","content:en-us:blog:learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions.yml","Learn Advanced Rust Programming With A Little Help From Ai Code Suggestions","en-us/blog/learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions.yml","en-us/blog/learn-advanced-rust-programming-with-a-little-help-from-ai-code-suggestions",{"_path":3164,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3165,"content":3171,"config":3177,"_id":3179,"_type":13,"title":3180,"_source":15,"_file":3181,"_stem":3182,"_extension":18},"/en-us/blog/learn-gitlab-devops-version-control",{"title":3166,"description":3167,"ogTitle":3166,"ogDescription":3167,"noIndex":6,"ogImage":3168,"ogUrl":3169,"ogSiteName":669,"ogType":670,"canonicalUrls":3169,"schema":3170},"GitLab tutorials for secure pipelines, Kubernetes, and more at Learn@GitLab","Learn@GitLab offers videos and self-driven demos so you can get the most out of GitLab at your own pace.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667236/Blog/Hero%20Images/Learn-at-GL.jpg","https://about.gitlab.com/blog/learn-gitlab-devops-version-control","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"GitLab tutorials for secure pipelines, Kubernetes, and more at Learn@GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2021-01-12\",\n      
}",{"title":3166,"description":3167,"authors":3172,"heroImage":3168,"date":3174,"body":3175,"category":769,"tags":3176},[3173],"Chrissie Buchanan","2021-01-12","\nAt GitLab, we often say that it's not what you know, it's knowing where to look. But sometimes, finding answers isn’t so easy.\n\nAn autonomous, [self-service](/company/culture/all-remote/self-service/#proactive-approach-to-answering-questions), self-learning, and self-searching mindset is when you operate with the idea that your question has already been answered – somewhere. But we realized that for people interested in GitLab, or even those using GitLab, learning **how** to use it wasn’t always easy to find.\n\nWhile we stress the importance of having a [single source of truth](https://handbook.gitlab.com/handbook/values/#single-source-of-truth), we realized that when it came to learning about GitLab, there were almost too many places to look. We have [GitLab University](https://docs.gitlab.com/ee/index.html), our official [GitLab](https://www.youtube.com/channel/UCnMGQ8QHMAnVIsI3xJrihhg) and [GitLab Unfiltered](https://www.youtube.com/channel/UCMtZ0sc1HHNtGGWZFDRTh5A) YouTube pages where we regularly upload educational content, and of course, the [docs](https://docs.gitlab.com/). We needed to find a way to consolidate self-education and make it more intuitive.\n\n## What is Learn@GitLab?\n\n[Learn@GitLab](/learn/) is a learning portal where anyone can go to find self-driven demos and videos about using GitLab. Rather than just making Learn@GitLab _one more resource_, we’re iterating on this idea and consolidating our educational content so that it’s self-driven and easy to find.\n\nThe goal for Learn@GitLab is to present high quality, and accessible technical content that is easy to find on our website to help prospects and users educate themselves about GitLab. This content will include educational technical videos, as well as simulation/click-through demos, and tutorials. 
The content is organized by common topics such as [DevOps Platform](/solutions/devops-platform/), [version control](/topics/version-control/) and collaboration, and continuous integration, to name a few.\n\nWe’ve picked three of our favorite videos/tutorials for you to get a quick introduction to Learn@GitLab.\n\n## The benefits of a single DevOps platform\n\nWhen we talk about the benefits of GitLab, we often talk about how it saves time and how the single application reduces toolchain complexity. But what does that mean in the context of an ordinary toolchain using tools like GitHub, Jenkins, Jira, etc.?\n\nIn this super short video, we break down a typical toolchain according to three criteria: Integrations needed, clicks, and screen switches. How many times do you need to context switch for a simple task? We break it down for you.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/MNxkyLrA5Aw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Adding security to your GitLab CI/CD pipeline\n\nGitLab helps teams go from DevOps to DevSecOps. One of the ways we help is by allowing you to check your application for security vulnerabilities in your CI/CD pipelines that may lead to unauthorized access, data leaks, denial of services, or worse. 
GitLab reports these vulnerabilities in the merge request so you can fix them before they ever reach end users.\n\nThis quick video guides you through setting up and configuring GitLab security features, and setting up approval rules for merge requests.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/Fd5DhebtScg\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## GitLab developer flow with Kubernetes\n\nIf you’re a developer, or even just managing a team of developers, you might want to see what a typical workflow would be like using GitLab. If you’re using [Kubernetes](/solutions/kubernetes/), seeing how GitLab works within a deployment environment is especially important.\n\nIn this technical demo, we use Amazon EKS as the deployment environment. We go over creating GitLab issues, merge requests, how to use Auto DevOps pipeline templates, review apps, advanced deployment techniques, and staging and production rollout – all in **just 15 minutes.**\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/TMQziI2VDbQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nWhile we’ll continue to have educational content in other places on our site (and will continue to update them), Learn@GitLab will act as a front door for self education that is no more than two clicks from our homepage. With this new learning portal, we hope to teach people what problems GitLab can solve, but more importantly, show step-by-step _how_ GitLab solves them.\n\nFeel free to explore the different learning paths and comment below if you have any suggestions. 
Everyone can contribute.\n\n\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>&nbsp;&nbsp;\n[Go to Learn@GitLab](/learn/)!\n&nbsp;&nbsp;\u003Ci class=\"fab fa-gitlab\" style=\"color:rgb(107,79,187); font-size:.85em\" aria-hidden=\"true\">\u003C/i>\n\nCover image by [Benjamin Davies](https://unsplash.com/@bendavisual?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/learn?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)\n",[1247,230,9],{"slug":3178,"featured":6,"template":684},"learn-gitlab-devops-version-control","content:en-us:blog:learn-gitlab-devops-version-control.yml","Learn Gitlab Devops Version Control","en-us/blog/learn-gitlab-devops-version-control.yml","en-us/blog/learn-gitlab-devops-version-control",{"_path":3184,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3185,"content":3190,"config":3197,"_id":3199,"_type":13,"title":3200,"_source":15,"_file":3201,"_stem":3202,"_extension":18},"/en-us/blog/learn-python-with-pj-part-3",{"title":3186,"description":3187,"ogTitle":3186,"ogDescription":3187,"noIndex":6,"ogImage":1096,"ogUrl":3188,"ogSiteName":669,"ogType":670,"canonicalUrls":3188,"schema":3189},"Learn Python with Pj! Part 3 - Functions and strings","Pj shares his experiences learning how to program functions and strings.","https://about.gitlab.com/blog/learn-python-with-pj-part-3","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Learn Python with Pj! Part 3 - Functions and strings\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"PJ Metz\"}],\n        \"datePublished\": \"2022-04-04\",\n      }",{"title":3186,"description":3187,"authors":3191,"heroImage":1096,"date":3193,"body":3194,"category":769,"tags":3195},[3192],"PJ Metz","2022-04-04","\n\n_This is the third installment in the Learn Python with Pj! series. 
Make sure to read [Part 1](/blog/learn-python-with-pj-part-1/) and [Part 2](/blog/learn-python-with-pj-part-2/)._\n\n\nIn learning Python, I’m happy to have found a language with a straightforward syntax that just seems to make sense. I don’t have to define a type; Python just knows. I don’t have to worry about `let` or `const` or `var` for different use cases; I just make the variable. I’m very glad I learned C# and JavaScript first, as those feel important to understanding exactly what is happening when I write code. In turn, I think it’s made Python easier for me, which is usually true when learning another programming language: Your second and third are easier to learn since your brain understands “programming logic” better now than when you made your first “Hello World”. This week we’re going to talk about what I’ve learned in functions and strings. \n\n## Functions\n\nFunctions are the backbone of any app you write. It’s an important step in learning any language to learn how to put a series of actions inside a single function that can be called later in the code. Python does this simply compared to other languages I’ve learned. \n\n```python\ndef my_first_function(arg1, arg2):\n  print(f”Your input was {arg1} and {arg2}.”)\n\n#prints “Your input was hilarious and unnecessary”\nmy_first_function(“hilarious”, “unnecessary”)\n```\nUsing the keyword `def` lets Python know that you’re about to write a function. Inside the parentheses, you put any parameters that must be included when calling the function. Some people use argument and parameter interchangeably, but technically, when defining a function, it’s a parameter, and when calling a function, it’s an argument. Either way, when defining the function, include some variables that you’ll expect when the function is called later. Finally, put a colon and then move to the next line. All the code for the function to run is indented. Inside the function, you can run loops, logic, or even other functions. 
Let’s check out a slightly more complex use. \n\n```python\ndef halloween_horror_nights(days, link,):\n    named = input(\"What is your name?\")\n    name = named.capitalize()\n    if isinstance(days, int):\n        if days == 0:\n            print(f\" Hello, {name}. We're ready to see you at HHN. {link}\")\n        elif days \u003C= 30:\n            print(f\"{name}, You have {days} days until the terror is home. {link}\")\n        elif days \u003C= 60:\n            print(f\"{name}, The horror comes home in {days} days. Join us in the dark. {link}\")\n        elif days \u003C 365 and days > 60:\n            print(f\"{name}, Patience is a virtue. You're {days} days away from the top rated Halloween event in the world.{link}\")\n        else:\n            print(f\"{name}, it can't be more than a year away. It's closer than you think... {link}\")\n    else:\n        print(\"Days must be an int\")\n    \n    \n#This will print “{Name input by user}, The horror comes home in 56 days. Join us in the dark. https://orlando.halloweenhorrornights.com/site\"\nhalloween_horror_nights(56, \"https://orlando.halloweenhorrornights.com/site\")\n```\n\nFor context, Halloween Horror Nights in Orlando is my favorite event of the year. This function takes in a number of days and a link (meant to be days until the event and a link to the HHN web page) and outputs a string that says how many days are left until the event. The string also includes a link to the web page and asks for user input to personalize each string. The function `isinstance()` checks if `days` is an int to make sure the sentence makes sense and returns `True` if the first argument is the type of the second argument. \n\nI really found functions in Python to be a lot easier than in other languages, though I still miss the curly brackets of C# and JavaScript. Additionally, the simplicity of `def` followed by the function name and any required parameters is really straightforward and makes reading the code easier. 
And since code is read more than it’s written, that makes Python pretty awesome in my book. \n\nI also used the f-string format for these print statements, and it’s still one of my favorite ways to concatenate. It feels easier than a lot of the other ways of inserting variables into a string in Python, and a little easier than the way it’s done in JavaScript, at least to me. Later in this post, I use a different method of including variables in a string called `.format()`. \n\nMaking your own functions is important, but there are a bunch of built-in functions in Python. There are also methods, which are similar to functions but are associated with the objects in a class they’re assigned to. Let’s talk about some strings and some methods that come with them.\n\n## Strings\n\nI thought it was strange that I had a whole section on strings in my Codecademy Python curriculum, but I soon realized that it was giving me a lot of very useful methods to use on strings that seemed very versatile. The most interesting thing to me is that strings are an object and act like a list of characters. I’m not entirely sure how strings are treated in other languages, but this really struck me as a cool idea. You can even call specific characters using the same syntax you would for a list. \n\n```python\nspooky = \"Halloween Horror Nights is my favorite thing about Autumn.\"\n\n#the following prints “l” since it’s the 3rd char in the string `spooky`. \nprint(spooky[2]) \n```\n\nOr you can use a for loop on a string.\n\n```python\n#This prints each letter on a new line and capitalizes it. The message now reads vertically in the output.\nfor letter in spooky:\n  print(letter.upper())\n```\n\n### String methods\t\n\nA few built-in methods exist for strings in Python, like `capitalize()` and `upper()`, two I used in the above examples. 
In addition to those, there are many more that can do things like remove the whitespace or noise from the beginning and end of a string, tell you the index of the first appearance of something, or join a list of strings into a single string. There are lots of great included methods. Here’s an exercise I took from Codecademy and changed the content to fit this article's theme. \n\n```python\n#given a string that contains a ton of information separated by semicolons and commas. Each part is a haunted house name, Universal Studios location, and the year the house appeared at the event.\nhhn_houses_location_year = \"Chucky;Japan;2016, Run;Orlando;2001, The Orfanage: Ashes to Ashes;Orlando;2010, The Real: Haunted Village;Japan;2021, The Undertaker: No Mercy;Hollywood;2000, Welcome to Silent Hill;Hollywood;2012, American Werewolf in London;Orlando;2013\"   \n\n#this splits the string up into a list where each element of the list is the section separated by a comma.\nhhn_houses_list = hhn_houses_location_year.split(\",\")\n\n#empty list for the next step\nhhn_houses_stripped = []\n\n#this strips any whitespace from the element in the list and adds it to the empty list from before\nfor house in hhn_houses_list:\n    hhn_houses_stripped.append(house.strip())\n\n#empty list for next step\nhhn_house_details = []\n\n#the next few lines split the details into their own list. \n#first, each house, with the details, is split along the semicolons to make a list of lists, with each house being its own element in the larger list\n#next, empty lists are made for each detail\n#finally, using index numbers, each detail is placed in its own list so all the houses, locations, and years are separated. 
\nfor info in hhn_houses_stripped:\n    hhn_house_details.append(info.split(\";\"))\n\nhouse = []\nlocation = []\nyear = []\n\nfor stuff in hhn_house_details:\n    house.append(stuff[0])\n    location.append(stuff[1])\n    year.append(stuff[2])\n\n#loops through and using .format() prints a sentence that tells about each house. \nfor num in range(0, len(house)):\n  print(\"{} was located in {} for the {} event\".format(house[num], location[num], year[num]))\n```\n\nAs you can see, I am obsessed with Halloween horror nights… er, wait, not the point of the article. As you can see, Python’s built-in methods for strings can be pretty useful, especially if you end up with a bunch of data sitting around in unformatted strings. Next time, we’re going to talk about Dictionaries and how they are used in Python! \n",[1105,3196,9],"growth",{"slug":3198,"featured":6,"template":684},"learn-python-with-pj-part-3","content:en-us:blog:learn-python-with-pj-part-3.yml","Learn Python With Pj Part 3","en-us/blog/learn-python-with-pj-part-3.yml","en-us/blog/learn-python-with-pj-part-3",{"_path":3204,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3205,"content":3210,"config":3215,"_id":3217,"_type":13,"title":3218,"_source":15,"_file":3219,"_stem":3220,"_extension":18},"/en-us/blog/learn-python-with-pj-part-4-dictionaries-and-files",{"title":3206,"description":3207,"ogTitle":3206,"ogDescription":3207,"noIndex":6,"ogImage":1096,"ogUrl":3208,"ogSiteName":669,"ogType":670,"canonicalUrls":3208,"schema":3209},"Learn Python with Pj! Part 4 - Dictionaries and Files","Our education evangelist Pj Metz continues his journey to learn how to code in Python.","https://about.gitlab.com/blog/learn-python-with-pj-part-4-dictionaries-and-files","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Learn Python with Pj! 
Part 4 - Dictionaries and Files\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"PJ Metz\"}],\n        \"datePublished\": \"2022-05-05\",\n      }",{"title":3206,"description":3207,"authors":3211,"heroImage":1096,"date":3212,"body":3213,"category":1105,"tags":3214},[3192],"2022-05-05","\n\nThis is the fourth installment in the Learn Python with Pj! series. Make sure to read:\n- [Part 1 - Getting started](/blog/learn-python-with-pj-part-1/)\n- [Part 2 - Lists and loops](/blog/learn-python-with-pj-part-2/)\n- [Part 3 - Functions and strings](/blog/learn-python-with-pj-part-3/)\n- [Part 5 - Build a hashtag tracker with the Twitter API](/blog/learn-python-with-pj-part-5-building-something-with-the-twitter-api/)\n\nI’ve learned a lot with Python so far, but when I learned dictionaries (sometimes shortened to dicts), I was really excited about what could be done. A dictionary in Python is a series of keys and values stored inside a single object. This is kind of like a super array; one that allows you to connect keys and values together in a single easily accessible source. Creating dictionaries from arrays can actually be very simple, too.\n\nIn this blog, I'll dig into how to create dictionaries and how to read and write files in the code.\n\n## Dictionaries\n\nDictionaries in Python are indicated by using curly braces, or as I like to call them, mustaches. `{ }` indicates that the list you’re looking at isn’t a list at all, but a dictionary. \n\n```python\nshows_and_characters = {\n    \"Bojack Horseman\": \"Todd\",\n    \"My Hero Academia\": \"Midoriya\",\n    \"Ozark\": \"Ruth\",\n    \"Arrested Development\": \"Tobias\",\n    \"Derry Girls\": \"Sister Michael\",\n    \"Tuca & Bertie\": \"Bertie\"\n    }\n```\n\nThis is a dictionary of my favorite TV shows and my favorite characters in that show. In this example, the key is on the left and the value is on the right. 
To access dictionaries, you use a similar call like you would for a list, except instead of an element number, you would put the key. `print(shows_and_characters[“Ozark”])` would print `Ruth` to the console. Additionally, both the key and value in this example are strings, but that’s not a requirement. Keys can be any immutable type, like strings, ints, floats, and tuples. Values don’t have this same restriction, therefore values can be a nested dictionary or a list, in addition to the types mentioned for keys. For instance, the following dictionary is a valid dictionary.\n\n```python\nshows_with_lists = {\n    \"Bojack Horseman\": [\"Todd\", \"Princess Carolyn\", \"Judah\", \"Diane\"],\n    \"My Hero Academia\": [\"Midoriya\", \"Shoto\", \"All Might\", \"Bakugo\", \"Kirishima\"],\n    \"Ozark\": [\"Ruth\", \"Jonah\", \"Wyatt\"],\n    \"Arrested Development\": [\"Tobias\", \"Gob\", \"Anne\", \"Maeby\"],\n    \"Derry Girls\": [\"Sister Michael\", \"Orla\", \"Erin\", \"Claire\", \"James\"],\n    \"Tuca & Bertie\": [\"Bertie\", \"Speckle\", \"Tuca\", \"Dakota\"]\n    }\n```\nIn this example, each value is a list. So if we tried to print the value for the key `”Derry Girls”`, we would see `[“Sister Michael”, “Orla”, “Erin”, “Claire”, “James”]` printed to the console. However, if we wanted the last element in the value list, we’d write `shows_with_lists[“Derry Girls”] [-1]`. This would print the last element in the list, which in this case is `James`. \n\nDictionaries can be written manually, or, if you have two lists, you can combine the `dict()` and `zip()` methods to make the lists into a dictionary. 
\n\n```python\nlist_of_shows = [\"Bojack Horseman\",\n                 \"My Hero Academia\",\n                 \"Ozark\",\n                 \"Arrested Development\",\n                 \"Derry Girls\",\n                 \"Tuca & Bertie\"]\nlist_of_characters = [[\"Todd\", \"Princess Carolyn\", \"Judah\", \"Diane\"],\n                      [\"Midoriya\", \"Shoto\", \"All Might\", \"Bakugo\", \"Kirishima\"],\n                      [\"Ruth\", \"Jonah\", \"Wyatt\"],\n                      [\"Tobias\", \"Gob\", \"Anne\", \"Maeby\"],\n                      [\"Sister Michael\", \"Orla\", \"Erin\", \"Claire\", \"James\"],\n                      [\"Bertie\", \"Speckle\", \"Tuca\", \"Dakota\"]]\n\ncombined_shows_characters = dict(zip(list_of_shows, list_of_characters))\n\nprint(combined_shows_characters)\n```\n\nThis is one way to create a dictionary. Another is called Dictionary Comprehension. This one is a little more work, but can be used in a variety of different ways, including using a bit of logic on a single list to generate a dictionary using that original list. Here’s how with two examples: one based on the above lists, and one with a single list and some logic. \n\n```python\nimport math\n\n#This is doing the same work as the above example, but using Dict Comprehension instead. \ncomprehension_shows_characters = { shows:characters for shows, characters in zip(list_of_shows, list_of_characters)  }\n\nhip_to_be_square = [4, 9, 16, 25, 36, 49]\n\nno_longer_hip_to_be_square = { key:math.sqrt(key) for key in hip_to_be_square }\n\nprint(no_longer_hip_to_be_square)\n```\n\nIn the `no_longer_hip_to_be_square` dictionary, the key is found in the `hip_to_be_square` list. The value for each key is its own square root, brought in with the import math function. There are plenty more useful methods for dealing with dictionaries [here](https://realpython.com/python-dicts/). 
\n\n## Reading and writing files\n\nThis one is a pretty cool part of Python: reading and writing other files right in the code. With Python, you’re able to take the contents of certain types of files and use it in your code, or even create a new file based on some input. This is useful for data handling and can be used with a  variety of file types. The two I’ll be covering here are .csv and .txt.\n\n### Reading from a file\n\nImagine a .txt file named `best-ever.txt` containing the line `My favorite tv show is Derry Girls`. We can use Python to take that line and turn it into a variable. Running the following code would print the contents of the .txt file to the terminal. \n\n```python\nwith open(\"best-ever.txt\") as text_file:\n  text_data = text_file.read()\n\n#This will print the contents of the .txt file. \nprint(text_data)\n```\n\nBy using `with open(NAME OF FILE) as VARIABLE_NAME:`, we can examine the contents of files as a single string. If the document has multiple lines, you can even separate those by iterating over them by using a for loop and the `.readlines()` method. Using an imaginary .txt document called `buncha-lines` we could use the following to print out each line individually.\n\n```python\nwith open(\"buncha-lines.txt\") as lines_doc:\n  for line in lines_doc.readlines():\n    print(line)\n``` \n### Writing a new file\n\nCreating a new file is also easy with Python. The `open()` function can take an additional argument in order to create a new file. In fact, there’s a default argument that’s been being passed each time without us knowing! `r` is the default argument for `open()` and puts it in read mode. To turn on write mode, pass in a `w` as the second argument. The following code will write a brand-new file called `best_tv_character.txt` with the contents `Peggy Olson from Mad Men`. 
\n\n```python\nwith open(\"best_tv_character.txt\", \"w\") as best_character:\n  best_character.write(\"Peggy Olson from Mad Men\")\n```\n### Working with .csv files\n\nYou can read a .csv file with Python by using `import csv` at the beginning of the file, and then using some of its built-in methods in the code. However, even though .csv files are plain text, treating a .csv file the same as you treat .txt files can lead to difficult to read outputs; after all, the point of a spreadsheet is to table information. Without that table, the output can be chaotic. A way around this is to use the `DictReader()` method. This method allows you to map the information in each row to a dictionary with field names you can create. The default field names are collected from the first row of the .csv if no field names are given. Imagine a .csv file with columns labeled, “Network”, “Show name”, “Seasons”. Maybe we just want to print the number of seasons from this .csv. \n\n```python\nimport csv \n\nwith open(\"shows.csv\") as shows_csv:\n  shows_dict = csv.DictReader(shows_csv)\n  for row in shows_dict:\n    print(row[\"Seasons\"])\n```\n\nThis would print to the console, on a new line, the number of seasons for each row that exists in the .csv. \n\nJust like with .txt files, you can also create .csv files with Python. It’s a bit more complicated since you need to define the headers, or column names, but it is still a quick process. This can be used to take lists and turn them into .csv files. 
Let’s check out the following example:\n\n```python\nimport csv\n\nworking_list = [{\"Network\": \"Netflix\", \"Show Name\":\"Bojack Horseman\", \"Seasons\":6}, {\"Network\":\"Channel 4\",\"Show Name\":\"Derry Girls\", \"Seasons\": 3}, {\"Network\":\"HBO Max\", \"Show Name\":\"Our Flag Means Death\", \"Seasons\": 1}]\n\n\nwith open(\"shows.csv\", \"w\") as shows_csv:\n    fields = [\"Network\", \"Show Name\", \"Seasons\"]\n    shows_w = csv.DictWriter(shows_csv, fieldnames = fields)\n\n    shows_w.writeheader()\n    for item in working_list:\n        shows_w.writerow(item)\n```\n\nThis previous code block creates a brand-new csv file by using the `”w”` parameter in `open()`. We manually name the fields in the order they appear in a separate list, then pass that list into the `DictWriter` parameter `fieldnames`. Finally, we use the `writeheader()` and a for loop with the `writerow()` methods to create a header row and to iterate over the `working_list` and turn each entry into a row in the .csv. 
\n\nThese are only a few ways to work with .csv and .txt files; Python is very versatile and more information [can be found here](https://realpython.com/working-with-files-in-python/).\n",[1105,9,3196],{"slug":3216,"featured":6,"template":684},"learn-python-with-pj-part-4-dictionaries-and-files","content:en-us:blog:learn-python-with-pj-part-4-dictionaries-and-files.yml","Learn Python With Pj Part 4 Dictionaries And Files","en-us/blog/learn-python-with-pj-part-4-dictionaries-and-files.yml","en-us/blog/learn-python-with-pj-part-4-dictionaries-and-files",{"_path":3222,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3223,"content":3228,"config":3233,"_id":3235,"_type":13,"title":3236,"_source":15,"_file":3237,"_stem":3238,"_extension":18},"/en-us/blog/learn-python-with-pj-part-5-building-something-with-the-twitter-api",{"title":3224,"description":3225,"ogTitle":3224,"ogDescription":3225,"noIndex":6,"ogImage":1096,"ogUrl":3226,"ogSiteName":669,"ogType":670,"canonicalUrls":3226,"schema":3227},"Learn Python with Pj! Part 5 - Build a hashtag tracker with the Twitter API","Our Education Evangelist Pj Metz wraps up his five-part series with this penultimate tutorial.","https://about.gitlab.com/blog/learn-python-with-pj-part-5-building-something-with-the-twitter-api","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Learn Python with Pj! Part 5 - Build a hashtag tracker with the Twitter API\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"PJ Metz\"}],\n        \"datePublished\": \"2022-06-01\",\n      }",{"title":3224,"description":3225,"authors":3229,"heroImage":1096,"date":3230,"body":3231,"category":1105,"tags":3232},[3192],"2022-06-01","\nThis is the fifth and final installment in the Learn Python with Pj! series. 
Make sure to read:\n- [Part 1 - Getting started](/blog/learn-python-with-pj-part-1/)\n- [Part 2 - Lists and loops](/blog/learn-python-with-pj-part-2/)\n- [Part 3 - Functions and strings](/blog/learn-python-with-pj-part-3/)\n- [Part 4 - Dictionaries and Files](/blog/learn-python-with-pj-part-4-dictionaries-and-files/)\n\n## Putting it all together\nI’ve completed my Python course on [Codecademy](https://codecademy.com/), and am excited to put the skills I learned into building something practical. I’ve worked with the Twitter API before; I wrote a few bots in Node.js to make them tweet and respond to tweets they’re tagged in. I thought it’d be fun to work with the API again, but this time do it in Python. I didn’t just want to make another bot, so I had to figure out something else. In this case, I made a bot that can track hashtags being used in real time on Twitter.\n\nHere’s [my repo](https://gitlab.com/MetzinAround/python-hashtagger) containing a few different files, but `live_tweets.py` is what we’ll focus on for this blog. Let’s talk about how I built it and what it does. 
\n\n```python\nimport tweepy\nimport config\n\nauth = tweepy.OAuth1UserHandler(config.consumer_key, config.consumer_secret, config.access_token, config.access_token_secret\n)\n\napi = tweepy.API(auth) \n\n#prints the text of the tweet using hashtag designated in stream.filter(track=[])\nclass LogTweets(tweepy.Stream):\n        def on_status(self, status):\n                date = status.created_at\n                username = status.user.screen_name\n                \n                try:\n                        tweet = status.extended_tweet[\"full_text\"]\n                except AttributeError:\n                        tweet = status.text\n\n                print(\"**Tweet info**\")\n                print(f\"Date: {date}\")\n                print(f\"Username: {username}\")\n                print(f\"Tweet: {tweet}\")\n                print(\"*********\")\n                print(\"********* \\n\")\n              \n\nif __name__ == \"__main__\":         \n        #creates instance of LogTweets with authentication\n        stream = LogTweets(config.consumer_key, config.consumer_secret, config.access_token, config.access_token_secret)\n\n\n        #hashtags as str in list will be watched live on twitter. \n        hashtags = []\n        print(\"Looking for Hashtags...\")\n        stream.filter(track=hashtags)\n\n\n```\n\nHere’s how this all works. First, we import two modules: [Tweepy](https://www.tweepy.org/) and config. Tweepy is a wrapper that makes using the Twitter API very easy. Config allows us to use config files and keep our secrets safe. This is important since using the Twitter API involves four keys that are specific to your Twitter developer account. Getting these keys is covered in this Twitter [documentation](https://developer.twitter.com/en/docs/twitter-api/getting-started/getting-access-to-the-twitter-api). We’ll talk about what’s in the config file and how it works later. 
\n\nThe next line defines the variable `auth` using tweepy’s built in authorization handler. Normally, you’d put in the keys directly here, but since we’re trying to keep secrets safe, we handle those through the config file. In order to call those variables hosted in the config file, we type `config.variable_name`. Finally, in order to access the tweepy api, we create the variable `api` with the auth variable from the line above passed into `tweepy.API()`. Now, the variable `api` will give us access to all the features in Tweepy’s Twitter API library. \n\n> You’re invited! Join us on June 23rd for the [GitLab 15 launch event](https://page.gitlab.com/fifteen) with DevOps guru Gene Kim and several GitLab leaders. They’ll show you what they see for the future of DevOps and The One DevOps Platform.\n\nFor our purposes, we want to find a hashtag being used, then collect the tweet that used it and print some information about the tweet to the console. To make this happen, we’ve created a class called `LogTweets` that takes an input `tweepy.Stream`. Stream is a Twitter API term that refers to all of the tweets being posted on Twitter at any given moment. Think of it as opening a window looking out onto every single tweet as it’s posted. We have to make this open connection in order to be able to find tweets that are using our hashtag. Inside `LogTweets`, we define a function called `on_status` with the parameters `self` and `status`. `On_status` will be called when a status is detected in the stream. `Self` is required as the first parameter in any class function, and `status` in this function will be referring to the status posted by a Twitter user, often called a tweet.\n\nIn our case, we’re going with status because `tweet` will represent the text of the status itself. We define `date` and `username` using Tweepy documentation: `created_at` is the date and `user.screen_name` is the username of the person who posted the status.\n\nNext is a `try/except` block. 
Try/except is a concept that works similarly to an if statement, but it allows for error handling a little bit better. It essentially says, “Try this, but if there’s a problem, do this instead.” In this case, we try to define the variable `tweet` as `.extended_tweet[“full_text”]`. This checks if the status we’re working with has the `extended_tweet` attribute. Twitter used to be limited to 140 characters, and when they increased the limit to 280, the `extended_tweet` became necessary.\n\nNow, if you want to capture the full tweet, you need the `extended_tweet` attribute. Inside of that attribute is the key `full_text`. Longer tweets will need that full_text or it will cut off at the 140 character limit. This `try` command checks if that key exists; if it does, `tweet` is equal to that full text.\n\nHowever, if an `AttributeError` happens, we just grab the regular text and set it equal to the variable `tweet`. Next, we print some info to the terminal. Whenever this function is called, the six lines will print to the console with the variables created above replaced by whatever status info was passed in. This makes it easier to keep track of what we’re looking at in the terminal. \n\nNext, we have an important if statement: `if __name__ == \"__main__\":`. This is used to indicate what happens when the file is run. Basically, files in Python receive a property called `__name__` from the compiler. The file that is called to be run directly is called `__main__`. Other files not run are given names equal to the file name. Therefore, anything under this if statement will only run if the file is being called directly by the compiler. \n\nNext, we create an instance of `LogTweets` called `stream`. We pass in the authentication information from the config file just like we did for the `auth` variable in the beginning of the code. This “opens up” the stream and we are now looking at all the tweets being sent in real time. 
In order to narrow our search, we need something to look for. The variable `hashtags` is an empty list that must be populated with strings of the hashtags we’re looking to track. This list will be put into the keyword `track` in a few lines. \n\n`Track` is an important keyword for the stream. It tells the instance what word we are looking for, input as a list of strings. These words can show up in any form, so it’s very broad.  If we didn’t put the hashtag in front of it, it would simply look for that word no matter where it showed up, so we might have too many results. By looking for hashtags, we narrow our search only to people using that specific hashtag, not just the word wherever it is. To search for terms, you have to put them into the list as a string before running the code. \n\nWhen the code is run by typing `python3 live_tweets.py` into the terminal, this is what the output looks like in the terminal.\n\n![Output in terminal](https://about.gitlab.com/images/blogimages/pythonwithpj5.png){: .shadow}\n\n\nThat’s it! That’s how the bot works, but we still need to talk about `config.py` and why we used it before. Here’s the contents of the file: \n\n```python\nimport os\nfrom dotenv import load_dotenv\n\nload_dotenv()\nconsumer_key = os.getenv(\"consumer_key\")\nconsumer_secret = os.getenv(\"consumer_secret\")\naccess_token = os.getenv(\"access_token\")\naccess_token_secret = os.getenv(\"access_token_secret\")\n```\n\nI tricked you! This doesn’t have the keys there either! Using `import os` and `from dotenv import load_dotenv` gives us access to something very important to keep secret keys safe: environmental variables. An environmental variable can be set in many different places, but in this case, our local repo has a file called `.env` that holds the actual keys.\n\nThis is there so I can test the app and run it on my machine. To use it somewhere else, you’d have to have environmental variables set up to hold the keys for the Twitter API. 
When I run my bots on Heroku, I keep the keys in the settings so it has access to the keys it needs to run. I use a `.gitignore` file that keeps my `.env` file from being committed to GitLab. \n\nAs you can see, the variables in `config.py` are set to `os.getenv(“name_of_key”)`. When we import `config.py` as `import config`, we gain access to these variables by calling `config.name_of_variable` in our main file. \n\nSo, for now, that’s what I built! It’s not much and I pieced it together using a lot of documentation from Twitter and Tweepy as well as a few tutorials and plenty of Stackoverflow, but it got built and it works the way I want it to!\n\nI’ve really enjoyed learning Python online and writing about it for everyone who has been reading it. I encourage anyone learning a new language or skill to write about it; it has really helped solidify my learning, and who knows, maybe I’ve helped someone else understand something in Python as well. \n\n",[1105,9,3196],{"slug":3234,"featured":6,"template":684},"learn-python-with-pj-part-5-building-something-with-the-twitter-api","content:en-us:blog:learn-python-with-pj-part-5-building-something-with-the-twitter-api.yml","Learn Python With Pj Part 5 Building Something With The Twitter Api","en-us/blog/learn-python-with-pj-part-5-building-something-with-the-twitter-api.yml","en-us/blog/learn-python-with-pj-part-5-building-something-with-the-twitter-api",{"_path":3240,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3241,"content":3247,"config":3252,"_id":3254,"_type":13,"title":3255,"_source":15,"_file":3256,"_stem":3257,"_extension":18},"/en-us/blog/learning-python-with-a-little-help-from-ai-code-suggestions",{"title":3242,"description":3243,"ogTitle":3242,"ogDescription":3243,"noIndex":6,"ogImage":3244,"ogUrl":3245,"ogSiteName":669,"ogType":670,"canonicalUrls":3245,"schema":3246},"Learning Python with a little help from AI","Use this guided tutorial, along with GitLab Duo Code Suggestions, to learn a new programming 
language.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663918/Blog/Hero%20Images/aipower.jpg","https://about.gitlab.com/blog/learning-python-with-a-little-help-from-ai-code-suggestions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Learning Python with a little help from AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2023-11-09\",\n      }",{"title":3242,"description":3243,"authors":3248,"heroImage":3244,"date":3249,"body":3250,"category":702,"tags":3251},[1612],"2023-11-09","\nLearning a new programming language can help broaden your software development expertise, open career opportunities, or create fun challenges. However, it can be difficult to decide on one specific approach to learning a new language. Artificial intelligence (AI) can help. In this tutorial, you'll learn how to leverage AI-powered GitLab Duo Code Suggestions for a guided experience in learning the Python programming language with a practical hands-on example.\n\n- [Preparations](#preparations)\n  - [VS Code](#vs-code)\n  - [Code Suggestions](#code-suggestions)\n- [Learning a new programming language: Python](#learning-a-new-programming-language-python)\n    - [Development environment for Python](#development-environment-for-python)\n    - [Hello, World](#hello-world)\n- [Start learning Python with a practical example](#start-learning-python-with-a-practical-example)\n    - [Define variables and print them](#define-variables-and-print-them)\n    - [Explore variable types](#explore-variable-types)\n- [File I/O: Read and print a log file](#file-io-read-and-print-a-log-file)\n- [Flow control](#flow-control)\n    - [Loops and lists to collect files](#loops-and-lists-to-collect-files)\n    - [Conditionally collect files](#conditionally-collect-files)\n- [Functions](#functions)\n    - [Start with a simple log 
format](#start-with-a-simple-log-format)\n    - [String and data structure operations](#string-and-data-structure-operations)\n    - [Parse log files using regular expressions](#parse-log-files-using-regular-expressions)\n    - [Advanced log format: auth.log](#advanced-log-format-authlog)\n    - [Parsing more types: Structured logging](#parsing-more-types-structured-logging)\n- [Printing results and formatting](#printing-results-and-formatting)\n- [Dependency management and continuous verification](#dependency-management-and-continuous-verification)\n    - [Pip and pyenv: Bringing structure into Python](#pip-and-pyenv-bringing-structure-into-python)\n    - [Automation: Configure CI/CD pipeline for Python](#automation-configure-cicd-pipeline-for-python)\n- [What is next](#what-is-next)\n    - [Async learning exercises](#async-learning-exercises)\n    - [Share your feedback](#share-your-feedback)\n\n## Preparations \n\nChoose your [preferred and supported IDE](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-in-other-ides-and-editors), and follow the documentation to enable Code Suggestions for [GitLab.com SaaS](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-gitlab-saas) or [GitLab self-managed instances](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-self-managed-gitlab).\n\nProgramming languages can require installing the language interpreter command-line tools or compilers that generate binaries from source code to build and run the application.\n\n**Tip:** You can also use [GitLab Remote Development workspaces](/blog/quick-start-guide-for-gitlab-workspaces/) to create your own cloud development environments, instead of local development environments. This blog post focuses on using VS Code and the GitLab Web IDE. 
\n\n### VS Code\n\n[Install VS Code](https://code.visualstudio.com/download) on your client, and open it. Navigate to the `Extensions` menu and search for `gitlab workflow`. Install the [GitLab Workflow extension for VS Code](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow). VS Code will also detect the programming languages, and offer to install additional plugins for syntax highlighting and development experience. For example, install the [Python extension](https://marketplace.visualstudio.com/items?itemName=ms-python.python).\n\n### Code Suggestions\n\nFamiliarize yourself with suggestions before actually verifying the suggestions. GitLab Duo Code Suggestions are provided as you type, so you do not need use specific keyboard shortcuts. To accept a code suggestion, press the `tab` key. Also note that writing new code works more reliably than refactoring existing code. AI is non-deterministic, which means that the same suggestion may not be repeated after deleting the code suggestion. While Code Suggestions is in Beta, we are working on improving the accuracy of generated content overall. Please review the [known limitations](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#known-limitations), as this could affect your learning experience.\n\n**Tip:** The latest release of Code Suggestions supports multiline instructions. You can refine the specifications to your needs to get better suggestions. We will practice this method throughout the blog post.\n\n## Learning a new programming language: Python  \n\nNow, let's dig into learning Python, which is one of the [supported languages in Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#supported-languages). 
\n\nBefore diving into the source code, make sure to set up your development environment.\n\n### Development environment for Python \n\n1) Create a new project `learn-python-ai` in GitLab, and clone the project into your development environment. All code snippets are available in this [\"Learn Python with AI\" project](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-python-ai).\n\n```shell\ngit clone https://gitlab.com/NAMESPACE/learn-python-ai.git\n\ncd learn-python-ai\n\ngit status\n```\n\n2) Install Python and the build toolchain. Example on macOS using Homebrew:\n\n```\nbrew install python\n```\n\n3) Consider adding a `.gitignore` file for Python, for example this [.gitignore template for Python](https://gitlab.com/gitlab-org/gitlab/-/blob/master/vendor/gitignore/Python.gitignore?ref_type=heads). \n\nYou are all set to learn Python! \n\n### Hello, World\n\nStart your learning journey in the [official documentation](https://www.python.org/about/gettingstarted/), and review the linked resources, for example, the [Python tutorial](https://docs.python.org/3/tutorial/index.html). The [library](https://docs.python.org/3/library/index.html) and [language reference](https://docs.python.org/3/reference/index.html) documentation can be helpful, too. \n\n**Tip:** When I touched base with Python in 2005, I did not have many use cases except as a framework to test Windows 2000 drivers. Later, in 2016, I refreshed my knowledge with the book \"Head First Python, 2nd Edition,\" providing great practical examples for the best learning experience – two weeks later, I could explain the differences between Python 2 and 3. You do not need to worry about Python 2 – it has been deprecated some years ago, and we will focus only on Python 3 in this blog post. In August 2023, \"[Head First Python, 3rd Edition](https://www.oreilly.com/library/view/head-first-python/9781492051282/)\" was published. 
The book provides a great learning resource, along with the exercises shared in this blog post. \n\nCreate a new file `hello.py` in the root directory of the project and start with a comment saying `# Hello world`. Review and accept the suggestion by pressing the `tab` key and save the file (keyboard shortcut: cmd s). \n\n```\n# Hello world\n```\n\nCommit the change to the Git repository. In VS Code, use the keyboard shortcut `ctrl shift G`, add a commit message, and hit `cmd enter` to submit. \n\nUse the command palette (`cmd shift p`) and search for `create terminal` to open a new terminal. Run the code with the Python interpreter. On macOS, the binary from Homebrew is called `python3`, other operating systems and distributions might use `python` without the version.\n\n```shell\npython3 hello.py\n```\n\n![Hello World, hello GitLab Duo Code Suggestions](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_python_code_sugestions_hello_world.png)\n\n**Tip:** Adding code comments in Python starting with the `#` character before you start writing a function or algorithm will help Code Suggestions with more context to provide better suggestions. In the example above, we did that with `# Hello world`, and will continue doing so in the next exercises.\n\nAdd `hello.py` to Git, commit all changes and push them to your GitLab project.\n\n```shell\ngit add hello.py\n\ngit commit -avm \"Initialize Python\"\n\ngit push\n```\n\nThe source code for all exercises in this blog post is available in this [\"Learn Python with AI\" project](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-python-ai).\n\n## Start learning Python with a practical example \n\nThe learning goal in the following sections involves diving into the language datatypes, variables, flow control, and functions. We will also look into file operations, string parsing, and data structure operations for printing the results. 
The exercises will help build a command-line application that reads different log formats, works with the data, and provides a summary. This will be the foundation for future projects that fetch logs from REST APIs, and inspire more ideas such as rendering images, creating a web server, or adding Observability metrics.\n\n![Parsing log files into structured objects, example result after following the exercises](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_terminal_parsing_logs_and_pretty_print_results.png)\n\nAs an experienced admin, you can put the script into production and use real-world log format exmples. Parsing and analyzing logs in stressful production incidents can be time-consuming. A local CLI tool is sometimes faster than a log management tool.\n\nLet's get started: Create a new file called `log_reader.py` in the directory root, add it to Git, and create a Git commit.\n\n### Define variables and print them\n\nAs a first step, we need to define the log files location, and the expected file suffix. Therefore, let's create two variables and print them. Actually, ask Code Suggestions to do that for you by writing only the code comments and accepting the suggestions. Sometimes, you need to experiment with suggestions and delete already accepted code blocks. 
Do not worry – the quality of the suggestions will improve over time as the model generates better suggestions with more context.\n\n![Define log path and file suffix variables](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_log_reader_variables_01.png){: .shadow}\n\n![Print the variables to verify](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_log_reader_variables_02.png){: .shadow}\n\n```python\n# Specify the path and file suffix in variables\npath = '/var/log/'\nfile_suffix = '.log'\n\n# Print the variables \n\nprint(path)\nprint(file_suffix)\n```\n\nNavigate into the VS Code terminal and run the Python script:\n\n```shell\npython3 log_reader.py\n```\n\n![VS Code terminal, printing the variables](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_terminal_print_variables.png)\n\nPython supports many different types in the [standard library](https://docs.python.org/3/library/index.html). Most common types are: Numeric (int, float, complex), Boolean (True, False), and String (str). Data structures include support for lists, tuples, and dictionaries. \n\n### Explore variable types \n\nTo practice different variable types, let's define a limit of log files to read as a variable with the `integer` type.\n\n![Log file variable](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_log_reader_variables_03.png){: .shadow}\n\n```python\n# Define log file limit variable \nlog_file_limit = 1024 \n```\n\nCreate a Boolean variable that forces to read all files in the directory, no matter the log file suffix. 
\n\n```python\n# Define boolean variable whether to read all files recursively\nread_all_files_recursively = True\n```\n\n## File I/O: Read and print a log file\n\nCreate a directory called `log-data` in your project tree. You can copy all file examples from the [log-data directory in the example project](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-python-ai/-/tree/main/log-data?ref_type=heads).  \n\nCreate a new file `sample.log` with the following content, or any other two lines that provide a different message at the end.\n\n```\nOct 17 00:00:04 ebpf-chaos systemd[1]: dpkg-db-backup.service: Deactivated successfully.\nOct 17 00:00:04 ebpf-chaos systemd[1]: Finished Daily dpkg database backup service.\n```\n\nInstruct Code Suggestions to read the file `log-data/sample.log` and print the content. \n\n![Code Suggestions: Read log file and print it](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_read_log_file_and_print.png){: .shadow}\n\n```python\n# Read the file in log-data/sample.log and print its content\nwith open('log-data/sample.log', 'r') as f:\n    print(f.read())\n```\n\n**Tip:** You will notice the indent here. The `with open() as f:` statement opens a new scope where `f` is available as stream. This flow requires indenting )`tab`) the code block, and perform actions in this scope, calling `f.read()` to read the file contents, and passing the immediate value as parameter into the `print()` function.\n\nNavigate into the terminal, and run the script again with `python3 log_reader.py`. 
You will see the file content shown in the VS Code editor, also printed into the terminal.\n\n![VS Code terminal: Read log file, and print it](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_terminal_print_logfile_show_file_sample.png)\n\n## Flow control \n\nReading one log file is not enough – we want to analyze all files in a given directory recursively. For the next exercise, we instruct Code Suggestions to create an index of all files. \n\nPrepare the `log-data` directory with more example files from the [log-data directory in the example project](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-python-ai/-/tree/main/log-data?ref_type=heads). The directory tree should look as follows:\n\n```shell\ntree log-data                                                             ─╯\nlog-data\n├── sample.log\n└── var\n    └── log\n        ├── auth.log\n        ├── syslog.log\n        └── syslog_structured.log\n\n3 directories, 4 files\n```\n\n### Loops and lists to collect files \n\nModify the `path` variable to use the value `log-data/`. \n\n```python\n# Specify the path and file suffix in variables\npath = 'log-data/'\nfile_suffix = '.log'\n```\n\nTell Code Suggestions to read all file paths in the directory into a list. After the collection loop, print the list of file paths. 
\n\n```python\n# Read all file paths in the directory into a list\n\n# Print the list of log file paths\n```\n\n![Code Suggestion, collect file paths](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_collect_files.png){: .shadow}\n\nAn example of a suggestion could look like this:\n\n```python\n# Read all file paths in the directory into a list\nimport os\n\n# Read all file paths in the directory into a list\nlog_files = []\nfor root, directories, files in os.walk(path):\n    for file in files:\n        if file.endswith(file_suffix):\n            log_files.append(os.path.join(root, file))\n\n# Print the list of log file paths\n\nprint(log_files)\n```\n\nLet's analyze what happens here: The `import os` statement is required to bring the `os` library into the current scope, and we are able to call `os.walk()` later. The `log_files` [list](https://docs.python.org/3/library/stdtypes.html#sequence-types-list-tuple-range) is initialized as an empty list using empty brackets. `os.walk()` expects a path as directory tree to start searching for files. The function returns a generator object, which will walk the directory tree recursively. The returned tuples include the collected files, which we want to access in a loop iteraor, using the `for` loop notation. When the file ends with the defined suffix, its full path is appended to the list. This is an optimized and complex example. Do not worry about understanding everything just yet - we will revisit this later. \n\n### Conditionally collect files \n\nModify the instructions and add the Boolean flag evaluation for `read_all_files_recursively` as an input to the glob() function. 
\n\n```python\n# Read all file paths in the top level directory into a list\n# If read_all_files_recursively is True, read all files recursively\n```\n\n![Code Suggestions, read files recursively conditionally](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_read_files_recursively_conditionally.png){: .shadow}\n\n```\n# Read all file paths in the top level directory \nimport os\n\n# Read all file paths in the top level directory into a list\n# If read_all_files_recursively is True, read all files recursively\n\nlog_files = []\nfor file in os.listdir(path):\n    if file.endswith(file_suffix):\n        log_files.append(os.path.join(path, file))\n\nif read_all_files_recursively:\n    for root, directories, files in os.walk(path):\n        for file in files:\n            if file.endswith(file_suffix):               \n                log_files.append(os.path.join(root, file))\n\n# Print log_files\nprint(log_files)\n```\n\nThe result is not optimal yet because it always executes the first loop, and optionally the second loop. This flow leads to duplicated results when the script is executed.\n\n```\npython3 log_reader.py\n\n['log-data/sample.log', 'log-data/sample.log', 'log-data/var/log/auth.log']\n```\n\nExperiment with Code Suggestions instructions to get a solution for the problem. There are different approaches you can take: \n\n1) A potential solution is to wrap the source code into an if-then-else block, and move the `os.listdir()` loop into the else-block. 
\n\n```python\nif read_all_files_recursively:\n    for root, directories, files in os.walk(path):\n        for file in files:\n            if file.endswith(file_suffix):               \n                log_files.append(os.path.join(root, file))\nelse:\n    for file in os.listdir(path):\n        if file.endswith(file_suffix):\n            log_files.append(os.path.join(path, file))  \n\n```\n\n2) Alternatively, do not use `append()` to always add a new list entry, but check if the item exists in the list first. \n\n```python\nfor file in os.listdir(path):\n    if file.endswith(file_suffix):\n        # check if the entry exists in the list already\n        if os.path.isfile(os.path.join(path, file)):\n            log_files.append(os.path.join(path, file))\n\nif read_all_files_recursively:\n    for root, directories, files in os.walk(path):\n        for file in files:\n            if file.endswith(file_suffix):\n                # check if the entry exists in the list already\n                if file not in log_files:\n                    log_files.append(os.path.join(root, file))\n```\n\n3) Or, we could eliminate duplicate entries after collecting all items. Python allows converting lists into [sets](https://docs.python.org/3/library/stdtypes.html#set-types-set-frozenset), which hold unique entries. After applying `set()`, you can again convert the set back into a list. Code Suggestions knows about this possibility, and will help with the comment `# Ensure that only unique file paths are in the list` \n\n![Code Suggestions, converting a list to unique items](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_unique_list.png){: .shadow}\n\n```python\n# Ensure that only unique file paths are in the list\n\nlog_files = list(set(log_files))\n```\n\n4) Take a step back and evaluate whether the variable read_all_files_recursively makes sense. 
Maybe the default behavior should just be reading all files recursively?\n\n**Tip for testing different paths in VS Code:** Select the code blocks, and press [`cmd /` on macOS](https://code.visualstudio.com/docs/getstarted/keybindings) to comment out the code. \n\n## Functions \n\nLet's create a function called `parse_log_file` that parses a log file, and returns the extracted data. We will define the expected log format and columns to extract, following the [syslog format specification](https://en.wikipedia.org/wiki/Syslog). There are different log format types and also customized formats by developers that need to be taken into account – exercise for later. \n\n### Start with a simple log format \n\nInspect a running Linux VM, or use the following example log file example for additional implementation.\n\n```\nless /var/log/syslog | grep -v docker \n\nOct 17 00:00:04 ebpf-chaos systemd[1]: Starting Daily dpkg database backup service...\nOct 17 00:00:04 ebpf-chaos systemd[1]: Starting Rotate log files...\nOct 17 00:00:04 ebpf-chaos systemd[1]: dpkg-db-backup.service: Deactivated successfully.\nOct 17 00:00:04 ebpf-chaos systemd[1]: Finished Daily dpkg database backup service.\nOct 17 00:00:04 ebpf-chaos systemd[1]: logrotate.service: Deactivated successfully.\nOct 17 00:00:04 ebpf-chaos systemd[1]: Finished Rotate log files.\nOct 17 00:17:01 ebpf-chaos CRON[727495]: (root) CMD (   cd / && run-parts --report /etc/cron.hourly)\n```\n\nWe can create an algorithm to split each log line by whitespaces, and then join the results again. Let's ask Code Suggestions for help. 
\n\n```python\n# Split log line \"Oct 17 00:00:04 ebpf-chaos systemd[1]: Finished Rotate log files.\" by whitespaces and save in a list\n\nlog_line = \"Oct 17 00:00:04 ebpf-chaos systemd[1]: Finished Rotate log files.\"\nlog_line_split = log_line.split(\" \")\nprint(log_line_split)\n```\n\nRun the script again to verify the result.\n\n```shell\npython3 log_reader.py\n\n['Oct', '17', '00:00:04', 'ebpf-chaos', 'systemd[1]:', 'Finished', 'Rotate', 'log', 'files.']\n```\n\nThe first three items are part of the datetime string, followed by the host, service, and remaining log message items. Let's practice string operations in Python as the next step. \n\n### String and data structure operations\n\nLet's ask Code Suggestions for help with learning to join strings, and perform list operations.\n\n1. Join the first three items with a whitespace again. \n2. Keep host and service. \n3. Join the remaining variable item count into a string, separated with whitespaces, again. \n4. Store the identified column keys, and their respective values in a new data structure: [dictionary](https://docs.python.org/3/library/stdtypes.html#mapping-types-dict). 
\n\n![Code suggestions for list items with string operations](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_list_items_string_join_extract.png){: .shadow}\n\n```shell \npython3 log_reader.py\n\n# Array\n['Oct', '17', '00:00:04', 'ebpf-chaos', 'systemd[1]:', 'Finished', 'Rotate', 'log', 'files.']\n\n# Dictionary \n{'datetime': 'Oct 17 00:00:04', 'host': 'ebpf-chaos', 'service': 'systemd[1]:', 'message': ' ebpf-chaos systemd[1]: Finished Rotate log files.'}\n```\n\nA working suggestion can look like the following:\n\n```python\n# Initialize results dictionary with empty values for datetime, host, service, message\n# Loop over log line split \n# Join the first three list items as date string\n# Item 4: host \n# Item 5: service\n# Join the remaining items into a string, separated with whitespaces \n# Print the results after the loop \n\nresults = {'datetime': '', 'host': '', 'service': '', 'message': ''}\n\nfor item in log_line_split:\n\n    if results['datetime'] == '':\n        results['datetime'] = ' '.join(log_line_split[0:3])\n\n    elif results['host'] == '':\n        results['host'] = log_line_split[3]\n\n    elif results['service'] == '':\n        results['service'] = log_line_split[4]\n\n    else:\n        results['message'] += ' ' + item\n\nprint(results)\n\n```\n\nThe suggested algorithm loops over all log line items, and applies the same operation for the first three items. `log_line_split[0:3]` extracts a slice of three items into a new list. Calling `join()` on a separator character and passing the array as an argument joins the items into a string. The algorithm continues to check for not initialized values for host (Item 4) and service (Item 5)and concludes with the remaining list items appended into the message string. To be honest, I would have used a slightly different algorithm, but it is a great learning curve to see other algorithms, and ways to implement them. 
Practice with different instructions, and data structures, and continue printing the data sets. \n\n**Tip:** If you need to terminate a script early, you can use `sys.exit()`. The remaining code will not be executed. \n\n```python\nimport sys \nsys.exit(1)\n```\n\nImagine doing these operations for different log formats, and message types – it can get complicated and error-prone very quickly. Maybe there is another approach. \n\n### Parse log files using regular expressions\n\nThere are different syslog format RFCs – [RFC 3164](https://datatracker.ietf.org/doc/html/rfc3164) is obsolete but still found in the wild as default configuration (matching the pattern above), while [RFC 5424](https://datatracker.ietf.org/doc/html/rfc5424) is more modern, including datetime with timezone information. Parsing this format can be tricky, so let's ask Code Suggestions for advice. \n\nIn some cases, the suggestions include regular expressions. They might not match immediately, making the code more complex to debug, with trial and errors. A good standalone resource to text and explain regular expressions is [regex101.com](https://regex101.com/).  \n\n**Tip:** You can skip diving deep into regular expressions using the following code snippet as a quick cheat. The next step involves instructing Code Suggestions to use these log patterns, and help us extract all valuable columns. \n\n```python\n# Define the syslog log format regex in a dictionary\n# Add entries for RFC3164, RFC5424\nregex_log_pattern = {\n    'rfc3164': '([A-Z][a-z][a-z]\\s{1,2}\\d{1,2}\\s\\d{2}[:]\\d{2}[:]\\d{2})\\s([\\w][\\w\\d\\.@-]*)\\s(.*)$',\n    'rfc5424': '(?:(\\d{4}[-]\\d{2}[-]\\d{2}[T]\\d{2}[:]\\d{2}[:]\\d{2}(?:\\.\\d{1,6})?(?:[+-]\\d{2}[:]\\d{2}|Z)?)|-)\\s(?:([\\w][\\w\\d\\.@-]*)|-)\\s(.*)$;'\n}\n```\n\nWe know what the function should do, and its input parameters – the file name, and a log pattern to match. 
The log lines should be split by this regular expression, returning a key-value dictionary for each log line. The function should return a list of dictionaries. \n\n```python\n# Create a function that parses a log file\n# Input parameter: file path\n# Match log line against regex_log_pattern\n# Return the results as dictionary list: log line, pattern, extracted columns\n```\n\n![Code suggestion based on a multiline comment instruction to get a function that parses a log file based on regex patterns](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_log_format_regex_function_instructions_01.png){: .shadow}\n\nRemember the indent for opening a new scope? The same applies for functions in Python. The `def` identifier requires a function name, and a list of parameters, followed by an opening colon. The next lines of code require the indent. VS Code will help with live-linting wrong indent, before the script execution fails, or the CI/CD pipelines. \n\nContinue with Code Suggestions – it might already know that you want to parse all log files, and parse them using the newly created function. 
\n\n![Code suggestion to parse all log files, and print the result set](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_log_format_regex_function_instructions_02.png){: .shadow}\n\nA full working example can look like this: \n\n```\nimport os\n\n# Specify the path and file suffix in variables\npath = 'log-data/'\nfile_suffix = '.log'\n\n# Read all file paths in the directory into a list\nlog_files = []\nfor root, directories, files in os.walk(path):\n    for file in files:\n        if file.endswith(file_suffix):\n            log_files.append(os.path.join(root, file))\n\n# Define the syslog log format regex in a dictionary\n# Add entries for RFC3164, RFC5424\nregex_log_pattern = {\n    'rfc3164': '([A-Z][a-z][a-z]\\s{1,2}\\d{1,2}\\s\\d{2}[:]\\d{2}[:]\\d{2})\\s([\\w][\\w\\d\\.@-]*)\\s(.*)$',\n    'rfc5424': '(?:(\\d{4}[-]\\d{2}[-]\\d{2}[T]\\d{2}[:]\\d{2}[:]\\d{2}(?:\\.\\d{1,6})?(?:[+-]\\d{2}[:]\\d{2}|Z)?)|-)\\s(?:([\\w][\\w\\d\\.@-]*)|-)\\s(.*)$;'\n}\n\n# Create a function that parses a log file\n# Input parameter: file path\n# Match log line against regex_log_pattern\n# Return the results as dictionary list: log line, pattern name, extracted columns\nimport re\n\ndef parse_log_file(file_path):\n    # Read the log file\n    with open(file_path, 'r') as f:\n        log_lines = f.readlines()\n\n    # Create a list to store the results\n    results = []\n\n    # Iterate over the log lines\n    for log_line in log_lines:\n        # Match the log line against the regex pattern\n        for pattern_name, pattern in regex_log_pattern.items():\n            match = re.match(pattern, log_line)\n\n            # If the log line matches the pattern, add the results to the list\n            if match:\n                extracted_columns = match.groups()\n                results.append({\n                    'log_line': log_line,\n                    'pattern_name': pattern_name,\n                    
'extracted_columns': extracted_columns,\n                    'source_file': file_path\n                })\n\n    # Return the results\n    return results\n\n# Parse all files and print results\nfor log_file in log_files:\n    results = parse_log_file(log_file)\n    print(results)\n```\n\nLet's unpack what the `parse_log_file()` function does:\n\n1. Opens the file from `file_path` parameter. \n2. Reads all lines into a new variable `log_lines`. \n3. Creates a results list to store all items. \n4. Iterates over the log lines. \n5. Matches against all regex patterns configured in regex_log_pattern. \n6. If a match is found, extracts the matching column values.\n7. Creates a results item, including the values for the keys `log_line`, `pattern_name`, `extracted_colums`, `source_file`. \n8. Appends the results item to the results list.\n9. Returns the results list. \n\nThere are different variations to this – especially for the returned result data structure. For this specific case, log lines come as list already. Adding a dictionary object instead of a raw log line allows function callers to extract the desired information in the next step. Once a working example has been implemented, you can refactor the code later, too. \n\n### Advanced log format: auth.log\n\nParsing the syslog on a Linux distribution might not unveil the necessary data to analyze. On a virtual machine that exposes port 22 (SSH) to the world, the authentication log is much more interesting – plenty of bots and malicious actors testing default password combinations and often brute force attacks.\n\nThe following snippet from `/var/log/auth.log` on one of my private servers shows the authentication log format and the random attempts from bots using different usernames, etc. 
\n\n```\nOct 15 00:00:19 ebpf-chaos sshd[3967944]: Failed password for invalid user ubuntu from 93.254.246.194 port 48840 ssh2\nOct 15 00:00:20 ebpf-chaos sshd[3967916]: Failed password for root from 180.101.88.227 port 44397 ssh2\nOct 15 00:00:21 ebpf-chaos sshd[3967944]: Received disconnect from 93.254.246.194 port 48840:11: Bye Bye [preauth]\nOct 15 00:00:21 ebpf-chaos sshd[3967944]: Disconnected from invalid user ubuntu 93.254.246.194 port 48840 [preauth]\nOct 15 00:00:24 ebpf-chaos sshd[3967916]: Failed password for root from 180.101.88.227 port 44397 ssh2\nOct 15 00:00:25 ebpf-chaos sshd[3967916]: Received disconnect from 180.101.88.227 port 44397:11:  [preauth]\nOct 15 00:00:25 ebpf-chaos sshd[3967916]: Disconnected from authenticating user root 180.101.88.227 port 44397 [preauth]\nOct 15 00:00:25 ebpf-chaos sshd[3967916]: PAM 2 more authentication failures; logname= uid=0 euid=0 tty=ssh ruser= rhost=180.101.88.227  user=root\nOct 15 00:00:25 ebpf-chaos sshd[3967998]: Invalid user teamspeak from 185.218.20.10 port 33436\n```\n\n**Tip for intrusion prevention:** Add a firewall setup, and use [fail2ban](https://en.wikipedia.org/wiki/Fail2ban) to block invalid auth logins. \n\nThe next exercise is to extend the logic to understand the free form log message parts, for example `Failed password for invalid user ubuntu from 93.254.246.194 port 48840 ssh2`. The task is to store the data in an optional dictionary with key value pairs. \n\nCreate a new function that takes the previously parsed log line results as input, and specifically parses the last list item for each line.\n\n1. Count the number of `Failed password` and `Invalid user` messages.\n2. 
Return the results with count, log file, pattern \n\n![Code suggestions for a log file message parser to count auth.log failures](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_parse_log_message_auth_log.png){: .shadow}\n\nA working suggestion can look like the following code:\n\n```python\n# Create a function that parses a log file message from the last extracted_columns entry \n# Input: Parsed log lines results list \n# Loop over all log lines in the list, and extract the last list item as message \n# Count failure strings in the message: Failed password, Invalid user \n# Return the results if failure count greater 0: log_file, count, failure string\ndef parse_log_file_message(results):\n    failure_results = []\n\n    # Iterate over the log lines\n    for result in results:\n        # Extract the message from the last list item\n        message = result['extracted_columns'][-1]\n\n        # Count the number of failure strings in the message\n        failure_count = message.count('Failed password') + message.count('Invalid user')\n\n        # If the failure count is greater than 0, add the results to the list\n        if failure_count > 0:\n            failure_results.append({\n                'log_file': result['source_file'],\n                'count': failure_count,\n                'failure_string': message\n            })\n\n    # Return the results\n    return failure_results\n\n# Parse all files and print results\nfor log_file in log_files:\n    results = parse_log_file(log_file)\n    failure_results = parse_log_file_message(results)\n    print(failure_results)\n```\n\nThe algorithm follows the previous implementations: First, create a results array to store matching data. Then, iterate over the already parsed log_lines in the list. Each log line contains the `extracted_columns` key, which holds the free-form message string at the end. 
The next step is to call the string object function `count()` to count how many times a given character sequence is contained in a string. The returned numbers are added up to the `failure_count` variable. If it is greater than zero, the result is added to the results list, including the `log_file`, `count` and `failure_string` key-value pairs. After returning the parsed log message results, loop through all log files, parse them, and print the results again. \n\nExecute the script to inspect the detected matches. Note that the data structure can be optimized in future learning steps.\n\n```\npython3 log_reader.py\n\n[{'log_file': 'log-data/var/log/auth.log', 'count': 1, 'failure_string': 'sshd[3967944]: Failed password for invalid user ubuntu from 93.254.246.194 port 48840 ssh2'}, {'log_file': 'log-data/var/log/auth.log', 'count': 1, 'failure_string': 'sshd[3967916]: Failed password for root from 180.101.88.227 port 44397 ssh2'}, {'log_file': 'log-data/var/log/auth.log', 'count': 1, 'failure_string': 'sshd[3967916]: Failed password for root from 180.101.88.227 port 44397 ssh2'}, {'log_file': 'log-data/var/log/auth.log', 'count': 1, 'failure_string': 'sshd[3967998]: Invalid user teamspeak from 185.218.20.10 port 33436'}, {'log_file': 'log-data/var/log/auth.log', 'count': 1, 'failure_string': 'sshd[3967998]: Failed password for invalid user teamspeak from 185.218.20.10 port 33436 ssh2'}, {'log_file': 'log-data/var/log/auth.log', 'count': 1, 'failure_string': 'sshd[3968077]: Invalid user mcserver from 218.211.33.146 port 50950'}]\n\n```\n\n### Parsing more types: Structured logging\n\nApplication developers can use the structured logging format to help machine parsers to extract the key value pairs. 
Prometheus provides this information in the following structure in syslog:\n\n```\nOct 17 19:00:10 ebpf-chaos prometheus[594]: ts=2023-10-17T19:00:10.425Z caller=compact.go:519 level=info component=tsdb m\nsg=\"write block\" mint=1697558404661 maxt=1697565600000 ulid=01HCZG4ZX51GTH8H7PVBYDF4N6 duration=148.675854ms\nOct 17 19:00:10 ebpf-chaos prometheus[594]: ts=2023-10-17T19:00:10.464Z caller=head.go:1213 level=info component=tsdb msg\n=\"Head GC completed\" caller=truncateMemory duration=6.845245ms\nOct 17 19:00:10 ebpf-chaos prometheus[594]: ts=2023-10-17T19:00:10.467Z caller=checkpoint.go:100 level=info component=tsd\nb msg=\"Creating checkpoint\" from_segment=2308 to_segment=2309 mint=1697565600000\nOct 17 19:00:10 ebpf-chaos prometheus[594]: ts=2023-10-17T19:00:10.517Z caller=head.go:1185 level=info component=tsdb msg\n=\"WAL checkpoint complete\" first=2308 last=2309 duration=50.052621ms\n```\n\nThis format is easier to parse for scripts, because the message part can be split by whitespaces, and the assignment character `=`. Strings that contain whitespaces are guaranteed to be enclosed with quotes. The downside is that not all programming language libraries provide ready-to-use structured logging libraries, making it harder for developers to adopt this format. \n\nPractice following the previous example to parse the `auth.log` format with additional information. 
Tell Code Suggestions that you are expecting structured logging format with key-value pairs, and which returned data structure would be great:\n\n```python\n# Create a function that parses a log file message from the last extracted_columns entry \n# Input: Parsed log lines results list \n# Loop over all log lines in the list, and extract the last list item as message \n# Parse structured logging key-value pairs into a dictionary\n# Return results: log_file, dictionary \n```\n\n![Code suggestions for parsing structured logging format in the log file message part](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_propose_structured_logging_message_parser.png){: .shadow}\n\n### Printing results and formatting\n\nMany of the examples used the `print()` statement to print the content on the terminal. Python objects in the standard library support text representation, and for some types it makes more sense (string, numbers), others cannot provide much details (functions, etc.). \n\nYou can also pretty-print almost any data structure (lists, sets, dictionaries) in Python. The JSON library can format data structures in a readable format, and use a given spaces indent to draw the JSON structure on the terminal. Note that we use the `import` statement here to bring libraries into the current scope, and access their methods, for example `json.dumps`. \n\n```python\nimport json \nprint(json.dumps(structured_results, indent=4))\n```\n\n![Parsing log files into structured objects, example result after following the exercises](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_terminal_parsing_logs_and_pretty_print_results.png)\n\nPractice with modifying the existing source code, and replace the code snippets where appropriate. 
Alternatively, create a new function that implements pretty printing.\n\n```python\n# Create a pretty print function with indent 4 \n```\n\n![Code suggestions for pretty-print function](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/vs_code_code_suggestions_pretty_print.png){: .shadow}\n\nThis idea works in a similar fashion with creating your own logger functions...but we have to stop learning and take a break. Before we conclude the first blog post in the learning series, let's ensure that CI/CD and dependencies are set up properly for future exercises and async practice. \n\n## Dependency management and continuous verification  \n\n### Pip and pyenv: Bringing structure into Python \n\nDependencies can be managed in the [`requirements.txt` file](https://pip.pypa.io/en/stable/reference/requirements-file-format/), including optional version dependencies. Using the `requirements.txt` file also has the advantage of being the single source of truth for local development environments and running continuous builds with GitLab CI/CD. They can use the same installation command:\n\n```shell\npip install -r requirements.txt\n```\n\nSome Linux distributions do not install the pip package manager by default, for example, Ubuntu/Debian require installing the `python3-pip` package. \n\nYou can manage different virtual environments using [venv](https://docs.python.org/3/library/venv.html). This workflow can be beneficial to install Python dependencies into the virtual environment, instead of globally into the OS path which might break on upgrades. \n\n```shell\npip install virtualenv\nvirtualenv venv\nsource venv/bin/activate \n```\n\n### Automation: Configure CI/CD pipeline for Python\n\nThe [CI/CD pipeline](https://docs.gitlab.com/ee/ci/) should continuously lint, test, and build the code. You can mimic the steps from the local development, and add testing for more environments and versions: \n\n1. 
Lint the source code and check for formatting errors. The example uses [Pyflakes](https://pypi.org/project/pyflakes/), a mature linter, and [Ruff](https://docs.astral.sh/ruff/ ), a fast linter written in Rust. \n2. Cache dependencies installed using the pip package manager, following the documentation for [Python caching in GitLab CI/CD](https://docs.gitlab.com/ee/ci/caching/#cache-python-dependencies). This saves time and resources on repeated CI/CD pipeline runs.\n3. Use parallel matrix builds to test different Python versions, based on the available container images on Docker Hub and their tags. \n\n```yaml\nstages:\n  - lint\n  - test\n\ndefault:\n  image: python:latest\n  cache:                      # Pip's cache doesn't store the python packages\n    paths:                    # https://pip.pypa.io/en/stable/topics/caching/\n      - .cache/pip\n  before_script:\n    - python -V               # Print out python version for debugging\n    - pip install virtualenv\n    - virtualenv venv\n    - source venv/bin/activate\n\nvariables:  # Change pip's cache directory to be inside the project directory since we can only cache local items.\n  PIP_CACHE_DIR: \"$CI_PROJECT_DIR/.cache/pip\"\n\n# lint template\n.lint-tmpl:\n  script:\n    - echo \"Linting Python version $VERSION\"\n  parallel:\n    matrix:\n      - VERSION: ['3.9', '3.10', '3.11', '3.12']   # https://hub.docker.com/_/python\n\n# Lint, using Pyflakes: https://pypi.org/project/pyflakes/ \nlint-pyflakes:\n  extends: [.lint-tmpl]\n  script:\n    - pip install -r requirements.txt\n    - find . 
-not -path './venv' -type f -name '*.py' -exec sh -c 'pyflakes {}' \\;\n\n# Lint, using Ruff (Rust): https://docs.astral.sh/ruff/ \nlint-ruff:\n  extends: [.lint-tmpl]\n  script:\n    - pip install -r requirements.txt\n    - ruff .\n```\n\n![GitLab CI/CD Python lint job view, part of matrix builds](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/gitlab_cicd_python_lint_job_log_view.png)\n\n## What is next \n\nFun fact: GitLab Duo Code Suggestions also helped writing this blog post in VS Code, knowing about the context. In the screenshot, I just wanted to add a tip about [regex101](https://regex101.com/), and GitLab Duo already knew. \n\n![Writing the GitLab blog post in VS Code with support from GitLab Duo Code Suggestions](https://about.gitlab.com/images/blogimages/learn-python-with-ai-code-suggestions-getting-started/gitlab_duo_code_suggestions_helping_write_the_learning_python_ai_blog_post.png)\n\nIn an upcoming blog, we will look into advanced learning examples with more practical (log) filtering and parallel operations, how to fetch logs from API endpoints (CI/CD job logs for example), and more data analytics and observability. Until then, here are a few recommendations for practicing async.\n\n### Async learning exercises\n\n- Implement the missing `log_file_limit` variable check. \n- Print a summary of the results in Markdown, not only JSON format. \n- Extend the script to accept a search filter as environment variable. Print/count only filtered results. \n- Extend the script to accept a date range. It might require parsing the datetime column in a time object to compare the range. \n- Inspect a GitLab CI/CD pipeline job log, and download the raw format. Extend the log parser to parse this specific format, and print a summary. \n\n### Share your feedback\n\nWhich programming language are you learning or considering learning? 
Start a new topic on our [community](/community/) forum or Discord and share your experience.\n\nWhen you use [GitLab Duo](/gitlab-duo/) Code Suggestions, please share your thoughts and feedback [in the feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/405152).\n",[478,9,940,704],{"slug":3253,"featured":6,"template":684},"learning-python-with-a-little-help-from-ai-code-suggestions","content:en-us:blog:learning-python-with-a-little-help-from-ai-code-suggestions.yml","Learning Python With A Little Help From Ai Code Suggestions","en-us/blog/learning-python-with-a-little-help-from-ai-code-suggestions.yml","en-us/blog/learning-python-with-a-little-help-from-ai-code-suggestions",{"_path":3259,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3260,"content":3264,"config":3269,"_id":3271,"_type":13,"title":3272,"_source":15,"_file":3273,"_stem":3274,"_extension":18},"/en-us/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started",{"title":3261,"description":3243,"ogTitle":3261,"ogDescription":3243,"noIndex":6,"ogImage":3244,"ogUrl":3262,"ogSiteName":669,"ogType":670,"canonicalUrls":3262,"schema":3263},"Learning Rust with a little help from AI","https://about.gitlab.com/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Learning Rust with a little help from AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2023-08-10\",\n      }",{"title":3261,"description":3243,"authors":3265,"heroImage":3244,"date":3266,"body":3267,"category":702,"tags":3268},[1612],"2023-08-10","Learning a new programming language can help broaden your software development expertise, open career opportunities, or create fun challenges. However, it can be difficult to decide on one specific approach to learning a new language. 
Artificial intelligence (AI) can help. In this tutorial, you'll learn how to leverage AI-powered GitLab Duo Code Suggestions for a guided experience in learning the Rust programming language.\n\n- [Preparations](#preparations)\n  - [VS Code](#vs-code)\n  - [Code Suggestions](#code-suggestions)\n- [Learning a new programming language: Rust](#learning-a-new-programming-language-rust)\n    - [Development environment for Rust](#development-environment-for-rust)\n    - [Hello, World](#hello-world)\n- [Cargo: Bringing structure into Rust](#cargo-bringing-structure-into-rust)\n- [Automation: Configure CI/CD pipeline for Rust](#automation-configure-cicd-pipeline-for-rust)\n- [Continue learning Rust](#continue-learning-rust)\n    - [Define variables and print them](#define-variables-and-print-them)\n    - [Explore variable types](#explore-variable-types)\n    - [Flow control: Conditions and loops](#flow-control-conditions-and-loops)\n    - [Functions](#functions)\n    - [Testing](#testing)\n- [What is next](#what-is-next)\n    - [Async learning exercises](#async-learning-exercises)\n    - [Share your feedback](#share-your-feedback)\n\n## Preparations \nChoose your [preferred and supported IDE](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-in-other-ides-and-editors), and follow the documentation to enable code suggestions for [GitLab.com SaaS](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-gitlab-saas) or [GitLab self-managed instances](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#enable-code-suggestions-on-self-managed-gitlab).\n\nProgramming languages can require an install of the language interpreter command-line tools or compilers that generate binaries from source code to build and run the application.\n\nTip: You can also use [GitLab Remote Development workspaces](/blog/quick-start-guide-for-gitlab-workspaces/) to create your own cloud 
development environments, instead of local development environments. This blog post focuses on using VS Code and the GitLab Web IDE. \n\n### VS Code\nOn macOS, you can [install VS Code](https://code.visualstudio.com/download) as a Homebrew cask or manually download and install it. \n\n```shell\nbrew install --cask visual-studio-code \n```\n\nNavigate to the `Extensions` menu and search for `gitlab workflow`. Install the [GitLab workflow extension for VS Code](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow). \n\nTip: VS Code will also detect the programming languages, and offer to install additional plugins for syntax highlighting and development experience. \n\n### Code Suggestions\nIt can help to familiarize yourself with suggestions before actually verifying the suggestions. GitLab Code Suggestions are provided as you type, so you do not need to use specific keyboard shortcuts. To accept a code suggestion, press the `tab` key. Also note that writing new code works more reliably than refactoring existing code. AI is non-deterministic, which means that the same suggestion may not be repeated after deleting the code suggestion. While Code Suggestions is in Beta, we are working on improving the accuracy of generated content overall. Please review the [known limitations](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#known-limitations), as this could affect your learning experience. \n\n## Learning a new programming language: Rust \nNow, let's dig into learning Rust, which is one of the [supported languages in Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html#supported-languages). \n\n[Rust by Example](https://doc.rust-lang.org/rust-by-example/) provides a great tutorial for beginners, together with the official [Rust book](https://doc.rust-lang.org/book/). The [Hands-on Rust book](https://hands-on-rust.com/) shows how to build a 2D game as a more practical approach. 
More examples are shared in [this Rust book list](https://github.com/sger/RustBooks). \n\nBefore diving into the source code, make sure to set up your development environment.\n\n### Development environment for Rust\n1) Create a new project `learn-rust-ai` in GitLab, and clone the project into your development environment. All code snippets are available in [this \"Learn Rust with AI\" project](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai).\n\n```shell\ngit clone https://gitlab.com/NAMESPACE/learn-rust-ai.git\n\ncd learn-rust-ai\n\ngit status\n```\n\n2) Install Rust and the build toolchain. Fortunately, this is straightforward [following the Rust install documentation](https://www.rust-lang.org/tools/install).\n\nTip for using the generic installer: Download the script and run it after review. \n\n```\n# Download and print the script before running it\ncurl -Lvs https://sh.rustup.rs -o rustup-init.sh\n\n# Run the Rust installer script\nsh rustup-init.sh \n```\n\nExample on macOS using Homebrew:\n\n```\nbrew install rust\n```\n\n1) Optional: Install the [rust-analyzer VS Code extension](https://marketplace.visualstudio.com/items?itemName=rust-lang.rust-analyzer).\n\n2) Each exercise will invite you to compile the code with the [`rustc` command](https://doc.rust-lang.org/rustc/what-is-rustc.html), and later using [`cargo` as build tool and package manager](https://doc.rust-lang.org/cargo/index.html).\n\nYou are all set to learn Rust! \n\n### Hello, World\nWe will start with [Rust by Example](https://doc.rust-lang.org/rust-by-example/), and follow the [Hello, World exercise](https://doc.rust-lang.org/rust-by-example/hello.html).\n\nCreate a new file `hello.rs` in the root directory of the project and start with a comment saying `// Hello world`. 
Next, start writing the `main` function, and verify the code suggestion.\n\n![VS Code hello.rs Rust code suggestion, asking to accept](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_hello_world_suggested.png){: .shadow}\n\nAccept the suggestion by pressing the `tab` key and save the file (keyboard shortcut: cmd s). \n\n```\n// Hello world\n\nfn main() {\n    println!(\"Hello, world!\");\n}\n```\n\n![VS Code hello.rs Rust code suggestion, accepted](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_hello_world_accepted.png){: .shadow}\n\nCommit the change to the Git repository. In VS Code, use the keyboard shortcut `ctrl shift G`, add a commit message, and hit `cmd enter` to submit. \n\nUse the command palette (`cmd shift p`) and search for `create terminal` to open a new terminal. \n\nBuild and run the code.\n\n```shell\nrustc hello.rs\n\n./hello\n```\n\n![hello.rs Rust code suggestion, accepted, compiled, run](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_hello_world_cli_build.png){: .shadow}\n\nTip: Adding [code comments in Rust (`//`)](https://doc.rust-lang.org/reference/comments.html) before you start writing a function or algorithm will help Code Suggestions with more context to provide better suggestions. In the example above, we did that with `// Hello world`, and will continue doing so in the next exercises. \n\n## Cargo: Bringing structure into Rust\n[Cargo](https://doc.rust-lang.org/rust-by-example/cargo.html) is the official Rust package management tool. It is more than that - you can run build and test commands because Cargo understands them as well. 
\n\nYou can initialize a new Cargo configuration in the current directory tree with the following command:\n\n```shell\ncargo init\n```\n\nThe directory tree invites you to add the source code into the `src/` directory, while `Cargo.toml` manages the dependencies and used compiler versions. The `.gitignore` file is also added including best practices. \n\n```shell\ntree\n.\n├── Cargo.toml\n├── README.md\n├── hello\n├── hello.rs\n└── src\n    └── main.rs\n```\n\nTry building the code and running it using `cargo`.\n\n```shell\ncargo build\n\ncargo run\n```\n\nCommit all changes and push them to your GitLab project.\n\n```shell\ngit commit -avm \"Initialize Cargo\"\n\ngit push\n```\n\nAfter exploring Cargo, let's make sure that our code is continuously tested while learning Rust. The next section explains how to set up [GitLab CI/CD](https://about.gitlab.com/topics/ci-cd/) for Rust. \n\n## Automation: Configure CI/CD pipeline for Rust\nThe [CI/CD pipeline](https://docs.gitlab.com/ee/ci/) should run two jobs in two stages: Build and test the code. The default container [image](https://docs.gitlab.com/ee/ci/yaml/#image), `rust:latest`, works in the first iteration. In order to save resources, the CI/CD configuration also supports [caching](https://docs.gitlab.com/ee/ci/caching/) for downloaded dependencies and build objects. The `CARGO_HOME` variable is set to the CI/CD job home directory to ensure everything gets appropriately cached.\n\n```yaml\nstages:\n  - build\n  - test \n\ndefault:\n  image: rust:latest\n  cache:\n    key: ${CI_COMMIT_REF_SLUG}\n    paths:                      \n      - .cargo/bin\n      - .cargo/registry/index\n      - .cargo/registry/cache\n      - target/debug/deps\n      - target/debug/build\n    policy: pull-push\n\n# Cargo data needs to be in the project directory to be cached. 
\nvariables:\n  CARGO_HOME: ${CI_PROJECT_DIR}/.cargo      \n```\n\nThe CI/CD jobs inherit the [`default`](https://docs.gitlab.com/ee/ci/yaml/#default) values, and specify the cargo commands in the [`script` section](https://docs.gitlab.com/ee/ci/yaml/#script).\n\n```yaml\nbuild-latest:\n  stage: build\n  script:\n    - cargo build --verbose\n\ntest-latest:\n  stage: build\n  script:\n    - cargo test --verbose\n```\n\nYou can see an example in [this MR](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai/-/merge_requests/1/diffs).\n\n## Continue learning Rust \nMake sure to add new source code into the `src/` directory. \n\n### Define variables and print them\nPractice adding a few more [print](https://doc.rust-lang.org/rust-by-example/hello/print.html) statements into `src/main.rs`, and then build and run the code again.\n\n1) Define a variable called `name` and assign your name as string value.\n\n2) Print the name, including a string prefix saying `Hello, `. \n\n![VS Code main.rs Rust code suggestion, first step in print](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_print_variable_first.png){: .shadow}\n\n![VS Code main.rs Rust code suggestion, second step in print](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_print_variable_second.png){: .shadow}\n\n1) Open a new terminal in VS Code using the command palette (keyboard shortcut `cmd + shift + p`) and search for `terminal`.\n\n2) Build and run the code with the `cargo build` and `cargo run` commands. 
\n\n![VS Code terminal with cargo build and run output](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_print_variable_cargo_build_run_terminal.png){: .shadow}\n\nAn example solution can be found [here](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai/-/blob/main/solutions/variable_print.rs). \n\n### Explore variable types \nDefine different variable value types ([primitives](https://doc.rust-lang.org/rust-by-example/primitives.html)) and embed them into the `print` statements. Maybe they feel familiar from other programming languages?\n\nTip: Use code comments to see which code suggestions can be useful to learn. Start with typing `// Integer addition` and see what code suggestions you can add.\n\n![VS Code main.rs Rust code suggestion, primitive types with literals and expressions](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_primitive_types_literals_operators.png)\n\nExperiment with GitLab Duo Code Suggestions. The shown examples are non-deterministic, but you may be able to add additions, subtractions, multiplications, etc., and the corresponding `println` statements just by accepting code suggestions and continuing the flow with `enter` or completing the code statements. This workflow can create a chain of code suggestions that can help you learn the Rust language. 
\n\n![Literals and expressions, first suggestion](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_literals_expressions_01.png){: .shadow}\n![Literals and expressions, second suggestion](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_literals_expressions_02.png){: .shadow}\n![Literals and expressions, third suggestion](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_literals_expressions_03.png){: .shadow}\n\nAn example solution can be found [here](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai/-/blob/main/solutions/literals_expressions.rs). \n\nThe code suggestions are not perfect. Sometimes there are errors that require you to fix the problems. When writing this blog post, I had to fix two missing semicolons at the end of the code lines. The great thing about the Rust compiler is that the error messages tell you exactly where the problem happens with suggestions to fix them. Code Suggestions and the Rust-provided build chain make writing Rust code more efficient. \n\n```rust\nprintln!(\"Hello, {}!\", name)\n\n// Integer subtraction\nlet y = 9 - 4\n```\n\n![Terminal build, errors, Rust compiler help](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_terminal_errors_rust_help.png){: .shadow}\n\nYou can try to provoke the same error by removing a semicolon at the end of a statement and then running `cargo build` in the terminal again. The Rust compiler will also warn you about unused variables to help with better code quality. The screenshot shows warnings for variable definitions, and also a CLI command to fix them. 
\n\n![Terminal build, warnings, Rust compiler help](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_terminal_warnings_rust_help.png){: .shadow}\n\n### Flow control: Conditions and loops \nNext, let's focus on [flow control](https://doc.rust-lang.org/rust-by-example/flow_control.html) with conditions, loops, etc., and how to implement them.\n\n1) Start typing `// Flow control` and see which suggestions are provided.\n\n2) Experiment with the code, and continue defining a boolean variable `v` which is set to true. \n\n```rust\n  // Flow control\n  let v = true;\n\n```\n\n![Conditions, boolean variable](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_flow_control_conditions_01.png){: .shadow}\n![Conditions, boolean variable, if condition](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_flow_control_conditions_02.png){: .shadow}\n\n1) Start typing `// Loops` and experiment with the code suggestions. \n\nLet's assume the loop looks the like following snippet. It does not have a loop counter which gets printed on every loop execution.\n\n```rust\n// Loops\nlet mut count = 0;\n\nloop {\n    count += 1;\n\n    if count == 10 {\n        break;\n    }\n}\n```\n\n2) Start typing `println!` and see which code suggestions are provided, for example `println!(\"Count: {}\", count);`. \n\n![Loops, loop counter print suggestion](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_flow_control_loops_print_counter.png)\n\n3) Apply the suggestions, and execute `cargo build && cargo run` on the terminal again. 
\n\nLet's learn more: Rust supports different loop types, for example [while loops](https://doc.rust-lang.org/rust-by-example/flow_control/while.html), [for loops](https://doc.rust-lang.org/rust-by-example/flow_control/for.html), etc. \n\n1) Type `// While loop` and verify the code suggestions. Repeat the same for `// For loop`.\n\n```rust\n// While loops\nlet mut count = 0;\n\nwhile count \u003C 10 {\n    count += 1;\n    println!(\"Count: {}\", count);\n}\n\n// For loops\nlet a = [10, 2, 3, 4, 5];\n\nfor element in a {\n    println!(\"Element: {}\", element);\n}\n```\n\nThere is more to learn with loops and conditions: Iterate over arrays, lists, maps, slices. Practice with writing comments for `// Maps and sets` and `// Vectors and strings`. \n\n![Vectors, strings](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_flow_control_vectors_strings.png){: .shadow}\n\n```rust\n  // Maps and sets\n  let mut scores = HashMap::new();\n\n  scores.insert(String::from(\"Blue\"), 10);\n  scores.insert(String::from(\"Yellow\"), 50);\n\n  for (key, value ) in &scores {\n      println!(\"{}: {}\", key, value);\n  }\n\n  // Vectors and strings\n  let mut v = Vec::new();\n\n  v.push(1);\n  v.push(2);\n\n  for element in &v {\n      println!(\"Element: {}\", element);\n  }  \n```\n\nThis snippet will fail because the `HashMap` type needs to be imported from `std::collections::HashMap`. Add the following line on top before the main function definition: \n\n```rust\nuse std::collections::HashMap;\n``` \n\n2) Build and run the code with `cargo build && cargo run`. 
\n\nAn example solution is provided [here](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai/-/blob/main/solutions/flow_control.rs).\n\n**Async exercise**: \n\n1) Modify the input values for the different data types, and build and run the code again.\n\n2) Add a condition into the loops that print the items only when a specific condition is met (for example, the number is odd or even). \n\n### Functions \n\n[Functions](https://doc.rust-lang.org/rust-by-example/fn.html) help increase code readability and testability with unit tests. Practice creating functions with the following steps: \n\n1) Two functions `isEven` and `isOdd` to evaluate whether a number is even or odd.\n\n```rust\nfn isEven(x: i32) -> bool {\n    x % 2 == 0\n}\n\nfn isOdd(x: i32) -> bool {\n    x % 2 != 0\n}\n```\n\n2) `isPrime` function to check whether a given integer value is a prime number.\n\n```rust\nfn isPrime(x: i32) -> bool {\n    let mut i = 2;\n\n    while i * i \u003C= x {\n        if x % i == 0 {\n            return false;\n        } else {\n            i += 1;\n        }\n    } \n\n    return true\n}\n```\n\n3) Create an array of integer values, loop over it, and call the functions. Let GitLab Code Suggestions guide you with the implementation by starting to type the if conditions followed by the function name. \n\n```rust\n  // Functions\n  let mut integers = vec![1, 2, 3, 4, 5];\n\n  for i in integers.iter() {\n\n      if (isEven(i)) {\n          println!(\"{} is even\", i);\n      }\n\n      if (isOdd(i)) { \n          println!(\"{} is odd\", i);\n      }\n\n      if (isPrime(i)) { \n          println!(\"{} is prime\", i);\n      }\n\n      println!(\"{}\", i);\n  }\n```\n\nNote that passing a reference value to a function may result in an error from the Rust compiler. Follow the suggestions and build the code again. 
\n\n```shell\n$ cargo build && cargo run \n\nerror[E0308]: mismatched types\n   --> src/main.rs:112:21\n    |\n112 |         if (isPrime(i)) { \n    |             ------- ^ expected `i32`, found `&{integer}`\n    |             |\n    |             arguments to this function are incorrect\n    |    \nnote: function defined here\n   --> src/main.rs:136:4\n    |\n136 | fn isPrime(x: i32) -> bool {\n    |    ^^^^^^^ ------\nhelp: consider dereferencing the borrow\n    |\n112 |         if (isPrime(*i)) { \n    |                     +\n```\n\nAn example solution is provided [here](https://gitlab.com/gitlab-de/use-cases/ai/learn-with-ai/learn-rust-ai/-/blob/main/solutions/functions.rs).\n\n**Async exercise**: Create a function `containsString` and test it with an array of string values, and a string to search for, in a loop. The screenshot shows a potential implementation. \n\n![containsString function, and vector with string elements to test, suggesting its usage in the main function](https://about.gitlab.com/images/blogimages/learn-rust-with-ai-code-suggestions-getting-started/learn_rust_ai_gitlab_code_suggestions_function_implemented_then_suggested_in_main.png){: .shadow}\n\n### Testing \nWhile learning programming, adopt [testing](https://doc.rust-lang.org/rust-by-example/testing.html) into your process. This can be unit tests for functions, documentation testing, and integration testing. Practice with testing the previously created functions `isEven`, `isOdd`, and `isPrime`. 
Start by typing
Until then, here are a few recommendations for practicing async.\n\n### Async learning exercises\n- [`std misc`](https://doc.rust-lang.org/rust-by-example/std_misc.html) provides asynchronous operations with threads, channels and file I/O\n- Book: [Hands-on Rust: Effective Learning through 2D Game Development and Play](https://pragprog.com/titles/hwrust/hands-on-rust/)\n- Tutorial: [Are we game yet?](https://arewegameyet.rs/resources/tutorials/)\n- Use case: [Web server with rocket.rs](https://rocket.rs/v0.5-rc/guide/quickstart/#running-examples)\n\nHere are a few more exercises and ideas for additional learning:\n1) The Rust compiler might have created warnings that need to be addressed. Follow the instructions from the `cargo build` commands and check the Git diff. \n\n```\ncargo fix --bin \"learn-rust-ai\"\n\ngit diff \n```\n\n2) [Error handling](https://doc.rust-lang.org/rust-by-example/error.html) is required when failure is detected, and the caller should know. Some errors can be recovered from within the application, others require program termination. \n\n3) The [`std` library](https://doc.rust-lang.org/rust-by-example/std.html) extends primitive types and makes programming more efficient. \n\n### Share your feedback\nWhich programming language are you learning or considering learning? Start a new topic on our [community](/community/) forum or Discord and share your experience.  
\n\nIf you are using Code Suggestions Beta with [GitLab Duo](/gitlab-duo/) already, please share your thoughts and feedback [in this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/405152).\n",[835,1105,9,940,704],{"slug":3270,"featured":6,"template":684},"learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started","content:en-us:blog:learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started.yml","Learning Rust With A Little Help From Ai Code Suggestions Getting Started","en-us/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started.yml","en-us/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started",{"_path":3276,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3277,"content":3283,"config":3288,"_id":3290,"_type":13,"title":3291,"_source":15,"_file":3292,"_stem":3293,"_extension":18},"/en-us/blog/mastering-gitlab-admin-tasks-with-gitlab-duo-chat",{"title":3278,"description":3279,"ogTitle":3278,"ogDescription":3279,"noIndex":6,"ogImage":3280,"ogUrl":3281,"ogSiteName":669,"ogType":670,"canonicalUrls":3281,"schema":3282},"Mastering GitLab admin tasks with GitLab Duo Chat","Learn how to use Chat to streamline administrative tasks on self-managed instances, improving efficiency and problem-solving capabilities.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666405/Blog/Hero%20Images/GitLab_Duo_Blog_Hero_1800x945_r2_B__1_.png","https://about.gitlab.com/blog/mastering-gitlab-admin-tasks-with-gitlab-duo-chat","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Mastering GitLab admin tasks with GitLab Duo Chat\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2024-08-09\",\n      }",{"title":3278,"description":3279,"authors":3284,"heroImage":3280,"date":3285,"body":3286,"category":702,"tags":3287},[2332],"2024-08-09","As a GitLab 
administrator managing a self-hosted instance, you often face complex challenges that require innovative solutions. Enter [GitLab Duo Chat](https://about.gitlab.com/gitlab-duo/) – your AI-powered assistant that can significantly streamline your administrative tasks. In this article, we'll explore how you can leverage GitLab Duo Chat to solve intricate problems efficiently, using a real-world example of updating group memberships across multiple groups.\n\n## The power of GitLab Duo Chat for admins\n\nGitLab Duo Chat is more than just conversational AI; it's a powerful tool that can assist with complex administrative tasks. By providing context-aware suggestions and code snippets, Chat can help you navigate through GitLab's extensive feature set and underlying architecture.\n\n### Case study: Updating group memberships\n\nLet's dive into a scenario where an admin needs to add an administrator user to multiple [groups](https://docs.gitlab.com/ee/user/group/) – in this case, 50,000 groups. This task, while conceptually simple, can be daunting due to its scale.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/dBd957MK_DE?si=JYTzdRjVQHyB6rpl\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Step-by-step problem-solving with GitLab Duo Chat\n\n* **Initial approach:** Our first instinct might be to use GitLab's API, but with 50,000 groups, this could put significant strain on the system.\n* **Pivoting to the Rails console:** Instead, we can use the Rails console for a more efficient solution. 
Here's where Chat comes in handy.\n* **Crafting the solution:** Let's walk through the process of creating a Ruby script to accomplish our task:\n\n``` python\n\ndef add_admin_to_groups(parent_group, admin_user)\n     unless parent_group.members.exists?(user_id: admin_user.id)\n       parent_group.add_owner(admin_user)\n       puts \"Added #{admin_user.name} to #{parent_group.name}\"\n     end\n\n    parent_group.descendants.each do |subgroup|\n      add_admin_to_groups(subgroup, admin_user)\n    end\n  end\n\n# Usage\n\nadmin_user = User.find_by(username: 'mr_admin')\nparent_group = Group.find_by(path: 'top_group')\nadd_admin_to_groups(parent_group, admin_user)\n```\n\n* **Iterative refinement:** Through conversation with Chat, we refined this script, addressing issues like method availability and parameter requirements.\n\n### Key takeaways\n\n#### Non-deterministic responses\n\nGitLab Duo Chat may provide different answers to the same question, encouraging exploration of multiple solutions. As an example:\n\nAn admin asks, ***How can I improve CI/CD pipeline performance?***\n\nChat's first response could be:\n***Consider using GitLab CI/CD caching to speed up subsequent pipeline runs. Here's an example .gitlab-ci.yml snippet:***\n\n```\ncache:\n  key: ${CI_COMMIT_REF_SLUG}\n  paths:\n    - node_modules/    \n ```\n\n***This caches the `node_modules` directory, reducing build times for Node.js projects.***\n\nWhen asked again, Chat's second response could be: ***To improve CI/CD performance, try parallelizing your jobs. Here's an example:***\n\n```\ntest:\n  parallel: 3\n```\n\n***This runs your test job across three parallel runners, potentially reducing overall pipeline duration.***\n\n### Error interpretation and debugging\n\nChat can help interpret error messages and suggest fixes. As an example, an admin encounters an error: \"fatal: reference is not a tree: 8a7d460\". They could ask Chat: ***I'm seeing this Git error. 
What does it mean and how can I fix it?***\n\nChat could respond with:\n\n***This error typically occurs when Git can't find a specific commit. It could be due to a corrupted repository or incomplete fetch. Try these steps:***\n* ***Run `git fsck` to check for corruption.***\n* ***If that doesn't work, try git fetch --all --prune to update all remotes.***\n* ***If the issue persists, you might need to re-clone the repository.***\n\n***Always back up your repository before attempting these fixes.***\n\n### Code optimization and refactoring\n\nChat can assist in optimizing and refactoring code for better performance.\n\nAn example: An admin wants to optimize a Gitaly configuration for a large instance. They could ask Chat: ***How can I optimize Gitaly for an instance with 10,000+ repositories?***\n\nChat could respond with: ***For a large instance, consider this optimized Gitaly configuration:***\n\n```\n[concurrency]\nrpc_concurrency = 100\n\n[rate_limiting]\nrpc_rate_limit = 100\n\n[cgroups]\nmountpoint = '/sys/fs/cgroup'\nhierarchy_root = 'gitaly'\nmemory_bytes = 30000000000  # Adjust based on available memory\ncpu_shares = 1024\n```\n***This configuration increases concurrency, implements rate limiting to prevent overload, and uses cgroups (control groups) to manage resource allocation. Adjust values based on your specific hardware and needs.***\n\n## Best practices for using GitLab Duo Chat in admin tasks\n\n### Formulate queries clearly and provide context\n\nWhen using Chat, the quality of the response largely depends on the quality of your query. Clear, detailed questions that provide relevant context will yield more accurate and helpful answers.\n\nPoor query: ***Why is GitLab slow?***\n\nThis query lacks specifics and context, making it difficult for Chat to provide a targeted response. \n\nA better query would be: ***Our GitLab instance with 5,000 users and 3,000 projects is experiencing slow response times, especially during peak hours (9-11 AM EST). 
CPU usage on the application servers spikes to 90%. How can we diagnose and address this?***\n\nThis improved query provides crucial details:\n\n* scale of the instance (5,000 users, 3,000 projects)\n* nature of the problem (slow response times)\n* timing of the issue (peak hours, 9-11 AM EST)\n* observed symptoms (90% CPU spike)\n\nWith this information, Chat can provide more targeted advice.\n\nAn even better query would be: ***We're running GitLab 15.8.3 on a 3-node cluster (8 vCPUs, 32GB RAM each) with a separate PostgreSQL 13 database and Redis 6.2 instance. Our instance hosts 5,000 users and 3,000 projects. We're experiencing slow response times (average 5s, up from our usual 1s) during peak hours (9-11 AM EST), primarily affecting merge request creation and pipeline initiation. CPU usage on the application servers spikes to 90%, while database CPU remains under 60%. Gitaly CPU usage is around 70%. We've already increased Puma workers to 8 per node. What additional diagnostics should we run and what potential solutions should we consider?***\n\nThis query provides an extensive context, including:\n* GitLab version and infrastructure details\nspecific performance metrics (response time increase)\n* affected operations (merge requests, pipelines)\n* resource usage across different components\n* steps already taken to address the issue\n\nBy providing this level of detail, you enable Chat to:\n* understand the full scope of your environment\n* identify potential bottlenecks more accurately\n* suggest relevant diagnostic steps\n* propose solutions tailored to your specific setup\n\nAvoid recommending steps you've already taken.\n\nRemember, while GitLab Duo Chat is powerful, it's not omniscient. The more relevant information you provide, the better it can assist you. 
By following these guidelines, you'll get the most out of your interactions with Chat, leading to more effective problem-solving and administration of your GitLab instance.\n\n### Use GitLab Duo Chat's suggestions as a starting point and refine incrementally\n\nChat is an excellent tool for getting started with complex tasks, but it's most effective when used as part of an iterative process. Begin with a broad question, then use Chat's responses to guide your follow-up questions, gradually refining your understanding and solution.\n\n#### Initial query\n\nAdmin: ***How can I set up Geo replication for disaster recovery?***\n\nChat might respond with a basic setup guide, covering:\n- prerequisites for Geo setup\n- steps to configure the primary node\n- process for adding a secondary node\n- initial replication process\n\nThis provides a foundation, but complex setups like Geo often require more nuanced understanding. Here's how you might refine your queries:\n\n**- Follow-up Query 1**\n\nAdmin: ***How do I handle custom data in Geo replication?***\nThis question addresses a specific concern not covered in the initial setup. \n\n**- Follow-up Query 2**\n\nAdmin: ***What's the best way to test failover without disrupting production?***\n\nThis query focuses on a critical operational concern. \n\n**- Follow-up Query 3**\n\nAdmin: ***Can you help me create a runbook for Geo failover?***\n\nThis final query aims to consolidate the gathered information into a practical guide. The benefits of this incremental approach:\n\n1. By breaking down the complex topic of Geo replication into smaller, focused queries, you gain a more thorough understanding of the subject.\n2. Each follow-up question allows you to address specific concerns relevant to your environment, resulting in a more customized solution.\n3. The progression from setup to testing to creating a runbook ensures that you're not just understanding the theory, but also preparing for real-world implementation.\n4. 
The step-by-step process of refining your queries helps in better retention of the information, as you're actively engaging with the content.\n5. Follow-up questions often reveal aspects of the task you might not have initially considered, leading to a more robust final solution.\n\n#### Best practices for incremental refinement\n\n- Start with broad questions to establish a foundation.\n- Use Chat's responses to identify areas that need more clarification or detail.\n- Ask about specific scenarios or edge cases relevant to your environment.\n- Seek practical advice on implementation, testing, and maintenance.\n- Use your growing understanding to formulate increasingly specific and complex queries.\n- Periodically summarize your understanding and ask Chat to validate or correct it.\n\nRemember, while GitLab Duo Chat is a powerful tool, it's most effective when combined with your own critical thinking and domain knowledge. Use it to guide your learning and decision-making process, but always validate important information and test solutions in a safe environment before applying them to production systems.\n\n### Potential applications for self-managed GitLab instances\n\n#### User management at scale\n\nAdmin: ***We need to onboard 1,000 new users from an acquired company. The data is in a CSV file. How can we automate this process? I am an Admin, can I use a Python script for this?***\n\nChat: ***You can use GitLab's API for bulk user creation. 
Here's a Python script to get you started:***\n\n```python\nimport requests\nimport csv\n\ndef create_user(username, email, name, password):\n    url = 'https://gitlab.example.com/api/v4/users'\n    headers = {'PRIVATE-TOKEN': 'your_access_token'}\n    data = {\n        'username': username,\n        'email': email,\n        'name': name,\n        'password': password,\n        'skip_confirmation': True\n    }\n    response = requests.post(url, headers=headers, data=data)\n    return response.status_code == 201\n\nwith open('new_users.csv', 'r') as f:\n    reader = csv.reader(f)\n    for row in reader:\n        username, email, name, password = row\n        if create_user(username, email, name, password):\n            print(f\"Created user: {username}\")\n        else:\n            print(f\"Failed to create user: {username}\")\n```\n***This script reads user data from a CSV file and creates users via the GitLab API. Remember to handle errors and rate limiting for large batches.***\n\n#### Project and group administration\n\nAdmin: ***We need to implement a standardized project structure across our organization. How can we automate this? Assume I am an admin. Write me a script to do this via the API.***\n\nChat: ***You can use GitLab's API to create a project template and then apply it to new projects. 
Here's a bash script to create a new project with a standard structure:***\n\n```\n#!/bin/bash\n\nTOKEN=\"your_access_token\"\nGROUP_ID=\"your_group_id\"\n\ncreate_project() {\n  curl --header \"PRIVATE-TOKEN: $TOKEN\" \\\n  --data \"name=$1&namespace_id=$GROUP_ID\" \\\n  \"https://gitlab.example.com/api/v4/projects\"\n}\n\nadd_file() {\n  curl --request POST --header \"PRIVATE-TOKEN: $TOKEN\" \\\n  --form \"branch=main\" \\\n  --form \"content=$2\" \\\n  --form \"commit_message=Add $1\" \\\n  \"https://gitlab.example.com/api/v4/projects/$3/repository/files/$1\"\n}\n\nPROJECT_ID=$(create_project \"New-Standardized-Project\" | jq '.id')\n\nadd_file \"README.md\" \"# Welcome to our standardized project\" $PROJECT_ID\nadd_file \".gitlab-ci.yml\" \"stages:\\n  - build\\n  - test\\n  - deploy\" $PROJECT_ID\nadd_file \"CONTRIBUTING.md\" \"## How to contribute to this project\" $PROJECT_ID\n\necho \"Project created with ID: $PROJECT_ID\"\n```\n\n***This script creates a new project with a README, a basic CI/CD configuration, and a CONTRIBUTING guide. You can expand this to include more files or even clone a template repository.***\n\n#### Performance optimization\n\nAdmin: ***Our merge request creation process is slow. How can we diagnose and improve this? Assume I am an admin.***\n\nChat: ***As an instance admin, there are a few potential areas you can investigate to diagnose and improve the merge request creation process:\nDatabase Performance: The provided quotes show some example SQL queries related to merge request metrics. Slow database queries can significantly impact merge request creation performance. You should review the database performance, indexes, and query plans for any queries involved in the merge request creation process.***\n\n- ***Background jobs: Merge request creation may involve background jobs (e.g., for data processing, notifications, etc.). If these jobs are backed up or running slowly, it can impact the overall merge request creation time. 
Check the background job queues and workers.***\n- ***Code optimization: Analyze the code responsible for merge request creation to identify any potential bottlenecks or inefficiencies. This could include things like unnecessary database queries, inefficient algorithms, or lack of caching.***\n- ***Resource utilization: Monitor the resource utilization (CPU, memory, disk I/O) of the GitLab application servers during merge request creation. High resource utilization can lead to performance degradation.***\n- ***Network performance: If your GitLab instance is distributed across multiple nodes or servers, network performance between these components can impact merge request creation times. Check for any network bottlenecks or latency issues.***\n- ***Caching: Ensure that appropriate caching mechanisms are in place and functioning correctly. Inefficient or incorrect caching can lead to unnecessary overhead during merge request creation.***\n- ***Load testing: Perform load testing to simulate real-world scenarios and identify any performance bottlenecks or scalability issues that may arise under high load.***\n- ***Logging and monitoring: Enable detailed logging and monitoring for the merge request creation process. This can help identify specific areas of slowness or bottlenecks.***\n\n***To effectively diagnose and improve the merge request creation process, you'll likely need to combine multiple techniques, such as profiling, code analysis, database optimization, and load testing. Additionally, consulting the GitLab documentation and community resources can provide valuable insights and best practices specific to your GitLab version and configuration.***\n\n#### Troubleshooting complex issues\n\nAdmin: ***We're experiencing intermittent 502 errors on our GitLab instance. How can we troubleshoot this? 
I am an instance admin, how can I check the logs to find answers?***\n\nChat: ***To troubleshoot intermittent 502 errors on your GitLab instance as an admin, you can follow these steps:***\n\n***1. Check the main GitLab logs while reproducing the issue by running:***\n\n```\nsudo gitlab-ctl tail | tee /tmp/\u003Ccase-ID-and-keywords>.log\n```\n\n***2. Press Ctrl+C to stop tailing the logs.***\n\n***3. Run GitLabSOS to gather broader diagnostics about your instance:***\n\n```\nsudo /opt/gitlab/embedded/bin/gitlab-sos\n```\n\n***4. Use the fast-stats tool to analyze performance statistics from the logs:***\n\n```\nsudo /opt/gitlab/embedded/bin/fast-stats /var/log/gitlab/gitlab-rails/production.log\n```\n\n***5.Look for relevant log entries using the correlation ID that is present in most requests.***\n\n***6. Check the Pages logs (gitlab_pages_error.log) for any specific errors related to Pages communication issues with the GitLab API or loopback timeouts if using an AWS Network Load Balancer.***\n\n***7. Ensure your gitlab-secrets.json file is synced across all GitLab Pages instances if using `domain_config_source=auto`.***\n\n***8. If using systemd, configure GitLab Pages to listen on an explicit IP and port to avoid /tmp being cleaned up.***\n\n## Get started with GitLab Duo Chat\n\nGitLab Duo Chat is a powerful ally for administrators of self-managed GitLab instances. By leveraging its capabilities, you can tackle complex tasks more efficiently, learn new techniques, and ultimately become a more effective GitLab administrator.\n\nWe encourage you to experiment with Chat in your administrative workflows. 
Remember to use it responsibly and always verify the solutions it provides.\n\n> [Try GitLab Duo free for 60 days](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/).\n\n### Resources\n- [GitLab Duo documentation](https://docs.gitlab.com/ee/user/gitlab_duo/)\n- [GitLab Rails Console Cheat Sheet](https://docs.gitlab.com/ee/administration/operations/rails_console.html)\n- [GitLab API documentation](https://docs.gitlab.com/ee/api/)\n- [10 best practices for using AI-powered GitLab Duo Chat](https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat/)\n- [GitLab Duo Chat 101: Get more done on GitLab with our AI assistant](https://about.gitlab.com/blog/gitlab-duo-chat-101-get-more-done-on-gitlab-with-our-ai-assistant/)\n",[704,9,478,680,678],{"slug":3289,"featured":90,"template":684},"mastering-gitlab-admin-tasks-with-gitlab-duo-chat","content:en-us:blog:mastering-gitlab-admin-tasks-with-gitlab-duo-chat.yml","Mastering Gitlab Admin Tasks With Gitlab Duo Chat","en-us/blog/mastering-gitlab-admin-tasks-with-gitlab-duo-chat.yml","en-us/blog/mastering-gitlab-admin-tasks-with-gitlab-duo-chat",{"_path":3295,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3296,"content":3302,"config":3307,"_id":3309,"_type":13,"title":3310,"_source":15,"_file":3311,"_stem":3312,"_extension":18},"/en-us/blog/memory-safe-vs-unsafe",{"title":3297,"description":3298,"ogTitle":3297,"ogDescription":3298,"noIndex":6,"ogImage":3299,"ogUrl":3300,"ogSiteName":669,"ogType":670,"canonicalUrls":3300,"schema":3301},"How to secure memory-safe vs. 
manually managed languages","Learn how GitLab reduces source code risk using scanning, vulnerability management, and other key features.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749672878/Blog/Hero%20Images/securityscreen.jpg","https://about.gitlab.com/blog/memory-safe-vs-unsafe","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to secure memory-safe vs. manually managed languages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2023-03-14\",\n      }",{"title":3297,"description":3298,"authors":3303,"heroImage":3299,"date":3304,"body":3305,"category":814,"tags":3306},[1767],"2023-03-14","\nThe National Security Agency (NSA) has published an executive summary showcasing the \nrisk of using [manually managed languages](https://en.wikipedia.org/wiki/Manual_memory_management) over [memory-safe languages](https://en.wikipedia.org/wiki/Garbage_collection_(computer_science)) in application\ndevelopment. Manual memory management may introduce major bugs and security risks into your application if\nthe memory is managed incorrectly.\n\nSecurity bugs introduced by manually managed languages can be catastrophic to the function of the\napplication, as well as the information contained in the application. 
These bugs may cause\nperformance slowdowns, application crashes, remote code execution, information leakage, and\nsystem failures.\n\nBugs that may be introduced include the following:\n\n* [Memory leak](https://en.wikipedia.org/wiki/Memory_leak): Memory no longer being used is not released, which reduces the amount of available memory.\n* [Buffer overflow](https://en.wikipedia.org/wiki/Buffer_overflow): Overwriting of memory locations adjacent to a buffers boundary.\n* [Segmentation fault](https://en.wikipedia.org/wiki/Segmentation_fault): An application tries to access a restricted piece of memory.\n* [Wild pointers](https://en.wikipedia.org/wiki/Dangling_pointer): Pointer points to the memory which has been deallocated.\n* [Undefined behavior](https://en.wikipedia.org/wiki/Undefined_behavior): An application with unpredictable behavior.\n\nTo provide some insight on the prevalance of risk introduced by manually managed languages, Microsoft\nrevealed that within the span of 12 years, [70% of their vulnerabilities](https://github.com/Microsoft/MSRC-Security-Research/blob/master/presentations/2019_02_BlueHatIL/2019_01%20-%20BlueHatIL%20-%20Trends%2C%20challenge%2C%20and%20shifts%20in%20software%20vulnerability%20mitigation.pdf) were due to mismanagement of memory.\nGoogle reported that there was a [similar percentage of vulnerabilities](https://security.googleblog.com/2021/09/an-update-on-memory-safety-in-chrome.html) introduced by memory safety\nviolations within the Chrome browser.\n\nAll these vulnerabilities can be exploited by malicious actors who may compromise a device, potentially leading to a compromise of a larger network infrastructure. With this large risk presented by mismanaged memory,\nthe NSA advises organizations to consider using memory-safe languages wherever possible and providing\nmechanisms to harden applications built with manually managed languages.\n\n## Memory-safe languages vs. 
manually managed languages\n\nA memory-safe language is a language where memory allocation and garbage collection are abstracted away from\nthe developer and handled by the programming language itself. These languages include **Python**, **Java**, and **Go**,\nto name a few.\n\nIn contrast, manually managed languages provide a developer with full control over the system memory (with some exceptions).\nThe most popular manually managed languages are **C** and **C++**.\n\nEach language type has a purpose and use case. There are times when a memory-safe language is recommended, but there are also\ntimes when it may not suit the application requirements.\n\nBelow is a list of some pros and cons of each language type:\n\n| Language type | Pros | Cons |\n| ------------- | ---- | ---- |\n| Memory safe | Memory mangement abstracted from developer, reduced risk of memory errors | Reduced efficency/performance, unpredictable garbage collection |\n| Manually managed | Enhanced efficency/performance, no garbage collection overhead | Prone to memory-related failures |\n\nManually managed languages provide the developer with more power, but also introduce a greater amount\nof risk, so they should only be used where required.\n\n## Memory 'unsafe' language security scanning\n\nAlthough many organizations are promoting the use of memory-safe languages vs. manually managed ones, it is unrealistic\nto remove manually managed languages from a developer's toolbox. Therefore, developers must get ahead of all the\nbugs/vulnerabilities that may be introduced. This can be done by scanning application source code.\n\nGitLab supports various scanners for memory-unsafe languages. 
Below you can see the scanners\nused for C and C++:\n\n| Language | Scanners |\n| -------- | -------- |\n| C | [Semgrep with GitLab-managed rules](https://gitlab.com/gitlab-org/security-products/analyzers/semgrep) |\n| C++ | [Flawfinder](https://gitlab.com/gitlab-org/security-products/analyzers/flawfinder) |\n\nNow let's take a look at how [GitLab's static application security testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/) allows us to find and resolve vulnerabilities.\nBelow is an application which doesn't crash, but may generate unexpected behavior:\n\n```C\n#include \u003Cstdio.h>\n\nint main()\n{  \n  char msg[5] = \"Hello\";\n\n  /* Add exclamation, to a position that doesn't exist*/\n  msg[8] = '!';\n\n  /* print each letter 1 by 1 */\n  /* Notice we are going further than the length of the array */\n  int i;\n  for (i = 0; i \u003C 10; ++i)\n  {\n    printf(\"%i: %c \\n\", i, msg[i]);\n  }\n\n  return 0;\n}\n```\n\nWhen running the GitLab SAST scanner, the vulnerability is detected and a solution is provided:\n\n![GitLab SAST scanner results](https://about.gitlab.com/images/blogimages/memory-safe-vs-manually-managed/CWE_120.png)\n\nIt shows you need to **perform bounds checking, use functions that limit length**, or\n**ensure that the size is larger than the maximum possible length.** You can also see the\n[CWE](https://cwe.mitre.org/data/definitions/120.html) for more information on how the system may be impacted.\nNote that vulnerabilities are actionable. These actions include the ability to dismiss a vulnerability and add\nadditional information for the security team to review, or a confidential issue can be created for review.\n\nThese scanners allow [DevSecOps](/topics/devsecops/) teams to resolve security issues before code makes it into production and safeguard their application\nfrom memory issues. 
Note that not all memory issues are easily detected due to the nature of manual memory management.\nTherefore, it is also important to add unit tests, fuzzing, and run checks using the GitLab CI to further ensure the reliability\nand security of your application.\n\nThe following applications contain examples of creating a GitLab pipeline for C applications:\n- [General Build, SAST, and Run](https://gitlab.com/tech-marketing/devsecops/initech/other/cul8r)\n- [Coverage-based fuzzing](https://gitlab.com/gitlab-org/security-products/demos/coverage-fuzzing/c-cpp-fuzzing-example)\n\n## Memory-safe language security scanning\n\nAs more developers move to memory-safe languages, it is important that the tools\nused to prevent vulnerabilities support these languages as well. GitLab provides a rich feature set for\nsecuring application source code, especially for memory-safe languages.\n\nBelow is a table of some the popular languages GitLab supports. To see the full list, visit the\n[GitLab SAST Language/Framework Support](https://docs.gitlab.com/ee/user/application_security/sast/#supported-languages-and-frameworks) page.\n\n| Language | Scanners |\n| -------- | -------- |\n| Python | [Semgrep with GitLab-managed rules](https://gitlab.com/gitlab-org/security-products/analyzers/semgrep), [Bandit](https://gitlab.com/gitlab-org/security-products/analyzers/bandit) |\n| Go | [Semgrep with GitLab-managed rules](https://gitlab.com/gitlab-org/security-products/analyzers/semgrep), [GoSec](https://gitlab.com/gitlab-org/security-products/analyzers/gosec) |\n| Java | [Semgrep with GitLab-managed rules](https://gitlab.com/gitlab-org/security-products/analyzers/semgrep), [SpotBugs with the find-sec-bugs plugin](https://gitlab.com/gitlab-org/security-products/analyzers/spotbugs), [MobSF (beta)](https://gitlab.com/gitlab-org/security-products/analyzers/mobsf) |\n| JavaScript | [Semgrep with GitLab-managed rules](https://gitlab.com/gitlab-org/security-products/analyzers/semgrep), [ESLint 
security plugin](https://gitlab.com/gitlab-org/security-products/analyzers/eslint) |\n| Ruby | [brakeman](https://gitlab.com/gitlab-org/security-products/analyzers/brakeman) |\n\nGitLab uses a mix of open source tools developed in-house as well as commonly used tools within the open source community.\nIt is important to note that GitLab's security research team creates custom rules to better reduce false positives as well\nas enhance the number of vulnerabilities found.\n\nHere are some Python functions, which can be exploited and then data can be obtained via [SQL injection](https://owasp.org/www-community/attacks/SQL_Injection):\n\n```python\ndef select_note_by_id(conn, id=None, admin=False):\n   query = \"SELECT id, data FROM notes WHERE secret IS FALSE\"\n   cur = conn.cursor()\n\n   # Admin doesn't have search by id function, since only used in the UI\n   if admin:\n       query = \"SELECT id, data, ipaddress, hostname, secret FROM notes\"\n\n   if id:\n       if admin:\n           query = query + \" WHERE id = %s\" % id\n       else:\n           # NOTE: Vulnerable to SQL injection, can get secret notes\n           # by adding 'OR 1=1', since not parameterized\n           query = query + \" AND id = %s\" % id\n\n   try:\n       cur.execute(query)\n   except Exception as e:\n       note.logger.error(\"Error: cannot select note by id - %s\" % e)\n\n   allItems = cur.fetchall()\n   conn.close()\n\n   if len(allItems) == 0:\n       return []\n\n   return allItems\n```\n\nWhen running the GitLab SAST scanner, you can see the SQL injection vulnerability is detected. 
A solution\nis provided with the line of code affected as well as identifiers that provide more information on how the\n[CWE](https://cwe.mitre.org/data/definitions/89.html) can affect your system.\n\n![SQL Injection and solution](https://about.gitlab.com/images/blogimages/memory-safe-vs-manually-managed/CWE_89.png)\n\nNotice that there is also training to enable developers to understand the vulnerability and how\nit can be exploited, and to make them more security-aware.\n\n## Other application attack vectors\n\nUsing a memory-safe language along with a SAST scanner reduces vulnerability risk, but there are more attack vectors to consider, including configurations, infrastructure, and dependencies. This is why it is important to scan all aspects of your application.\n\nGitLab offers the following scanners to help you achieve full coverage:\n\n| Scanner type | Description |\n| ------------ | ----------- |\n| [Dynamic application security testing (DAST)](https://docs.gitlab.com/ee/user/application_security/dast/) | Examines applications for vulnerabilities like these in deployed environments. |\n| [Infrastructure as code (IaC) scanning](https://docs.gitlab.com/ee/user/application_security/iac_scanning/) | Scans your IaC (Terraform, Ansible, AWS CloudFormation, Kubernetes, etc.) configuration files for known vulnerabilities. |\n| [Dependency scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/) | Finds security vulnerabilities in your software dependencies. |\n| [Container scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/) | Scans your applications container images for known vulnerabilities. 
|\n| [License scanning - CycloneDX](https://docs.gitlab.com/ee/user/compliance/license_scanning_of_cyclonedx_files/index.html) | Capable of parsing and identifying over 500 different types of licenses and can extract license information from packages that are dual-licensed or have multiple different licenses that apply. |\n| [Secret detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/) | Scans your repository for secrets. |\n| [Coverage-guided fuzzing](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/) | Sends random inputs to an instrumented version of your application in an effort to cause unexpected behavior. |\n| [Web API fuzzing](https://docs.gitlab.com/ee/user/application_security/api_fuzzing/) | Sets operation parameters to unexpected values in an effort to cause unexpected behavior and errors in the API backend. |\n\nAside from full scanner coverage, it important to add guardrails to prevent vulnerable code from compromising a production environment.\nThis can be done by requiring approval from the security team for merging any code with vulnerabilities using [GitLab policies](https://docs.gitlab.com/ee/user/application_security/policies/).\n\nTo get started using these tools and more, check out the [GitLab Application Security](https://docs.gitlab.com/ee/user/application_security/) page.\nIt's as simple as signing up for GitLab Ultimate and adding some templates to your .gitlab-ci.yml.\n\n## Managing vulnerabilities of all types\n\nAlthough we can find and address vulnerabilities before they make it into production, it is not possible to\neliminate all risk. This is why it is important to be able to assess the security posture of your project or\ngroup of projects.\n\nFor this, GitLab provides [Vulnerability Reports](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/), which allow you to manage and triage vulnerabilities\nwithin the main branch of the application. 
You can sort through all the vulnerabilities for a project or\ngroup of projects using a variety of different criteria.\n\n![Vulnerability report screenshot](https://about.gitlab.com/images/blogimages/memory-safe-vs-manually-managed/vulnerability_report.png)\n\nClicking on a vulnerability sends you to its [Vulnerability Page](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/).\nThere you can review details on the vulnerability, manage its status, collaborate with other members of\nthe security team, as well as create confidential issues to assign to developers.\n\n![Vulnerability Page](https://about.gitlab.com/images/blogimages/memory-safe-vs-manually-managed/vulnerability_page.png)\n\n---\n\nThanks for reading! To learn more about available Security features, check out GitLab's [application security documentation](https://docs.gitlab.com/ee/user/application_security/)\nand get started securing your application today. You can also sign up for a [30-day free GitLab Ultimate trial](https://about.gitlab.com/free-trial/)\nand test the [Simple Notes Application](https://gitlab.com/tech-marketing/devsecops/initech/simple-notes), which contains a [full tutorial](https://tech-marketing.gitlab.io/devsecops/initech/simple-notes/) on getting started\nwith implementing and using many of GitLab's security features.\n\n## References\n\nBelow are some references used in this blog:\n\n* [The Federal Government is Moving on Memory Safety for Cybersecurity](https://www.nextgov.com/cybersecurity/2022/12/federal-government-moving-memory-safety-cybersecurity/381275/)\n* [Future of Memory Safety - Challenges and Recommendations](https://advocacy.consumerreports.org/wp-content/uploads/2023/01/Memory-Safety-Convening-Report-1-1.pdf)\n* [NSA Software Memory Safety Report](https://media.defense.gov/2022/Nov/10/2003112742/-1/-1/0/CSI_SOFTWARE_MEMORY_SAFETY.PDF)\n* [Memory Safety Wiki](https://en.wikipedia.org/wiki/Memory_safety)\n* [Manual Memory Management 
Wiki](https://en.wikipedia.org/wiki/Manual_memory_management)\n* [Unsafe Languages - University of Washington CS Lecture Notes](https://courses.cs.washington.edu/courses/cse341/04wi/lectures/26-unsafe-languages.html)\n* [GitLab SAST](https://docs.gitlab.com/ee/user/application_security/sast/)\n* [GitLab Application Security](https://docs.gitlab.com/ee/user/application_security/)\n* [GitLab Vulnerability Reports](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/)\n\n_Cover image by [Mohammad Rahmani](https://unsplash.com/@afgprogrammer?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/s/photos/C-programming?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)_",[814,835,9],{"slug":3308,"featured":6,"template":684},"memory-safe-vs-unsafe","content:en-us:blog:memory-safe-vs-unsafe.yml","Memory Safe Vs Unsafe","en-us/blog/memory-safe-vs-unsafe.yml","en-us/blog/memory-safe-vs-unsafe",{"_path":3314,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3315,"content":3320,"config":3326,"_id":3328,"_type":13,"title":3329,"_source":15,"_file":3330,"_stem":3331,"_extension":18},"/en-us/blog/migration-from-atlassian-bamboo-server-to-gitlab-ci",{"title":3316,"description":3317,"ogTitle":3316,"ogDescription":3317,"noIndex":6,"ogImage":2205,"ogUrl":3318,"ogSiteName":669,"ogType":670,"canonicalUrls":3318,"schema":3319},"Migrating from Bamboo Server to GitLab CI: Getting started","Theoretical reasoning and practical proposal on migrating an existing CI/CD infrastructure of some multi-component application from Bamboo Server to GitLab CI","https://about.gitlab.com/blog/migration-from-atlassian-bamboo-server-to-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to migrate Atlassian Bamboo Server's CI/CD infrastructure to GitLab CI, part one\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Ivan Lychev\"}],\n        \"datePublished\": \"2022-07-06\",\n      }",{"title":3321,"description":3317,"authors":3322,"heroImage":2205,"date":3323,"body":3324,"category":769,"tags":3325},"How to migrate Atlassian Bamboo Server's CI/CD infrastructure to GitLab CI, part one",[2727],"2022-07-06","\n\nWhen I faced a task of migrating from `Atlassian Bamboo Server` to `GitLab CI/CD`, I was not able to find any comprehensive information regarding something similar. So I designed a process on my own. This demo shows how to migrate a CI/CD structure for an existing multi-component application from a discontinued [Atlassian Bamboo Server](https://www.atlassian.com/migration/assess/journey-to-cloud) to [GitLab CI/CD](https://docs.gitlab.com/ee/index.html) (Community Edition).\n\nThe accompanying repository is https://gitlab.com/iLychevAD/ci-cd-for-a-multi-component-app.\n\nIn this first part of a two-part series, you will find a description of the current state of affairs - i.e., how the CI/CD has been organized within Bamboo Server, how the Bamboo Build and Deploy plans are designed for bootstrapping infrastructure and deploying the components of the application, and the architecture of the application itself.\n\nAnd in part two, we'll take a deeper look at the virtues of `GitLab CI/CD`.\n\n## Initial state\n\n(Note: This is not a description of some particular project but more a kind of compilation of several projects I worked on.)\n\nThe application solution allows the client to fulfill a particular business purpose (the nature of which is not relevant here and thus not specified) and consists of more than 50 discrete components (further referred to as `applications` or just `apps` or `components`). I refrain from calling them microservices as each of them looks more like a full-fledged application communicating with other siblings using REST API and messages in Kafka topics. 
Some of them expose a web UI to external or internal users and some are just utility parts serving the needs of other components or performing internal operations, etc.\n\nCode for each app is stored in its own Git repository (further just `repo`). So, a `multi-repo` approach is used for them. Each app may be written in different languages and packaged as one or several OCI-images for deployment.\n\nEach app repo looks like:\n```\n📦 \u003Csome-app-git-repo>\n ┣ 📂src \u003C-- application source code\n ┣ 📂docker-compose\n ┃ ┗ 📜docker-compose.yml \u003C-- analogue of K8s manifests\n ┗ 📜Dockerfile \u003C-- conventionally, \"Dockerfile\" name is used for OCI image specification file\n```\n\nFor running the applications, the client uses an outdated orchestration system (one from pre-Kubernetes epoch). So each app repo contains a Docker-compose compatible file describing deployment directives for that outdated orchestration system (in essence, similar to Kubernetes Deployment manifests). \n\nFor all of the build and deploy activities Atlassian Bamboo Server is used. \n\nSome details for those not familiar with the Bamboo Server - in an opinionated manner it explicitly separates so-called `build` pipelines and `deployment` pipelines. The former are supposed to build application code and produce some artifacts for further deployment (in our case those artifacts are OCI images uploaded to OCI registry and docker-compose.yml files referring to those images). The latter ones are supposed to take some particular set of artifacts and apply them to some particular `environment`. An `environment` (referred to `env` in the future for brevity) here is just an abstract deployment target characterized by a set of environment variables attached to it and exposed to the apps deployed into it. In reality, an `env` is implemented as a set of resources (virtual machines, databases, object storage locations, etc.) 
required by the applications.\n\nIn Bamboo, one `build` pipeline usually corresponds to one `deployment` pipeline so when the latter is started it just takes the artifacts from the attached `build` pipeline as input. \n\nThe client uses a `production` env, `preproduction` env, and numerous (up to several hundreds) so-called `staging` (short-lived) envs where different development teams and software engineers can test various combinations of the apps (here we assume that they have ~80-100 distinguish components of the application solution and several hundreds of software developers which gives a lot of possible combinations and requires so many `staging` envs).\n\nRoughly, a configuration of a `deploy` pipeline consists of a specification of the source artifacts (which are provided by the attached `build` pipeline as described earlier) and a specification of the set of envs where those artifacts (effectively, an application) can be deployed to.\n\nCurrent installation uses sophisticated dynamic generation of envs set for each app deployment pipeline. Roughly speaking, they have a central configuration file with the list of all existing envs where for each env a list of apps allowed to be deployed to it is denoted. Each time the file is modified (i.e., an env is created or deleted), the deployment pipelines are automatically being updated so as in the result each of them contains a list of envs corresponding for each app. You will have more idea about this aspect when you have looked at the implementation section later.\n\nIn the Bamboo UI this looks like:\n\n![envs_list_on_build_result_page](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/envs_list_on_build_result_page.png)\n\nHere you can see an application build result page where on the right-hand side under the `Included in deployment project` title you can see a list of envs into which you can deploy the application. 
(Keep in mind that besides `build` and `deployment` pipelines, the Bamboo also uses a notion of `releases` - this is just some kind of an intermediate entity that should be created out of a build result to make it possible to deploy that build into some env). The `cloud-with-upwards-arrow` button in the `Actions` column starts a corresponding `deploy` pipeline with automatically passing the link to a build result (in a form of a `release` entity in Bamboo terminology) and the name of the env next to which the button has been clicked (the procedure of how a list of envs is created for a `deploy` pipe is described above).\n\nA concept of a `release` is specific to Bamboo Server, though it provides some amenities. For example, on the Release details page you can see a list of envs where a release has been deployed to. On the `Commits` tab you can backtrack a release to the application code in a SVC. And the `Issues` tab shows attached Jira tickets.\n\n![bamboo_release_details](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/bamboo_release_details.png)\nRelease details page\n{: .note.text-center}\n\nAn env details page also enumerates releases history for this env (in scope of one particular application though as an env is specified for each deployment pipeline individually):\n\n![bamboo_env_details](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/bamboo_env_details.png)\nEnv details page \n{: .note.text-center}\n\nAnd upon clicking the `cloud-with-upwards-arrow` button the Bamboo shows diff of Jira tickets and commits in respect to the previous `release` (only if both releases are made from artifacts from the same Git branch):\n\n![deploy_launch_page](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/deploy_launch_page.png)\nDeploy launch page\n{: .note.text-center}\n\nSo, in general, the current path from source control to an env 
for each app looks like:\n\n![svc_to_env_path](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/svc_to_env_path.png)\n\nThe Build plans are triggered automatically upon Git commits or Git tags. Most of the Deployment plans are started by the project members manually when needed. Each Deploy plan contains a step that checks if a user who started the plan has permissions to deploy into an env (for example, only members of the team which owns an env are allowed to deploy to that env and the deployment to the production env is allowed only for a set of eligible project members).\n\n## The task\n\nThe task is to migrate the aforementioned design from Bamboo Server to `GitLab` while keeping a similar deployment scheme (leveraging GitLab's `Environments` feature).\n\nAlso the following should be considered:\n\n - team members (software engineers, quality assurance specialists) are supposed to be able to manage environments on their own in a user-friendly self-service manner.\n - there should not be any discrepancy in IaC for different environments (per `12-factor apps` best practices), i.e. for any kind of an environment, be it a development or production one, the same set of IaC (here - Terraform files) should be used.\n  - the core ideas and workflows established in the previous situation (implemented with Atlassian Bamboo) should be kept to make the migration smoother for the members of the projects (also sometimes referred to as just users). 
\n\n## Implementation\n\n### Implementation's GitLab groups\\projects structure\n\n```\n📦 \u003CGitLab root group>\n ┣ 📂 apps GitLab group\n ┃ ┣ 📃 app1 GitLab project\n ┃ ┣  ...\n ┃ ┗ 📃 appN GitLab project\n ┣ 📂 ci GitLab group\n ┃ ┣ 📃 library GitLab project\n ┃ ┗ 📃 oci-registry GitLab project\n ┗ 📂 infra GitLab group\n  ┣ 📃 environment-blueprints GitLab project\n  ┣ 📃 environment-set GitLab project\n  ┗ 📃 k8s-gitops GitLab project\n```\n\n*Description*:\n\nThe most important content is in the `ci/library` repo (the shared ci configs) and `environment-set` repo. The other repos don't require much attention: The `k8s-gitops` purpose is not implemented and the repo is empty, the `apps` group just imitates source code for some apps, and the `ci/oci-registry` serves a role of an OCI registry for the solution.\n\nThe `apps` GitLab group merely contains the apps source code per se. Each GitLab project in this group corresponds to one app. Each app repo is expected to contain the source code itself (in the `src` directory for example), a `k8s` directory with k8s manifests, and an OCI image specification file (traditionally often called `Dockerfile`). \n\nThe `ci` GitLab group contains the `ci/library` project that holds shared `.gitlab-ci.yaml` files used by other projects (in a manner similar to Jenkins' shared libraries) and the `ci/oci-registry` serves as an OCI-image registry for various images used by the demo project (it also contains a Git repository with gitlab-ci files to build some utility images with tools used in various pipelines). For simplicity, the latter stores all the images throughout all the projects of the demo, though it's clearly not the best choice for a real-life situation when different sets of images of a set of separate projects/registries should be created.\n\nThe `infra` group holds applications infrastructure creation related Git repositories:\n\nThe `infra/k8s-gitops` is mostly irrelevant to the topic of this demo. 
In this demo it's presumed that Kubernetes is used as a computation workload platform and when a k8s cluster is created for an environment all the k8s manifests are supposed to be put into this repo (where each branch corresponds to a single environment) to be consumed by a GitOps tool installed into the cluster.\n\nThe `infra/environment-blueprints` holds parametrized IaC templates describing all the resources required for a full-fleged environment. In this example, the Terraform is used as an IaC tool though the principles are similar for its analogs (CloudFormation, for instance). The blueprints are parametrized in such manner that in the defaults values they hold some sensible values (most likely set to different values depending on the kind of a environment they were used to bootstrap - for example, a production env and everything else). It's implied that there might coexist several versions of the blueprints (implemented by using Git branches or Git tags) so each environment (see the next paragraph about `infra/environment-set`) can explicitly specify which version it wants to use (in case of using Terraform by specifying Git reference in the module's `source` field).\n\nHere I would like once again to highlight a digression from the best practices. For simplicity in the `infra/environment-blueprints` repo all the parts of an environment are combined into one single Terraform module (or a workspace, or a Stack in CloudFormation's terminology). In that way all the resources are always updated or changed within a single `terraform apply` command, which is cumbersome for large infrastructures containing a lot of resources. For larger infrastructures it would be more manageable to split into disparate Terraform modules (or CloudFormation Stacks, or Azure ARM Resource Groups) and thus make it possible for the infrastructure to be changed/updated in parts according to which exact components of it have changed. 
This might raise another question - how to manage dependencies in between such parts if they are present? For that, we would use some kind of an external (in respect to the IaC tool itself) orchestration tool like AWS Step Functions... or even GitLab's DAG feature!\n\nFinally, the `infra/environment-set` project represents an actual expected state of resources for each environment (a branch corresponds to an environment). See the README.md file in the Git repo for details. In short, each branch here is meant to contain a `main.tf` file referring to some version of the blueprints in the `infra/environment-blueprints` project, a set of Terraform files with overrides for any default variables set in the blueprints modules and other utility files like with a list of users allowed to deploy to the environment (such a list is to be checked by the deployments job in the apps projects).\n\n### **Important!**\n\nWhile looking at the implementation keep im mind that this solution deliberately omits some crucial aspects of any project infrastructure like security or monitoring, just for the sake of keeping this solution manageable and comprehensible. Implementing security and monitoring aspects would make the solution cumbersome and much longer to prepare. That is also true for the `k8s-gitops` repository - it's implied that in a real-life solution this would actively participate in the deployment process and hold Kubernetes clusters state in a GitOps approach but currently, this repo is just a placeholder. In the practical guide later you will see a description of the process of controlling environments using different branches in the `infra/environment-set` project. Ideally, such a workflow should use Merge Requests though for simplicity this implementation skips using MRs.\n\nAnother important thing that's possible not clear in this solution is configuration management, i.e. 
how configuration settings unique to each environment are provided to the applications inside an environment. Well, given that our applications run within Kubernetes cluster and that the cluster state is placed into a dedicated repo (`k8s-gitops` in our case), the configuration settings situation is simple - for each app the Terraform files in the `infra/environment-blueprints` should output all the sensible configuration values for the resources (like S3 bucket names, RDS endpoint URLs, etc.). Then, using Terraform itself or some other tool to create/update an environment, an additional step would collect all those outputs, transform them into k8s ConfigMap manifests, and put them into the GitOps repo. \n\nFor the secrets, we can go several ways. The most simplistic (though not flexible and not easy for secret rotation) way is to use some kind of encryption at rest like Mozilla's SOPS so that the secrets are being encrypted when they are put into the GitOps repo and decrypted when deployed into K8s. Another (and better ?) way - do not store secrets at rest at all but use either a third-party tool like Hashicorp Vault (with dynamic secrets generation) or cloud native features like [AWS IAM Roles for Service Accounts](https://aws.amazon.com/blogs/containers/diving-into-iam-roles-for-service-accounts/).\n\n## Bootstrap the demo\n\nThe accompanying repository, https://gitlab.com/iLychevAD/ci-cd-for-a-multi-component-app, contains Terraform files that enable you to install a copy of the demo structure into your own GitLab account to see it in action:\n\n`*.tf` files in the root directory and in the `tf_modules` directory describe the structure and configuration of the GitLab projects and groups. In the `repo_content` directory there is a content for the GitLab repositories in the projects. The repositories are filled with those files by the Terraform scripts.\n\nThe demo was tested with GitLab Community Edition `15.0.0-pre revision 4bda1cc84df`. 
The Terraform scripts do not create any real resources but just imitate them using `null_resource` and `local-exec`.\n\nThe bootstrapping process is conducted inside a container image (see the steps below) so it's platform-agnostic and in terms of tools all you need to spin up the demo is some containerization engine installed on your PC (i.e., Docker, Podman, etc).\n\n**Steps**:\n\n1. In the GitLab web UI manually create a root group to bootstrap the demo into (see `root_gitlab_group.tf` for a web-link why it's not possible to automate). Notice its ID - you need to provide it at the next step.\n\n2. Clone this repository.\n    Download an official Hashicorp's Terraform image and enter its interactive shell. All the further commands are supposed to be performed inside that shell:\n    \n    ```\n    docker run --rm -it --name ci-cd-for-a-multi-component-app \\\n      -e TF_VAR_gitlab_token=\u003Cyour GitLab account access token> \\\n      -v \u003Cpath to a location where to store ssh key-pairs on your PC>:/deploy-keys \\\n      -e TF_VAR_deploy_key_readwrite=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key.pub \\\n      -e TF_VAR_deploy_key_readonly=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key.pub \\\n      -e TF_VAR_root_gitlab_group_id=\u003CGitLab group ID> \\\n      -v \u003Cpath to the directory where you cloned the project into>:/repo -w /repo \\\n      --entrypoint /bin/sh \\\n      public.ecr.aws/hashicorp/terraform:1.1.9\n    ```\n    \n    Explanation:\n    \n    `-e TF_VAR_gitlab_token=\u003Cyour GitLab account access token>` - Terraform's `gitlab` provider needs a GitLab access token with sufficient permissions to spin up the demo. Provide it as a Bash environment variable - `TF_VAR_gitlab_token` (see `provider.tf`). 
It is also used by the `upload_avatar` module.\n    \n    `-v \u003Cpath to a location where to store ssh key-pairs on your PC>:/deploy-keys` - on the left-hand side here specify some directory on your local PC where you would like to store SSH keys needed for deploying the demo. Thus they are persisted even if you exit the container. See bullet point `4` for more details.\n    \n    `-e TF_VAR_deploy_key_readwrite=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key` and\n    \n    `-e TF_VAR_deploy_key_readonly=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key` - set the names for the aforementioned keys\n    \n    `-v \u003Cpath to the directory where you cloned the project into>:/repo -w /repo` - we mount the project content from your local PC into the running container. Note that because of that the Terraform local state file will be stored inside that directory on your PC.\n\n3. Install tools - bash and curl:\n    \n    ```\n    apk add bash curl\n \n    /bin/bash\n    ```\n\n4. Upon bootstrapping the demo, the repositories' content is pushed into (i.e. is restored) from the `repo_content` directory. (When the demo is destroyed the content of the repositories is automatically pulled (i.e. is saved) into the same directory - probably you dont need this but I implemented that for my convinience during creating the demo.) We need to create an SSH key pair and need it be the same throughout both phases. In this step we generate it:\n    \n    ```ssh-keygen -t rsa -N '' -f /deploy-keys/ci-cd-for-a-multi-component-app-deploy-key \u003C\u003C\u003C y```\n    \n    ```chmod 0400 /deploy-keys/ci-cd-for-a-multi-component-app-deploy-key```\n    \n    A trick used in `tf_modules/gitlab_project_with_restore_backup/main.tf` requires that in the host section of the SSH public key the location of the private key is specified (in a form like `filename@~/.ssh/\u003Cfilename>`). Otherwise the `tf_modules/gitlab_project_with_restore_backup` won't work. 
Edit accordingly:\n    \n    ```sed -i -e 's|^\\(ssh-rsa .*\\) \\(.*\\)$|\\1 ci-cd-for-a-multi-component-app-deploy-key@/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key|' /deploy-keys/ci-cd-for-a-multi-component-app-deploy-key.pub```\n\nNow you can proceed with bootstrapping the demo using Terraform:\n\nInitialize Terraform by `terraform init` so it installs all the providers.\n\nDeploy the demo with Terraform by `terraform apply`.\n\n**Notice**: During Terraform execution you may see an error:\n```\nError: POST https://gitlab.com/api/v4/projects/multi-component-app-root-group/ci/library/deploy_keys: 400 {message: {deploy_key.fingerprint_sha256: [has already been taken]}}\n\n```\nI believe this is some glitch in the GitLab API. To fix just run `terraform apply` once again until it shows no errors.\n\nAfter that you should see the following structure in GitLab in the root group:\n\n![gitlab_projects_tree](https://about.gitlab.com/images/blogimages/migration-from-atlassian-bamboo-server-to-gitlab-ci/gitlab_projects_tree.png)\n\nAll the projects should be filled with files from the `repo_content` directory.\n\nDo not delete the directory with the cloned project and the files created inside it if later you would want to clean up the things. See the next section for instructions.\n\n## Cleaning up\n\nLaunch a container image the same way you did for bootstrapping the demo (see the previous section). 
It's supposed that you didn't delete any files in `\u003Cpath to a location where to store ssh key-pairs on your PC>` and `\u003Cpath to the directory where you cloned the project into>`: \n\n```\ndocker run --rm -it --name ci-cd-for-a-multi-component-app \\\n  -e TF_VAR_gitlab_token=\u003Cyour GitLab account access token> \\\n  -v \u003Cpath to a location where to store ssh key-pairs on your PC>:/deploy-keys \\\n  -e TF_VAR_deploy_key_readwrite=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key.pub \\\n  -e TF_VAR_deploy_key_readonly=/deploy-keys/ci-cd-for-a-multi-component-app-deploy-key.pub \\\n  -e TF_VAR_root_gitlab_group_id=\u003CGitLab group ID> \\\n  -v \u003Cpath to the directory where you cloned the project into>:/repo -w /repo \\\n  --entrypoint /bin/sh \\\n  public.ecr.aws/hashicorp/terraform:1.1.9\n```\n\nInstall curl:\n\n```apk add curl```\n\nDo `terraform destroy`.\n\n**Notice**: You may see some errors regarding deleting the `oci-registry` project with OCI images. In that case just delete the images and remove the project manually or wait while GitLab does that itself later.\n\nNow if you want you can remove the cloned project directory and the `\u003Cpath to a location where to store ssh key-pairs on your PC>` directory.\n\nIf you would like to deploy the demo once again without removing the directory with the cloned repo don't forget to remove files created during the previous demo deployment, namely `terraform.tfstate` files in the root directory and `.git` directories everywhere in the `repo_content` directory.\n\nIn the [second part](/blog/how-to-migrate-atlassians-bamboo-servers-ci-cd-infrastructure-to-gitlab-ci-part-two/) of this tutorial, we'll look at a real-world example of how this can work.\n\n\n\n\n\n",[108,773,9],{"slug":3327,"featured":6,"template":684},"migration-from-atlassian-bamboo-server-to-gitlab-ci","content:en-us:blog:migration-from-atlassian-bamboo-server-to-gitlab-ci.yml","Migration From Atlassian Bamboo Server To Gitlab 
Ci","en-us/blog/migration-from-atlassian-bamboo-server-to-gitlab-ci.yml","en-us/blog/migration-from-atlassian-bamboo-server-to-gitlab-ci",{"_path":3333,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3334,"content":3340,"config":3346,"_id":3348,"_type":13,"title":3349,"_source":15,"_file":3350,"_stem":3351,"_extension":18},"/en-us/blog/migration-guide-github-advanced-security-to-gitlab-ultimate",{"title":3335,"description":3336,"ogTitle":3335,"ogDescription":3336,"noIndex":6,"ogImage":3337,"ogUrl":3338,"ogSiteName":669,"ogType":670,"canonicalUrls":3338,"schema":3339},"Migration guide: GitHub Advanced Security to GitLab Ultimate","Understand the similarities and differences between GitLab Ultimate and GitHub Advanced Security. Then follow this in-depth tutorial to make the move to the GitLab DevSecOps platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666187/Blog/Hero%20Images/blog-image-template-1800x945__6_.png","https://about.gitlab.com/blog/migration-guide-github-advanced-security-to-gitlab-ultimate","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Migration guide: GitHub Advanced Security to GitLab Ultimate\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2024-05-01\",\n      }",{"title":3335,"description":3336,"authors":3341,"heroImage":3337,"date":3342,"body":3343,"category":814,"tags":3344},[1767],"2024-05-01","GitLab is the most comprehensive AI-powered DevSecOps platform, enabling organizations to deliver more secure software faster with one platform for your entire software delivery lifecycle. GitHub provides an Advanced Security add-on, which enables additional security features within GitHub. However, it lacks the depth and breadth of security features provided natively by GitLab. 
Organizations looking to migrate to GitLab Ultimate to enhance their security across all areas of the SDLC can use this guide to compare the two offerings and as a tutorial to move to the GitLab platform.\n\nThis article includes:\n\n- [A comparison between GitLab Ultimate and GitHub Advanced Security](#a-comparison-between-gitlab-ultimate-and-github-advanced-security)\n- [How to migrate a GitHub repository to GitLab](#how-to-migrate-a-github-repository-to-gitlab)\n- [How to migrate from GitHub Advanced Security to GitLab Ultimate feature-by-feature](#how-to-migrate-feature-by-feature)\n- [An introduction to additional GitLab Ultimate's security features](#additional-gitlab-ultimate-security-features)\n\n## A comparison between GitLab Ultimate and GitHub Advanced Security\n\n[GitLab Ultimate](https://about.gitlab.com/pricing/ultimate/) is GitLab's top subscription tier for enterprises looking to deliver secure software faster. GitHub Advanced Security is an add-on to GitHub Enterprise, which enables additional security features.\n\n### Similarities between GitLab Ultimate and GitHub Advanced Security\n\nGitLab Ultimate and GitHub Advanced Security both provide:\n- Static Application Security Testing ([SAST](https://docs.gitlab.com/ee/user/application_security/sast/)), secret scanning, and dependency scanning\n- contextual vulnerability intelligence and resolution advice\n- a list of dependencies or software bill of materials ([SBOM](https://about.gitlab.com/blog/the-ultimate-guide-to-sboms/))\n- security metrics and insights\n\n### Differences between GitLab Ultimate and GitHub Advanced Security\n\nGitLab Ultimate differs from GitHub Advanced Security in the following ways:\n\n- GitLab natively provides additional code scanners such as container scanning, Dynamic Application Security Testing ([DAST](https://docs.gitlab.com/ee/user/application_security/dast/)), Web API fuzz testing, and more. 
These scanners are a mix of optimized proprietary and open source technologies with custom rulesets. For a full list, see the [GitLab AppSec documentation](https://docs.gitlab.com/ee/user/application_security/secure_your_application.html).\n- GitLab provides [granular security guardrails](https://docs.gitlab.com/ee/user/application_security/policies/) to prevent insecure code from being merged without approval.\n- GitLab security scanners can be run in [air-gapped or limited-connectivity environments](https://docs.gitlab.com/ee/user/application_security/offline_deployments/).\n- GitLab provides the [Compliance Center](https://docs.gitlab.com/ee/user/compliance/compliance_center/), which enables oversight of compliance violations across an entire organization.\n\nGitLab Ultimate also provides additional security and compliance capabilities, portfolio and value stream management, live upgrade assistance, and more. See the [GitLab Ultimate documentation](https://about.gitlab.com/pricing/ultimate/) to learn more about these additional features.\n\n## How to migrate a GitHub repository to GitLab\n\nGitLab provides a built-in importer, which allows you to import your GitHub projects from either GitHub.com or GitHub Enterprise to GitLab. The importer allows you to migrate not only the GitHub Repository to GitLab, but several other objects, including issues, collaborators (members), and pull requests. For a complete list of what can be migrated, see the [GitHub imported data documentation](https://docs.gitlab.com/ee/user/project/import/github.html#imported-data). You can perform the migration as follows:\n1. On the left sidebar, at the top, select **Create new (+)**.\n2. Select **New project/repository** under the **In GitLab** section.\n3. Select **Import project**.\n\n![Import project selection](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/1-Import-Project.png)\n\n4. 
Press the **GitHub** button.\n    - If using GitLab self-managed, then you must [enable the GitHub importer](https://docs.gitlab.com/ee/administration/settings/import_and_export_settings.html#configure-allowed-import-sources).\n    - Note that other importers can be initiated in the same way.\n5. Now, you can do one of the following:\n    - Authorize with GitHub Oauth by selecting **Authorize with GitHub**.\n    - Use a GitHub personal access token:\n       - Go to [https://github.com/settings/tokens/new](https://github.com/settings/tokens/new).\n       - In the **Note** field, enter a token description.\n       - Select the **repo** scope.\n       - Optionally, to import Collaborators, select the **read:org** scope.\n       - Press the **Generate token** button.\n       - On the GitLab import page, in the Personal Access Token field, paste the GitHub personal access token.\n6. Press the **Authenticate** button.\n7. Select the items you wish to migrate.\n8. Select the projects you wish to migrate and to where.\n9. Press the **Import** button.\n\nYour imported project should now be in your workspace. For additional guidance on migrating from GitHub to GitLab, watch this video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/0Id5oMl1Kqs?si=HEpZVy94cpfPfAky\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nYou can also perform the migration using a [GitHub personal access token](https://docs.gitlab.com/ee/user/project/import/github.html#use-a-github-personal-access-token) or the [GitLab REST API](https://docs.gitlab.com/ee/user/project/import/github.html#use-the-api). The importer also allows importing from other sources such as Bitbucket or Gitea. 
To learn more, read the [importer documentation](https://docs.gitlab.com/ee/user/project/import/).\n\n## How to migrate feature-by-feature\n\nLet’s go over how to leverage each feature provided by GitHub Advanced Security in GitLab Ultimate. You must have a [GitLab Ultimate license](https://about.gitlab.com/pricing/ultimate/) to continue. GitLab provides a [free 30-day trial](https://about.gitlab.com/free-trial/devsecops/) to get you started.\n\n### Code scanning\nGitHub provides code scanning to provide contextual vulnerability intelligence and advice for static source code. The same can be done within GitLab by enabling [SAST](https://docs.gitlab.com/ee/user/application_security/sast/). GitLab SAST scanners cover a wider set of programming languages and frameworks than GitHub’s [CodeQL](https://docs.github.com/en/code-security/code-scanning/introduction-to-code-scanning/about-code-scanning-with-codeql#about-codeql).\n\nTo enable code scanning in GitLab, you can simply add the [SAST template](https://docs.gitlab.com/ee/user/application_security/sast/#configure-sast-in-your-cicd-yaml) to your `.gitlab-ci.yml`:\n\n```yaml\ninclude:\n  - template: Jobs/SAST.gitlab-ci.yml\n```\n\nOnce the template has been added, any time new code is checked in, SAST will auto-detect the [programming languages](https://docs.gitlab.com/ee/user/application_security/sast/#supported-languages-and-frameworks ) used in your project. It will then scan the source code for known vulnerabilities.\n\n**Note:** Security scanners can also be added to your project using GitLab's [security configuration](https://docs.gitlab.com/ee/user/application_security/configuration/), which can automatically create a merge request to update your pipeline. 
To learn more, see the [Configure SAST by using the UI documentation](https://docs.gitlab.com/ee/user/application_security/sast/#configure-sast-by-using-the-ui).\n\nSAST results of the diff between the feature-branch and the target-branch display in the merge request widget. The merge request widget displays SAST results and resolutions that were introduced by the changes made in the merge request.\n\n![Security scanning in merge request](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/2-SAST-MR-View.png)\n\nEach vulnerability displays data to assist with remediation, including detailed description, severity, location, and resolution information:\n\n![SAST vulnerability details](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/3-SAST-MR-View-Detailed.png)\n\nYou can take action on these vulnerabilities:\n\n- **Dismiss vulnerability**: Allows a developer to dismiss the vulnerability with a comment. This assists the security team performing a review.\n- **Create issue**: Allows an issue to be created to keep track of a vulnerability that requires additional oversight.\n\nThese changes can also be seen inline when changing to the **Changes** view within the merge request.\n\n![SAST vulnerability changes view](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/4-SAST-MR-View-Changes.png)\n\n#### Customizing SAST scanners\n\nGitLab allows you to override a SAST job definition so you can change properties like variables, dependencies, or rules. You can do this by declaring a job with the same name as the SAST job to override. 
Then, place this new job after the template inclusion and specify any additional keys under it.\n\nFor example, the following configuration:\n- overwrites the version the `semgrep-sast` scanner uses\n- runs a script to fetch modules from private projects before running `gosec-sast`\n- configures all scanners to search at a maximum depth of 10\n\n```yaml\ninclude:\n  - template: Jobs/SAST.gitlab-ci.yml\n\nvariables:\n  SEARCH_MAX_DEPTH: 10\n\nsemgrep-sast:\n  variables:\n    SAST_ANALYZER_IMAGE_TAG: \"3.7\"\n\ngosec-sast:\n  before_script:\n    - |\n      cat \u003C\u003CEOF > ~/.netrc\n      machine gitlab.com\n      login $CI_DEPLOY_USER\n      password $CI_DEPLOY_PASSWORD\n      EOF\n```\n\n**Note:** The available SAST jobs can be found in the [`SAST.gitlab-ci.yml` template](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Jobs/SAST.gitlab-ci.yml). Configurations can be found in the [Available SAST CI/CD variables documentation](https://docs.gitlab.com/ee/user/application_security/sast/#available-cicd-variables).\n\n#### Customizing SAST rulesets\n\nFor each SAST analyzer, GitLab processes the code then uses rules to find possible weaknesses in source code. These rules determine what types of weaknesses the scanner reports.\n\n- For Semgrep-based SAST scanners, GitLab creates, maintains, and supports the rules that are used. 
It combines the Semgrep open source engine, GitLab-managed detection rules, and GitLab proprietary technology for vulnerability tracking and false positive detection.\n- For other SAST analyzers, the rules are defined in the upstream projects for each scanner.\n\nYou can customize the behavior of the SAST scanners by defining a ruleset configuration file in the repository being scanned:\n- Disable predefined rules (available for all analyzers)\n- Override predefined rules (available for all analyzers)\n- Replace predefined rules by synthesizing a custom configuration using passthroughs\n\nFor more information and examples on configuring SAST rules, see the [SAST rules](https://docs.gitlab.com/ee/user/application_security/sast/rules.html) and [Customizing rulesets documentation](https://docs.gitlab.com/ee/user/application_security/sast/customize_rulesets.html).\n\n### Secret scanning\n\nGitHub provides secret scanning, which can find, block, and revoke leaked secrets. The same can be done within GitLab by enabling [Secret Detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/).\n\nTo enable Secret Detection in GitLab, you can simply add the following template to your `.gitlab-ci.yml`:\n\n```yaml\ninclude:\n  - template: Jobs/Secret-Detection.gitlab-ci.yml\n```\n\nOnce the template has been added, any time new code is checked in (or a pipeline is run), the secret scanner will scan the source code for known secrets. Pipeline Secret Detection scans different aspects of your code, depending on the situation. For all methods except the “Default branch”, Pipeline Secret Detection scans commits, not the working tree. See the [Secret detection coverage documentation](https://docs.gitlab.com/ee/user/application_security/secret_detection/pipeline/#coverage) to learn more about how secret scanning works.\n\nWhen creating a merge request, Secret Detection scans every commit made on the source branch. 
Just like in SAST, each detected vulnerability provides the following information (such as location) and identifiers to assist with the remediation process:\n\n![Secret Detection vulnerability details](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/5-Secret-Detection-MR-Detailed.png)\n\nSimilar to SAST, you can take action on these vulnerabilities straight from the merge request, including dismissing vulnerabilities, and creating issues.\n\n#### Customizing Secret Detection jobs\n\nGitLab allows you to override a Secret Detection job definition so you can change properties like variables, dependencies, or rules. You can do this by declaring a job with the same name as the Secret Detection job. Then place this new job after the template inclusion and specify any additional keys under it. For example, the following configuration:\n\n- overwrites the stage the secret detection job runs on to `security`\n- enables the historic scanning\n- changes the Secrets Analyzer version to 4.5\n\n```yaml\ninclude:\n  - template: Jobs/Secret-Detection.gitlab-ci.yml\n\nsecret_detection:\n  stage: security\n  variables:\n    SECRET_DETECTION_HISTORIC_SCAN: \"true\"\n    SECRETS_ANALYZER_VERSION: \"4.5\"\n```\n\n**Note:** The available Secret Detection jobs can be found in the [SAST.gitlab-ci.yml template](https://gitlab.com/gitlab-org/gitlab/-/blob/master/lib/gitlab/ci/templates/Jobs/Secret-Detection.gitlab-ci.yml). Available configurations can be found in the [Available Secret Detection CI/CD variables documentation](https://docs.gitlab.com/ee/user/application_security/secret_detection/pipeline/#customizing-analyzer-settings).\n\n#### Customizing Secret Detection rulesets\n\nThe Secret Detection analyzer allows you to customize which secrets are reported in the GitLab UI. 
The following customization options can be used separately, or in combination:\n\n- disable predefined rules\n- override predefined rules\n- synthesize a custom configuration\n- specify a remote configuration file\n\nFor example, by creating the file `.gitlab/secret-detection-ruleset.toml`, in the root directory of your project, the default GitLeaks package is extended to ignore test tokens from detection:\n\n```yaml\n### extended-gitleaks-config.toml\ntitle = \"extension of gitlab's default gitleaks config\"\n\n[extend]\n### Extends default packaged path\npath = \"/gitleaks.toml\"\n\n[allowlist]\n  description = \"allow list of test tokens to ignore in detection\"\n  regexTarget = \"match\"\n  regexes = [\n    '''glpat-1234567890abcdefghij''',\n  ]\n```\n\nFor more information on overriding the predefined analyzer rules, check out the [Secret Detection documentation](https://docs.gitlab.com/ee/user/application_security/secret_detection/pipeline/#override-predefined-analyzer-rules).\n\n#### Automatic response to leaked secrets\n\nGitLab Secret Detection automatically responds when it finds certain types of leaked secrets. Automatic responses can:\n- automatically revoke the secret\n- notify the partner that issued the secret and the partner can then revoke the secret, notify its owner, or otherwise protect against abuse\n\nGitLab can also notify partners when credentials they issue are leaked in public repositories on GitLab.com. If you operate a cloud or SaaS product and you’re interested in receiving these notifications, you can implement a Partner API, which is called by the GitLab Token Revocation API.\n\nSee the [Automatic response to leaked secrets documentation](https://docs.gitlab.com/ee/user/application_security/secret_detection/automatic_response.html) to learn more.\n\n### Supply chain security\n\nGitHub enables you to secure, manage, and report on software supply chains with automated security and version updates and one-click SBOMs. 
GitLab can meet your supply chain security needs using the Dependency Scanning and Dependency List (SBOM) features.\n\nTo enable Dependency Scanning in GitLab, you can simply add the following template to your `.gitlab-ci.yml`:\n\n```yaml\ninclude:\n  - template: Jobs/Dependency-Scanning.gitlab-ci.yml\n```\n\nOnce the template has been added, any time new code is checked in, Dependency Scanning will auto-detect the [package managers](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/#supported-languages-and-package-managers) used in your project. It will then scan the dependencies used for known vulnerabilities.\n\nDependency Scanning results of the diff between the feature-branch and the target-branch display in the merge request widget. The merge request widget displays Dependency Scanning results and resolutions that were introduced by the changes made in the merge request. Within a merge request, each vulnerability displays relevant information to assist with remediation such as identifiers, evidence, and solutions:\n\n![Dependency Scanner vulnerability details](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/6-Dependency-Scanner-MR-View-Detailed.png)\n\nSimilar to SAST and Secret Detection, you can take action on these vulnerabilities straight from the merge request, including dismissing vulnerabilities and creating issues.\n\n#### Configuring Dependency Scanning\n\nTo override a job definition (for example, to change properties like variables or dependencies), declare a new job with the same name as the one to override. Place this new job after the template inclusion and specify any additional keys under it. 
For example, the following code:\n\n- disables automatic remediation of vulnerable dependencies\n- requires a build job to complete before Dependency Scanning\n\n```yaml\ninclude:\n  - template: Jobs/Dependency-Scanning.gitlab-ci.yml\n\ngemnasium-dependency_scanning:\n  variables:\n    DS_REMEDIATE: \"false\"\n  dependencies: [\"build\"]\n```\n\nTo learn more about configuring the dependency scanners, see the [Customizing analyzer behavior documentation](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/#customizing-analyzer-behavior).\n\n#### Generating an SBOM\n\nGitLab provides a Dependency List (SBOM) to review your project or group dependencies and key details about those dependencies, including their known vulnerabilities. This list is a collection of dependencies in your project, including existing and new findings. The Dependency List is generated after the dependency scanner runs successfully on the [default branch](https://docs.gitlab.com/ee/user/project/repository/branches/default.html). To access the Dependency List:\n\n1. On the left sidebar, select **Search or go to** and find your project.\n2. Select **Secure > Dependency List**.\n\n![Dependency list (SBOM)](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/7-Dependency-List.png)\n\nFrom here you can see the following information on your dependencies:\n\n| Field\t| Description |\n| ----- | ----------- |\n| Component\t| The dependency’s name and version. |\n| Packager | The packager used to install the dependency. |\n| Location | For system dependencies, this lists the image that was scanned. For application dependencies, this shows a link to the packager-specific lock file in your project that declared the dependency. It also shows the dependency path to a top-level dependency, if any, and if supported. |\n| License | Links to dependency’s software licenses. 
A warning badge that includes the number of vulnerabilities detected in the dependency. |\n| Projects | Links to the project with the dependency. If multiple projects have the same dependency, the total number of these projects is shown. To go to a project with this dependency, select the Project's number, then search for and select its name. The project search feature is supported only on groups that have up to 600 occurrences in their group hierarchy. |\n\n\u003Cp>\u003C/p>\n\nSee the [Dependency List documentation](https://docs.gitlab.com/ee/user/application_security/dependency_list/) to learn more.\n\n### Security and compliance administration\n\nGitHub Advanced Security allows you to view security metrics and insights and assess code security risk. Now let’s examine how to do the same with GitLab Ultimate.\n\n#### Viewing security metrics and insights\n\nGitLab provides [Security dashboards](https://docs.gitlab.com/ee/user/application_security/security_dashboard/) to help assess the security posture of your applications. These dashboards display a collection of metrics, ratings, and charts for the vulnerabilities detected by the security scanners run on your project:\n\n- vulnerability trends over a 30-, 60-, or 90-day timeframe for all projects in a group\n- a letter grade rating for each project based on vulnerability severity\n- the total number of vulnerabilities detected within the past 365 days, including their severity\n\nTo access the Security dashboard:\n\n1. On the left sidebar, select **Search or go to** and find your project or group.\n2. From the side tab, select **Secure > Security** dashboard.\n3. 
Filter and search for what you need.\n\nThe group view displays your security posture for all projects in your group:\n\n![Group Security dashboard](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/8-SD-Group.png)\n\nThe project view displays your security posture for just the project:\n\n![Project Security dashboard](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/9-SD-Project.png)\n\n#### Assess code security risk\n\nGitLab Ultimate features a [Vulnerability Report](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/), which provides information about vulnerabilities from scans of the default branch. It contains cumulative results of all successful jobs, regardless of whether the pipeline was successful. At all levels, the Vulnerability Report contains:\n\n- totals of vulnerabilities per severity level\n- filters for common vulnerability attributes\n- details of each vulnerability, presented in tabular layout\n\n![Vulnerability Report](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/10-Vulnerability-Report.png)\n\nClicking on a vulnerability enables access to its [Vulnerability Page](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/), which contains details of the vulnerability including a description, location, identifiers, and more. 
Below is an example of the Vulnerability Page for an SQL Injection vulnerability detected by our SAST scanner:\n\n![SQL Injection Vulnerability Page](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/11-Vulnerability-Page-1.png)\n\nFrom here the security team can collaborate by [changing the status of a vulnerability](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#change-the-status-of-a-vulnerability) along with a reason and [creating issues to better track changes](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#create-a-gitlab-issue-for-a-vulnerability).\n\nFrom the Vulnerability Page, you can also leverage [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our AI-powered suite of features, to explain the vulnerability and [automatically create a merge request that resolves the vulnerability](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#vulnerability-resolution).\nGitLab Duo's [Vulnerability Explanation](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#vulnerability-explanation) uses a large language model to:\n\n- summarize the vulnerability.\n- help developers and security analysts to understand the vulnerability, how it could be exploited, and how to fix it\n- provide a suggested mitigation\n\n![SQL Injection GitLab Duo AI explanation](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/13-Explain-Vulnerability.png)\n\n## Additional GitLab Ultimate security features\n\nGitLab Ultimate contains many more security features that cannot be found within GitHub Advanced Security. 
A few examples of these additional security features are: additional security scanners for the complete software development lifecycle (SDLC), granular security guardrails, and custom permissions.\n\n### Security scanners for the entire SDLC\n\nOur portfolio of security scanners spans the SDLC.\n\n| Scanner Name | Scans | Languages/Files scanned |\n|  -------------- | ----- | ------------------------- |\n| [Static Application Security Testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/) | Static source code | C/C++, Java, Python, Go, JavaScript, C#, and more |\n| [Dynamic Application Security Testing (DAST)](https://docs.gitlab.com/ee/user/application_security/dast/) | Running web application, live API | Language-agnostic |\n| [Infrastructure as Code (IaC) Scanning](https://docs.gitlab.com/ee/user/application_security/iac_scanning/) | IaC files |Terraform, AWS Cloud Formation, Ansible, and more |\n| [Container Scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/) | Static and running container images | Dockerfile |\n| [Dependency Scanning and License Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/) | Application dependencies | Requirements.txt, Yarn, Gradle, Npm, and more |\n| [Web API Fuzz Testing](https://docs.gitlab.com/ee/user/application_security/api_fuzzing/) | Sends random/malformed data to web-api | OpenAPI, GraphQL, HAR, Postman Collection |\n| [Coverage-guided Fuzz Testing](https://docs.gitlab.com/ee/user/application_security/coverage_fuzzing/) | Sends random/malformed data to function | C/C++, Go, Swift, Python, Rust, Java, JavaScript, AFL |\n\n\u003Cp>\u003C/p>\n\nGitLab also allows you to integrate [third-party scanners](https://about.gitlab.com/blog/integrate-external-security-scanners-into-your-devsecops-workflow/) and [custom scanners](https://about.gitlab.com/blog/how-to-integrate-custom-security-scanners-into-gitlab/) into the platform. 
Once integrated, the scanner results are automatically presented in various places in GitLab, such as the Pipeline view, merge request widget, and Security dashboard. See the [Security Scanner Integration documentation](https://docs.gitlab.com/ee/development/integrations/secure.html) to learn more.\n\n### Granular security and compliance policies\n\nPolicies in GitLab provide security and compliance teams with [a way to enforce controls globally in their organization](https://about.gitlab.com/blog/meet-regulatory-standards-with-gitlab/). Security teams can ensure:\n\n- security scanners are enforced in development team pipelines with proper configuration\n- all scan jobs execute without any changes or alterations\n- proper approvals are provided on merge requests based on results from those findings\n\n![Merge Request Security Policies](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/14-MR-Policy.png)\n\nCompliance teams can centrally enforce multiple approvers on all merge requests and ensure various settings are enabled on projects in scope of organizational requirements, such as enabling or locking merge request and repository settings. To learn more see the [GitLab Security Policy](https://docs.gitlab.com/ee/user/application_security/policies/) documentation.\n\n### Custom roles and granular permissions\n\n[GitLab Ultimate provides custom roles](https://about.gitlab.com/blog/how-to-tailor-gitlab-access-with-custom-roles/), which allow an organization to create user roles with the precise privileges and permissions required for that organization’s needs.\n\nFor example, a user could create a “Security Auditor” role with permissions to view security vulnerabilities in the system, but not be able to view source code, nor perform any changes within the repository. 
This granular set of permissions enables well-defined separation of duties.\n\n![Custom role creation](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/15-Custom-Roles.png)\n\nTo learn more see the [Custom Roles](https://docs.gitlab.com/ee/user/custom_roles.html) and [available Granular Permissions](https://docs.gitlab.com/ee/user/custom_roles/abilities.html) documentation.\n\n### Compliance Center\n\nThe Compliance Center is the central location for compliance teams to manage their compliance standards’ adherence reporting, violations reporting, and compliance frameworks for their group. The Compliance Center includes the following:\n\n- [Compliance standards adherence dashboard](https://docs.gitlab.com/ee/user/compliance/compliance_center/compliance_standards_adherence_dashboard.html) lists the adherence status of projects complying to the GitLab standard.\n- [Compliance violations report](https://docs.gitlab.com/ee/user/compliance/compliance_center/compliance_violations_report.html) shows a high-level view of merge request activity for all projects in the group.\n- [Compliance frameworks report](https://docs.gitlab.com/ee/user/compliance/compliance_center/compliance_frameworks_report.html) shows all the compliance frameworks in a group.\n- [Compliance projects report](https://docs.gitlab.com/ee/user/compliance/compliance_center/compliance_projects_report.html) shows the compliance frameworks that are applied to projects in a group.\n\n![Compliance Center](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674404/Blog/Content%20Images/16-Compliance-Center.png)\n\nThese dashboards assist with making sure separation of duties is being followed to optimize compliance within your organization. 
To learn more see the [Compliance Center documentation](https://docs.gitlab.com/ee/user/compliance/compliance_center/).\n\n## Read more\n\nThis article covers only a portion of the wide range of security features GitLab Ultimate offers. Check out these resources to learn more about how GitLab Ultimate can help enhance your organizational security and developer efficiency:\n\n- [Why GitLab Ultimate](https://about.gitlab.com/pricing/ultimate/)\n- [Getting Started with DevSecOps Tutorial](https://gitlab-da.gitlab.io/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/)\n- [Getting Started with DevSecOps Sample Project](https://gitlab.com/gitlab-da/tutorials/security-and-governance/devsecops/simply-vulnerable-notes)\n- [Import your project from GitHub to GitLab documentation](https://docs.gitlab.com/ee/user/project/import/github.html)\n- [Migrating from GitHub Actions documentation](https://docs.gitlab.com/ee/ci/migration/github_actions.html)\n- [Tutorial: Create and run your first GitLab CI/CD pipeline](https://docs.gitlab.com/ee/ci/quick_start/)\n- [Tutorial: Create a complex pipeline](https://docs.gitlab.com/ee/ci/quick_start/tutorial.html)\n- [CI/CD YAML syntax reference](https://docs.gitlab.com/ee/ci/yaml/)",[9,3345,814,478,1041],"zero trust",{"slug":3347,"featured":90,"template":684},"migration-guide-github-advanced-security-to-gitlab-ultimate","content:en-us:blog:migration-guide-github-advanced-security-to-gitlab-ultimate.yml","Migration Guide Github Advanced Security To Gitlab 
Ultimate","en-us/blog/migration-guide-github-advanced-security-to-gitlab-ultimate.yml","en-us/blog/migration-guide-github-advanced-security-to-gitlab-ultimate",{"_path":3353,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3354,"content":3360,"config":3365,"_id":3367,"_type":13,"title":3368,"_source":15,"_file":3369,"_stem":3370,"_extension":18},"/en-us/blog/modernizing-a-simple-c-application-to-java-with-gitlab-duo",{"title":3355,"description":3356,"ogTitle":3355,"ogDescription":3356,"noIndex":6,"ogImage":3357,"ogUrl":3358,"ogSiteName":669,"ogType":670,"canonicalUrls":3358,"schema":3359},"Modernizing a simple C++ application to Java with GitLab Duo","Learn how to refactor code from memory unsafe languages to memory safe languages with the help of GitLab's AI capabilities, saving time and effort on application modernization.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659507/Blog/Hero%20Images/AdobeStock_623844718.jpg","https://about.gitlab.com/blog/modernizing-a-simple-c-application-to-java-with-gitlab-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Modernizing a simple C++ application to Java with GitLab Duo\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2024-12-11\"\n      }",{"title":3355,"description":3356,"authors":3361,"heroImage":3357,"date":3362,"body":3363,"category":702,"tags":3364},[699],"2024-12-11","Memory unsafe languages are those that do not handle any memory management on behalf of the developer. For example, when programming in C or C++, if you need memory during runtime, you will need to allocate and deallocate the memory yourself, running the risk of ending up with memory leaks in cases when you inadvertently forget to deallocate it. Other languages like Ada and FORTRAN provide some memory management but may not prevent memory leaks. 
Many organizations, including those in the public sector, have applications that have been developed using languages that are memory unsafe and are often looking to modernize these to a memory safe language, such as Java, Python, JavaScript, or Golang.\n\nThis tutorial focuses on a specific example of modernizing a simple C++ application to Java by refactoring it with the help of [GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI capabilities, and shows how much time and effort you can save in the migration.\n\n## Understanding the simple C++ application\n\nLet’s make the assumption that we have been tasked with the migration of a C++ application to a memory safe language, namely Java. The C++ application can be found in the following project (thank you to [@sugaroverflow](https://gitlab.com/sugaroverflow) for contributing this sample application):\n\n[https://gitlab.com/gitlab-da/use-cases/ai/ai-applications/refactor-to-java/air-quality-application](https://gitlab.com/gitlab-da/use-cases/ai/ai-applications/refactor-to-java/air-quality-application)\n\nSince this is the first time we are seeing this application, let’s invoke GitLab Duo Code explanation to better understand what it does. We open file `main.cpp` in Visual Studio Code and select the entirety of this file. We then right-click and select **GitLab Duo Chat > Explain selected snippet** from the popup menu.\n\n![duo-code-explanation-menu-option](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675546/Blog/Content%20Images/code-explanation-menu-option.png)\n\nThe GitLab Duo Chat window opens up and the slash command `/explain` is executed for the selected code. 
Chat returns a very thorough and detailed description and explanation in natural language form of what each function does in the file as well as examples on how to run the compiled program.\n\n![code-explanation-text](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/code-explanation-text.png)\n\nIn short, the simple C++ application takes a U.S. zip code as input and returns the air quality index for that zip code.\n\n## Compiling and running the C++ application\n\nTo further understand this simple C++ application, we proceed to compile and run it. We could have asked Chat how to do this, however, the project has a README file that provides the commands to compile the project, so we go ahead and use those by entering them in the Terminal window of VS Code.\n\n![compile-command](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/compile-command.png)\n\nAfter the compilation finishes, we change directory to the `build` subdirectory in the project, which is where the compilation process places the executable file for this application. Then, we run the executable by entering the following command:\n\n`./air_quality_app 32836`\n\nAnd we see the response as follows:\n\n`Air Quality Index (AQI) for Zip Code 32836: 2 (Fair)`\n\n![cplus-plus-app-execution-output](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/cplus-plus-app-execution-output.png)\n\nThis confirms to us that the application was successfully compiled and it’s executing appropriately.\n\n## Refactoring the application to Java\n\nLet’s start migrating this C++ application to Java. We take advantage of GitLab Duo Chat and its refactoring capabilities by using the slash command `/refactor`. We qualify the slash command with specific instructions on what to do for the refactoring. We enter the following command in the Chat input field:\n\n> /refactor this entire application to Java. 
Provide its associated pom.xml to build and run the Java application. Also, provide the directory structure showing where all the resulting files should reside for the Java application.\n\n![refactor-chat-output](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/refactor-chat-output.png)\n\nChat returns a set of Java files that basically refactor the entire C++ application to the memory safe language. In addition and per the prompt, Chat returns the pom.xml file, needed by [maven](https://docs.gitlab.com/ee/api/packages/maven.html) for the building and execution of the refactored application as well as its directory structure, indicating where each generated file should reside.\n\nWe copy and save all the generated files to our local directory.\n\n## Creating the Java project\n\nIn VS Code, we now proceed to open an empty project in which we will set up the directory structure of the new Java application and its contents.\n\nWe create all the previously generated Java files in their corresponding directories in the new project and paste their contents in each.\n\n![java-files-created](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/java-files-created.png)\n\nLastly, we save all the files to our local disk.\n\n## Asking for help to build and run the Java application\n\nAt this point, we have an entire Java application that has been refactored from C++. Now, we need to build it but we don’t quite remember what maven command we need to use to accomplish this.\n\nSo we ask GitLab Duo Chat about this. 
We enter the following prompt in the Chat input field:\n\n> How do you build and run this application using maven?\n\n![maven-info-output](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/maven-info-output.png)\n\nChat returns with a thorough explanation on how to do this, including examples of the maven command to build and run the newly created Java application.\n\n## Building and running the Java application\n\nGitLab Duo Chat understands the application and environment context and responds that we first need to create an environment variable called `API_KEY` before we can run the application.\n\nIt also provides the maven command to execute to build the application, which we enter in the Terminal window:\n\n```unset\nmvn clean package\n``` \n\n![java-build-output](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/java-build-output.png)\n\nOnce the build finishes successfully, we copy the generated command to run the application from the Chat window and paste it in the Terminal window:\n\n```unset\njava -jar target/air-quality-checker-1.0-SNAPSHOT-jar-with-dependencies.jar 90210\n```\n\n![java-app-execution-output](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675547/Blog/Content%20Images/java-app-execution-output.png)\n\nThe application successfully executes and returns the string:\n\n```unset\nAir Quality Index (AQI) for Zip Code 90210: 2 (Fair)\n```\n\nWe have confirmed that the modernized version of the application, now refactored in Java, runs just like its original C++ version.\n\n## Watch this tutorial in action\n\nWe have seen that by leveraging the power of GitLab Duo in your modernization activities, you can save a great deal of time and effort, freeing you to spend more time innovating and creating value to your organization.\n\nHere is a video to show you, in action, the tutorial you just read:\n\n\u003C!-- blank line -->\n\u003Cfigure 
class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/LJ7GOr_P0xs?si=_ZjF75DAXEQnY2Mn\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n> #### Want to get started with GitLab Duo? [Start a free, 60-day trial today!](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/)\n\n## Learn more\n- [Refactor code into modern languages with AI-powered GitLab Duo](https://about.gitlab.com/blog/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo/)\n- [Secure by Design principles meet DevSecOps innovation in GitLab 17](https://about.gitlab.com/blog/secure-by-design-principles-meet-devsecops-innovation-in-gitlab-17/)\n- [How to secure memory-safe vs. manually managed languages](https://about.gitlab.com/blog/memory-safe-vs-unsafe/)\n",[704,478,9,183],{"slug":3366,"featured":6,"template":684},"modernizing-a-simple-c-application-to-java-with-gitlab-duo","content:en-us:blog:modernizing-a-simple-c-application-to-java-with-gitlab-duo.yml","Modernizing A Simple C Application To Java With Gitlab Duo","en-us/blog/modernizing-a-simple-c-application-to-java-with-gitlab-duo.yml","en-us/blog/modernizing-a-simple-c-application-to-java-with-gitlab-duo",{"_path":3372,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3373,"content":3379,"config":3385,"_id":3387,"_type":13,"title":3388,"_source":15,"_file":3389,"_stem":3390,"_extension":18},"/en-us/blog/mr-reviews-with-vs-code",{"title":3374,"description":3375,"ogTitle":3374,"ogDescription":3375,"noIndex":6,"ogImage":3376,"ogUrl":3377,"ogSiteName":669,"ogType":670,"canonicalUrls":3377,"schema":3378},"How to do GitLab merge request reviews in VS Code","Code review is critical to modern software development. 
We're making it easier by bringing merge request reviews right into VS Code.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666775/Blog/Hero%20Images/cover.jpg","https://about.gitlab.com/blog/mr-reviews-with-vs-code","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to do GitLab merge request reviews in VS Code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tomas Vik\"}],\n        \"datePublished\": \"2021-01-25\"\n      }",{"title":3374,"description":3375,"authors":3380,"heroImage":3376,"date":3382,"body":3383,"category":769,"tags":3384},[3381],"Tomas Vik","2021-01-25","\n\nThis post will give you an idea of how VS Code can aid your code review process. You'll get an overview of the features that GitLab VS Code Extension currently supports, as well as what we plan to introduce in the future.\n\nReviewing merge requests is a core part of GitLab: both the product (since [version 2.0.0](https://gitlab.com/gitlab-org/gitlab/blob/6a3621202e3f7274150862198f59d2579c326650/changelogs/archive.md#L7222), released in 2011) and the company. We recognize that certain review tasks are hard to do just by looking at the diff, and we strive to make them easier. One such task might be looking in the codebase for duplicated code or examples of a particular coding style.\n\nWe decided to aid code reviewers in two ways:\n\n## First way: The GitLab Web IDE\n\nFirst, we introduced the [Web IDE](/blog/introducing-gitlab-s-integrated-development-environment/), which helps our users work [with the codebase in the browser](/direction/create/ide/web_ide/#overview). You can quickly open multiple files, make changes, and commit them. The Web IDE is handy when you need to make a small change, or you don't have the project cloned locally.\n\nThe second way is more recent. 
We always wanted to bring the code review experience closer to code editors, where developers spend a large portion of their time. But the editor market is very fragmented (you find out the hard way if Emacs and Vim users meet at a party). And it isn't feasible to build GitLab support into all major editors (however, there are plenty of editor plugins maintained by the community[^1]). \n\n## Second way: Bringing code reviews into the editor\n\nRecently, as [VS Code gained a significant user share](https://insights.stackoverflow.com/survey/2019#development-environments-and-tools), it started to make sense to [commit to maintaining the GitLab VS Code extension](/blog/use-gitlab-with-vscode/), which was started as a community project by one, at the time, GitLab employee: [Fatih](https://gitlab.com/fatihacet). After an initial housekeeping period, we started chipping away tasks that will ultimately bring the code review experience into the editor.\n\nIn my previous post I talked about the great [VS Code Extension API](/blog/vscode-extension-development-with-gitlab/). This API gives extensions almost full control over the editor. When the API introduced commenting functionality two years ago, extensions could start contributing comments to the editor windows. These comments are shown similarly as comments on a Google Doc. Being able to natively show comments is perfect for reviewing code changes in the editor and other extensions that provide code reviews are already using this commenting API[^2].\n\n![Merge request review in VS Code](https://about.gitlab.com/images/blogimages/mr-reviews-with-vs-code/full-mr-review-screen.png){: .shadow.medium.center}\nMerge request review in VS Code\n{: .note .text-center}\n\nOver the last few milestones, we started showing MR changes in VS Code and even showing discussions on these. This means that you can open an MR in your editor and read through the code and comments without switching windows and context. 
I find this really useful because I can still interact with my editor the way I'm used to, even as I'm reviewing MRs. I can use full-text search to find if the MR duplicates existing code or I can open a different test file and compare whether the code style matches.\n\nCurrently, the interaction with MR is mostly read-only. That means you can see the changes and discussions, but you can't add or change comments, yet[^3]. But even in this current form, you can benefit from having the VS Code functionality so close to your review, especially for the initial understanding of the change.\n\n![VS Code supports Markdown in the comments](https://about.gitlab.com/images/blogimages/mr-reviews-with-vs-code/mr-review-long-comment.png){: .shadow.medium.center}\nVS Code supports Markdown in the comments\n{: .note .text-center}\n\n## What's next\n\nOver the next few milestones, we plan to make the commenting as interactive as you know it from the GitLab web interface. We'll start with editing existing comments, adding emoji reactions and resolving discussion threads. Lastly, we'll implement the full review functionality with creating comments and reviews[^4]. Each [iteration](https://handbook.gitlab.com/handbook/values/#iteration) will make the feature a bit more useful.\n\nI'm excited about the potential to stay in my editor for both creating and reviewing merge requests. I'm already using the current merge request review feature to get the initial understanding of what the MR tries to achieve. I can explore the related code more quickly in my editor. 
If you'd like to help us build the code review feature or just look at the current state of development, visit the [Merge Request Review epic](https://gitlab.com/groups/gitlab-org/-/epics/4607).\n\nYou can check out a walkthrough of our initial proof of concept of merge request reviews in VS Code below:\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/kKA6i8oqZAA\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n[^1]: [IntelliJ](https://plugins.jetbrains.com/plugin/7447-gitlab-integration-plugin), [Atom](https://atom.io/packages/search?q=gitlab), [vim](https://github.com/shumphrey/fugitive-gitlab.vim), [Emacs](https://github.com/nlamirault/emacs-gitlab), ...\n[^2]: [Jira and Bitbucket](https://marketplace.visualstudio.com/items?itemName=Atlassian.atlascode), [GitHub Pull Requests and Issues](https://marketplace.visualstudio.com/items?itemName=GitHub.vscode-pull-request-github)\n[^3]: You can work around that by using the MR overview and commenting there.\n[^4]: [MR review: interacting with existing comments - POC](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/269) and [MR review: new comments and reviews POC](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/293) represent the initial investigation.\n\n[Cover image](https://art.ljubicapetkovic.com/cc-licensed/) by [Ljubica Petkovic](https://art.ljubicapetkovic.com), licensed under [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/)\n{: .note}\n",[793,230,9],{"slug":3386,"featured":6,"template":684},"mr-reviews-with-vs-code","content:en-us:blog:mr-reviews-with-vs-code.yml","Mr Reviews With Vs 
Code","en-us/blog/mr-reviews-with-vs-code.yml","en-us/blog/mr-reviews-with-vs-code",{"_path":3392,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3393,"content":3399,"config":3404,"_id":3406,"_type":13,"title":3407,"_source":15,"_file":3408,"_stem":3409,"_extension":18},"/en-us/blog/oidc",{"title":3394,"description":3395,"ogTitle":3394,"ogDescription":3395,"noIndex":6,"ogImage":3396,"ogUrl":3397,"ogSiteName":669,"ogType":670,"canonicalUrls":3397,"schema":3398},"Secure GitLab CI/CD workflows using OIDC JWT on a DevSecOps platform","Learn a new method to authenticate using JWT to increase the security of CI/CD workflows.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667094/Blog/Hero%20Images/container-security.jpg","https://about.gitlab.com/blog/oidc","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Secure GitLab CI/CD workflows using OIDC JWT on a DevSecOps platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dov Hershkovitch\"}],\n        \"datePublished\": \"2023-02-28\"\n      }",{"title":3394,"description":3395,"authors":3400,"heroImage":3396,"date":3401,"body":3402,"category":1103,"tags":3403},[3015],"2023-02-28","\n\nSecuring CI/CD workflows can be challenging. This blog post walks you through the problem validation, explores the JWT token technology and how it can be used with OIDC authentication, and discusses implementation challenges with authorization realms. You will learn about the current possibilities and future plans with GitLab 16.0. \n\n### Variables vs. secrets\nVariables are an efficient way to control and inject parameters into your jobs and pipelines, making managing and configuring the CI/CD workflows easier. You can read more about [how to use CI/CD variables](https://about.gitlab.com/blog/demystifying-ci-cd-variables/). 
An extra layer of security on top of variables to mask and protect, for now, is our “best-effort” to prevent sensitive variables from being accidentally revealed. However, variables are not a drop-in replacement for secrets. [Securing secrets natively](https://gitlab.com/gitlab-org/gitlab/-/issues/217355) is a solution that GitLab aspires to provide. Meanwhile, we recommend storing sensitive information in a dedicated secrets management solution. As a company, we will provide you abilities to integrate and retrieve secrets as part of your CI/CD workflows. \n\n## Security shifting left\nSensitive information like passwords, secret tokens, or shared IDs required to access tools and platforms need to be securely stored. They must also be highly available to their owners and the teams who use them. There are various secrets management solutions and frameworks available. They have addressed one problem but created new problems. For example: \"Which tool is right for our needs?\" More importantly, in software development: \"What's the best way to integrate this into our DevOps processes so that we're secure but still operating as efficiently as possible?\" Ignoring the security protocols in your organization is not an option. However, sensitive information should be stored as securely as possible. Something as simple as an access token stored in plain text can lead to security leaks and business incidents in the worst-case scenarios.\n\n## Initial support for JWT\nThe [JSON Web Token (JWT)](https://en.wikipedia.org/wiki/JSON_Web_Token) aims to build the integration bridge as an open standard for security claims exchange. It is a signed, short-lived, contextualized token that allows everyone to implement authentication between different products securely. 
The JWT consists of three parts: a header, a payload, and a signature.\n\n- The header represents the type of the token and the encryption algorithm.\n- The signature ensures that the token hasn't been altered.\n- The payload comprises a series of claims representing the information exchanged between two parties, which includes information about a GitLab user (ID, email, login) and the pipeline information (pipeline ID, job ID, environment, and more).\n\n_Example of GitLab JWT payload_\n\n```\n{\n  \"jti\": \"c82eeb0c-5c6f-4a33-abf5-4c474b92b558\",\n  \"iss\": \"gitlab.example.com\",\n  \"iat\": 1585710286,\n  \"nbf\": 1585798372,\n  \"exp\": 1585713886,\n  \"sub\": \"job_1212\",\n  \"namespace_id\": \"1\",\n  \"namespace_path\": \"mygroup\",\n  \"project_id\": \"22\",\n  \"project_path\": \"mygroup/myproject\",\n  \"user_id\": \"42\",\n  \"user_login\": \"myuser\",\n  \"user_email\": \"myuser@example.com\",\n  \"pipeline_id\": \"1212\",\n  \"pipeline_source\": \"web\",\n  \"job_id\": \"1212\",\n  \"ref\": \"auto-deploy-2020-04-01\",\n  \"ref_type\": \"branch\",\n  \"ref_protected\": \"true\",\n  \"environment\": \"production\",\n  \"environment_protected\": \"true\"\n}\n```\nUsing this information (called \"claims\"), you can implement an authentication condition where the token will get rejected if one of those claims does not match. You can use this to restrict access to only the authorized users and jobs in your pipelines.\n\nGitLab 12.10 added [initial support for JWT token-based connections](https://about.gitlab.com/releases/2020/04/22/gitlab-12-10-released/#retrieve-cicd-secrets-from-hashicorp-vault), which was later [enhanced](https://about.gitlab.com/releases/2020/09/22/gitlab-13-4-released/#use-hashicorp-vault-secrets-in-ci-jobs) with the `secrets:` keyword, as well as the `CI_JOB_JWT` predefined CI/CD variable, which is automatically injected into every job in a pipeline. 
This implementation was restricted to Hashicorp Vault, and users can use it to read secrets directly from the vault as part of their CI/CD workflow.\n \n### OIDC (JWT Version 2)\nThe logic we used to build the initial support for JWT opened up the possibility of connecting to other providers as well, but the first iteration was still restricted to Hashicorp Vault users.\n\nThis problem was addressed in GitLab 14.7 when we [released](https://about.gitlab.com/releases/2022/01/22/gitlab-14-7-released/#openid-connect-support-for-gitlab-cicd) the first \"Alpha\" version of JWT V2, which provided [Open ID Connect (OIDC)](https://openid.net/connect/) support for CI/CD.\n\nOIDC is an identity layer implemented on top of the JSON web token. You can securely authenticate against many products and services that implement OIDC, including AWS, GCP, and many more, making better use of the token's potential. Similar to our first JWT iteration, we added another [predefined CI/CD variable](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html) `CI_JOB_JWT_V2` which is also automatically injected into every job in a CI/CD pipeline.\n\n### Securely store your secrets \nYour software supply chain should include everything needed to deliver and run your software. Securing your supply chain means you need to secure your software and the surrounding (cloud-native) infrastructure. In [GitLab 15.9](https://about.gitlab.com/releases/2023/02/22/gitlab-15-9-released/), we've added additional layers of protection to move our OIDC token from an Experiment to General Availability, increasing the security of your CI/CD workflows. \n\n\n#### Opt-in JWT token\nJSON web tokens (V1 and V2) are stored in CI/CD variables, which are injected automatically into all jobs in a CI/CD pipeline. However, it is likely most jobs in your pipeline do not need the token. In addition to the inefficiency of injecting unused tokens into all jobs in a pipeline, there is a potential security vulnerability. 
All it takes is one compromised job for this token to be leaked and used by an attacker to retrieve sensitive information from your organization. To minimize this risk, we've added the ability to restrict the token variable from all jobs in your pipeline and expose it only to the specific jobs that need it.\n\nTo declare the JSON web token in a job that needs it, configure the job in the `.gitlab-ci.yml` configuration file following this example:\n\n```yaml\njob_name:\n  id_token:\n    MY_JOB_JWT: # or any other variable name\n  ...\n```\n\nYou can minimize the token exposure across your pipeline, but ensure it is available to the jobs that require it.\n\n#### Audience claim (`aud:`)\nClaims constitute the payload part of a JSON web token and represent a set of information exchanged between two parties. The JWT standard distinguishes between reserved, public, and private claims.\n\nThe audience (`aud:`) claim is a reserved claim, which identifies the audience that the JWT is intended for (the target of the token). In other words, which services, APIs, or products should accept this token. If the audience claim does not match, the token is rejected, so the audience claim is an essential part of software supply chain security.\n\nThe option to configure the audience claim is done in the CI/CD configuration when [declaring the usage of the JWT token](https://docs.gitlab.com/ee/ci/secrets/id_token_authentication.html#id-tokens), if we'll continue from the previous example:\n\n```yaml\njob_name:\n  id_token:\n    MY_JOB_JWT: # or any other variable name\n        aud: \"...\" # mandatory field\n  script:\n    - my-authentication-script.sh MY_JOB_JWT….. 
# use the declared variables in a script\n  ```\n\nConfiguring the audience claim is mandatory for Vault users that leverage the [GitLab/Vault native integration](https://docs.gitlab.com/ee/ci/secrets/#use-vault-secrets-in-a-ci-job) (using the 'secrets:' keyword).\n\n```yaml\njob_name:\n  secrets:\n    VAULT_JWT_1: # or any other variable name\n      id_token:\n        aud: 'devs' # audience claim configuration\n    STAGING_DATABASE_PASSWORD: # VAULT_JWT_1 is the token to be used\n      vault: staging/db/password@ops\n```\n\n### Breaking changes and backward compatibility \nWe understand the increasing demand to secure your software supply chain. We recognize that many of our current users already use the JWT in what will soon be the \"old JWT method\" (V1). To mitigate this conflict, we've decided that moving to the new (OIDC) JWT method is optional until the next major release (GitLab 16.0). To use the new (OIDC) token, users must opt-in to this change from the UI settings and update the pipeline configuration, as explained in the previous sections. Users can continue using the Experiment or the \"old method\" until GitLab 16.0. (At that point, only the \"new\" (OIDC) JWT token and method will be available.)\n\nSeveral breaking changes were announced for both [Vault users](https://docs.gitlab.com/ee/update/deprecations.html#hashicorp-vault-integration-will-no-longer-use-ci_job_jwt-by-default) and [users of the JWT \"old\" methods](https://docs.gitlab.com/ee/update/deprecations.html#old-versions-of-json-web-tokens-are-deprecated). 
Those changes are scheduled for GitLab 16.0.\n\n## Three ways to use the JWT token\nThere are three ways to use a JWT to authenticate against different products in your CI/CD pipeline:\n- The \"old\" method, using the `secrets:` keyword and the `CI_JOB_JWT` variable, which is mainly used to integrate with Hashicorp Vault.\n- An \"Alpha\" version that uses the `CI_JOB_JWT_V2` OIDC token to integrate with different cloud providers.\n- A production-ready OIDC token, which is a secured version of the `CI_JOB_JWT_V2` token, used to authenticate with a variety of different products, like Vault, GCP, AWS, and so on.\n\nAll three methods are available until the next major version (GitLab 16.0). At that point, only the secured OIDC token will be available.\n\nTo prepare for this change, you should:\n\n1. Configure your pipelines to use the fully configurable and more secure [id_token](https://docs.gitlab.com/ee/ci/yaml/index.html#id_tokens) keyword.\n2. Enable the [Limit JSON Web Token (JWT) access setting](https://docs.gitlab.com/ee/ci/secrets/id_token_authentication.html#enable-automatic-id-token-authentication), which prevents the old tokens from being exposed to any jobs. (This setting will be permanently enabled for all projects in GitLab 16.0).\n3. 
If you use GitLab/Hashicorp native integration (using the [secrets:vault](https://docs.gitlab.com/ee/ci/yaml/#secretsvault) keyword), ensure the bound audience is prefixed with `https://`.\n\nThis should ensure a smooth transition to [GitLab 16.0](/upcoming-releases/) without breaking your existing workflows.\n\n\n",[9,835,771,772],{"slug":3405,"featured":6,"template":684},"oidc","content:en-us:blog:oidc.yml","Oidc","en-us/blog/oidc.yml","en-us/blog/oidc",{"_path":3411,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3412,"content":3418,"config":3424,"_id":3426,"_type":13,"title":3427,"_source":15,"_file":3428,"_stem":3429,"_extension":18},"/en-us/blog/pair-gitlab-and-the-good-docs-project-template-to-improve-release-notes",{"title":3413,"description":3414,"ogTitle":3413,"ogDescription":3414,"noIndex":6,"ogImage":3415,"ogUrl":3416,"ogSiteName":669,"ogType":670,"canonicalUrls":3416,"schema":3417},"Pair GitLab and The Good Docs Project template to improve release notes","Creating compelling, detailed, human-readable notes for software releases is important. 
Using GitLab and this template from The Good Docs Project makes it easier.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099541/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_432673748_5xWPNsktdz2QChWhl16jGq_1750099540656.jpg","https://about.gitlab.com/blog/pair-gitlab-and-the-good-docs-project-template-to-improve-release-notes","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Pair GitLab and The Good Docs Project template to improve release notes\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Aaron Peters, Member, Good Docs Project\"}],\n        \"datePublished\": \"2024-01-23\",\n      }",{"title":3413,"description":3414,"authors":3419,"heroImage":3415,"date":3421,"body":3422,"category":724,"tags":3423},[3420],"Aaron Peters, Member, Good Docs Project","2024-01-23","Release notes allow software users to quickly understand the changes that come with the latest version of software. They also allow software publishers to highlight changes as important, or provide crucial information about the impact an upgrade may have. Some tools allow developers to \"generate\" release notes based on sources of data (such as completed items in DevOps systems), but notes produced this way tend to simply list changes without context. Writing release notes, however, provides teams with the opportunity to \"tell the story\" of the changes the new software version will bring.\n\nThough this process certainly requires a greater investment of time than publishing a basic changelog does, your users will certainly appreciate the results: release notes that explain the key elements of the release (such as new features, improvements, and known issues) in a well-organized, human-readable way.\n\n[The Good Docs Project's](https://thegooddocsproject.dev/welcome/) release notes template is designed to help you do exactly that. 
And the combination of GitLab's work management platform and our own [Release Notes template](https://gitlab.com/tgdp/templates/-/tree/main/release-notes?ref_type=heads) makes the job of putting out good, informative release notes easier.\n\n## The anatomy of quality release notes\n\nRelease notes that provide readers with a good picture of the version's changes require two primary inputs:\n\n- **A list of the changes included in the release**\n  At The Good Docs Project, all the management of the work of our contributors occurs in GitLab. So it's easy to refer to our release plans to identify which additions and improvements were completed and included in the release.\n- **A description of those changes including reasoning, importance, and impact**\n  This is where our project's Release Notes template can assist. Rather than staring at a blank page, wondering where to start, users can begin to fill in our template step-by-step, adjusting to taste.\n\nWe'll walk through each of these steps in the following sections as they occurred when creating the release notes to [our recent Dragon release](https://gitlab.com/tgdp/templates/-/releases/v1.1.0).\n\n## Gathering a release's changes\n\nAt The Good Docs Project, we use GitLab features — including setting milestones, creating/assigning issues, and tagging releases — to get our work out into the community (our prior blog post here at GitLab describes this process). The platform allows our worldwide contributor base to easily discover new things to work on and update everyone on their progress once they select something. 
When the time comes to package a release, it brings the added benefit of a tidy list of issues included in the project at the time of release.\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176084/Blog/hxo08y06apkumwuwm80c.png\" alt=\"The Milestone screen in GitLab provides an easy-to-scan list of work included in the release\" width=\"100%\" height=\"auto\">\n\nWhen creating the release notes for our project's Dragon milestone, we reviewed all the items included in the **Closed** column on the Milestone screen. This allowed us to pick the most important changes to highlight, while leaving out issues that wouldn't significantly impact a user's experience.\n\n## Crafting the release notes\n\nEquipped with a list of all the key updates in the release, we start writing the release notes. Our project's [Release Notes template](https://gitlab.com/tgdp/templates/-/blob/main/release-notes/template-release-notes.md?ref_type=heads) provides a ready-made Markdown skeleton comprised of key sections based on our contributors' research and experience. The accompanying [usage guide](https://gitlab.com/tgdp/templates/-/blob/main/release-notes/guide-release-notes.md?ref_type=heads) and [example of the template in action](https://gitlab.com/tgdp/templates/-/blob/main/release-notes/example-release-notes.md?ref_type=heads) provides additional tips and suggestions for writing effective release notes. The latter references our **Chronologue** project, a fictional telescope and application that can see through time, which is naturally well-documented.\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176091/Blog/jcpfxjqb5jpidssm3jlr.png\" alt=\"The Release Notes template comes ready to populate with 'the story' of your latest release\" width=\"100%\" height=\"auto\">\n\nOf course, our template is simply a starting point. 
Teams should always feel free to add sections where they make sense, remove them where they don't, and make the style of it their own. For example, we left out the **Bug fixes** and **Known issues** sections in our latest Dragon release notes, instead focusing on the new additions and improvements this release brought.\n\n## Adding release notes to the release\n\nGitLab's build tools also make it easy to add our notes while actually creating the release. First, we tagged one of our project's commits, then created a release from the tag. On GitLab's **Releases > New** screen, we can copy and paste the Markdown we wrote to automatically format the release notes.\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176096/Blog/otwduhvokgnqclon4ugx.png\" alt=\"Our templates are already in Markdown format, so when it's time to paste them into the release it works automagically!\" width=\"100%\" height=\"auto\">\n\nAnd just like that our release notes are done. With the assistance of the template, they required just an hour to write. And after an additional half-hour of work creating the release, we're ready to send our work out to the community. Our experience using the combination of GitLab and our templates has made the process of shipping our templates a piece of cake.\n\nIf you'd like to check out our templates, feel free to browse [our GitLab project](https://gitlab.com/tgdp).\nOr visit our [community page](https://thegooddocsproject.dev/community/) to learn how to join us in leveling up the state of technical documentation.\n\n*The [GitLab Open Source Partners](https://go.gitlab.com/030Ue3) are building the future of open source on GitLab. 
[Connect with them](https://gitlab.com/gitlab-com/marketing/developer-relations/open-source-program/gitlab-open-source-partners) on Gitlab.com.*\n",[727,478,266,9],{"slug":3425,"featured":90,"template":684},"pair-gitlab-and-the-good-docs-project-template-to-improve-release-notes","content:en-us:blog:pair-gitlab-and-the-good-docs-project-template-to-improve-release-notes.yml","Pair Gitlab And The Good Docs Project Template To Improve Release Notes","en-us/blog/pair-gitlab-and-the-good-docs-project-template-to-improve-release-notes.yml","en-us/blog/pair-gitlab-and-the-good-docs-project-template-to-improve-release-notes",{"_path":3431,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3432,"content":3438,"config":3444,"_id":3446,"_type":13,"title":3447,"_source":15,"_file":3448,"_stem":3449,"_extension":18},"/en-us/blog/parent-child-pipelines",{"title":3433,"description":3434,"ogTitle":3433,"ogDescription":3434,"noIndex":6,"ogImage":3435,"ogUrl":3436,"ogSiteName":669,"ogType":670,"canonicalUrls":3436,"schema":3437},"How to get started with Parent-child pipelines","We introduced improvements to pipelines to help scale applications and their repo structures more effectively. 
Here's how they work.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667040/Blog/Hero%20Images/parent_pipeline_graph.png","https://about.gitlab.com/blog/parent-child-pipelines","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to get started with Parent-child pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chris Ward\"}],\n        \"datePublished\": \"2020-04-24\",\n      }",{"title":3433,"description":3434,"authors":3439,"heroImage":3435,"date":3441,"body":3442,"category":769,"tags":3443},[3440],"Chris Ward","2020-04-24","As applications and their repository structures grow in complexity, a repository `.gitlab-ci.yml` file becomes difficult to manage, collaborate on, and see benefit from. This problem is especially true for the increasingly popular \"[monorepo](https://en.wikipedia.org/wiki/Monorepo)\" pattern, where teams keep code for multiple related services in one repository. Currently, when using this pattern, developers all use the same `.gitlab-ci.yml` file to trigger different automated processes for different application components, likely causing merge conflicts, and productivity slowdown, while teams wait for \"their part\" of a pipeline to run and complete.\n\nTo help large and complex projects manage their automated workflows, we've added two new features to make pipelines even more powerful: Parent-child pipelines, and the ability to generate pipeline configuration files dynamically.\n\n## Meet Parent-child pipelines\n\nSo, how do you solve the pain of many teams collaborating on many inter-related services in the same repository? \nLet me introduce you to [Parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html), released with [GitLab 12.7](/releases/2020/01/22/gitlab-12-7-released/#parent-child-pipelines). 
Splitting complex pipelines into multiple pipelines with a parent-child relationship can improve performance by allowing child pipelines to run concurrently. This relationship also enables you to compartmentalize configuration and visualization into different files and views. \n\n### Creating a child pipeline\n\nYou trigger a child pipeline configuration file from a parent by including it with the `include` key as a parameter to the `trigger` key. You can name the child pipeline file whatever you want, but it still needs to be valid YAML.\n\nThe parent configuration below triggers two further child pipelines that build the Windows and Linux version of a C++ application. \n\n```cpp\n#include \u003Ciostream>\nint main()\n{\n  std::cout \u003C\u003C \"Hello GitLab!\" \u003C\u003C std::endl;\n  return 0;\n}\n```\n\nThe setup is a simple one but hopefully illustrates what is possible.\n\n```yaml\nstages:\n  - triggers\n\nbuild_windows:\n  stage: triggers\n  trigger:\n    include: .win-gitlab-ci.yml\n  rules:\n    - changes:\n      - cpp_app/*\n\nbuild_linux:\n  stage: triggers\n  trigger:\n    include: .linux-gitlab-ci.yml\n  rules:\n    - changes:\n      - cpp_app/*\n```\n\nThe important values are the `trigger` keys which define the child configuration file to run, and the parent pipeline continues to run after triggering it. You can use all the normal sub-methods of `include` to use local, remote, or template config files, up to a maximum of three child pipelines.\n\nAnother useful pattern to use for parent-child pipelines is a `rules` key to trigger a child pipeline under certain conditions. 
In the example above, the child pipeline only triggers when changes are made to files in the _cpp_app_ folder.\n\nThe Windows build child pipeline (`.win-gitlab-ci.yml`) has the following configuration, and unless you want to trigger a further child pipeline, it follows a standard configuration format:\n\n```yaml\nimage: gcc\nbuild:\n  stage: build\n  before_script:\n    - apt update && apt-get install -y mingw-w64\n  script:\n    - x86_64-w64-mingw32-g++ cpp_app/hello-gitlab.cpp -o helloGitLab.exe\n  artifacts:\n    paths:\n      - helloGitLab.exe\n```\n\nDon't forget the `-y` argument as part of the `apt-get install` command, or your jobs will be stuck waiting for user input.\n\nThe Linux build child pipeline (`.linux-gitlab-ci.yml`) has the following configuration, and unless you want to trigger a further child pipeline, it follows a standard configuration format:\n\n```yaml\nimage: gcc\nbuild:\n  stage: build\n  script:\n    - g++ cpp_app/hello-gitlab.cpp -o helloGitLab\n  artifacts:\n    paths:\n      - helloGitLab\n```\n\nIn both cases, the child pipeline generates an artifact you can download under the _Job artifacts_ section of the Job result screen.\n\nPush all the files you created to a new branch, and for the pipeline result, you should see the two jobs and their subsequent child jobs.\n\n![Parent-child pipeline result](https://about.gitlab.com/images/blogimages/non-dynamic-pipelines.png){: .shadow.medium.center}\nThe result of a parent-child pipeline\n{: .note.text-center}\n\n## Dynamically generating pipelines\n\nTaking Parent-child pipelines even further, you can also dynamically generate the child configuration files from the parent pipeline. 
Doing so keeps repositories clean of scattered pipeline configuration files and allows you to generate configuration in your application, pass variables to those files, and much more.\n\nLet's start with the parent pipeline configuration file:\n\n```yaml\nstages:\n  - setup\n  - triggers\n\ngenerate-config:\n  stage: setup\n  script:\n    - ./write-config.rb\n    - git status\n    - cat .linux-gitlab-ci.yml\n    - cat .win-gitlab-ci.yml\n  artifacts:\n    paths:\n      - .linux-gitlab-ci.yml\n      - .win-gitlab-ci.yml\n\ntrigger-linux-build:\n  stage: triggers\n  trigger:\n    include:\n      - artifact: .linux-gitlab-ci.yml\n        job: generate-config\n\ntrigger-win-build:\n  stage: triggers\n  trigger:\n    include:\n      - artifact: .win-gitlab-ci.yml\n        job: generate-config\n```\n\nDuring our self-defined `setup` stage the pipeline runs the `write-config.rb` script. For this article, it's a Ruby script that writes the child pipeline config files, but you can use any scripting language. The child pipeline config files are the same as those in the non-dynamic example above. 
We use `artifacts` to save the generated child configuration files for this CI run, making them available for use in the child pipelines stages.\n\nAs the Ruby script is generating YAML, make sure the indentation is correct, or the pipeline jobs will fail.\n\n```ruby\n#!/usr/bin/env ruby\n\nlinux_build = \u003C\u003C~YML\n    image: gcc\n    build:\n        stage: build\n        script:\n            - g++ cpp_app/hello-gitlab.cpp -o helloGitLab\n        artifacts:\n            paths:\n                - helloGitLab\nYML\n\nwin_build = \u003C\u003C~YML\n    image: gcc\n    build:\n        stage: build\n        before_script:\n            - apt update && apt-get install -y mingw-w64\n        script:\n            - x86_64-w64-mingw32-g++ cpp_app/hello-gitlab.cpp -o helloGitLab.exe\n        artifacts:\n            paths:\n                - helloGitLab.exe\nYML\n\nFile.open('.linux-gitlab-ci.yml', 'w'){ |f| f.write(linux_build)}\nFile.open('.win-gitlab-ci.yml', 'w'){ |f| f.write(win_build)}\n```\n\nThen in the `triggers` stage, the parent pipeline runs the generated child pipelines much as in the non-dynamic version of this example but instead using the saved `artifact` files, and the specified `job`.\n\nPush all the files you created to a new branch, and for the pipeline result, you should see the three jobs (with one connecting to the two others) and the subsequent two children.\n\n![Dynamic parent-child pipeline result](https://about.gitlab.com/images/blogimages/dynamic-pipelines.png){: .shadow.medium.center}\nThe result of a dynamic parent-child pipeline\n{: .note.text-center}\n\n## Pipeline flexibility\n\nThis blog post showed some simple examples to give you an idea of what you can now accomplish with pipelines. 
With one parent, multiple children, and the ability to generate configuration dynamically, we hope you find all the tools you need to [build CI/CD workflows](/topics/ci-cd/) you need.\n\nYou can also watch a demo of Parent-child pipelines below:\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/n8KpBSqZNbk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[108,680,1247,9],{"slug":3445,"featured":6,"template":684},"parent-child-pipelines","content:en-us:blog:parent-child-pipelines.yml","Parent Child Pipelines","en-us/blog/parent-child-pipelines.yml","en-us/blog/parent-child-pipelines",{"_path":3451,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3452,"content":3458,"config":3466,"_id":3468,"_type":13,"title":3469,"_source":15,"_file":3470,"_stem":3471,"_extension":18},"/en-us/blog/prevent-secret-leaks-in-source-code-with-gitlab-secret-push-protection",{"title":3453,"description":3454,"ogTitle":3453,"ogDescription":3454,"noIndex":6,"ogImage":3455,"ogUrl":3456,"ogSiteName":669,"ogType":670,"canonicalUrls":3456,"schema":3457},"Prevent secret leaks in source code with GitLab Secret Push Protection","Learn how Secret Push Protection, now generally available, adds to a defense-in-depth detection strategy and decreases the resources needed to remediate secret leaks.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097761/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%286%29_6vL96ttKF8zJLLqfPpvFs_1750097761137.png","https://about.gitlab.com/blog/prevent-secret-leaks-in-source-code-with-gitlab-secret-push-protection","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Prevent secret leaks in source code with GitLab Secret Push Protection\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Amar 
Patel\"},{\"@type\":\"Person\",\"name\":\"Sara Meadzinger\"}],\n        \"datePublished\": \"2024-06-24\",\n      }",{"title":3453,"description":3454,"authors":3459,"heroImage":3455,"date":3462,"body":3463,"category":814,"tags":3464,"updatedDate":3465},[3460,3461],"Amar Patel","Sara Meadzinger","2024-06-24","Secret Push Protection is now generally available for all GitLab Ultimate and GitLab Dedicated customers. [Secret Push Protection](https://docs.gitlab.com/ee/user/application_security/secret_detection/secret_push_protection/) blocks secrets such as keys and API tokens from being pushed to GitLab. The content of each commit is checked for [high-confidence secrets](https://docs.gitlab.com/ee/user/application_security/secret_detection/detected_secrets.html) when pushed to GitLab. If any high-confidence secrets are detected, the push is blocked. By protecting secrets from leaking in the first place, your team can greatly reduce risk and reduce time spent on rotating secrets.\n\n## The risk of leaked secrets\n\nSecrets, such as tokens and API keys, are frequently used by applications to authenticate and provide access to sensitive data. Developers sometimes inadvertently hardcode these secrets, and then push that code into source management systems, like GitLab. Hardcoded secrets stored in plain text are a low-effort, high-value target for malicious actors, as numerous recent high-profile breaches have demonstrated. Secrets do not require any special skills to exploit and many secrets do not automatically expire. Therefore, once a malicious actor has access to a secret, they can continue using it indefinitely to cause data breaches, service disruptions, IP theft, source code theft, and software supply chain compromises. 
Both [Verizon’s annual Data Breach Investigations Report](https://www.verizon.com/business/resources/reports/dbir) and [IBM’s annual Cost of a Data Breach report](https://www.ibm.com/reports/data-breach) have repeatedly reported that compromised credentials, which include secrets, are one of the most frequent and expensive source of breaches. \n\nIBM’s research also indicates that taking a DevSecOps, or shift-left, approach is the most effective way to reduce the average cost of a data breach. Until now, GitLab’s primary secret detection method has been [Pipeline Secret Detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/pipeline/), which scans committed files after they have been pushed to GitLab and identifies secrets that are already leaked. Once a secret has leaked, it should be considered compromised and must be rotated according to the steps outlined by the secret issuer. Remediating detected secrets requires security teams and developers to work closely together to follow the steps outlined by a secret issuer to rotate the leaked secret. It can be a tedious, confusing, and risky process. Utilizing GitLab’s Secret Push Protection feature, you can shift secret detection further left, protect your secrets from leaking in the first place, and reduce the amount of time and energy required to remediate leaks.\n\n## How Secret Push Protection works\nOnce [Secret Push Protection](https://docs.gitlab.com/ee/user/application_security/secret_detection/secret_push_protection/) is enabled on a project, developers are blocked from pushing code to projects that contain any high-confidence secrets. This ensures a performant experience when pushing your code and also results in a lower number of false alerts. **Note:** Here is the [list of high-confidence patterns Secret Push Protection supports](https://docs.gitlab.com/ee/user/application_security/secret_detection/secret_push_protection/detected_secrets.html). 
\n\nWhile we are checking the contents of each commit, we've [excluded](https://docs.gitlab.com/ee/user/application_security/secret_detection/secret_push_protection/#coverage) a number of factors in order to optimize the performance of this workflow. Because of this, we recommend using Secret Push Protection in a layered approach alongside [Pipeline Secret Detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/pipeline). Using both features in tandem maximizes coverage to identify more leaked secrets across the software development lifecycle.\n\n# Get started with Secret Push Protection\n\nWe've put a video playlist together to help you get started on using this feature:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/videoseries?si=kRG65YbljQ-Nu2wa&amp;list=PL05JrBw4t0KoADm-g2vxfyR0m6QLphTv-\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Enable Secret Push Protection\n\nOn GitLab Dedicated and Self-managed, you must allow the use of Secret Push Protection in your instance and then enable it per project. On GitLab.com, you only need to enable it per project.\n\nYou must have at least the Maintainer role to enable push protection for the project.\n\n1. On the left sidebar, select **Search** or **Go to** and find your project.\n1. On the left sidebar, select **Secure > Security configuration**.\n1. Turn on the Secret Push Protection toggle.\n\n![secret push protection - toggle](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097769/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2024-10-18_at_12.24.47_PM_aHR0cHM6_1750097769198.png)\n\n## Skip push protection\n\nIn some instances, when a push is blocked, you might find it necessary to skip Secret Push Protection. For example, a developer may need to commit a placeholder secret for testing. 
You can skip Secret Push Protection via a Git option or commit message, meeting developers in whichever Git client they are using. \n\n## Add exclusions\n\nWe released exclusions, giving you flexibility to exclude certain paths, rules from the default ruleset, or raw values from being scanned, detected, and blocked by push protection. From the Security Configuration page, Maintainers and project Owners can manage push protection exclusion lists within the UI on a per-project basis. \n\n![secret push protection - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097769/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097769199.png)\n\n## Audit events\n\nDisabling Secret Push Protection, or even skipping it altogether, can prove to be costly if not done for the appropriate reasons. We've introduced [audit events](https://docs.gitlab.com/ee/user/compliance/audit_events.html) to help administrators and security teams understand where and how this feature is being used, and to assist in any secrets-related investigations.\n\nWe currently log audit events when Secret Push Protection is: \n\n- enabled/disabled at an instance level\n- enabled/disabled at project level\n- skipped via a push option\n- skipped via a commit message \n\nAnd when an exclusion is:\n- created\n- updated\n- deleted \n\n![secret push protection - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097769/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097769200.png)\n\nThese audit events can be used in conjunction with [audit event streaming](https://docs.gitlab.com/ee/administration/audit_event_streaming/) to manage audit logs in third-party systems (like SIEMs), enabling customers to capture trends such as: how many times push protection is being skipped; which projects frequently bypass push protection; and which secrets are commonly skipped and may need to be excluded moving forward. 
\n\n# Dogfooding Secret Push Protection\n\nWe [dogfood everything](https://about.gitlab.com/handbook/engineering/development/principles/#dogfooding) here at GitLab. We've [collaborated](https://gitlab.com/groups/gitlab-org/-/epics/13523) with various teams across the organization to enable this feature across key projects, including our primary GitLab codebase. This process has enabled us to identify and address improvements early in the development process, and it has increased our confidence in the stability, performance, and customer workflows for the release of this feature.\n\n# What's next\n\nYou can help us improve this feature by commenting on [this Secret Push Protection feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/467408). We’ll incorporate your feedback and make [additional improvements](https://gitlab.com/groups/gitlab-org/-/epics/13107) as we continue to add new capabilities to the feature.\n\n> Learn more about the [Secret Push Protection](https://docs.gitlab.com/ee/user/application_security/secret_detection/secret_push_protection/).\n\n# Read more\n\n- [How Secret Detection can proactively revoke leaked credentials](https://about.gitlab.com/blog/how-secret-detection-can-proactively-revoke-leaked-credentials) \n- [How to implement secret management best practices with GitLab](https://about.gitlab.com/the-source/security/how-to-implement-secret-management-best-practices-with-gitlab/)\n- [GitLab native secrets manager to give software supply chain security a boost](https://about.gitlab.com/blog/gitlab-native-secrets-manager-to-give-software-supply-chain-security-a-boost)",[814,9,680,478,678],"2024-10-17",{"slug":3467,"featured":6,"template":684},"prevent-secret-leaks-in-source-code-with-gitlab-secret-push-protection","content:en-us:blog:prevent-secret-leaks-in-source-code-with-gitlab-secret-push-protection.yml","Prevent Secret Leaks In Source Code With Gitlab Secret Push 
Protection","en-us/blog/prevent-secret-leaks-in-source-code-with-gitlab-secret-push-protection.yml","en-us/blog/prevent-secret-leaks-in-source-code-with-gitlab-secret-push-protection",{"_path":3473,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3474,"content":3480,"config":3485,"_id":3487,"_type":13,"title":3488,"_source":15,"_file":3489,"_stem":3490,"_extension":18},"/en-us/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci",{"title":3475,"description":3476,"ogTitle":3475,"ogDescription":3476,"noIndex":6,"ogImage":3477,"ogUrl":3478,"ogSiteName":669,"ogType":670,"canonicalUrls":3478,"schema":3479},"Provision group runners with Google Cloud Platform and GitLab CI","This tutorial will teach you how to set up a new group runner on GitLab.com using Google Cloud Platform in less than 10 minutes.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098300/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_623844718_4E5Fx1Q0DHikigzCsQWhOG_1750098300048.jpg","https://about.gitlab.com/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Provision group runners with Google Cloud Platform and GitLab CI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sarah Matthies\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2024-11-19\",\n      }",{"title":3475,"description":3476,"authors":3481,"heroImage":3477,"date":3482,"body":3483,"category":769,"tags":3484},[1490,831],"2024-11-19","Are you interested in hosting your own servers to run your GitLab CI/CD pipelines but don’t know where to begin? Setting up a GitLab Runner to run your pipelines on your own infrastructure can seem like a daunting task as it requires infrastructure knowledge and the know-how to maintain that infrastructure. 
Typically this process requires the provision of infrastructure, the installation of dependencies, and testing
Copy your project ID from Google Cloud Platform.\n\n![Copy project ID from GCP screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098317131.png)\n\n5. Fill out your Google Cloud project ID and choose a region, zone, and type of machine you want to use.\n\n![Screen to fill out Google Cloud information](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750098317132.png)\n\n6\\. Once this information is filled out, click **Setup instructions**.\n\nRun the bash script provided in Step 1 above.\n\n**Note:** This script was saved to a file called `setup.sh` for ease of use. You may copy this right into your terminal if you are confident in debugging.\n\n![Setup instructions screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750098317134.png)\n\n![Script for GitLab Runner](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750098317135.png)\n\n7\\. Create a `main.tf` file and follow the instructions in GitLab.\n\n**Note:** If you want to use OpenTofu instead of Terraform, you can still copy the code and only have to adjust the Terraform commands for applying the configuration. \n\n![Install and register GitLab Runner screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750098317136.png)\n\nOnce successfully provisioned, you should be see the following:\n\n![GitLab Runner code](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750098317137.png)\n\n8\\. 
If you close the instructions and click the **View runners** button, you will now have a newly provisioned runner present with \"Never contacted\" as its status.\n\n![Newly provisioned runner on screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098317/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098317139.png)\n\n9\\. In any project, add the following `.gitlab-ci.yml`.\n\n```  \nstages:  \n  - greet\n\nhello_job:  \n  stage: greet  \n  tags:  \n    - gcp-runner  \n  script:  \n    - echo \"hello\"  \n```\n\nVolia! You have set up your first GitLab Runner utilizing Google Cloud Platform.\n\n# Next steps\n\nNow that you have provisioned your very own GitLab Runner, consider optimizing it for your specific use case. Some things to consider with your runner moving forward:\n\n- Is the runner I provisioned the right size? Does it need additional resources for my use case? \n- Does the GitLab Runner contain all the dependency my builds need?  \n- How can I store the GitLab Runner as infrastructure as code?\n\n> Make sure to bookmark the [Provisioning runners in Google Cloud documentation](https://docs.gitlab.com/ee/ci/runners/provision_runners_google_cloud.html) for easy reference.\n",[9,478,771,108,1000,1248,230],{"slug":3486,"featured":6,"template":684},"provision-group-runners-with-google-cloud-platform-and-gitlab-ci","content:en-us:blog:provision-group-runners-with-google-cloud-platform-and-gitlab-ci.yml","Provision Group Runners With Google Cloud Platform And Gitlab 
Ci","en-us/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci.yml","en-us/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci",{"_path":3492,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3493,"content":3499,"config":3504,"_id":3506,"_type":13,"title":3507,"_source":15,"_file":3508,"_stem":3509,"_extension":18},"/en-us/blog/publishing-an-astro-site-with-pages",{"title":3494,"description":3495,"ogTitle":3494,"ogDescription":3495,"noIndex":6,"ogImage":3496,"ogUrl":3497,"ogSiteName":669,"ogType":670,"canonicalUrls":3497,"schema":3498},"How to publish your Astro Site with GitLab Pages","Learn how to deploy an Astro Site with GitLab Pages.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682473/Blog/Hero%20Images/shot-by-cerqueira-0o_GEzyargo-unsplash.jpg","https://about.gitlab.com/blog/publishing-an-astro-site-with-pages","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to publish your Astro Site with GitLab Pages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Janis Altherr\"}],\n        \"datePublished\": \"2022-10-24\",\n      }",{"title":3494,"description":3495,"authors":3500,"heroImage":3496,"date":3501,"body":3502,"category":769,"tags":3503},[1529],"2022-10-24","\n\nAstro is an amazing new framework to create content-focused static sites and GitLab Pages is a great way to deploy a site built with Astro. Here's a step-by-step guide on how to build and deploy an Astro Site with GitLab Pages.\n\n## Create the project locally\n\nFirst, create the Astro Project locally using the Astro CLI.\n\nNote: Even though we're offering a [project template](https://gitlab.com/pages/astro),\nwe recommend using the CLI locally to scaffold your project. This ensures you can create your project with the latest defaults.\n\n```shell\nnpm create astro@latest\n```\n\nNow follow the CLI instructions. 
As part of the setup, Astro will create the\nproject folder for you. During the course of the setup Astro will ask whether you'd like to initialize a new Git repository. Answer this with `y` (yes).\n\nOnce the Astro CLI is done scaffolding your project, `cd` into the new folder:\n\n```shell\ncd \u003Cyour-project>\n```\n\n## Configure Astro for GitLab Pages\n\nAstro comes with a few defaults that are incompatible with GitLab Pages. So before continuing, we need to set up a compatible config.\nEdit your `astro.config.mjs` to include the following:\n\n```javascript\n// astro.config.mjs\nimport { defineConfig } from 'astro/config';\n\n// https://astro.build/config\nexport default defineConfig({\n  // GitLab Pages requires exposed files to be located in a folder called \"public\".\n  // So we're instructing Astro to put the static build output in a folder of that name.\n  outDir: 'public',\n\n  // The folder name Astro uses for static files (`public`) is already reserved\n  // for the build output. So in deviation from the defaults we're using a folder\n  // called `static` instead.\n  publicDir: 'static',\n});\n```\n\nWhy are we doing this? GitLab Pages is a way to publish some files in a\nrepository, no matter what build tool you used to generate them. Unlike with\nother deployment tools the exposed files and the source code can live \ntogether in one place. So to ensure you don't accidentally expose sensitive\nfiles we're requiring you to consciously put them into a\nfolder named \"public\".\n\nBy default, Astro uses `public` for something different – the static \nassets. So we have to change that behavior. The above config tells Astro\nthat we'll put the static files in a folder named `static` and want the _output_\nfiles to be put in a folder named, as required, `public`.\n\nAstro already generated that assets folder under the old name while\nscaffolding, so we'll have to rename it. 
Inside your Astro project folder, run:\n\n```shell\nmv public static\n```\n\nDepending on your project configuration, GitLab Pages will deploy your site \nat a URL that follows the format simlar to `https://\u003Cuser-or-group>.gitlab.\nio/\u003Cproject-name>`. If you want to use the default URL, you need to adjust Astro\nto the fact that the site is not mounted at the root path, otherwise it may \nnot load static assets (such as the CSS files) correctly. \n\n[Visit the documentation](https://docs.gitlab.com/ee/user/project/pages/getting_started_part_one.html#gitlab-pages-default-domain-names)\nto find out the URL schema of the project you intend to create, then add the\nfollowing line to your `astro.config.mjs`. (Skip this step if you're creating\na user or group page):\n\n```javascript\n// astro.config.mjs\nexport default defineConfig({\n  // ...\n  base: '/\u003Cproject-name>'\n  // In case the project is owned by a subgroup, use:\n  // base: '/\u003Csubgroup>/\u003Cproject-name>'\n});\n```\n\nAstro [recommends](https://docs.astro.build/en/reference/configuration-reference/#site) \nadding the final site's full URL to generate the sitemap, so add it now to your\n`astro.config.mjs`:\n\n```javascript\n// astro.config.mjs\nexport default defineConfig({\n  // ...\n  site: 'https://\u003Cuser-or-group>.gitlab.io'\n  \n  // Note: Instead of specifying both `base` and `site`, you can simply\n  // use the full URL here:\n  // site: 'https://\u003Cuser-or-group>.gitlab.io/\u003Cproject-name>'\n  // or for pages owned by a subgroup:\n  // site: 'https://\u003Cgroup>.gitlab.io/\u003Csubgroup>/\u003Cproject-name>'\n});\n```\n\nNow that you've successfully configured your project, you can commit your\nchanges.\n\n```shell\ngit add -A\ngit commit -m \"Initial commit\"\n```\n\n## Set up the remote repository\n\nYou can't push the code as we have yet to set up the remote repository. Visit\nGitLab and create a new project. 
When asked, select \"Create blank project.\"\n\nIn the setup screen, select \"GitLab Pages\" as the deployment target. Choose the\nvisibility level however you like. This is mainly asking whether your source \ncode is public, although it does affect the initial setting (see \"Making a \nprivate project's site public\" below).\n\nMake sure you unset the checkbox next to \"Initialize repository with a README\",\notherwise GitLab will begin a new Git history that you will have to reconcile\nwith your existing local one.\n\nOnce the Project is set up, follow the instructions on how to add an _existing\nrepository_ – if you don't have an existing remote, so you can just run:\n\n```shell\ngit remote add origin \u003Cgit-project-url>\ngit push -u origin --all\n```\n\nNow you've synced your local code with Gitlab, let's finish publishing it with\nPages.\n\n## Create a Pages pipeline\n\nIn GitLab, go to your project's settings and select Pages. You will be welcomed\nby a screen that helps you build a `.gitlab-ci.yml` file.\n\n![Screenshot: The \"Get stated with Pages\" UI](https://about.gitlab.com/images/blogimages/astro-pages/wizard_step_1.png)\n\nEnter \"node:lts\" as the build image. This will give you the latest node \nenvironment with long-time support.\n\nWe've already configured Astro to output our files in a folder named `public`,\nso you can check the checkbox asking you to confirm this.\n\nOn the next page, enter `npm ci` as the installation step. Running `npm ci` \ninstead of `npm install` is recommended for CI environments such as GitLab\nPipelines as it uses the `package-lock.json` to match the installed version \nwith the one you used during development. See the [npm documentation](https://docs.npmjs.com/cli/v8/commands/npm-ci)\nto learn more about `npm ci`.\n\n![Screenshot: Inputting the installation step](https://about.gitlab.com/images/blogimages/astro-pages/wizard_step_2.png)\n\nOn the last page, enter the build command \"npm run build\". 
Again, click \"next\".\n\n![Screenshot: Inputting the build step](https://about.gitlab.com/images/blogimages/astro-pages/wizard_step_3.png)\n\nNext to the inputs you see the pipeline file that has been built for you. \nThis is the one we want to add to the repository to enable Pages.\n\n![Screenshot: The finished file and the commit step](https://about.gitlab.com/images/blogimages/astro-pages/wizard_step_4.png)\n\nHow does it work in detail? If GitLab sees a job named `pages`, it will \nlook for artifacts inside a root folder `public` and then create a \nGitLab Pages deployment from it.\n\nThe `rules` section ensures the pages deployment is only triggered by \ncommits to the default branch. Every time you push a change to your default \nbranch, Pages will publish the new changes. \n\nIf you're happy with the pipeline, enter a commit message and click \"commit\".\n(Make sure you run `git pull` locally before doing any more changes to \nprevent issues with diverging histories.)\n\nNow having added a commit with a `.gitlab-ci.yml` file, GitLab has kicked off\na pipeline. Visit CI/CD > Pipelines to see the progress. After a couple of \nminutes, you should see the pipeline has succeeded. (If it's showing \"failed\", \nclick on the status button to see the job logs.)\n\n![Screenshot: Pipelines](https://about.gitlab.com/images/blogimages/astro-pages/pipeline_overview.png)\n\nOnce the pipeline has completed, go back to Settings > Pages. You should now see\nthe various settings of your site, including your new site's URL. Click on \nit and, congratulations, you've just deployed your Astro Site wit GitLab \nPages!\n\n![Screenshot: The deployed page](https://about.gitlab.com/images/blogimages/astro-pages/deployed_site.png)\n\n## Making a private project's site public\n\nBy default, a private project's Pages site is only accessible to project \nmembers. 
If you want your source code to be private, but still have a public \nsite, go to Settings/General and expand \"visibility, project features, permissions\", scroll down to \"Pages\" and set \nit to \"Everyone\".\n\n## Keep reading\n\n- [Tutorial: Use the GitLab UI to deploy your static site](https://docs.gitlab.com/ee/user/project/pages/getting_started/pages_ui.html)\n- [Astro Docs: Deploy your Astro Site to GitLab Pages](https://docs.astro.build/en/guides/deploy/gitlab/)\n- [Watch a video on how to create a Pages Pipeline with the Wizard](https://youtu.be/49hgxqPGofw)\n",[9,230,678],{"slug":3505,"featured":6,"template":684},"publishing-an-astro-site-with-pages","content:en-us:blog:publishing-an-astro-site-with-pages.yml","Publishing An Astro Site With Pages","en-us/blog/publishing-an-astro-site-with-pages.yml","en-us/blog/publishing-an-astro-site-with-pages",{"_path":3511,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3512,"content":3517,"config":3523,"_id":3525,"_type":13,"title":3526,"_source":15,"_file":3527,"_stem":3528,"_extension":18},"/en-us/blog/publishing-obsidian-notes-with-gitlab-pages",{"title":3513,"description":3514,"ogTitle":3513,"ogDescription":3514,"noIndex":6,"ogImage":2205,"ogUrl":3515,"ogSiteName":669,"ogType":670,"canonicalUrls":3515,"schema":3516},"Publishing Obsidian.md notes with GitLab Pages","How to publish your Obsidian.md documents to a GitLab Pages site","https://about.gitlab.com/blog/publishing-obsidian-notes-with-gitlab-pages","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Publishing Obsidian.md notes with GitLab Pages\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Scott Hampton\"}],\n        \"datePublished\": \"2022-03-15\",\n      }",{"title":3513,"description":3514,"authors":3518,"heroImage":2205,"date":3520,"body":3521,"category":769,"tags":3522},[3519],"Scott Hampton","2022-03-15","\n\n[Obsidian.md](https://obsidian.md) is a 
\"knowledge base\" application that uses plain text Markdown files stored in a local folder to\norganize your notes. The product has been growing in popularity, partly because of how extensible it is. There are a\nlot of community built plugins to help users configure the application to support their specific workflow. There are\nmany people that use Obsidian to write their blog posts. [Obsidian offers a paid service to publish your notes directly](https://obsidian.md/publish)\nand is completely compatible with features Obsidian offers. I suggest you support the Obsidian developers if their product\nworks for you. If you are looking for an alternative way to publish, this blog post provides a tutorial for how to publish your notes using GitLab\nPages.\n\nYou can find an Obsidian.md example in [this demonstration project](https://gitlab.com/gitlab-org/frontend/playground/obsidian-and-gitlab-pages-demo)\nwhich deploys [a GitLab Pages site](https://gitlab-org.gitlab.io/frontend/playground/obsidian-and-gitlab-pages-demo/). \n\n## What is Obsidian markdown?\n\nObsidian is markdown-based system, which means it incorporates tags, plugins and backlinks to create an easy-to-use system. It makes it possible for you to use symbols inside the text that are interpreted as text formatting. This [link](https://www.markdownguide.org/cheat-sheet/) is a cheat sheet of all the mardown syntax elements.\n\n### Benefits of Obsidian.md\n\nPerhaps the most significant benefit of Obsidian markdown (md) is its simple, straightforward design and the excellent support provided. It is also extensible, with plenty of community plugins available. \n\nThere is no proprietary formatting, encoding. This gives you greater control over how you backup files and manage change tracking.\n\nObsidian doesn't support git right out the box, it requires a community plugin called Obsidian Git. 
However, once the plugin is installed
It touts itself as a [“second brain”](https://obsidian.md/) that is good for creating a knowledge base, markdown file editor and linking notes together. It is designed to take notes quickly and is easy to use, making it an ideal app. You just open the app, create a new note and start typing.\n\nIt works across multiple platforms, including Windows, iOS, Android and Linux.\n\nObsidian has been called the [“most advanced note-taking app.”](https://deskoflawyer.com/secure-note-taking-apps/)\n\n## Setting up Obsidian notes\n\nOnce you download the app, you will see the main Obsidian window, which has the different options on the left, then the folder/files panel and the composition area where you an create and edit your notes.\n\nThere are four icons on the left side: collapse panel, open quick switcher, open graph view, and open markdown importer. The collapse panel shows (or hides) the left panel.When you tap the open quick switcher button, it brings up a text box where you can begin to type. The open graph view shows a graph listing the connections each page has. The open markdown importer lets you import markdown files into Obsidian from other applications.\n\nYou’ll also see three buttons: \n\n1. Open another vault \n2. Help\n3. Settings\n\nThe vault refers to a collection of notes that you can open or create.\n\nYou have the option of either creating a note directly or creating a note via a link. In the former instance, in the folder panel, click on the “new note” button or use the keyboard shortcut for Windows: Control N, or for Mac: Command N. Now you’ve created a new note.\n\nAn interesting time-saving feature is that you can create a note via a link and assign a name to that new note. 
You have to click on the link to actually create it.\n\nYou can find a helpful guide [here](https://www.sitepoint.com/obsidian-beginner-guide/).\n\n## Organizing an Obsidian note using folders\n\nWhen you begin using Obsidian you have to designate where you want to keep your notes. If you already have your notes in markdown format in a folder, you would choose the “open folder as vault” option. Otherwise, you can create a new vault and choose a location to store your notes.\n\nYou can drag and drop notes to move them around. There are three icons at the top pane that allow you to create a new note, make a new folder, or change the sorting order.\n\nObsidian has a powerful search feature that checks the content of your notes and returns all results very quickly. Access it by clicking on the magnifying glass icon at the top to begin a  search of your notes.\n\nYou’ll already be in editor mode by default when you open Obsidian and you can edit your notes or write new ones. All markdown syntax is visible in this mode. Press Ctrl + E to switch to preview mode, and the syntax will disappear and the note will appear formatted.\n\nIf you type a hashtag before a word, Obsidian will detect it and assign it to the note, regardless of where it is in your text.\n\n## Get going with Obsidian.md\n\n[Obsidan.md](https://obsidian.md), at it's core, is an application that helps manage your markdown files. You can download the application\nvia their site and create a \"workspace\" folder when you first start the application. When using the application, all of your notes\nwill be created in the folder you choose as your \"workspace\".\n\n![Obsidian application](https://about.gitlab.com/images/blogimages/obsidian_md.png){: .shadow}\n\n### Workspace file structure\n\nInside your Obsidian workspace, you can have any number of folders and markdown files. 
When you open a folder in Obsidian as your \"workspace\",\nObsidian will automatically add a folder `.obsidian`, which contains your workspace configuration such as application styles and plugins.\nA basic workspace file structure could look something like this:\n\n```\n.\n├── workspace_folder/\n│   └── Other pages/\n│   │   └── Another page.md\n│   └── .obsidian\n│   └── index.md\n```\n\n`index.md`\n```markdown\n# Home\n\nThis is a basic home page, and a link to another page in my documents.\n\nSee [[Another page]] - note that this link uses wikilinks which Obsidian uses to help you easily link to other notes in your workspace.\n```\n\n`Other pages/Another page.md`\n```markdown\n# Another page\n\nThis is another page besides the home page.\n```\n\n## Generating a static site to host your notes\n\nIn order to publish your notes to GitLab Pages, you need to create a static site to show and navigate your notes.\nThere are several open source tools that generate static sites from Markdown documents. After experimenting\nwith a few, I found [MkDocs](https://www.mkdocs.org/) to be the easiest and most compatible with Obsidian.\n\nIf you would like to use MkDocs locally, you can install it with `pip install mkdocs`\n(Python and [pip as package manager](https://pypi.org/project/pip/) are required).\nThis is not necessary, because in this tutorial we'll utilize GitLab CI pipelines to install MkDocs and build our site.\n\nThere are two small steps you need to make in order to get your existing Obsidian notes working with MkDocs.\n\n### File structure\n\nAll files that are not your workspace notes will be created outside of your workspace folder. 
The following folder structure is\nhow this final demo project is going to look.\n\n```\n.\n├── wiki/\n│   └── .obsidian\n│   └── index.md\n├── .gitlab-ci.yml\n├── mkdocs.yml\n└── requirements.txt\n```\n\n - `wiki/` - this is your Obsidian workspace folder\n - `.obsidian` - the application configuration folder Obsidian uses for your workspace. This will not affect the site.\n - `index.md` - MkDocs looks for `index.md` in your workspace folder to use as your site's home page.\n - `.gitlab-ci.yml` - the GitLab CI configuration file used to deploy your site.\n - `mkdocs.yml` - the MkDocs configuration file use to build and customize your site.\n - `requirements.txt` - this file defines the Python package dependencies for MkDocs.\n\n### Basic MkDocs Configuration\n\nYou'll need to create a configuration file `mkdocs.yml` for MkDocs to know how you would like your site to look.\nHere are the first four lines we need to configure our notes.\n\n```yaml\nsite_name: My Obsidian Notes\nsite_url: https://group-name.gitlab.io/repo-name\nsite_dir: public\ndocs_dir: ./wiki\n```\n\n- `site_name` - is what will be used as the main title for the web site.\n- `site_url` - is used as the \"canonical URL\" of the site. You will need to use [the default URL provided by GitLab Pages](https://docs.gitlab.com/ee/user/project/pages/getting_started_part_one.html#gitlab-pages-default-domain-names) or your custom domain here.\n- `site_dir` - GitLab Pages requires HTML source code to be contained in a `public` folder. This setting tells MkDocs to put the generated files in the `public` folder.\n- `docs_dir` - this is the relative path to your workspace folder. I like to name mine `wiki` because it's my personal wikipedia. You can name this folder whatever you want.\n\nWe'll come back to this configuration file later to add more custom styles to your site.\n\n## Configuring GitLab CI\n\nWe need to configure a GitLab CI job to install MkDocs and build the web site based on our Obsidian notes. 
The following\n`.gitlab-ci.yml` file has the basic setup for this:\n\n```yaml\nimage: python:3.8-slim\n\npages:\n  stage: deploy\n  script:\n    # Install all of the python packages for mkdocs\n    - pip install -r requirements.txt\n    # Build the site using mkdocs\n    # --strict aborts the build on any warnings\n    # --verbose enables verbose output so that it's easier to see what mkdocs is doing\n    # neither --strict nor --verbose are necessary if you choose not to use them\n    - mkdocs build --strict --verbose\n  artifacts:\n    paths:\n      - public\n  only:\n    - main\n```\n\nThis job will only run when a change is made to the default branch (`main` in this case).\n\n### Python Packages\n\nNote the line `pip install -r requirements.txt` in the above `.gitlab-ci.yml` file. This line is installing MkDocs and any\nadditional plugins you use to customize your site. You'll need to create a `requirements.txt` file for this script to work:\n\n```text\n# Documentation static site generator & deployment tool\nmkdocs>=1.1.2\n```\n\nWe'll come back to this `requirements.txt` file to add a couple more packages to customize our site later.\n\n## Customizing your site\n\nOne of the benefits of using MkDocs is that it has a lot of extensions you can add on to customize your site. You can\nchange the theme of the site, which adjusts the colors and layout. You can also add extensions that improve how your\nmarkdown notes are displayed and interacted with on the site.\n\n### Theme\n\nMkDocs includes two built-in themes (`mkdocs` and `readthedocs`), [as documented on their website](https://www.mkdocs.org/user-guide/choosing-your-theme/).\nThere are also a lot of [community built themes](https://github.com/mkdocs/mkdocs/wiki/MkDocs-Themes) you can search through and choose to use.\nMy current favorite theme is [Material](https://github.com/mkdocs/mkdocs/wiki/MkDocs-Themes#material-for-mkdocs-). 
You can install it by adding it to our `requirements.txt`
[See documentation here](https://facelessuser.github.io/pymdown-extensions/extensions/highlight/).\n- `pymdownx.superfences` - allows for arbitrary nesting of code and content blocks inside each other. [See documentation here](https://facelessuser.github.io/pymdown-extensions/extensions/superfences/).\n- `pymdownx.details` - allows for creating collapsible content blocks. [See documentation here](https://facelessuser.github.io/pymdown-extensions/extensions/details/).\n- `pymdownx.magiclink` - provides a number of useful link related features such as auto-link HTML and emails. [See documentation here](https://facelessuser.github.io/pymdown-extensions/extensions/magiclink/).\n- `pymdownx.tasklist` - adds support for tasklist syntax. [See documentation here](https://facelessuser.github.io/pymdown-extensions/extensions/tasklist/).\n- `pymdownx.emoji` - adds support for inserting emoji via simple short names enclosed within colons (`:short_name:`). [See documentation here](https://facelessuser.github.io/pymdown-extensions/extensions/emoji/).\n- `admonition` - allows you to create \"callouts\" in your documentation. [See documentation here](https://squidfunk.github.io/mkdocs-material/reference/admonitions/).\n- `toc:permalink` - adds a table of contents to your page based on your markdown document, and ensures each link is a permanent link that can be reused. [See documentation here](https://python-markdown.github.io/extensions/toc/).\n\n### Plugins\n\nMkDocs also has a community of plugins that add more features when building your site. MkDocs includes some plugins by default that you can use in the configuration file, but in order to use community plugins you have to add them to the\n`requirements.txt` file to be installed as packages. 
The following two plugins are ones that I've found useful, but you\ncan look at [the list of community plugins here](https://github.com/mkdocs/mkdocs/wiki/MkDocs-Plugins):\n\n```yaml\nplugins:\n  - search\n  - roamlinks\n```\n\n- `search` - provides a search bar at the top of your site to easily search your documents. [See documentation here](https://www.mkdocs.org/user-guide/configuration/#search).\n- `roamlinks` - adds support for Obsidian's wikilinks feature. [See documentation here](https://github.com/Jackiexiao/mkdocs-roamlinks-plugin).\n\n`requirements.txt`\n```text\n# Wikilinks support\nmkdocs-roamlinks-plugin>=0.1.3\n```\n\nIf installing locally, you can install roamlinks with `pip install mkdocs-roamlinks-plugin`.\n\n## Combining it all together\n\nAfter all of the above work is done, you should have a file structure that looks like this:\n\n```\n.\n├── wiki/\n│   └── .obsidian\n│   └── index.md\n├── .gitlab-ci.yml\n├── mkdocs.yml\n└── requirements.txt\n```\n\nHere are the contents of the three main files that you've been editing:\n\n`.gitlab-ci.yml`\n```yaml\nimage: python:3.8-slim\n\npages:\n  stage: deploy\n  script:\n    - pip install -r requirements.txt\n    - mkdocs build --strict --verbose\n  artifacts:\n    paths:\n      - public\n  only:\n    - main\n```\n\n`mkdocs.yml`\n```yaml\nsite_name: My Obsidian Notes\nsite_url: https://group-name.gitlab.io/repo-name\nsite_dir: public\n\ntheme:\n  name: material\n  palette:\n    scheme: slate\n\n# Extensions\nmarkdown_extensions:\n  - footnotes\n  - attr_list\n  - pymdownx.highlight\n  - pymdownx.superfences\n  - pymdownx.details\n  - pymdownx.magiclink\n  - pymdownx.tasklist\n  - pymdownx.emoji\n  - admonition\n  - toc:\n    permalink: true\n\nplugins:\n  - search\n  - roamlinks\n```\n\n`requirements.txt`\n```text\n# Documentation static site generator & deployment tool\nmkdocs>=1.1.2\n\n# Material theme\nmkdocs-material>=8.1.7\n\n# Wikilinks support\nmkdocs-roamlinks-plugin>=0.1.3\n```\n\nNow that your 
files are all finished, the last step is to push your changes to your GitLab repository and wait for your pipeline\nto finish. Once finished, you can go to [your default domain provided by GitLab](https://docs.gitlab.com/ee/user/project/pages/getting_started_part_one.html#gitlab-pages-default-domain-names) or you can\n[configure GitLab Pages to use a custom domain](https://docs.gitlab.com/ee/administration/pages/index.html).\n\nHere's a screenshot of the demonstration site created in this tutorial:\n\n![Obsidian application](https://about.gitlab.com/images/blogimages/obsidian_mkdocs_site.png){: .shadow}\n\n## Is the Obsidian note-taking secure?\n\nUsers overall believe Obsidian is safe to use. One user said you [maintain full control](https://becomeawritertoday.com/obsidian-review/) over your notes and it provides the ability to encrypt your vault.\n\n[This lawyer](https://deskoflawyer.com/secure-note-taking-apps/) maintains that Obsidian is the most-secure note-taking app available. Others claim there are [no security threats](https://thebusinessblocks.com/is-obsidian-one-of-the-most-secure-and-best-notetaking-apps/) with Obsidian and users don’t have to worry about data being lost or transferred to third parties.\n\nBecause your files are stored on your own computer, this keeps your data safe and private according to another [user](https://www.online-tech-tips.com/computer-tips/how-to-use-obsidian-as-a-personal-wiki-on-your-computer/).\n\n### Where to find more information on Obsidian markdown\n\nYou can find more information in this [Obsidian markdown guide](https://www.markdownguide.org/tools/obsidian/). An Obsidian roadmap is available [here](https://trello.com/b/Psqfqp7I/obsidian-roadmap). 
Of course, you can also go to the [Obsidian website](https://obsidian.md/).
Prior to enabling developers to create workspaces, there are a few prerequisites, such as bringing your own Kubernetes cluster, and installing and configuring the GitLab agent for Kubernetes on it. Additionally, certain configuration steps must be completed on the cluster. You can find detailed instructions for all these steps in [our workspaces prerequisites documentation](https://docs.gitlab.com/ee/user/workspace/configuration.html#prerequisites).
Once the prerequisites are properly configured, developers who hold a Developer role or above within the root group will gain the ability to create workspaces.
Fork [this project](https://gitlab.com/gitlab-da/use-cases/remote-development/example-nodejs-express-app) to the GitLab group for which you have a Developer role or above. \n3. Switch contexts and select `Your work`.\n![Your work](https://about.gitlab.com/images/blogimages/2023-07-10-your-work.png){: .shadow}\n4. Select `Workspaces`.\n5. Select `New workspace`.\n6. Select the project you forked or another project that has a `.devfile.yaml` file at the root of the repository. \n7. Select the [cluster agent](https://docs.gitlab.com/ee/user/workspace/#prerequisites) owned by the group the project belongs to.\n8. In `Time before automatic termination`, enter the number of hours until the workspace automatically terminates. This timeout is a safety measure to prevent a workspace from consuming excessive resources or running indefinitely. \n9. Select `Create workspace`. \n\n![create ws](https://about.gitlab.com/images/blogimages/create_workspace.png){: .shadow}\n\nThe workspace will be deployed to the cluster and might take a few minutes to start. To access the workspace, under Preview, select the workspace link.\n\n![ws list](https://about.gitlab.com/images/blogimages/workspaces_list.png){: .shadow}\n\n## Install dependencies and previewing your application in the workspace\nAfter creating your workspace, the [Web IDE using VS Code](https://docs.gitlab.com/ee/user/workspace/#web-ide) is injected into it, and the repository is cloned to the image. Consequently, you gain immediate access to your code and can commence working on it right away.\n\nYou can now open the terminal, install any missing dependencies, and start the application.\n\n![Terminal](https://about.gitlab.com/images/blogimages/ws-terminal.png){: .shadow}\n\n1. To open the terminal, from the left menu, select `Terminal`, `New Terminal`. \n2. 
Type `npm install` to install the dependencies listed in the [package.json](https://gitlab.com/gitlab-da/use-cases/remote-development/example-nodejs-express-app/-/blob/main/package.json) file.\n3. Type `npm start` to start the application.\n\nThe log will indicate that the application has started on port 3000.\n\n![log](https://about.gitlab.com/images/blogimages/server_log.png){: .shadow}\n\nYou can now access your application by opening the browser and using the workspace URL. Change the number before ‘workspace’ in the URL to the port number on which your application is listening (e.g., 3000). For example, if your workspace URL is `https://\u003Cprefix>-workspace-73241-25728545-rqvpjm.workspaces.gitlab.dev`, and your application is running on port 3000, update `\u003Cprefix>` to 3000 to access your application.\n\n## Make changes to the application and previewing the updated version\nIn the Web IDE, navigate to the `server.js` file, modify the text in line 9. \n\nAfterward, refresh the browser where your application is opened to see the applied changes. \n\n## Commit the change \n1. In the Web IDE click on the merge icon in the activity bar.\n2. Click the line with the `server.js` to view your change side by side.\n3. To stage your change, click the plus icon next to `server.js`.\n4. Type a commit message describing your change.\n5. Click Commit. \n6. Click Sync changes to push the commit to the GitLab server.\n\n  ![commit](https://about.gitlab.com/images/blogimages/commit-stage.png){: .shadow}\n\n## Explore the demo \nExplore further with this [click-through demo of workspaces](https://go.gitlab.com/qtu66q).\n\n## Try out workspaces\nRemote Development workspaces offer a convenient and efficient way to work on projects without the need for local development setups. 
They provide a streamlined workflow and enable developers to focus on writing code rather than dealing with complex environment setups.\n\nBy adopting workspaces, developers can collaborate effectively, improve productivity, and simplify the development process. \n\nGive workspaces a try and revolutionize your remote development experience today!\n\nCover image by \u003Ca href=\"https://unsplash.com/@pankajpatel?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Pankaj Patel\u003C/a> on \u003Ca href=\"https://unsplash.com/photos/_SgRNwAVNKw?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText\">Unsplash\u003C/a>\n{: .note}\n",[9,773,108],{"slug":3543,"featured":6,"template":684},"quick-start-guide-for-gitlab-workspaces","content:en-us:blog:quick-start-guide-for-gitlab-workspaces.yml","Quick Start Guide For Gitlab Workspaces","en-us/blog/quick-start-guide-for-gitlab-workspaces.yml","en-us/blog/quick-start-guide-for-gitlab-workspaces",{"_path":3549,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3550,"content":3556,"config":3561,"_id":3563,"_type":13,"title":3564,"_source":15,"_file":3565,"_stem":3566,"_extension":18},"/en-us/blog/quick-vulnerability-remediation-with-gitlab-advanced-sast-duo-ai",{"title":3551,"description":3552,"ogTitle":3551,"ogDescription":3552,"noIndex":6,"ogImage":3553,"ogUrl":3554,"ogSiteName":669,"ogType":670,"canonicalUrls":3554,"schema":3555},"Quick vulnerability remediation with GitLab Advanced SAST + Duo AI ","Shorten your mean time to remediation by pairing Advanced SAST and artificial intelligence. 
This detailed demo shows you how.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098458/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945_24mPf16vAPHORs3d9y62q_1750098458538.png","https://about.gitlab.com/blog/quick-vulnerability-remediation-with-gitlab-advanced-sast-duo-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Quick vulnerability remediation with GitLab Advanced SAST + Duo AI \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2024-10-22\",\n      }",{"title":3551,"description":3552,"authors":3557,"heroImage":3553,"date":3558,"body":3559,"category":702,"tags":3560},[1767],"2024-10-22","With GitLab 17.4, we’ve made [GitLab Advanced SAST generally available](https://about.gitlab.com/blog/gitlab-advanced-sast-is-now-generally-available/). [GitLab Advanced SAST](https://docs.gitlab.com/ee/user/application_security/sast/gitlab_advanced_sast.html) is a static application security testing scanner designed to discover vulnerabilities by performing cross-function and cross-file taint analysis. By following the paths user inputs take, the analyzer identifies potential points where untrusted data can influence the execution of your application in unsafe ways, ensuring the vulnerabilities are detected even when they span multiple functions and files.\n\nGitLab Advanced SAST can be used together with [GitLab Duo Vulnerability Explanation](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#explaining-a-vulnerability) in order to reduce the mean time to remediation (MTTR). 
This will now run the `gitlab-advanced-sast` job within the test stage of your application along with all the other jobs you have defined.
![Running `gitlab-advanced-sast` job within the test stage of your application]
insights](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098467/Blog/Content%20Images/Blog/Content%20Images/MR_with_vulnerability_insights_aHR0cHM6_1750098466632.png)\n\n\u003Ccenter>\u003Ci>Merge request with vulnerability insights\u003C/i>\u003C/center>\n\n\u003Cbr>\u003C/br> \nVulnerabilities detected within an MR are actionable, meaning they can be dismissed or an issue can be created and populated with relevant vulnerability information.\n\nDismissing an issue saves AppSec teams time, because they can see relevant developer information when reviewing an MR. Creating a confidential issue allows developers and AppSec teams to further collaborate on resolving a vulnerability where a fix is not straightforward. Confidential issues have limited permissions and can be used with confidential merge requests to prevent possible malicious actors from exploiting.\n\nTo further support separation of duties and prevent vulnerable code from making it into production, you can require approval from certain people (for example, the security team) in order to merge vulnerable code.\n\n![GitLab security policies in action](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098467/Blog/Content%20Images/Blog/Content%20Images/security_policies_in_action_aHR0cHM6_1750098466634.png)\n\n\u003Ccenter>\u003Ci>Security policies in action\u003C/i>\u003C/center>\n\n\u003Cbr>\u003C/br>\n\n**Note:** Learn more about Security Policies and how to implement them in the [Security Policy documentation](https://docs.gitlab.com/ee/user/application_security/policies/).\n\n## Manage vulnerabilities in production\n\nWhile preventing vulnerabilities from making it into production is crucial for application security, it is equally as important to manage vulnerabilities in production. 
When security scanners are run on a default or production-level branch, a [vulnerability report](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/) will be populated with the latest vulnerability data which can be used to triage and manage vulnerabilities.\n\n![GitLab Vulnerability Report sorted by Advanced SAST](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098467/Blog/Content%20Images/Blog/Content%20Images/5_aHR0cHM6_1750098466636.png)\n\n\u003Ccenter>\u003Ci>GitLab Vulnerability Report sorted by Advanced SAST\u003C/i>\u003C/center>\n\u003Cbr>\u003C/br>\n\nWhen selecting a vulnerability you get similar vulnerability details as seen in a merge request, making for a single source of truth for developers and AppSec teams.\n\n![Vulnerability page with vulnerability insights](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098467/Blog/Content%20Images/Blog/Content%20Images/Vuln_page_with_vulnerability_insights_aHR0cHM6_1750098466637.png)\n\n\u003Ccenter>\u003Ci>Vulnerability page with vulnerability insights\u003C/i>\u003C/center>\n\n\u003Cbr>\u003C/br>\n\nAppSec teams can triage a vulnerability by changing its status and adding relevant details on the status change. Issues can be created to track the progress of a fix. 
From here, a developer can be assigned.\n\n## Examine vulnerable code flow\n\nFor vulnerabilities detected with Advanced SAST, we can see a \"Code flow\" tab on the Vulnerability page.\n\n![Advanced SAST - image 7](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098467/Blog/Content%20Images/Blog/Content%20Images/7_aHR0cHM6_1750098466638.png)\n\n\u003Ccenter>\u003Ci>GitLab Advanced SAST code flow\u003C/i>\u003C/center>\n\u003Cbr>\u003C/br>\n\nIn this example, you can see that a vulnerability is traced across multiple functions, giving deeper insight into the best practices we should put in place to not only resolve the vulnerability, but prevent similar vulnerabilities in the future.\n\n## Use GitLab Duo Vulnerability Explanation\n\nGitLab Duo can help you mitigate or remediate a vulnerability by using a large language model to:\n\n- Summarize the vulnerability\n- Help developers and security analysts understand the vulnerability\n- Show how the vulnerability can be exploited\n- Provide a suggested remediation or mitigation\n\nTo use Vulnerability Explanation, the following is required:\n\n- GitLab Ultimate subscription\n- GitLab Duo Enterprise seat\n- GitLab Duo must be enabled for your group or instance\n\nFrom the vulnerability report, you can select a SAST vulnerability and go to its Vulnerability page. 
Then the vulnerable code will be processed by Anthropic’s Claude 3 Haiku model, which will provide the following data:
You can [follow along with this demo](https://gitlab.com/gitlab-da/tutorials/security-and-governance/owasp/juice-shop) and see this workflow in action by watching:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/H1S43oM44k0?si=2LYorTjByOHbCAko\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThe detection and remediation workflow is as follows:\n\n- Enable GitLab Advanced SAST and run it on the project’s default branch.\n- Open the Vulnerability Report and sort by **Tool:GitLab Advanced SAST**.\n- Select the **Improper neutralization of special elements in data query logic** vulnerability found in `Basket.ts`.\n- Use the vulnerability code flow to understand the vulnerable paths.\n- Run **Explain this vulnerability** to see exploit information.\n- Run the application locally to attempt exploitation.\n- Change vulnerability status to \"Confirmed\" and provide relevant info.\n- Determine remediation path using all relevant data:\n    - Vulnerability page insights, Code Flow, Vulnerability Explanation results\n- Create a new branch and apply remediation.\n- Run the remediated application locally and try to exploit again.\n- Create a merge request with the fix.\n- Code change will be tested using CI to assure we don’t break the application.\n- Validate and merge MR.\n- Test exploit in deployed environment.\n- Change vulnerability status to \"Resolved\" on the Vulnerability page.\n\n**Note:** There are many ways to triage and remediate vulnerabilities, make sure to follow best practices set by your organization.\n\n# Useful links\n\nTo learn more about GitLab and how you can get started with enhancing your organization’s application security posture, check out the following resources.\n\n* [GitLab Ultimate](https://about.gitlab.com/pricing/ultimate/) \n* [GitLab Duo](https://about.gitlab.com/gitlab-duo/)  \n* [GitLab Security and Compliance 
Solutions](https://about.gitlab.com/solutions/security-compliance/)  \n* [GitLab Software Supply Chain Security Solutions](https://about.gitlab.com/solutions/supply-chain/)  \n* [GitLab Continuous Software Compliance](https://about.gitlab.com/solutions/continuous-software-compliance/)  \n* [JuiceShop Demo Application](https://gitlab.com/gitlab-da/tutorials/security-and-governance/owasp/juice-shop)  \n* [GitLab AppSec documentation](https://docs.gitlab.com/ee/user/application_security/)  \n* [Advanced SAST  documentation](https://docs.gitlab.com/ee/user/application_security/sast/gitlab_advanced_sast.html)  \n* [Explain this Vulnerability documentation](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#explaining-a-vulnerability)  \n* [Code Flow documentation](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#vulnerability-code-flow)  \n* [Security Policy documentation](https://docs.gitlab.com/ee/user/application_security/policies/) \n* [OWASP Juice Shop documentation](https://owasp.org/www-project-juice-shop/)\n",[704,814,9,680,478],{"slug":3562,"featured":90,"template":684},"quick-vulnerability-remediation-with-gitlab-advanced-sast-duo-ai","content:en-us:blog:quick-vulnerability-remediation-with-gitlab-advanced-sast-duo-ai.yml","Quick Vulnerability Remediation With Gitlab Advanced Sast Duo Ai","en-us/blog/quick-vulnerability-remediation-with-gitlab-advanced-sast-duo-ai.yml","en-us/blog/quick-vulnerability-remediation-with-gitlab-advanced-sast-duo-ai",{"_path":3568,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3569,"content":3575,"config":3579,"_id":3581,"_type":13,"title":3582,"_source":15,"_file":3583,"_stem":3584,"_extension":18},"/en-us/blog/quickly-resolve-broken-ci-cd-pipelines-with-ai",{"title":3570,"description":3571,"ogTitle":3570,"ogDescription":3571,"noIndex":6,"ogImage":3572,"ogUrl":3573,"ogSiteName":669,"ogType":670,"canonicalUrls":3573,"schema":3574},"Quickly resolve broken CI/CD pipelines with AI","When 
your CI/CD pipeline fails, it leads to delays, decreased productivity, and stress. AI-powered Root Cause Analysis makes problem-solving faster and smarter.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097355/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2811%29_78Dav6FR9EGjhebHWuBVan_1750097355230.png","https://about.gitlab.com/blog/quickly-resolve-broken-ci-cd-pipelines-with-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Quickly resolve broken CI/CD pipelines with AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2024-12-03\",\n      }",{"title":3570,"description":3571,"authors":3576,"heroImage":3572,"date":2707,"body":3577,"category":702,"tags":3578},[1080],"CI/CD pipelines are the backbone of efficiency in software development. They help teams test, build, and deploy code quickly. But when these pipelines break, everything slows down — deadlines get missed, and developers are left frustrated as they work to fix things and keep projects on track.\n\n![CI/CD pipeline with multiple failed jobs](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097362/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097362772.png)\n\n\u003Ccenter>\u003Ci>CI/CD pipeline with multiple failed jobs\u003C/i>\u003C/center>\u003Cbr>\u003C/br>\n\n**So, why do pipelines break in the first place?** Let’s break it down.\n\n## Reasons for pipeline failures\n\nA pipeline failure occurs when the automated workflow in your [CI/CD pipeline](https://about.gitlab.com/topics/ci-cd/cicd-pipeline/) — a series of steps that can include building, testing, and deploying code — does not execute as expected and ends with an error message. 
This failure can prevent code from being properly built, tested, or deployed, causing delays in software delivery and requiring troubleshooting to resolve. \n\nPipeline failures can happen for a variety of reasons. Some common causes include:\n- Syntax errors: A small mistake in the code, like a missing semicolon or incorrect variable name, can cause the pipeline to fail.\n- Failed tests: Unit or integration tests might fail due to broken code, incorrect configurations, or mismatched dependencies.\n- Misconfigurations: Incorrect pipeline settings or environment configurations can lead to failed builds or deployments.\n\nThere are also more complex issues that add to the challenge:\n- Infrastructure-as-Code ([IaC](https://about.gitlab.com/topics/gitops/infrastructure-as-code/)) issues: Problems in provisioning cloud infrastructure, such as errors in Terraform scripts or CloudFormation templates, can prevent a successful deployment.\n- Kubernetes and GitOps challenges: Misconfigurations in [Kubernetes clusters](https://about.gitlab.com/blog/kubernetes-the-container-orchestration-solution/) or issues with [GitOps](https://about.gitlab.com/topics/gitops/) workflows (e.g., syncing Kubernetes states with Git repositories) can cause pipeline failures that are difficult to diagnose.\n- Long, messy stack traces: When an error occurs deep in the system, stack traces can become long and hard to decipher, especially when they span multiple components or services.\n\nThese challenges make troubleshooting more difficult and time-consuming, as finding the root cause often involves sifting through complex logs, reviewing configuration files, and testing different solutions.\n\n## The real impact of failed pipelines\n\nWhen a pipeline fails, it doesn’t just delay your deployment — it brings stress and frustration. Developers are forced to pause their work and dive into troubleshooting, which often leads to a chain reaction of disruptions. 
This makes it harder to meet deadlines and increases the pressure on the entire team. But why is manual troubleshooting so stressful?\n\n### Manual troubleshooting \n\nThe time it takes to fix a broken pipeline varies. It depends on things like:\n- How well the developer knows the project\n- How experienced they are with similar issues\n- Their overall problem-solving skills\n\nManually digging through logs to figure out what went wrong is a tough and tedious process. Logs can come from all over the place, including application errors and system messages, and they’re often messy and hard to interpret. And on top of that, fixing the pipeline usually requires a lot of jumping back and forth between tasks, adding more time to the process.\n\nThis is where [GitLab Duo](https://about.gitlab.com/gitlab-duo/) comes in. GitLab Duo can sift through all that messy data and spot issues much faster, simplifying the process so you don’t need to be an expert to figure out what went wrong. With AI, fixing your pipelines becomes faster, easier, and much less stressful.\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176104/Blog/zxvvu7p9vc3qpmwl32ya.png\" alt=\"broken pipeline\">\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176108/Blog/bpx6dqilfhltzboyp8k8.png\" alt=\"fix suggestions for broken pipelines\">\n\n## GitLab Duo Root Cause Analysis with generative AI\n\nWhen your CI/CD pipeline breaks, you don’t have to spend hours manually troubleshooting. Enter [GitLab Duo’s Root Cause Analysis (RCA)](https://docs.gitlab.com/ee/user/gitlab_duo/#root-cause-analysis). This AI-powered tool quickly identifies the exact cause of the failure and suggests fixes — right within the DevSecOps platform. 
No matter how long or complicated your stack traces are, RCA analyzes all the data, breaks it down, and gives you clear, actionable insights.\n\n**It tells you exactly what caused the error, provides steps to fix it, and even pinpoints the specific files and lines of code that need attention.** And, to make it even easier, it suggests code fixes to get everything back on track. This makes troubleshooting a lot faster and more straightforward.\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176111/Blog/nmagby9hoksskogve53m.png\" alt=\"root cause of failure\">\n\n\u003Cimg src=\"https://res.cloudinary.com/about-gitlab-com/image/upload/v1752176115/Blog/dndis1cedwbmbnj33q3v.png\" alt=\"example fix\">\n\n## Keep the conversation going with follow-up questions\n\nWith GitLab Duo RCA, you don’t just get answers — you can ask follow-up questions to dig deeper. Want to explore alternative solutions? No problem. You can add [more context](https://docs.gitlab.com/ee/user/gitlab_duo_chat/index.html#the-context-chat-is-aware-of) by referencing other files, issues, or epics in your repo. For example, you could open your `.gitlab-ci.yml` file in the IDE and ask the chat, “Based on this file, and the analyzed CI/CD pipeline, how would you propose to optimize the pipeline?” \n\n## Privacy first – everything stays in GitLab\nOne of the key benefits of GitLab Duo RCA is that it works right out of the box within GitLab. You won’t have to switch tools or go hunting for external help. Plus, your [logs and sensitive data stay secure](https://about.gitlab.com/privacy/) - there’s no need to send them off to external AI solutions. 
RCA is seamlessly integrated within GitLab, offering valuable insights without ever compromising privacy.\n\n![broken pipelines - image 6](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097363/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097362773.png)\n\n## Get started today\n\nWant to see how AI can supercharge your development process, making it smoother and faster? Dive into our GitLab Duo Enterprise product tour below and discover how GitLab Duo’s AI-powered insights can transform every stage of your development journey — from planning and coding to troubleshooting and deployment. Click the image below to start the tour!\n\n[![GitLab Duo Enterprise tour](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097363/Blog/Content%20Images/Blog/Content%20Images/Screenshot_2024-12-02_at_12.41.10_PM_aHR0cHM6_1750097362774.png)](https://gitlab.navattic.com/duo-enterprise)\n\n> [Start a free, 60-day trial of GitLab Duo today!](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/)",[704,478,9,680],{"slug":3580,"featured":6,"template":684},"quickly-resolve-broken-ci-cd-pipelines-with-ai","content:en-us:blog:quickly-resolve-broken-ci-cd-pipelines-with-ai.yml","Quickly Resolve Broken Ci Cd Pipelines With Ai","en-us/blog/quickly-resolve-broken-ci-cd-pipelines-with-ai.yml","en-us/blog/quickly-resolve-broken-ci-cd-pipelines-with-ai",{"_path":3586,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3587,"content":3593,"config":3599,"_id":3601,"_type":13,"title":3602,"_source":15,"_file":3603,"_stem":3604,"_extension":18},"/en-us/blog/rebase-in-real-life",{"title":3588,"description":3589,"ogTitle":3588,"ogDescription":3589,"noIndex":6,"ogImage":3590,"ogUrl":3591,"ogSiteName":669,"ogType":670,"canonicalUrls":3591,"schema":3592},"How to use Git rebase in real life","From fixup to autosquash here are real world ways to leverage Git 
rebase.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682486/Blog/Hero%20Images/rebase-in-real-life.jpg","https://about.gitlab.com/blog/rebase-in-real-life","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use Git rebase in real life\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Toon Claes\"}],\n        \"datePublished\": \"2022-11-08\",\n      }",{"title":3588,"description":3589,"authors":3594,"heroImage":3590,"date":3596,"body":3597,"category":769,"tags":3598},[3595],"Toon Claes","2022-11-08","\n\nMy colleague [Chris](/company/team/#chriscool) recently wrote about [how to take advantage of Git\nrebase](/blog/take-advantage-of-git-rebase/). In this post we'll\nexplain how you can take these techniques, and apply them to daily developer life.\n\n## Fixup\n\nImagine you have created a merge request, and there are some pipeline failures\nand some comments from reviews, and suddenly your [commit history](/blog/keeping-git-commit-history-clean/) looks something\nlike this:\n\n```shell\n$ git log --oneline\n\n8f8ef5af (HEAD -> my-change) More CI fixes\ne4fb7935 Apply suggestion from reviewer\nc1a1bec6 Apply suggestion from reviewer\n673222be Make linter happy\na0c30577 Fix CI failure for X\n5ff160db Implement feature Y\nf68080e3 Implement feature X\n3cdbc201 (origin/main, origin/HEAD, main) Merge branch 'other-change' into 'main'\n...\n```\n\nIn this example there are 2 commits implementing feature X and Y, followed by a\nhandful of commits that aren't useful on their own. We used the fixup feature of\nGit rebase to get rid of them.\n\n### Finding the commit\n\nThe idea of this technique is to integrate the changes of these follow-up\ncommits into the commits that introduced each feature. 
This means for each\nfollow-up commit we need to determine which commit they belong to.\n\nBased on the filename you may already know which commits belong together, but if\nyou don't you can use git-blame to find the commit.\n\n```shell\ngit blame \u003Crevision> -L\u003Cstart>,\u003Cend> \u003Cfilename>\n```\n\nWith the option `-L` we'll specify a range of a line numbers we're interested in.\nHere `\u003Cend>` cannot be omitted, but it can be the same as `\u003Cstart>`. You can\nomit `\u003Crevision>`, but you probably shouldn't because you want to skip over the\ncommits you want to rebase away. Your command will look something like this:\n\n```shell\n$ git blame 5ff160db -L22,22 app/model/user.rb\n\nf68080e3 22) scope :admins, -> { where(admin: true) }\n```\n\nThis tells us line `22` was touched by `f68080e3 Implement feature X`.\n\nNow repeat this step until you know the commit for each of the commits you want\nto rebase out.\n\n### Interactive rebase\n\nThe next step is to start the interactive rebase:\n\n```shell\n$ git rebase -i main\n```\n\nHere you're presented with the list of instructions in your `$EDITOR`:\n\n``` text\npick 8f8ef5af More CI fixes\npick e4fb7935 Apply suggestion from reviewer\npick c1a1bec6 Apply suggestion from reviewer\npick 673222be Make linter happy\npick a0c30577 Fix CI failure for X\npick 5ff160db Implement feature Y\npick f68080e3 Implement feature X\n```\n\nNow you'll need to change these instructions to something like this:\n\n```text\nfixup 8f8ef5af More CI fixes\nfixup e4fb7935 Apply suggestion from reviewer\nfixup 673222be Make linter happy\npick 5ff160db Implement feature Y\nfixup c1a1bec6 Apply suggestion from reviewer\nfixup a0c30577 Fix CI failure for X\npick f68080e3 Implement feature X\n```\n\nAs you can see I've reordered the commits, and I've changed some occurrences of\n`pick` to `fixup`.\n\nThe Git rebase will process this list bottom-to-top. It takes each line with\n`pick` and uses its commit message. 
On each line starting with `fixup` it\nintegrates the changes into the commit below. When you've saved this file and\nclosed your `$EDITOR`, the Git history will look something like this:\n\n```shell\n$ git log --oneline\n\ne880c726 (HEAD -> my-change) Implement feature Y\ne088ea06 Implement feature X\n3cdbc201 (origin/main, origin/HEAD, main) Merge branch 'other-change' into 'main'\n...\n```\n\n## Autosquash\n\nUsing autosquash can be an alternative technique to the above. First we'll\nuncommit all the commits we want to get rid of.\n\n```shell\ngit checkout f68080e3\n```\n\nNow all changes only exist in your working tree, and are gone from the commit\nhistory. You can use `git add` or `git add -p` to stage all changes related to\n`e088ea06 Implement feature X`. Instead of running `git commit` or `git commit -m`\nwe'll use the `--fixup` option:\n\n```shell\n$ git commit --fixup e088ea06\n```\n\nNow the history will look something like:\n\n```shell\n$ git log --oneline\n\ne744646b (HEAD -> my-change) fixup! Implement feature X\n5ff160db Implement feature Y\nf68080e3 Implement feature X\n3cdbc201 (origin/main, origin/HEAD, main) Merge branch 'other-change' into 'main'\n...\n```\n\nAll remaining changes should now belong to `5ff160db Implement feature Y` so we\ncan run:\n\n```shell\n$ git add .\n\n$ git commit --fixup 5ff160db\n\n$ git log --oneline\n\n18c0fff9 (HEAD -> my-change) fixup! Implement feature Y\ne744646b fixup! Implement feature X\n5ff160db Implement feature Y\nf68080e3 Implement feature X\n3cdbc201 (origin/main, origin/HEAD, main) Merge branch 'other-change' into 'main'\n...\n```\n\nYou can now review the `fixup!` commits and if you're happy with it, run:\n\n```shell\n$ git rebase -i --autosquash main\n```\n\nYou see we provide the extra option `--autosquash`. This option will look for\n`fixup!` commits and automatically reorder those and set their instruction to\n`fixup`. 
Normally there's nothing for you to be done now, and you can just close\nthe instruction list in your editor. If you type `git log` now you'll see the\n`fixup!` commits are gone.\n\n## Alternatives\n\nFinally, there are some tools that allow you to _absorb_ commits more easily, for\nexample:\n\n* [lib.rs/crates/git-absorb](https://lib.rs/crates/git-absorb)\n* [github.com/MrFlynn/git-absorb](https://github.com/MrFlynn/git-absorb)\n* [gitlab.com/bertoldia/git-absorb](https://gitlab.com/bertoldia/git-absorb)\n* [github.com/tummychow/git-absorb](https://github.com/tummychow/git-absorb)\n* [github.com/torbiak/git-autofixup](https://github.com/torbiak/git-autofixup)\n\n[Cover image](https://unsplash.com/photos/qAShc5SV83M) by [Yung Chang](https://unsplash.com/@yungnoma) on [Unsplash](https://unsplash.com/).\n{: .note}\n",[726,940,9],{"slug":3600,"featured":6,"template":684},"rebase-in-real-life","content:en-us:blog:rebase-in-real-life.yml","Rebase In Real Life","en-us/blog/rebase-in-real-life.yml","en-us/blog/rebase-in-real-life",{"_path":3606,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3607,"content":3612,"config":3616,"_id":3618,"_type":13,"title":3619,"_source":15,"_file":3620,"_stem":3621,"_extension":18},"/en-us/blog/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo",{"title":3608,"description":3609,"ogTitle":3608,"ogDescription":3609,"noIndex":6,"ogImage":931,"ogUrl":3610,"ogSiteName":669,"ogType":670,"canonicalUrls":3610,"schema":3611},"Refactor code into modern languages with AI-powered GitLab Duo ","This detailed tutorial helps developers use AI to modernize code by switching to a new programming language and gain knowledge about new features in the same language.","https://about.gitlab.com/blog/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Refactor code into modern languages with AI-powered 
GitLab Duo \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2024-08-26\",\n      }",{"title":3608,"description":3609,"authors":3613,"heroImage":931,"date":2487,"body":3614,"category":702,"tags":3615},[1612],"Whether you are tasked with modernizing the code base or framework by switching to a new programming language, or you need knowledge about new language features in the same language, AI-powered [GitLab Duo](https://about.gitlab.com/gitlab-duo/) can help. Learn how to approach code refactoring challenges with best practices using examples from the past 20 years of my coding career. \n\nThe prompts and examples in this article are shown in different IDEs: VS Code and JetBrains IDEs (IntelliJ IDEA, PyCharm, and CLion) with the [GitLab Duo extensions/plugins](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/supported_extensions.html) installed. The development environment uses GitLab.com, including updates to Anthropic Claude 3.5 as Large Language Model (LLM) for GitLab Duo [Code Suggestions](https://docs.gitlab.com/ee/user/gitlab_duo/#code-suggestions) and [Chat](https://docs.gitlab.com/ee/user/gitlab_duo/#gitlab-duo-chat). Spoiler: They are even more powerful and efficient.\n\nYou can navigate into each section of the article, or read top-down. 
The source code and challenges with exercises are provided for self-learning, too.\n\n- [Refactor code to modern programming language standards](#refactor-code-to-modern-programming-language-standards)\n    - [Generate Java 7 and refactor to Java 8](#generate-java-7-and-refactor-to-java-8)\n    - [Refactor across C++ standards](#refactor-across-c%2B%2B-standards)\n        - [Migration: Refactor C++03 into C++14](#migration-refactor-c%2B%2B03-into-c%2B%2B14)\n        - [Downgrade: Refactor C++23 to C++11](#downgrade-refactor-c%2B%2B23-to-c%2B%2B11)\n    - [Explain and refactor COBOL](#explain-and-refactor-cobol)\n- [Refactor a language into another language](#refactor-a-language-into-another-language)\n    - [Refactor C to Rust](#refactor-c-to-rust)\n    - [Refactor Perl to Python](#refactor-perl-to-python)\n- [More refactoring hands-on](#more-refactoring-hands-on)\n    - [Refactor JavaScript](#refactor-javascript)\n    - [Refactor Bash to ZSH or SH](#refactor-bash-to-zsh-or-sh)\n    - [More use cases and tutorials](#more-use-cases-and-tutorials)\n- [Key takeaways](#key-takeaways)\n\n## Refactor code to modern programming language standards\n\nThe next sections show how to refactor, explain, and understand the language specific differences in a defined programming language. \n\n### Generate Java 7 and refactor to Java 8 \n\nA customer wanted to know if GitLab Duo supports Java 7. While testing the Code Suggestions and Chat prompts to generate Java 7 specific code, I also thought of migration help. Anthropic Claude 3.5 is capable of understanding the language-specific differences, and so is GitLab Duo.\n\nThe following example shows a prompt to generate Java 7 code with Code Suggestions.\n\n```java\n// Generate a new class for a File Manager. 
Use Java 7 features only.\nimport java.io.*;\nimport java.nio.file.*;\nimport java.util.*;\n\npublic class FileManager {\n    private Path rootDirectory;\n\n    public FileManager(String rootPath) throws IOException {\n        this.rootDirectory = Paths.get(rootPath);\n        if (!Files.exists(rootDirectory)) {\n            Files.createDirectories(rootDirectory);\n        }\n    }\n\n    public void createFile(String fileName, String content) throws IOException {\n        Path filePath = rootDirectory.resolve(fileName);\n        Files.write(filePath, content.getBytes(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);\n    }\n\n    public String readFile(String fileName) throws IOException {\n        Path filePath = rootDirectory.resolve(fileName);\n        return new String(Files.readAllBytes(filePath));\n    }\n\n    public void deleteFile(String fileName) throws IOException {\n        Path filePath = rootDirectory.resolve(fileName);\n        Files.delete(filePath);\n    }\n\n    public List\u003CString> listFiles() throws IOException {\n        List\u003CString> fileList = new ArrayList\u003C>();\n        try (DirectoryStream\u003CPath> stream = Files.newDirectoryStream(rootDirectory)) {\n            for (Path file : stream) {\n                fileList.add(file.getFileName().toString());\n            }\n        }\n        return fileList;\n    }\n\n    public void copyFile(String sourceFileName, String destinationFileName) throws IOException {\n        Path sourcePath = rootDirectory.resolve(sourceFileName);\n        Path destinationPath = rootDirectory.resolve(destinationFileName);\n        Files.copy(sourcePath, destinationPath, StandardCopyOption.REPLACE_EXISTING);\n    }\n\n    public void moveFile(String sourceFileName, String destinationFileName) throws IOException {\n        Path sourcePath = rootDirectory.resolve(sourceFileName);\n        Path destinationPath = rootDirectory.resolve(destinationFileName);\n        Files.move(sourcePath, 
destinationPath, StandardCopyOption.REPLACE_EXISTING);\n    }\n\n    public long getFileSize(String fileName) throws IOException {\n        Path filePath = rootDirectory.resolve(fileName);\n        return Files.size(filePath);\n    }\n\n    public boolean fileExists(String fileName) {\n        Path filePath = rootDirectory.resolve(fileName);\n        return Files.exists(filePath);\n    }\n}\n```\n\nYou can continue with the following steps in your IDE:\n\n1. Learn and understand the generated code: Select the code in the editor (blue color in the screenshot), switch into GitLab Duo Chat and send the `/explain` slash command as prompt.\n\n![Validate the generated code with the `/explain` slash command in GitLab Duo Chat](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/intellij_java7_generate_refactor.png)\n\n2. Refactor the Java 7 code into Java 8: Select the code in the editor, switch into Chat, and send `/refactor using Java 8 features` as refined prompt.\n3. Practice an alternative to refactoring: Create a new file `java8.java` and generate Java 8 specific code using Code Suggestions with the prompt `// Generate a new class for a File Manager. Use Java 8 features only.`.\n4. Continue writing Java 8 code in the same context with AI-powered code completions.\n\nYou can watch all steps in this recording.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/XKRv6uBkD2I\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThe source code is available in the [GitLab Duo Challenge - Generate and refactor Java 7 to 8 project](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-generate-refactor-java-7-to-8).\n\n### Refactor across C++ standards\n\nThe C++ standard has a long history and can be considered stable and mature. 
Depending on the target architecture and platforms, the supported standard depends on the compiler on the host operating system.\n\nIn my career, I needed to support 25 different Linux and Windows distributions in open source projects, and have seen varying compiler support for newer language features and C++ standards. In 2015, we settled on C++11 features, and later never touched based on C++14 or C++17. \n\nRefactoring functions and algorithms to a new C++ standard would have required too much effort, and help explaining the differences was needed. Back then, we researched through books, online search, Stack Overflow, and sometimes tools assistance for refactoring.\n\nThings have changed with AI-powered assistants and GitLab Duo: You can instruct Code Suggestions and Chat to provide source code in a specific C++ standard. Then, it is also possible to compare the different outputs. \n\nThe following example uses a prompt to specifically generate source code for the C++03 standard. It implements a simple config parser for the `.ini` configuration format.\n\n```c++\n/* Create a config parser which can parse an ini format\nUse C++03 standard only\n */\n\n#include \u003Ciostream>\n#include \u003Cfstream>\n#include \u003Cstring>\n#include \u003Cmap>\n#include \u003Csstream>\n\nclass ConfigParser {\nprivate:\n    std::map\u003Cstd::string, std::map\u003Cstd::string, std::string> > config;\n\npublic:\n    ConfigParser() {}\n\n    bool parse(const std::string& filename) {\n        std::ifstream file(filename.c_str());\n        if (!file.is_open()) {\n            return false;\n        }\n\n        std::string line;\n        std::string currentSection;\n\n        while (std::getline(file, line)) {\n            // Trim whitespace\n            size_t first = line.find_first_not_of(\" \\t\");\n            size_t last = line.find_last_not_of(\" \\t\");\n            if (first == std::string::npos) continue; // Skip empty lines\n            line = line.substr(first, (last - 
first + 1));\n\n            if (line[0] == '[' && line[line.length() - 1] == ']') {\n                // This is a section\n                currentSection = line.substr(1, line.length() - 2);\n            } else {\n                // This is a key-value pair\n                size_t delimiterPos = line.find('=');\n                if (delimiterPos != std::string::npos) {\n                    std::string key = line.substr(0, delimiterPos);\n                    std::string value = line.substr(delimiterPos + 1);\n                    config[currentSection][key] = value;\n                }\n            }\n        }\n\n        file.close();\n        return true;\n    }\n\n    std::string getValue(const std::string& section, const std::string& key) const {\n        std::map\u003Cstd::string, std::map\u003Cstd::string, std::string> >::const_iterator sectionIt = config.find(section);\n        if (sectionIt != config.end()) {\n            std::map\u003Cstd::string, std::string>::const_iterator keyIt = sectionIt->second.find(key);\n            if (keyIt != sectionIt->second.end()) {\n                return keyIt->second;\n            }\n        }\n        return \"\";\n    }\n\n    void printConfig() const {\n        std::map\u003Cstd::string, std::map\u003Cstd::string, std::string> >::const_iterator sectionIt;\n        for (sectionIt = config.begin(); sectionIt != config.end(); ++sectionIt) {\n            std::cout \u003C\u003C \"[\" \u003C\u003C sectionIt->first \u003C\u003C \"]\" \u003C\u003C std::endl;\n            std::map\u003Cstd::string, std::string>::const_iterator keyIt;\n            for (keyIt = sectionIt->second.begin(); keyIt != sectionIt->second.end(); ++keyIt) {\n                std::cout \u003C\u003C keyIt->first \u003C\u003C \" = \" \u003C\u003C keyIt->second \u003C\u003C std::endl;\n            }\n            std::cout \u003C\u003C std::endl;\n        }\n    }\n};\n\nint main() {\n    ConfigParser parser;\n    if (parser.parse(\"config.ini\")) {\n        
std::cout \u003C\u003C \"Config file parsed successfully.\" \u003C\u003C std::endl;\n        parser.printConfig();\n\n        std::cout \u003C\u003C \"Value of 'key1' in 'section1': \" \u003C\u003C parser.getValue(\"section1\", \"key1\") \u003C\u003C std::endl;\n    } else {\n        std::cout \u003C\u003C \"Failed to parse config file.\" \u003C\u003C std::endl;\n    }\n    return 0;\n}\n```\n\nThe same prompts can be applied to [explaining code](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#explain-code-in-the-ide), [generating tests](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#write-tests-in-the-ide), and even [refactoring code](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#refactor-code-in-the-ide): `/explain focus on C++03 features`, `/tests for C++03 features`, `/refactor into C++03`. \n\nThe following C++ standards have been tested working with GitLab Duo Code Suggestions:\n\n- C++03\n- C++11\n- C++14\n- C++17\n- C++20\n- C++23\n\nYou can find the source code in the [Challenge - Refactor Cpp Standards Config Parser project](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-refactor-cpp-standards-config-parser). \n\nThere are many different combinations to refactor source code between the C++ standards. The following examples reflect on modernizing the C++ code with version upgrades, but also test a downgrade where ambitious developers wanted to use C++20 but had to use C++11 to support enterprise customer environments.\n\n#### Migration: Refactor C++03 into C++14 \n\nThe goal is to modernize the code, and understand the changes between C++03 and C++14. 
Open the [03.cpp](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-refactor-cpp-standards-config-parser/-/blob/main/03.cpp?ref_type=heads) source code in the IDE, select it, and use the following Chat prompt: \n\n```markdown\n/refactor into C++14 and explain the changes\n```\n\nPotential Chat response: \n\n```markdown\n…\n\nExplanations of the changes:\n\n1. Default constructor: Changed `ConfigParser() {}` to `ConfigParser() = default;` to use the default constructor.\n2. File opening: Removed `.c_str()` when opening the file, as C++11 and later support `std::string` directly.\n3. Lambda functions: Introduced a lambda function trim to handle string trimming, making the code more readable and reusable.\n4. Auto keyword: Used `auto` for type deduction in various places, improving code readability.\n5. Range-based for loops: In the `printConfig` method, replaced traditional for loops with range-based for loops, which are more concise and less error-prone.\n6. Structured bindings: Used structured bindings in the range-based for loops to directly access key-value pairs.\n7. If statements with initializers: In the `getValue` method, used if statements with initializers to combine the find operation and the check in a single line.\n8. String member functions: Used `front()` and `back()` instead of array-style access for the first and last characters of strings.\n9. Uniform initialization: Used uniform initialization `{}` instead of parentheses `()` for object creation.\n10. 
Removed `std::endl`: Replaced `std::endl` with `'\\n'` for better performance, as `std::endl` forces a flush of the output buffer.\n\nThese changes make the code more modern, concise, and potentially more efficient while maintaining the same functionality.\n\n```\n\n![Changes reflected in code](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/duo_refactor_cpp_clion_chat_cpp03_to_14_01.png)\n\n![Changes reflected in code 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675060/Blog/Content%20Images/duo_refactor_cpp_clion_chat_cpp03_to_14_02.png)\n\n#### Downgrade: Refactor C++23 to C++11\n\nSometimes, developers started a new feature proof-of-concept in a modern C++ standard like C++20. The actual supported versions for enterprise customer environments require older compilers with C++11 support. Downgrading the versions come at a cost, and require evaluating which language features are still supported, and which other disadvantages could come across.\n\nOpen the [23.cpp](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-refactor-cpp-standards-config-parser/-/blob/main/23.cpp?ref_type=heads) source code in the IDE, select it, and use the following Chat prompt: \n\n```markdown\n/refactor into C++11, explain the necessary changes and potential problems\n```\n\nPotential Duo Chat response: \n\n```markdown\nThe main changes required to make this code compatible with C++11 are:\n\n1. Remove the C++23 standard comment, as it's no longer applicable.\n2. Replace `std::string::find_first_not_of` and `std::string::find_last_not_of` with C++11 compatible alternatives in the trim function.\n3. Use `const auto&` instead of `auto` for loop variables to avoid unnecessary copying.\n\nPotential problems in the original code:\n\n1. The original code used C++23 features, which might not be widely supported yet.\n2. 
The `trim` function used `std::string::find_first_not_of` and `std::string::find_last_not_of`, which are not available in C++11.\n3. The use of `auto` instead of `const auto&` in some loops could lead to unnecessary copying of complex objects.\n\nThese issues have been addressed in the refactored version. The `trim` function now uses `std::find_if_not` with lambda functions to achieve the same result in a C++11 compatible way. The use of `const auto&` ensures that no unnecessary copying occurs when iterating over the config map.\n```\n\n![The `trim` function now uses `std::find_if_not` with lambda functions to achieve the same result in a C++11 compatible way](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/duo_refactor_cpp_clion_chat_cpp23_to_11_01.png)\n\n![The use of `const auto&` ensures that no unnecessary copying occurs when iterating over the config map](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/duo_refactor_cpp_clion_chat_cpp23_to_11_02.png)\n\n**Async practice**: Test more version refactoring scenarios.\n\n### Explain and refactor COBOL\n\nYou can use GitLab Duo to explain the source code, analyze, fix and refactor for COBOL programs. I have never written nor learned COBOL, and found this helpful [COBOL Programming Course](https://github.com/openmainframeproject/cobol-programming-course) with many examples.\n\nI then asked Chat how to get started with COBOL, create a COBOL program, and compile a COBOL program on macOS.\n\n```markdown\nPlease explain what COBOL is and its syntax\n\nPlease create a COBOL program that shows the first steps\n\nTell me more about the COBOL compiler. Which system do I need? 
Can I do it on my macOS?\n\n```\n\n![Asking GitLab Duo Chat to explain and its syntax](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/vscode_chat_cobol_generate_example.png)\n\nOpen a COBOL program, select the source code, switch to Duo Chat and send the `/explain` prompt to explain purpose and functionality.\n\nYou can also refine the prompts to get more high-level summaries, for example:\n\n```markdown \n/explain like I am five\n```\n\n> Tip: Programming languages share similar algorithms and functionality. For COBOL, Chat offered to explain it using Python, and, therefore, I adjusted future prompts to ask for an explanation in Python.\n\n```markdown\n/explain in a different programming language\n```\n\nYou can also use the `/refactor` slash command prompt in Chat to improve the code quality, fix potential problems, and try to refactor COBOL into Python.\n\n```markdown\n/refactor fix the environment error\n\n/refactor fix potential problems\n\n/refactor into Python\n```\n\nThe [GitLab Duo Coffee Chat - Challenge: Explain and Refactor COBOL programs](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-explain-refactor-cobol-program) recording shows all discussed steps in a practical use case, including how to find a missing period: \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/pwlDmLQMMPo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Refactor a language into another language\n\nModernization and code quality improvements sometimes require the change of a programming language. Similar refactor prompts with GitLab Duo can help speed up the migration process. 
The COBOL example with Python is just one of many requirements in enterprise environments -- let's dive into more use cases.\n\n### Refactor C to Rust \n\nIn early 2024, several programming languages, like C, have been called out for not being memory safe. The recommendations for future projects include [memory safe languages](https://about.gitlab.com/blog/memory-safe-vs-unsafe/) like Rust. But how do you start a migration, and what are the challenges?\n\nLet's try it with a simple example in C. The code was generated using Code Suggestions and should print the basic operating system information, like the name, version, and platform. The C code compiles cross-platform on Windows, Linux, and macOS.\n\n```c\n// Read OS files to identify the platform, name, versions\n// Print them on the terminal\n#include \u003Cstdio.h>\n#include \u003Cstdlib.h>\n#include \u003Cstring.h>\n\n#ifdef _WIN32\n    #include \u003Cwindows.h>\n#elif __APPLE__\n    #include \u003Csys/utsname.h>\n#else\n    #include \u003Csys/utsname.h>\n#endif\n\nvoid get_os_info() {\n    #ifdef _WIN32\n        OSVERSIONINFOEX info;\n        ZeroMemory(&info, sizeof(OSVERSIONINFOEX));\n        info.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);\n        GetVersionEx((OSVERSIONINFO*)&info);\n\n        printf(\"Platform: Windows\\n\");\n        printf(\"Version: %d.%d\\n\", info.dwMajorVersion, info.dwMinorVersion);\n        printf(\"Build: %d\\n\", info.dwBuildNumber);\n    #elif __APPLE__\n        struct utsname sys_info;\n        uname(&sys_info);\n\n        printf(\"Platform: macOS\\n\");\n        printf(\"Name: %s\\n\", sys_info.sysname);\n        printf(\"Version: %s\\n\", sys_info.release);\n    #else\n        struct utsname sys_info;\n        uname(&sys_info);\n\n        printf(\"Platform: %s\\n\", sys_info.sysname);\n        printf(\"Name: %s\\n\", sys_info.nodename);\n        printf(\"Version: %s\\n\", sys_info.release);\n    #endif\n}\n\nint main() {\n    get_os_info();\n    return 0;\n}\n```\n\nOpen 
the source code in [`os.c`](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-refactor-c-to-rust/-/blob/897bf57a14bb7be07d842e7f044f93a61456d611/c/os.c) in JetBrains CLion, for example. Select the source code and use the Chat prompt `/explain` to explain purpose and functionality. Next, use `/refactor` in the Chat prompt to refactor the C code, and then take it one step further: `/refactor into Rust`. \n\nInitialize a new Rust project (Tip: Ask Duo Chat), and copy the generated source code into the `src/main.rs` file. Run `cargo build` to compile the code. \n\n![Initialize a new Rust project, and copy the generated source code into the `src/main.rs` file. Run `cargo build` to compile the code.](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/jetbrains_clion_c_rust.png)\n\nIn the [GitLab Duo Coffee Chat: Challenge - Refactor C into Rust](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-refactor-c-to-rust) recording, you can learn all steps, and additionally, you'll see a compilation error which gets fixed with the help of Chat and `/refactor` slash command. The session also shows how to improve the maintainability of the new Rust code by adding more error handling. \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/nf8g2ucqvkI\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Refactor Perl to Python \n\nThat one script that runs on production servers, does its job, the author left the company ten years ago, and nobody wants to touch it. The problem might also apply to multiple scripts, or even a whole application. 
A decision was made to migrate everything to modern Python 3, with the goal to modernize the code, and understand the changes between Perl and Python.\n\nA customer recently asked in a GitLab Duo workshop whether a direct migration is possible using GitLab Duo. Short answer: Yes, it is. Longer answer: You can use refined Chat prompts to refactor Perl code into Python, similar to other examples in this article.\n\nOpen the `script.pl` source code in IDE, select it, and open Chat.\n\n```perl\n#!/usr/bin/perl\nuse strict;\nuse warnings;\n\nopen my $md_fh, '\u003C', 'file.md' or die \"Could not open file.md: $!\";\n\nmy $l = 0;\nmy $e = 0;\nmy $h = 0;\n\nwhile (my $line = \u003C$md_fh>) {\n  $l++;\n  if ($line =~ /^\\s*$/) {\n    $e++;\n    next;\n  }\n  if ($line =~ /^#+\\s*(.+)/) {\n    print \"$1\\n\";\n    $h++; \n  }\n}\n\nprint \"\\nS:\\n\"; \nprint \"L: $l\\n\";\nprint \"E: $e\\n\"; \nprint \"H: $h\\n\";\n```\n\nYou can use the following prompts to:\n\n1. `/explain` its purpose, and `/refactor` to improve the code.\n2. `/refactor into Python` to get a working Python script.\n\n![Refactor into Python](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/pycharm_duo_refactor_perl_python.png)\n\n> Tip: You can refactor Perl code into more target languages. The [GitLab Duo Coffee Chat: Challenge - Refactor Perl to Python](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-refactor-perl-python) recording shows PHP, Ruby, Rust, Go, Java, VB.NET, C#, and more.\n> \n> If you want to continue using Perl scripts, you can configure [Perl as additional language](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/supported_extensions.html#add-support-for-more-languages) in Duo Code Suggestions. 
Chat already understands Perl and can help with questions and slash command prompts, as you can see in the following recording.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/03HGhxXg9lw\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## More Refactoring Hands-on \n\n### Refactor JavaScript \n\nEddie Jaoude shows how to refactor JavaScript to improve code quality or add functionality in a practical example. \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/mHn8KOzpPNY\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Refactor Bash to ZSH or SH\n\nI have used Bash as a shell for 20 years and most recently switched to ZSH on macOS. This resulted in scripts not working, or unknown errors in my terminal. Another use case for refactoring is shell limitations – some operating systems or Linux/Unix distributions do not provide Bash, only SH, for example, Alpine.\n\n![Refactor shell scripts](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675059/Blog/Content%20Images/intellj_refactor_shell_scripts.png)\n\nThe [GitLab Duo Coffee Chat: Challenge - Refactor Shell Scripts](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/code-challenges/challenge-refactor-shell-scripts) shows an example with a C program that can tail syslog files, and a build script written in Bash. Throughout the challenge, Chat is queried with `/explain` and `/refactor` prompts to improve the code. It is also possible to refactor Bash into POSIX-compliant SH or ZSH. The session concludes with asking Chat to provide five different Shell script implementations, and explain the key summaries. 
\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/mssqYjlKGzU\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### More use cases and tutorials\n\n- [Documentation: GitLab Duo use cases](https://docs.gitlab.com/ee/user/gitlab_duo/use_cases.html)\n- [Tutorial: Top tips for efficient AI-powered code suggestions with GitLab Duo](https://about.gitlab.com/blog/top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo/)\n- [Tutorial: 10 best practices for using AI-powered GitLab Duo Chat](https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat/)\n\n## Key takeaways \n\n1. GitLab Duo provides efficient help with explaining and refactoring code. \n1. You can refactor code between language standards, and ask follow-up questions in Chat.\n1. Code Suggestions prompts can generate specific language standards, and code completion respects the current code context. \n1. Refactoring code into new programming languages helps with longer term migration and modernization plans.\n1. Code can be \"downgraded\" into older system's supported language standards.\n1. GitLab Duo can explain complex code and programming languages with different programming language examples.\n1. The update to Anthropic Claude 3.5 on GitLab.com has improved the quality and speed of Code Suggestions and Chat once again (self-managed upgrade to 17.3 recommended).\n1. 
There are no boundaries except your imagination, and production pain points.\n\nLearn more about efficient Code Suggestions and Chat workflows, and start your AI-powered code refactoring journey with GitLab Duo today!\n\n> [Start your 60-day free trial of GitLab Duo!](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?type=free-trial&toggle=gitlab-duo-pro_)\n",[704,9,835],{"slug":3617,"featured":6,"template":684},"refactor-code-into-modern-languages-with-ai-powered-gitlab-duo","content:en-us:blog:refactor-code-into-modern-languages-with-ai-powered-gitlab-duo.yml","Refactor Code Into Modern Languages With Ai Powered Gitlab Duo","en-us/blog/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo.yml","en-us/blog/refactor-code-into-modern-languages-with-ai-powered-gitlab-duo",{"_path":3623,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3624,"content":3630,"config":3635,"_id":3637,"_type":13,"title":3638,"_source":15,"_file":3639,"_stem":3640,"_extension":18},"/en-us/blog/refactoring-a-ci-cd-template-to-a-ci-cd-component",{"title":3625,"description":3626,"ogTitle":3625,"ogDescription":3626,"noIndex":6,"ogImage":3627,"ogUrl":3628,"ogSiteName":669,"ogType":670,"canonicalUrls":3628,"schema":3629},"Refactoring a CI/CD template to a CI/CD component","CI/CD components are the next generation of CI/CD templates, enhancing pipeline creation and maintenance. 
Learn how to transition from templates to components.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665989/Blog/Hero%20Images/AdobeStock_618473457.jpg","https://about.gitlab.com/blog/refactoring-a-ci-cd-template-to-a-ci-cd-component","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Refactoring a CI/CD template to a CI/CD component\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2024-03-04\",\n      }",{"title":3625,"description":3626,"authors":3631,"heroImage":3627,"date":3632,"body":3633,"category":769,"tags":3634},[1080],"2024-03-04","GitLab recently introduced [CI/CD components](https://docs.gitlab.com/ee/ci/components/) as the next generation of the traditional CI/CD templates, and a novel approach to constructing CI/CD pipelines. CI/CD components offer reusable pipeline configurations that can be customized using input parameters.\n\nAlthough GitLab continues to support templates, they come with certain drawbacks that are addressed by the introduction of components. Therefore, we highly recommend refactoring existing templates into CI/CD components.\n\nThis article will guide you through the steps of converting your current GitLab CI/CD templates into reusable CI/CD components. Prior familiarity with how to create CI/CD components is a prerequisite, which you can learn about in this blog post: [Introducing the GitLab CI/CD Catalog Beta](https://about.gitlab.com/blog/introducing-the-gitlab-ci-cd-catalog-beta/).\n\n## How to convert a template to a component\n\nThese are the steps to convert a CI/CD template to a CI/CD component:\n1. Create a component project if you don’t have one. \n2. Copy your existing templates to the ‘templates’ directory in the component project. \n3. 
For each template, review the jobs listed in it and assess whether you prefer to distribute them across different components or retain some or all within the same component. While it's possible to include multiple jobs in a single component, it's advisable to create components that perform minimal tasks. This approach enhances ease of reuse and flexibility.\n4. Create a new section at the top of the configuration for the input parameters and meta data using the `spec` keyword. \n5. Replace any custom CI/CD variables and any other hard-coded values with [inputs](https://about.gitlab.com/blog/use-inputs-in-includable-files/) to maximize flexibility for consumption. Consider parameterizing elements such as stage, image, job name/job prefix, etc. \n6. Follow the [best practices](https://docs.gitlab.com/ee/ci/components/index.html#best-practices) for components.\n7. Improve the configuration, for example by enabling [merge request pipelines](https://docs.gitlab.com/ee/ci/pipelines/merge_request_pipelines.html) or making it [more efficient](https://docs.gitlab.com/ee/ci/pipelines/pipeline_efficiency.html).\n\nHere is a code example of a job in an existing template:\n\n![existing template](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678076/Blog/Content%20Images/Screenshot_2024-03-03_at_12.05.25.png)\n\nAnd this is the refactored [component code](https://gitlab.com/components/aws/-/blob/main/templates/ec2-deploy-production.yml?ref_type=heads):\n\n![Converted component](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678077/Blog/Content%20Images/Screenshot_2024-03-03_at_12.07.14.png)\n\nOnce your components are ready, you can publish them to the CI/CD catalog so others will be able to find and consume them. 
\n\n## Take a product tour\n\nWe've prepared a brief product tour so you can quickly dive into the CI/CD catalog and see it in action (use the \"Next\" button to progress through the demo).\n\n[![Product tour of CI/CD catalog](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678076/Blog/Content%20Images/Screenshot_2024-03-03_at_12.18.09.png)](https://gitlab.navattic.com/cicd-catalog)\n\n## Additional resources \n\nPlease refer to the official documentation on CI/CD components for more on how to [convert templates to components](https://docs.gitlab.com/ee/ci/components/#convert-a-cicd-template-to-a-component).\n\nYou can explore [an additional practical example](https://docs.gitlab.com/ee/ci/components/examples.html#cicd-component-migration-examples), demonstrating the steps to convert GitLab Go templates to CI/CD components.\n\nThen, you can watch the following video where [Fabio Pitino](https://about.gitlab.com/company/team/#fabiopitino), GitLab Principal Engineer, demonstrates the process of refactoring GitLab AWS templates to CI/CD components.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/dGCPrIAuBmE?si=1vjG_aEziY5jn-YC\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[108,835,9],{"slug":3636,"featured":90,"template":684},"refactoring-a-ci-cd-template-to-a-ci-cd-component","content:en-us:blog:refactoring-a-ci-cd-template-to-a-ci-cd-component.yml","Refactoring A Ci Cd Template To A Ci Cd 
Component","en-us/blog/refactoring-a-ci-cd-template-to-a-ci-cd-component.yml","en-us/blog/refactoring-a-ci-cd-template-to-a-ci-cd-component",{"_path":3642,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3643,"content":3649,"config":3655,"_id":3657,"_type":13,"title":3658,"_source":15,"_file":3659,"_stem":3660,"_extension":18},"/en-us/blog/refactoring-javascript-to-typescript-with-gitlab-duo-workflow",{"title":3644,"description":3645,"ogTitle":3644,"ogDescription":3645,"noIndex":6,"ogImage":3646,"ogUrl":3647,"ogSiteName":669,"ogType":670,"canonicalUrls":3647,"schema":3648},"Refactoring JavaScript to TypeScript with GitLab Duo Workflow","Learn how we used our autonomous AI agent, which sits in your development environment, to convert a real-world JavaScript application to TypeScript.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749660174/Blog/Hero%20Images/Workflow_1800x945.png","https://about.gitlab.com/blog/refactoring-javascript-to-typescript-with-gitlab-duo-workflow","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Refactoring JavaScript to TypeScript with GitLab Duo Workflow\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Frédéric Caplette\"}],\n        \"datePublished\": \"2025-05-22\",\n      }",{"title":3644,"description":3645,"authors":3650,"heroImage":3646,"date":3652,"body":3653,"category":702,"tags":3654},[3651],"Frédéric Caplette","2025-05-22","TypeScript adoption continues to grow, with over 88% of developers reporting they either use or want to use it. Yet, migrating existing JavaScript codebases to TypeScript is often a time-consuming process. Enter [GitLab Duo Workflow](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/): secure, agentic AI that sits right inside your development environment, helping transform high-level tasks into executable workflows. 
In this article, you'll learn how we used Duo Workflow to update Duo Workflow, converting a real-world JavaScript application to TypeScript. We'll also review the technical process and broader implications for development workflows.\n\nThis video walks through visually what you'll read below:\n\n\u003Cdiv style=\"padding:56.25% 0 0 0;position:relative;\">\u003Ciframe src=\"https://player.vimeo.com/video/1085078036?badge=0&amp;autopause=0&amp;player_id=0&amp;app_id=58479\" frameborder=\"0\" allow=\"autoplay; fullscreen; picture-in-picture; clipboard-write; encrypted-media\" style=\"position:absolute;top:0;left:0;width:100%;height:100%;\" title=\"Refactor JavaScript to TypeScript with GitLab Duo Workflow\">\u003C/iframe>\u003C/div>\u003Cscript src=\"https://player.vimeo.com/api/player.js\">\u003C/script>\n\n## The challenge: Refactor JS to TS\n\nWe decided to migrate Duo Workflow client-related logic to TypeScript for better type safety and auto-complete. A JavaScript-to-TypeScript migration involves more than just changing file extensions. It requires:\n\n1. Analyzing existing code patterns to determine appropriate types  \n2. Handling edge cases where type inference is ambiguous  \n3. Ensuring consistency across the codebase  \n4. Managing dependencies and third-party libraries  \n5. Validating that runtime behavior remains unchanged\n\nDoing all of this work manually can be very time consuming and not much fun. Thankfully, managing your projects is easier and more efficient with Duo Workflow – even when the project is Duo Workflow.\n\n## How it works\n\nIf you've used AI coding assistants before, you're likely familiar with their chat-based interfaces (like [GitLab Duo Chat](https://about.gitlab.com/blog/gitlab-duo-chat-now-generally-available/)). These chats can provide code snippets, suggested directory structures, and more. GitLab Duo Workflow fundamentally differs from these in several key ways:\n\n1. 
It's contextually aware of your entire codebase, not just open files or highlighted text. You can also add context from relevant GitLab issues or merge requests.  \n2. It develops and executes a multi-stage plan, allowing for more complete solutions.  \n3. It keeps a human in the loop, ensuring you understand and approve of what is happening at each step of the way.  \n4. It stages changes to your repository, giving you control over what to accept.\n\nDuo Workflow uses an agentic AI approach, powered by Claude 3.5 Sonnet, to understand code semantics while maintaining a deep integration with GitLab's DevOps platform.\n\nLet's walk through a real-world example of refactoring a JavaScript file using GitLab Duo Workflow. In our case, we started with the prompt:\n\n```unset\nI need to refactor a JavaScript file packages/webview_duo_workflow/src/app/stores/workflow.js to TypeScript. Please analyze the code, identify any issues, and convert it to TypeScript with appropriate type definitions.\n```\n\n### Analysis\n\nAfter prompting, Duo Workflow first analyzed the codebase structure:\n\n* Identified that `workflow.js` is a Pinia store implementation  \n* Noted it uses imports from various modules including `constants.ts` (which was already in TypeScript)  \n* Recognized GraphQL queries import exists but the file wasn't accessible\n\nBased on this analysis, Duo Workflow identified specific typing needs:\n\n**Type definitions needed for:**\n\n* Store State interface with precise types:  \n  * `areWorkflowsLoading`: boolean  \n  * `isLoadingWorkflow`: boolean  \n  * `isLoadingRecentWorkflows`: boolean  \n  * `recentWorkflows`: array  \n  * `workflows`: array  \n  * `workflowsPageInfo`: object  \n  * `initialState`: object  \n  * `activeWorkflow`: object with specific shape\n\n**Interface definitions required:**\n\n* Workflow interface with properties:  \n  * `id`: string  \n  * `goal`: string  \n  * `status`: string  \n  * `checkpoint`: object  \n  * `type`: 
string\n\n**Parameter types needed for:**\n\n* GraphQL response types for workflows data  \n* Event handler parameter types  \n* Function parameter types for actions like `getWorkflowById`, `runWorkflow`, etc.\n\nDuo Workflow identified several complex patterns requiring special handling:\n\n* Uses optional chaining extensively  \n* Has nested object destructuring  \n* Contains GraphQL integration  \n* Uses Pinia store patterns with state, getters, and actions  \n* Implements an events mapping system\n\nThe system recognized external type dependencies:\n\n* Pinia store types  \n* GraphQL query response types  \n* Functions from an internal Bridge module were also properly recognized (for `sendRequest`, `sendGraphqlRequest`)\n\nThe final analysis concluded: \"The file will need to be renamed from .js to .ts and appropriate type definitions added while maintaining the existing functionality.\"\n\n### Planning\n\nBased on this thorough analysis, Duo Workflow created a structured plan consisting of specific tasks:\n\n1. **Create a new TypeScript file** `workflow.ts` in the same directory as the original JS file.\n\n2. **Create TypeScript interfaces** for the store state and workflow types at the top of the new file, including:\n\n   * WorkflowState interface for store state  \n   * Workflow interface for workflow object structure  \n   * WorkflowPageInfo interface for pagination info  \n   * WorkflowCheckpoint interface for checkpoint data\n\n3. **Copy and add type annotations** to the store implementation from `workflow.js`, including:\n\n   * State return type  \n   * Getter return types  \n   * Action parameter types  \n   * Action return types\n\n4. 
**Update all imports** in `workflow.ts` to:\n\n   * Add type imports from Pinia  \n   * Update local imports to use .ts extensions where applicable  \n   * Import any required type definitions\n\n### Execution\n\nAfter the plan is complete, we were prompted to  “Approve plan.” Before clicking approve, we reviewed each step and ensured we were comfortable with the plan. After approval, Duo Workflow showed its progress through each step with visual indicators and explanations of what API operations were supporting each task (like \"Supported by: create_file_with_contents\" or \"Supported by: edit\\_file\"). When the work was done, we reviewed the changes before committing. \n\n## What we learned\n\nThis JavaScript-to-TypeScript migration example showcases a significant evolution in AI-assisted development. What makes GitLab Duo Workflow particularly interesting is its approach to:\n\n### Task-oriented programming vs. suggestion-only assistance\n\nUnlike many AI assistants that simply offer code snippets or suggestions, Duo Workflow understands and executes complete tasks. The difference is significant — rather than saying \"here's some TypeScript code you might use,\" it says \"I'll convert this file for you, here's my plan, and here are the changes I'm making.\"\n\n### Contextual understanding of the entire codebase\n\nThe tool demonstrates awareness of project structure, related files (like constants.ts and GraphQL queries), and the relationships between components. This contextual understanding allows for more sophisticated conversions than localized transformations.\n\n### Step-by-step execution with visibility\n\nThe plan-based approach, with clear steps and progress indicators, provides transparency into what would otherwise be a black-box process. This allows developers to understand what the AI is doing and how it's approaching the problem.\n\n> GitLab Duo Workflow is currently available in private beta for GitLab Ultimate customers. 
[Sign up for the waitlist today!](https://about.gitlab.com/gitlab-duo/workflow/)\n\n## Learn more\n\n- [Agentic AI guides and resources](https://about.gitlab.com/blog/agentic-ai-guides-and-resources/)  \n- [GitLab Duo Workflow](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/)  \n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)",[704,9,478,680],{"slug":3656,"featured":90,"template":684},"refactoring-javascript-to-typescript-with-gitlab-duo-workflow","content:en-us:blog:refactoring-javascript-to-typescript-with-gitlab-duo-workflow.yml","Refactoring Javascript To Typescript With Gitlab Duo Workflow","en-us/blog/refactoring-javascript-to-typescript-with-gitlab-duo-workflow.yml","en-us/blog/refactoring-javascript-to-typescript-with-gitlab-duo-workflow",{"_path":3662,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3663,"content":3668,"config":3672,"_id":3674,"_type":13,"title":3675,"_source":15,"_file":3676,"_stem":3677,"_extension":18},"/en-us/blog/remediating-vulnerabilities-with-insights-and-ai",{"title":3664,"description":3665,"ogTitle":3664,"ogDescription":3665,"noIndex":6,"ogImage":1820,"ogUrl":3666,"ogSiteName":669,"ogType":670,"canonicalUrls":3666,"schema":3667},"Remediating vulnerabilities with GitLab's security insights and AI","Learn how to leverage vulnerability insights and the Explain this Vulnerability AI feature to not only resolve a vulnerability, but also understand it.","https://about.gitlab.com/blog/remediating-vulnerabilities-with-insights-and-ai","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Remediating vulnerabilities with GitLab's security insights and AI\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2023-08-31\",\n      
}",{"title":3664,"description":3665,"authors":3669,"heroImage":1820,"date":1308,"body":3670,"category":702,"tags":3671},[1767],"\nWe recently introduced [GitLab Duo](https://about.gitlab.com/gitlab-duo/), a complete suite of AI capabilities to power your DevSecOps workflows. GitLab Duo's AI features not only enable you to write secure code faster, but also enhance productivity by providing helpful explanations and insights into your code. For instance, you can harness the power of AI to prevent security breaches. In this tutorial, we will go over the Explain this Vulnerability AI feature, which is in beta, and how it can be used with vulnerability insights to remediate vulnerabilities.\n\nYou will learn the following:\n* How the Explain this Vulnerability AI feature works\n* Prerequisites for Explain this Vulnerability and other GitLab AI features\n* How GitLab Vulnerability Insights assists in remediation\n* How to remediate a SQL-injection vulnerability using GitLab's vulnerability insights and Explain this Vulnerability\n* Additional GitLab AI capabilities (GitLab Duo currently requires connectivity to access Google large language models (LLMs), however, there are plans to expand these features to limited-connectivity environments)\n\nSee the following video for a quick overview of Vulnerability Insights + AI \"Explain this Vulnerability\". 
\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/1UagZx_CUks\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nYou can also see a detailed walkthrough of [Leveraging GitLab Vulnerability Insights + AI to Remediate a SQL-Injection](https://youtu.be/EJXAIzXNAWQ?feature=shared) in the [Solving a SQL injection using vulnerability insights and AI](#solving-a-sql-injection-using-vulnerability-insights-and-ai) section below.\n\n## What is the Explain this Vulnerability AI feature?\nThe [Explain this Vulnerability](https://docs.gitlab.com/ee/user/ai_features.html#explain-this-vulnerability-in-the-web-ui) feature\nleverages an LLM powered by Google AI to assist you in securing your application by:\n* Summarizing detected vulnerabilities\n* Helping developers and security analysts understand the vulnerability and its implications\n* Showing how a vulnerability can be exploited with detailed example code\n* Providing in-depth solutions to the vulnerability\n* Providing suggested mitigation along with sample code tuned toward your project's programming language\n\nTo begin using Explain this Vulnerability, you must have the following prerequisites configured:\n\n* [GitLab Ultimate](https://about.gitlab.com/pricing/ultimate/) SaaS subscription\n* [Experiment features enabled](https://docs.gitlab.com/ee/user/group/manage.html#enable-experiment-features)\n* [Third-party AI features enabled](https://docs.gitlab.com/ee/user/group/manage.html#enable-third-party-ai-features)\n* Static application security testing ([SAST](https://docs.gitlab.com/ee/user/application_security/sast/)) vulnerability finding in the default branch of a project\n* [Maintainer](https://docs.gitlab.com/ee/user/permissions.html) or greater role in the vulnerable project \n* [SAST scanner](https://docs.gitlab.com/ee/user/application_security/sast/) enabled in the vulnerable project\n* 
An active internet connection\n\nOnce the prerequisites have been configured, to begin using Explain this Vulnerability, perform the following steps:  \n\n1) Navigate to the [Vulnerability Report](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/).  \n2) Find a SAST vulnerability finding.  \n3) Scroll to the bottom of the [vulnerability page](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/).  \n4) Press the **Try it out** button in \"Explain this Vulnerability and how to mitigate it with AI\" section.  \n\n![View of the \"Try it out\" button at bottom of screen](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/ai_explain_this_vulnerability_try_it_out_dialog.png)\n\nOnce you click the button, GitLab will begin to generate the following:\n* **What is the vulnerability?**: Details on the vulnerability and how it may affect your application\n* **How can an attacker take advantage of the vulnerability?**: Commands that a malicious actor can use to exploit the vulnerability\n* **How can the vulnerability be fixed?**: Details on how the vulnerability can be remediated\n* **Example of vulnerable code**: The actual vulnerable code in the language of your application\n* **Example of fixed code**: Code showing a fix that should be applied to remediate the vulnerability in the language of your application\n* **References**: Links providing details relevant to the vulnerability\n* **User rating request**: Allows for user input, which is used to improve the model\n\n![AI response depicting the above list](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/ai_explain_this_vulnerability_results.png)\n\nThis information can be used together with vulnerability insights to resolve the vulnerability. 
Now let's discuss vulnerability insights.\n\n## Vulnerability insights\nVulnerability insights provide detailed information on a vulnerability and how to resolve it. This detailed information\nincludes:\n\n* **Description**: A detailed description of the vulnerability and its implications\n* **Severity**: The severity of the vulnerability based on the [CVSS rating](https://nvd.nist.gov/vuln-metrics/cvss)\n* **Project**: The project where the vulnerability was found\n* **Tool**: The type of scanner that found the vulnerability\n* **Scanner**: The specific name of the scanner that found the vulnerability\n* **Location**: The line of code where the vulnerability is present\n* **Identifiers**: Links that identify and provide additional information on the vulnerability such as the CVE/CWE page\n* **Training**: Security training available from our partners to educate developers on the vulnerability\n* **Solution**: Information on how to remediate the vulnerability\n* **Method**: The [REST API method](https://www.w3schools.in/restful-web-services/rest-methods) used to exploit the vulnerability (dynamic scanners only)\n* **URL**: The URL in which the vulnerability was detected (dynamic scanners only)\n* **Request/response**: The request sent and response received when exploiting the vulnerability (dynamic scanners only)\n\n**Note**: Results may vary depending on the scanner used.\n\nHaving all this information not only allows you to resolve a vulnerability with ease but also enhances your security\nknowledge. All these insights are provided as a single source of truth that both developer and security teams can view and\ntake action on asynchronously.\n\nDevelopers can leverage insights within a merge request (MR). The MR insights show the vulnerabilities in the diff\nbetween a feature branch and the branch you are merging into. 
This allows you to continuously iterate until you have resolved\na vulnerability and then alert security engineers when approval is required, giving developers the power to resolve\nvulnerabilities themselves.\n\n![MR insights sample](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/vulnerability_insights_mr_view.png)\n\nThe security team can leverage insights via the [vulnerability report](https://docs.gitlab.com/ee/user/application_security/vulnerability_report/). The vulnerability report shows vulnerabilities present in the `default` branch, which is typically linked to production. From here, the security team can collaborate on a resolution as well as triage and manage vulnerabilities.\n\n![Vulnerability report sample](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/vulnerability_insights_vulnerability_report.png)\n\n**Note**: Currently, the Explain this Vulnerability feature can only be seen in the Vulnerability Report view. It is currently\nbeing considered for the MR view, see [future iterations under consideration](https://gitlab.com/groups/gitlab-org/-/epics/10284#future-iterations-under-consideration) for more information.\n\n## Solving a SQL injection using vulnerability insights and AI\nBy leveraging both vulnerability insights and Explain this Vulnerability, we have all the resources necessary to\nnot only resolve a vulnerability but also understand it. Let's see how we can use these features to [solve a SQL injection](https://gitlab-de.gitlab.io/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/documentation/anatomy_of_a_vulnerability/). \n\nNow let's go over the steps to remediate a SQL injection. 
You can follow along with the video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/EJXAIzXNAWQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n**Privacy notice**: Explain this Vulnerability only uses `public repos` to train the LLM. Code in private repositories\nis not transferred to the LLM.\n\nI will be using the [Simple Notes project](https://gitlab.com/gitlab-de/tutorials/security-and-governance/devsecops/simply-vulnerable-notes) to showcase this. You can set up DevSecOps within GitLab yourself by going over the following [tutorial](https://gitlab-de.gitlab.io/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/). After you have done so, you can run through the following:\n\n1) Navigate to **Secure > Vulnerability Report**.\n\n2) Sort by **SAST** under **Scanner**.\n\n3) Find and select a SQL injection vulnerability. A SQL injection will be titled something like\n`Improper Neutralization of Special Elements used in an SQL Command ('SQL Injection')`.\n\n4) Examine the vulnerability insights.\n\n* **Description**: Detected possible formatted SQL query.\n* **Location**: File: [notes/db.py:100](https://gitlab.com/gitlab-de/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/-/blob/24ff1847aa70c4d51482fe28f019e3724b399aaf/notes/db.py#L100)\n* **Identifier**: [bandit.B608](https://semgrep.dev/r/gitlab.bandit.B608), [CWE-89](https://cwe.mitre.org/data/definitions/89.html)\n* **Solution**: Use parameterized queries instead.\n* **Training**: [Secure Code Warrior](https://portal.securecodewarrior.com/?utm_source=partner-integration:gitlab&partner_id=gitlab#/contextual-microlearning/web/injection/sql/python/vanilla), [SecureFlag](https://knowledge-base.secureflag.com/vulnerabilities/sql_injection/sql_injection_python.html), and 
[Kontra](https://application.security/gitlab/free-application-security-training/owasp-top-10-sql-injection)\n\n![SQL Injection Walkthrough - Insights](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/vulnerability_insights_vulnerability_report.png)\n\n5) Scroll down to the **Explain this vulnerability and how to mitigate it with AI** section and click the **Try it out** button.\n\n**Privacy notice**: If the **Send code to prompt** radio button is selected, response quality is improved. However, the actual code is\nused in a query to the LLM (even in private repositories).\n\n![SQL Injection Walkthrough - AI \"Try it out\" button](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/ai_explain_this_vulnerability_try_it_out_dialog.png)\n\n6) Examine the provided AI solutions.\n\n![SQL Injection Walkthrough - AI response](https://about.gitlab.com/images/blogimages/2023-08-31-solving-vulnerabilities-with-insights-and-ai/ai_explain_this_vulnerability_results.png)\n\n7) Exploit the vulnerability.\nWe can use the information provided in the **AI response**, the samples in the **vulnerability insight CWE identifier**,\nand the application's [API guide](https://gitlab-de.gitlab.io/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/documentation/api_guide/) to generate a malicious curl command as follows:\n\n```bash\n# A REGULAR API-CALL\n$ curl http://{LOAD_BALANCER_IP}/{APPLICATION_PATH}/api\n\n{\"Note\":\"[(1, 'cat'), (2, 'dog'), (3, 'frog'), (4, 'hog')]\"}\n\n# API CALL PASSING '1 or 1=1' AS SHOWN IN AI RESPONSE AND DETAILED IN IDENTIFIERS\n# NOTE: `1%20or%201%3D1` IS URL ENCODED '1 or 1=1'\n$ curl http://{LOAD_BALANCER_IP}/{APPLICATION_PATH}/api\\?id\\=1%20or%201%3D1\n\n{\"Note\":\"[(1, 'cat'), (2, 'dog'), (3, 'frog'), (4, 'hog'), (5, 'meow'), (6, 'bark'), (7, 'ribbit'), (8, 'grunt')]\"}\n```\n\nThis shows us that we can exploit the SQL injection since we 
exposed data we should not have access to.\nExploiting a vulnerability is not always as simple, so it is important to combine resources as noted above\nto figure out exploitability.\n\n8) Determine a fix.\n\nNow that we know this is a problem within our system, we can use the provided information to create a merge request (MR) to resolve\nand then test the MR in a non-production environment. Reviewing the vulnerability insights and AI response, we know we can solve this\nin a variety of ways. For example, we can:\n  \n* Use parameterized queries rather than directly calling the query\n* Sanitize the input before passing it to the `execute()` method\n\nTo enhance our knowledge, we should read [CWE-89](https://cwe.mitre.org/data/definitions/89.html) provided in the Identifiers.\n\n9) Open the [GitLab WebIDE](https://docs.gitlab.com/ee/user/project/web_ide/) or editor of your choice.\n\n10) Open the vulnerable file and scroll to the affected line of code. We found this using the information provided in the insights.\n\n11) Apply the suggested change by reviewing the vulnerability insights and AI response. 
I changed the following:\n\n```python\ntry:\n  query = \"SELECT id, data FROM notes WHERE (secret IS FALSE AND id = %s)\" % id\n  if admin:\n    query =\"SELECT id, data, secret FROM notes WHERE (id = %s)\" % id\n  # NOT USING A PARAMETERIZED QUERY - SQL INJECTION CAN BE PASSED IN (,id)\n  cur.execute(query)\nexcept Exception as e:\n  note.logger.error(\"Error: cannot select note by id - %s\" % e)\n```\n\nto \n\n```python\ntry:\n  query = \"SELECT id, data FROM notes WHERE (secret IS FALSE AND id = %s)\"\n  if admin:\n    query =\"SELECT id, data, secret FROM notes WHERE (id = %s)\"\n  # USING A PARAMETERIZED QUERY - SQL INJECTION CANNOT BE PASSED IN (,id)\n  cur.execute(query, (id,))\nexcept Exception as e:\n  note.logger.error(\"Error: cannot select note by id - %s\" % e)\n```\n\nWe know this is the solution because parameterized queries as explained do not allow actual SQL\ncommands to be run. Therefore, a SQL injection cannot be passed as the `id`. Adding a parameterized\nquery is easy since it is built into the Python db library we are using.\n\nThere may be multiple solutions to a vulnerability. It is up to the user to decide what is best\nfor their application and workflow. The AI response provides a typical solution, but more can be\nexamined and applied. For example, the AI response said we can add the following:\n\n```python\ncur.execute(query.replace(\"'\", \"''\"))\n```\n\nThis would escape the single quotes in the input, making it safe to pass to the `execute()` method.\nIt is a valid solution with less code required. However, I wanted to restructure my code, so I applied\nanother solution found in the vulnerability insights.\n\n12) Create an MR with the fix. 
In my environment, feature branches are automatically deployed\nto a new environment independent from production so we can test our features before merging them\nto production.\n\n13) Test the change in a non-production environment.\n\nOnce we push the MR, we can see if the vulnerability has been resolved and we can test in a non-production\nenvironment:\n\n```bash\n# A REGULAR API-CALL\n$ curl http://{LOAD_BALANCER_IP}/{NEW_BRANCH_FIXED_APPLICATION_PATH}/api\n\n{\"Note\":\"[(1, 'cat'), (2, 'dog'), (3, 'frog'), (4, 'hog')]\"}\n\n# API CALL PASSING '1 or 1=1' AS SHOWN IN AI RESPONSE AND DETAILED IN IDENTIFIERS\n# NOTE: `1%20or%201%3D1` IS URL ENCODED '1 or 1=1'\n$ curl http://{LOAD_BALANCER_IP}/{NEW_BRANCH_FIXED_APPLICATION_PATH}/api\\?id\\=1%20or%201%3D1\n\n{\"Note\":\"[(1, 'cat')]\"}\n```\n\nWe can see that now the additional query parameters `or 1=1` are ignored and only the first element\nis returned, meaning only the `1` was passed. We can further test if we can get item `5` which we should\nnot have access to:\n\n```bash\n# API CALL PASSING '5 or 1=1' AS SHOWN IN AI RESPONSE AND DETAILED IN IDENTIFIERS\n# NOTE: `5%20or%201%3D1` IS URL ENCODED '5 or 1=1'\n$ curl http://{LOAD_BALANCER_IP}/{NEW_BRANCH_FIXED_APPLICATION_PATH}/api\\?id\\=5%20or%201%3D1\n{\"Note\":\"[]\"}\n```\n\nSuccess, the SQL injection is no longer present!\n\n14) Merge into production.\n\nNow that we know the vulnerability has been resolved we can go ahead and merge our fix! This is how you can use vulnerability insights\nto help resolve your vulnerabilities. If you wish to test all this for yourself, check out the complete [GitLab DevSecOps tutorial](https://gitlab-de.gitlab.io/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/).\n\n## Additional GitLab AI features\nAs we have seen above, Explain this Vulnerability assists you in remediating the vulnerabilities within your\ndefault branch, but that's not the only AI feature GitLab has available! 
Other AI features to enhance your productivity include:\n\n* [Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions.html): Enables you to write code more efficiently by viewing code suggestions as you type\n* [Suggested Reviewers](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/#suggested-reviewers): Helps you receive faster and higher-quality reviews by automatically finding the right people to review a merge request\n* [Value Stream Forecasting](https://docs.gitlab.com/ee/user/analytics/value_streams_dashboard.html): Predicts productivity metrics and identifies anomalies across your software development lifecycle\n* [Summarize Issue Comments](https://docs.gitlab.com/ee/user/ai_features.html#summarize-issue-discussions): Quickly gets everyone up to speed on lengthy conversations to ensure you are all on the same page\n* [Summarize Proposed Merge Request Changes](https://docs.gitlab.com/ee/user/ai_features.html#summarize-my-merge-request-review): Helps merge request authors drive alignment and action by efficiently communicating the impact of their changes\n* [Summarize Merge Request Review](https://docs.gitlab.com/ee/user/ai_features.html#summarize-merge-request-changes): Enables better handoffs between authors and reviewers and helps reviewers efficiently understand merge request suggestions\n* [Generate Tests in Merge Requests](https://docs.gitlab.com/ee/user/ai_features.html#generate-suggested-tests-in-merge-requests): Automates repetitive tasks and helps you catch bugs early\n* [GitLab Chat](https://docs.gitlab.com/ee/user/ai_features.html#gitlab-duo-chat): Helps you quickly identify useful information in large volumes of text, such as documentation\n* [Explain this Code](https://docs.gitlab.com/ee/user/ai_features.html#explain-selected-code-in-the-web-ui): Allows you to get up to speed quickly by explaining source code\n\nVisit our [GitLab Duo site](https://about.gitlab.com/gitlab-duo/) to learn more about these 
features, GitLab's mission around AI, and our partnership with Google.\n",[704,814,9],{"slug":3673,"featured":6,"template":684},"remediating-vulnerabilities-with-insights-and-ai","content:en-us:blog:remediating-vulnerabilities-with-insights-and-ai.yml","Remediating Vulnerabilities With Insights And Ai","en-us/blog/remediating-vulnerabilities-with-insights-and-ai.yml","en-us/blog/remediating-vulnerabilities-with-insights-and-ai",{"_path":3679,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3680,"content":3686,"config":3691,"_id":3693,"_type":13,"title":3694,"_source":15,"_file":3695,"_stem":3696,"_extension":18},"/en-us/blog/remote-development-beta",{"title":3681,"description":3682,"ogTitle":3681,"ogDescription":3682,"noIndex":6,"ogImage":3683,"ogUrl":3684,"ogSiteName":669,"ogType":670,"canonicalUrls":3684,"schema":3685},"Behind the scenes of the Remote Development Beta release","Discover the epic journey of GitLab's Remote Development team as they navigate last-minute pivots, adapt, and deliver new features for users worldwide.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679888/Blog/Hero%20Images/remotedevelopment.jpg","https://about.gitlab.com/blog/remote-development-beta","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Behind the scenes of the Remote Development Beta release\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"David O'Regan\"}],\n        \"datePublished\": \"2023-08-16\",\n      }",{"title":3681,"description":3682,"authors":3687,"heroImage":3683,"date":3688,"body":3689,"category":769,"tags":3690},[2332],"2023-08-16","\nIn May 2023, the Create:IDE team faced an epic challenge – to merge the [Remote Development Rails monolith integration branch](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/105783) into the `master` branch of the GitLab Project. 
This was no small ask, as the merge request was of considerable size and complexity. In this blog post, we'll delve into the background, justifications, and process behind this endeavor.\n\nThe merge request titled \"Remote Development feature behind a feature flag\" was initiated by the Create:IDE team, aiming to merge the branch \"remote_dev\" into the \"master\" branch in the Rails monolith GitLab project. The MR contained `4` commits, `258` pipelines, and `143` changes that amounted to a total of `+7243` lines of code added to the codebase.\n\nInitially, the MR was created to reflect the work related to \"Remote Development\" under the \"Category: Remote Development.\" It was primarily intended to have CI pipeline coverage for the integration branch and was not meant for individual review or direct merging. The plan was to merge this code into the master branch via the [\"Remote Development Beta - Review and merge\" Epic](https://gitlab.com/groups/gitlab-org/-/epics/10258).\n\n![SUM](https://about.gitlab.com/images/blogimages/remote-development/SUM.png){: .shadow.medium}\n\n### How the Remote Development project started\nAs a team, we embarked on an ambitious journey to create a greenfield feature: the [Remote Development](https://docs.gitlab.com/ee/user/project/remote_development/) offering at GitLab. This feature had a vast scope, many unknowns, and required solving numerous new problems. To efficiently tackle this task, we decided to work on an integration branch using a [low-ceremony process](https://stackoverflow.com/questions/68092498/what-does-low-ceremony-mean). 
This decision enabled us to develop and release the feature in an impressively short time frame of less than four months.\n\nWorking on an integration branch provided us the flexibility to make significant progress, but it was always intended to eventually break down the work into smaller, iterative MRs that would follow the standard [GitLab review process](https://docs.gitlab.com/ee/development/code_review.html). We had a [detailed plan](https://gitlab.com/gitlab-org/remote-development/gitlab-remote-development-docs/-/blob/main/doc/integration-branch-process.md#master-mr-process-summary) for this process, but we realized that following the original plan would not allow us to meet our goal of releasing the feature in GitLab 16.0.\n\n### Merging the integration branch MR without breaking it up\nDuring the development of the Remote Development feature, our team faced several challenges that led us to adopt a new approach for merging the integration branch into the master. First, as part of our [velocity-based XP/Scrum style process](https://about.gitlab.com/handbook/engineering/development/dev/create/ide/#-remote-development-iteration-planning), we realized that meeting the 16.0 release goal would require us to cut scope. A velocity report, \"[Velocity-based agile planning report](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/118436),\" highlighted that breaking down and reviewing individual MRs would take too long, considering the impending due date and the likelihood of last-minute scope additions.\n\nSecond, we [made the decision](https://gitlab.com/gitlab-org/gitlab/-/issues/398227#note_1361192858) to release workspaces as a **beta feature for public projects** for customers in [GitLab 16.0](/releases/2023/05/22/gitlab-16-0-released/#remote-development-workspaces-available-in-beta-for-public-projects). 
This approach reduced the complexity of the rollout plan and allowed us to get valuable feedback earlier, but required us to enable the feature by default earlier than planned. To align with this decision, we determined that merging the integration branch after review was the best course of action. An announcement was made to explain the change in plan, and we set specific timelines for the review process to ensure smooth coordination.\n\n> Hello Reviewers/Maintainers 👋 We have opened up a Zoom room through all of next week as an easy sync place for us all to collaborate and triage questions. As the MR is quite large, it might be overwhelming to determine where to begin. To help, we will aim to furnish a summary of what we have included, such as two new database tables and a couple of GraphQL/REST APIs. We will also be available through the week in the Zoom room and without it being too prescriptive of a approach, I would suggest we do a sync walkthrough of the MR first and then kick off the reviews.\n\nAddressing the concerns about risk, team members discussed the challenges and potential solutions. While there were apprehensions, we were confident in the overall quality of the feature. A disciplined plan for merging MRs was initially considered, but based on our velocity metrics, it was evident that meeting the public beta release goal required a new strategy.\n\nDespite the deviations from our usual practices, we acknowledged the urgency to deliver the initial release on time. The decision was not taken lightly, and we ensured that the merge had extensive [test coverage](https://docs.gitlab.com/ee/ci/testing/test_coverage_visualization.html) and [feature flags](https://docs.gitlab.com/ee/operations/feature_flags.html) in place to address any potential issues. 
We accepted that some aspects would be overlooked in the initial MR review cycle, but we committed to addressing them in subsequent iterations.\n\n### Keeping the pipeline green and stable for the merge\nTo ensure the successful merge of the integration branch containing the Remote Development feature, our team made significant efforts to keep the pipeline green and stable. As the MR was quite large and contained critical functionality, it was crucial to maintain a high level of quality and reduce the risk of introducing regressions.\n\nTo address these challenges, the team adopted a disciplined approach to [CI/CD](https://about.gitlab.com/topics/ci-cd/). Throughout the development process, CI pipelines were carefully monitored, and any failing tests or issues were promptly addressed. The team conducted rigorous testing and code reviews to identify and fix potential bugs and ensure that the changes did not negatively impact the existing functionality of the codebase.\n\nAdditionally, extensive test coverage was put in place to ensure that the new feature worked as expected and did not cause unintended side effects. The team utilized GitLab's [test coverage visualization](https://docs.gitlab.com/ee/ci/testing/test_coverage_visualization.html) capabilities to track the extent of test coverage and identify areas that required additional testing.\n\n![PIPE](https://about.gitlab.com/images/blogimages/remote-development/PIPE.png){: .shadow.medium}\n\n## The merging process\nAs part of the Remote Development team, we took a strategic approach to the merging process. We identified three categories of follow-up tasks that needed to be addressed after the release:\n\n1. **To-dos:** This category encompassed follow-up issues that required further attention.\n2. **Disabled linting rules:** Any issues related to disabled linting rules were included in this category.\n3. 
**Follow-up from review:** Non-blocking concerns raised during the review process were categorized here.\n\nTo manage this process effectively, we organized these categories into [child epics](https://docs.gitlab.com/ee/user/group/epics/manage_epics.html#multi-level-child-epics) under the main epic representing the merging effort.\n\n1. Child epic for [to-do follow-up issues](https://gitlab.com/groups/gitlab-org/-/epics/10472)\n2. Child epic for [disabled linting rules follow-up issues](https://gitlab.com/groups/gitlab-org/-/epics/10473)\n3. Child epic for [follow-up issues from review](https://gitlab.com/groups/gitlab-org/-/epics/10474)\n\n\n## Reviewer resources\nDuring the integration branch merge process for the Remote Development feature, we ensured a smooth and collaborative review experience for all involved. To facilitate this, we set up the following resources and documented the information in GitLab's issue, epic, and MR reviews for better persistence and traceability:\n\n1. **Dedicated Slack channel:** We had a Slack channel that served as our primary hub for coordinating reviews and resolving any blockers that arose during the process. The discussions, decisions, and important points discussed in this channel were documented in the related GitLab issues and epics. This approach enabled us to maintain a historical record of the conversations to refer back to in the future.\n2. **General Slack channel:** For non-urgent or non-blocking questions and discussions, reviewers could use the general Slack channel. Similar to the dedicated channel, we documented the relevant information from this channel in the corresponding issues and MR reviews in GitLab.\n3. **Addressing urgent issues:** When urgent issues required immediate attention, reviewers could directly address our technical leads [Vishal Tak](https://gitlab.com/vtak) and/or [Chad Woolley](https://gitlab.com/cwoolley-gitlab) in their Slack messages. 
However, we kindly requested that [direct messages were avoided](https://about.gitlab.com/handbook/communication/#avoid-direct-messages) to promote open collaboration. The resolutions to these urgent issues were documented in the corresponding GitLab issues or MR discussions.\n4. **Zoom collaboration room:** The collaborative sessions held in the open Zoom room were not only beneficial for real-time discussions but also for fostering a collaborative environment. After each session, we summarized the key points and decisions made during the meeting in the associated GitLab issue or MR, making sure all important outcomes were captured and accessible to the team.\n\nThroughout the review process, we were committed to maintaining a seamless and well-documented workflow. By capturing all relevant information in GitLab issues, epics, and MR reviews, we ensured that the knowledge was persistently available, and future team members could easily understand the context and decisions made during the integration process.\n\n## Application security review\nDuring the application security review process, we focused on providing a secure and reliable Remote Development feature for our users. Here are the key resources and updates related to the application security review:\n\n1. **Main application security review issue:** The main application security review issue served as the central hub for tracking security-related considerations. You can find the defined process we followed [here](https://about.gitlab.com/handbook/security/security-engineering/application-security/appsec-reviews.html).\n2. **Application security review comment:** The application security review issue contained a comment indicating that the merge was not blocked unless there were severe issues that could impact production. 
\"In order to maintain a smooth merge process, we do not block MRs from being merged unless we identify severe issues that could prevent the feature from going into production, such as S1 or S2 level problems. If you are aware of any design flaws or concerns that might qualify as such issues, please bring them to our attention. We can review them together and address any questions or concerns that arise. Let's work collaboratively to find an approach that works for both parties. 👍\"\n3. **Engineering perspective:** For managing the application security review process from an engineering team perspective, we had a dedicated issue, which is kept confidential for security reasons. \n4. **Security and authentication matters:** All security and authentication concerns pertaining to the Beta release were documented within the [`Remote Development Beta -Auth` epic](https://gitlab.com/groups/gitlab-org/-/epics/10377). As of April 30, 2023, we are delighted to announce that **no known issues or obstacles were found that would impede the merge**. This represents a significant accomplishment, considering the intricate nature of this new feature.\n5. **Initial question raised:** During the application security review, one initial question was raised, and we promptly addressed it. You can track the issue and our response [here](https://gitlab.com/gitlab-org/gitlab/-/issues/409317).\n\n## Database review\nTo ensure the reliability and efficiency of the Remote Development feature, we sought guidance from the database reviewer. Although the team had not conducted a thorough self-review, we were fully prepared to address any blocking issues raised during the review process. 
Our references for the review were:\n\n- [Database review documentation](https://docs.gitlab.com/ee/development/database_review.html)\n- [Database reviewer guidelines](https://docs.gitlab.com/ee/development/database/database_reviewer_guidelines.html)\n\nAs an example, during the database migration review, a discussion arose between [Alper Akgun](https://gitlab.com/a_akgun) and Chad, regarding the efficient ordering of columns in the workspaces table. Alper initially suggested placing integer values at the beginning of the table based on relevant documentation.\n\nChad questioned the benefit of this suggestion, pointing out that the specific integer field, `max_hours_before_termination`, would still be padded with empty bytes even if moved to the front, due to its current position between two text fields.\n\nAlper proposed an alternative approach, emphasizing that organizing variable-sized fields (such as `text`, `varchar`, `arrays`, `json`, `jsonb`) at the end of the table could be sufficient for the workspaces table.\n\nUltimately, Chad took the initiative to implement the changes, moving all variable length fields to the end of the table, and documented the discussion as a comment to address review suggestions.\n\nWith this collaborative effort, the workspaces table was efficiently optimized, and the team gained valuable insights into database column ordering strategies.\n\n![DB](https://about.gitlab.com/images/blogimages/remote-development/DB.png){: .shadow.medium}\n\n## Ruby code review\nDuring the Ruby code review phase, we followed a meticulous approach by conducting a comprehensive self-review of every line of code. Our goal was to ensure the highest code quality and address any potential issues identified by the reviewers effectively.\n\nTo ensure clarity, it's important to clarify that the Ruby code review primarily focused on backend changes and server-side improvements. 
This included optimizing performance, enhancing functionalities, and refining the overall codebase to deliver a seamless user experience.\n\nFor the code review process, we referred to the [Code review documentation](https://docs.gitlab.com/ee/development/code_review.html), a valuable resource that guided us in maintaining industry best practices and adhering to the GitLab community's coding standards.\n\n### Example: Enhance error messages for unavailable features\nAs an example during the code review, we addressed an essential aspect of the workspace method, focusing on how we handle scenarios related to the `remote_development_feature_flag` and the `remote_development` licensed feature. The primary objective was to enhance the error messages presented to users when these features are not available.\n\nInitially, the code employed identical error messages for both cases, making it less clear to users whether the issue was due to a missing license or a disabled feature flag. This ambiguity could lead to confusion and hinder the user experience.\n\n#### The suggested improvement\nDuring the review, one of our maintainers, [Peter Leitzen](https://gitlab.com/splattael), raised an important question: \"Are we OK with having only a single error message for both cases (missing license and missing feature flag)?\"\n\nRecognizing the importance of clear communication, Chad proposed enhancing the error messages to provide distinct descriptions for each case. 
This improvement aimed to empower users by precisely conveying the reason behind the unavailability of certain features.\n\n#### The revised implementation\nFollowing Chad's suggestion, the code underwent the following changes:\n\n```ruby\nunless ::Feature.enabled?(:remote_development_feature_flag)\n  # TODO: Could have `included Gitlab::Graphql::Authorize::AuthorizeResource` and then use\n  #       raise_resource_not_available_error!, but didn't want to take the risk to mix that into\n  #       the root query type\n  raise ::Gitlab::Graphql::Errors::ResourceNotAvailable,\n    \"'remote_development_feature_flag' feature flag is disabled\"\nend\n\nunless License.feature_available?(:remote_development)\n  # TODO: Could have `included Gitlab::Graphql::Authorize::AuthorizeResource` and then use\n  #       raise_resource_not_available_error!, but didn't want to take the risk to mix that into\n  #       the root query type\n  raise ::Gitlab::Graphql::Errors::ResourceNotAvailable,\n    \"'remote_development' licensed feature is not available\"\nend\n\nraise_resource_not_available_error!('Feature is not available') unless current_user&.can?(:read_workspace)\n```\n\n#### The value of distinct error messages\nBy implementing distinct and descriptive error messages, we reinforce our commitment to user-centric development. Users interacting with our system will receive accurate feedback, helping them navigate potential roadblocks effectively. This enhancement not only improves the user experience but also streamlines troubleshooting and support processes.\n\nThis code review example highlights the significance of concise and informative error messages in delivering a top-notch user experience within the GitLab ecosystem. 
Our team's collaborative efforts ensure that users can confidently interact with our platform, knowing they'll receive clear and helpful error messages when needed.\n\n![BE1](https://about.gitlab.com/images/blogimages/remote-development/BE1.png){: .shadow.medium}\n\n### Example: Improving performance and addressing N+1 issues in WorkspaceType\nIn a recent code review, our team focused on optimizing the WorkspaceType and addressing potential N+1 query problems. The discussion involved two key contributors, [Laura Montemayor](https://gitlab.com/lauraX) and Chad, who worked together to enhance the performance of the codebase.\n\n#### Identifying the performance concerns\nDuring the review, Laura raised a performance concern regarding the possibility of N+1 queries in the WorkspaceType resolver. She suggested that preloading certain associations could be beneficial to avoid this common performance issue.\n\n#### A separate issue for N+1 control\nChad took prompt action and created a separate issue specifically aimed at resolving the N+1 query problems. The new issue, titled \"Address review feedback: Resolve N+1 issues,\" would address the concerns raised by Laura and implement the necessary preloading.\n\n#### Evaluating the potential N+1 impact\nChad provided insightful information about the low risk of real N+1 impact from two particular fields in the current implementation. He elaborated on how the queries for user and agent associations would largely be cache hits due to scoping and usage patterns. 
Chad diligently examined the cache hits happening in development, confirming the potential optimization.\n\nHere's a code snippet from the initial implementation:\n\n```ruby\n# Initial Implementation\nclass WorkspaceType \u003C BaseType\n  field :user, ::Types::UserType,\n    description: \"User associated with this workspace\",\n    null: true\n\n  field :agent, ::Types::AgentType,\n    description: \"Agent associated with this workspace\",\n    null: true\n\n  # Resolver for the user association\n  def user\n    object.user\n  end\n\n  # Resolver for the agent association\n  def agent\n    object.agent\n  end\nend\n```\n\n#### Treating performance as a priority\nBoth contributors acknowledged the significance of addressing the performance concern, with Laura emphasizing its importance. They agreed to prioritize the separate issue dedicated to resolving the N+1 queries and ensuring proper test coverage.\n\nHere's a code snippet from the revised implementation:\n\n```ruby\n# Revised Implementation with Preloading\nclass WorkspaceType \u003C BaseType\n  field :user, ::Types::UserType,\n    description: \"User associated with this workspace\",\n    null: true\n\n  field :agent, ::Types::AgentType,\n    description: \"Agent associated with this workspace\",\n    null: true\n\n  # Resolver for the user association with preloading\n  def user\n    ::Dataloader.for(::User).load(object.user_id)\n  end\n\n  # Resolver for the agent association with preloading\n  def agent\n    ::Dataloader.for(::Agent).load(object.agent_id)\n  end\nend\n```\n\n#### Considering future usage\nChad expressed excitement about the possibility of the new feature gaining significant usage. 
He humorously stated that encountering enough legitimate traffic on workspaces to trigger any performance impact would be a delightful problem to have, as it would indicate a growing user base.\n\n#### Collaboration and performance improvement\nThe code review exemplifies the collaborative and proactive approach of our team in optimizing the WorkspaceType. The team's dedication to addressing performance concerns ensures that our codebase remains performant and efficient, even as our user base grows.\n\n![BE2](https://about.gitlab.com/images/blogimages/remote-development/BE2.png){: .shadow.medium}\n\n## Frontend code review\nThe frontend code review process was managed by our resident `Create: IDE` frontend maintainers, [Paul Slaughter](https://gitlab.com/pslaughter) and [Enrique Alcántara](https://gitlab.com/ealcantara). Additionally, a significant portion of the new frontend UI code had already undergone separate reviews and was merged to master, contributing to the overall quality of the Remote Development feature.\n\n### Example: Collaborative code improvement for ApolloCache Mutators\nPaul started a thread on an old version of the diff related to `ee/spec/frontend/remote_development/pages/create_spec.js`. The code snippet in question involved creating a mock Apollo instance and writing queries to the cache.\n\n#### The initial implementation\nInitially, the code involved writing to the cache twice, which raised concerns among the maintainers, Paul and Enrique. Paul pointed out that the duplicate write was unintentional and wondered if the writeQuery was even necessary, given the removal of @client directives. However, he also acknowledged the need to test that the created workspace was added to the ApolloCache.\n\n```javascript\n// Initial Implementation\nconst buildMockApollo = () => {\n  // ... 
Other mock setup ...\n  \n  // Initial writeQuery for userWorkspacesQuery\n  mockApollo.clients.defaultClient.cache.writeQuery({\n    query: userWorkspacesQuery,\n    data: USER_WORKSPACES_QUERY_EMPTY_RESULT.data,\n  });\n\n  // ... Other mock setup ...\n};\n```\n\n#### Identifying a potential issue\nEnrique agreed that the duplicate write was unintentional and probably introduced during a rebase. He explained that pre-populating the cache with a user workspaces query empty result was essential for the mutator to have a place to add the workspace. However, he encountered difficulties in making the workaround work effectively in unit tests.\n\n#### Resolving the issue\nPaul highlighted the significance of pre-populating the cache with the user workspaces query empty result. He suggested leaving a comment to explain the necessity of the initial writeQuery, as it would be implicitly coupled to future writeQuery operations.\n\n```javascript\n// Resolving the Issue - Leaving a Comment\n// Pre-populate the cache with user workspaces query empty result to provide a place\n// for the mutator to add the Workspace later. This is needed for both test and production environments.\nmockApollo.clients.defaultClient.cache.writeQuery({\n  query: userWorkspacesQuery,\n  data: USER_WORKSPACES_QUERY_EMPTY_RESULT.data,\n});\n```\n\nHowever, upon further investigation, Paul discovered that the writeQuery might not be needed, and the issue might be a symptom of an underlying problem. 
He decided to open a separate thread to address this concern and indicated that he would work on a separate MR to handle it.\n\n```javascript\n// Resolving the Issue - Opening a Separate Thread and MR\n// Open a separate thread to discuss potential underlying issues.\n// Plan to work on a separate MR to handle it.\n// Stay tuned for updates!\n```\n\n![FE](https://about.gitlab.com/images/blogimages/remote-development/FE.png){: .shadow.medium}\n\n## What we learned\nAs part of the Remote Development team, we faced the challenge of merging the Remote Development Rails monolith integration branch to meet our ambitious release goal. We adapted to last-minute pivots and focused on minimizing risks during the review process. The successful merge brought us one step closer to benefiting GitLab users worldwide. We acknowledged areas for improvement and remained committed to refining the feature's quality. Our journey reflects our dedication to delivering results, embracing change, and pushing boundaries in the DevOps community. The release of the Remote Development feature in GitLab 16.0 is a significant milestone for GitLab, and we continue to iterate and grow, providing innovative solutions for developers worldwide.\n\nAn outcome of this process was an ongoing conversation to propose a [simplified review process for greenfield features](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/125117). Through this proposal, we aim to distill the lessons we learned during this experience and provide guidance to future teams facing similar challenges.\n\n## What is next for Remote Development?\nAfter the merge of the MR, several changes were implemented:\n- The first production tests were conducted to ensure the stability and functionality of the merged code.\n- Collaboration took place between the Dev Evangelism and Technical Marketing teams, focusing on [creating content](https://gitlab.com/groups/gitlab-com/marketing/developer-relations/-/epics/190). 
This collaboration aimed to troubleshoot any issues that arose during the merge.\n- Feedback from the community was taken into account, and changes were made to address the concerns raised. This feedback was incorporated into an [issue](https://gitlab.com/gitlab-org/gitlab/-/issues/410031) and influenced the overall roadmap and direction of the project.\n\nDo you want to [contribute to GitLab](/community/contribute/)? Come and join in the conversation in the `#contribute` channel on GitLab's [Discord](https://discord.gg/gitlab), or just pop in and say \"Hi.\"\n\n",[9,835,940,1865,1290,727],{"slug":3692,"featured":6,"template":684},"remote-development-beta","content:en-us:blog:remote-development-beta.yml","Remote Development Beta","en-us/blog/remote-development-beta.yml","en-us/blog/remote-development-beta",{"_path":3698,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3699,"content":3704,"config":3711,"_id":3713,"_type":13,"title":3714,"_source":15,"_file":3715,"_stem":3716,"_extension":18},"/en-us/blog/rust-programming-language",{"title":3700,"description":3701,"ogTitle":3700,"ogDescription":3701,"noIndex":6,"ogImage":2601,"ogUrl":3702,"ogSiteName":669,"ogType":670,"canonicalUrls":3702,"schema":3703},"A guide to Rust programming language","Rust is a well-loved programming language but it is a mindset shift from options like C++. 
Here's a tutorial and an inside look at Rust code and its capabilities.","https://about.gitlab.com/blog/rust-programming-language","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"A guide to Rust programming language\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2020-07-21\"\n      }",{"title":3700,"description":3701,"authors":3705,"heroImage":2601,"date":3707,"body":3708,"category":3709,"tags":3710},[3706],"Valerie Silverthorne","2020-07-21","\n\n## What is Rust?\n\nRust is an open source programming language that has been the \"most loved language\" on developer community Stack Overflow's annual survey for the last four years. While it's a popular language in that sense, only a very, _very_ small number of developers actually use Rust language today – a July 2020 look at the PYPL PopularitY of Programming Languages Index ranks it at number 18 with just 0.81% interest. (For comparison Python is at nearly 32% and Java is over 17%.)\n\nSo why the intense love of the Rust programming language? To put it simply, Rust coding was created to solve problems present in other languages and if you can take the time to unlock its (admittedly difficult) secrets, you're rewarded with cleaner, faster, and most importantly, safer code. Rust code resolves pain points that you see in countless other programming languages with far fewer downsides. Utilizing Rust allows developers to decide when they no longer need memory at the time of compilation which creates more efficiency around memory usage.\n\n[Antony Saba](/company/team/#asaba), a senior security engineer with Strategic Security at GitLab, recently talked about Rust during a company-wide series of meetings ([Contribute 2020](/events/gitlab-contribute/)). He speaks from experience as his last employer was a Rust-based company. \"Okay, so what's Rust's promise?\" Saba asked. 
\"Rust's promise is that it should be easier, and everybody should be able to fearlessly write at a systems level and not have to worry about memory safety or thread safety, or at least worry about it in the way that is supported by the language and the tools.\"\n\nLet's unpack what that means.\n\n## History of Rust programming language\n\nThe [open source Rust community](https://www.rust-lang.org) describes the language as fast, reliable and productive. \"Hundreds of companies around the world are using Rust in production for fast, low-resource cross-platform solutions,\" the organization says. Firefox and Dropbox are two well-known users of Rust today, and Mozilla (creator of Firefox) was the original supporter of Rust.\n\n### Who created Rust?\n\nRust code was originally developed as an open source project by software developer Graydon Hoare while working at Mozilla Research in 2006 and has been maintained by the Rust Foundation since 2021. It’s now one of the top drivers of the Rust programming language. \n\nThink of Rust as the answer to a data-rich problem that will likely need lots of computational cycles. Mozilla's [Rust documentation](https://research.mozilla.org/rust/) specifically calls out the language as ideal for \"game engines, operating systems, file systems, browser components and simulation engines for virtual reality.\"\n\n## Benefits of programming in Rust\n\nThe top benefit of Rust coding is its adept memory management. Although there are other programming languages that emphasize memory safety like Rust code, Rust handles the concept differently in that it doesn’t use a garbage collector as other programming languages do. Instead, Rust uses a borrow checker to track variable scope and object lifetime while simultaneously administering high-quality memory safety and stopping concurrent data races. \n\nThe benefits of programming in Rust don’t stop at memory management. 
It’s fast and reliable for creating web apps and creating cross-platform applications, and it can integrate with preexisting code. \n\nOne of the other major benefits of Rust programming language is that it is well-suited for projects that demand extremely high performance. Its ability to process large amounts of data and CPU-intensive operations makes it a strong competitor in the developer space. \n\nOther Rust feature benefits offer a list of features that makes it stand out from other programming languages. Here are some of the features:\n\n1. It’s more user-friendly.\n2. You’ll find high-quality documentation about the language.\n3. It has a better resolution of memory errors and concurrent programs than C and C++ languages.\n4. It’s incredibly fast and highly secure compared to other languages.\n\n## The Rust ecosystem\n\nThe [JetBrains 2021 Developer Ecosystem Report](https://www.jetbrains.com/lp/devecosystem-2021/rust/) found that Rust developers have mostly been using it for less than six months, and often reach for the language for \"hobby\" or personal projects. What are devs primarily writing with Rust? The report found command line interface tools, systems programming and web development were the most popular options.\n\nMany companies have started using Rust, though. In 2020, [Discord switched from Go to Rust](https://discord.com/blog/why-discord-is-switching-from-go-to-rust), and Shopify, Dropbox, AWS and many others use it as well. \n\n## The basics of Rust programming language\n\nRust is a bit of a hybrid, according to Mozilla's Rust documentation. Rust offers developers the syntax advantages of high-level languages with the \"control and performance of a low-level language,\" the documentation explains.\n\nRust is a statically typed language rather than a dynamic one. 
Though developers like to argue the merits of both, Rust, like popular TypeScript, eliminates the frustration of \"dynamic typing.\" Data is constrained and checked by a compiler so confusion is minimized. Rust programming also makes it very hard to ignore errors – Steve Donovan, author of [\"A Gentle Guide to Rust,\"](https://stevedonovan.github.io/rust-gentle-intro/) jokes it can be hard not to think the compiler is shouting at you when you make a mistake.\n\nDonovan identifies Rust's key principles as:\n\n* Strictly enforcing safe borrowing of data\n* Functions, methods, and closures to operate on data\n* Tuples, structs, and enums to aggregate data\n* Pattern matching to select and destructure data\n* Traits to define behaviour on data\n\n## Types of Rust coding\n\nRust treats values by breaking them down into \"types\" in order to handle the data appropriately. According to MIT's guide to Rust, there are a number of types that can be split into scalar or integer types. Scalar types will likely be familiar to those who work with other programming languages: characters, Booleans, floating-point numbers and integers. They all represent a single value. Compound types are what they sound like – multiple types together.  \n\n## Who uses Rust?\n\nAll of the guardrails mentioned lead to a language that can create fast-moving code with few things that slow it down. There's no runtime or garbage collection, making coding in Rust ideal for applications where memory usage is at a premium (like embedded devices). But if there is a place where Rust really stands out, it's security. Donovan points out that Rust is \"safe by default,\" unlike C or C++. No one can corrupt memory by default, he writes.\n\n## The Rust programming language and productive coding\n\nAfter three years of coding in Rust, Antony was quick to say he's probably more productive with the language than any other.\n\n\"I really do feel like Rust was the most productive language I've ever used,\" he says. 
\"Once you are doing everything in that functional style, you're writing less code, but it's still clearer, because you don't have temporary variables. They're a thing that you don't really end up using when you're writing code that way. So, to me, it's those little things that I get the productivity out of.\"\n\n## Rust can be touchy, but rewarding\n\nProductive, sure, but there's a learning curve with Rust.\n\n\"It's true the borrow checker is the hardest part,\" Antony says. \"But the thing is, once you get past that, there is a serious dopamine hit when that program compiles, because it means now you only have your own logic errors to deal with. Part of that pain is explicitly some of the things that you assume, and some of the little white lies that you tell yourself when you're starting, especially with a C program. Because when you start your C program, it's like, 'all right, I have a couple command line parameters, I don't really want to write all my functions just to pass them, so I'm just going to declare a couple global variables and shove them in there, and I'll clean it up later.' Right? It's one of those little lies we tell ourselves. But you can't have immutable global variables in Rust. It just won't let you. You have to wrap it – you may as well just do the functions right. They're going to use your command-line arguments. It's the same with thread safety. You kind of have to do that upfront, and you don't get to make that assumption.\"\n\n## Looking to the future of the Rust programming language\n\nRust has a bright future, even if it might not be as widespread as other languages, Antony explains. \"I don't think it's ever going to be as popular as Go, just because Google is Google, and there's a lot of places that Go is really good for,\" he says. 
\"But for those places where you really want that fearless development, I think it'll continue to have a strong hold there.\"\n\nWatch Antony's Rust demo in full here:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/INT_rGJr6JQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n**Read more about programming languages:**\n\nCan we solve the [COBOL programmer shortage?](/blog/cobol-programmer-shortage/)\n\nWhy we use [Ruby on Rails](/blog/why-we-use-rails-to-build-gitlab/) to build GitLab\n\nHow [Modern C and C++ work](/blog/conan-c-cpp-package-management-integration/)\n\nCover image by [Zsolt Palatinus](https://unsplash.com/@sunitalap) on [Unsplash](https://unsplash.com)\n{: .note}\n","insights",[727,814,9],{"slug":3712,"featured":6,"template":684},"rust-programming-language","content:en-us:blog:rust-programming-language.yml","Rust Programming Language","en-us/blog/rust-programming-language.yml","en-us/blog/rust-programming-language",{"_path":3718,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3719,"content":3725,"config":3730,"_id":3732,"_type":13,"title":3733,"_source":15,"_file":3734,"_stem":3735,"_extension":18},"/en-us/blog/safe-without-silos-in-gitlab",{"title":3720,"description":3721,"ogTitle":3720,"ogDescription":3721,"noIndex":6,"ogImage":3722,"ogUrl":3723,"ogSiteName":669,"ogType":670,"canonicalUrls":3723,"schema":3724},"SAFe without silos in GitLab","Learn how to map the Scaled Agile Framework to the native capabilities of the DevSecOps platform and the advantages that come from doing so.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097569/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2811%29_2hcwWx49wQ7CHfvhhkVH6S_1750097569126.png","https://about.gitlab.com/blog/safe-without-silos-in-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n   
     \"@type\": \"Article\",\n        \"headline\": \"SAFe without silos in GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Amanda Rueda\"}],\n        \"datePublished\": \"2025-04-08\"\n      }",{"title":3720,"description":3721,"authors":3726,"heroImage":3722,"date":3727,"body":3728,"category":2669,"tags":3729},[2666],"2025-04-08","Let's talk about what happens when your organization adopts the Scaled Agile Framework (SAFe) to scale to enterprise levels. You've got multiple teams working on complex products, and you need a way to coordinate all that work. But here's a common headache: Your planning happens in one tool, while your actual development work lives somewhere else entirely.\n\nThis divide creates real problems day-to-day. Developers jump between systems constantly. Product managers struggle to get an accurate picture of progress. And everyone wastes time manually copying information from one place to another. It's precisely the kind of disjointed experience that SAFe was designed to eliminate.\n\nWhile your development teams might already be using GitLab for source code management, CI/CD, and security, you may wonder whether GitLab can also support your planning needs within the SAFe framework. The good news is that GitLab's Agile project management capabilities offer strong support for SAFe. In this article, you'll learn how GitLab maps to SAFe concepts and ceremonies, all within the same DevSecOps platform your software developers already know and love.\n\n## What is SAFe?\n\nSAFe, or the Scaled Agile Framework, is a way to bring Agile principles to large organizations without losing speed, alignment, or customer focus. It takes the iterative and flexible teamwork model of small teams and applies its principles across big organizations that have multiple teams, roadmaps, and stakeholders. This brings the organization into alignment, all planning and executing in the same direction. 
For product managers, SAFe helps connect strategy to execution so you’re not just shipping fast, you’re shipping the right things, backed by clear priorities and cross-team alignment.\n\nSAFe reduces silos, encourages collaboration, and helps teams rally around customer outcomes, not just tasks. When integrated in GitLab, the magic really happens: visibility, traceability, and delivery all live in one place.\n\n## SAFe terminology in GitLab\n\nFirst, let's establish how SAFe concepts map to GitLab:\n\n| SAFe | GitLab |\n| :---- | :---- |\n| Epic | Top-level Epic |\n| Capability | Sub-epic (Level 1) |\n| Feature | Sub-epic (Level 2) |\n| User Story | Issue |\n| Task | Task |\n| Team | Custom Field / Scoped Label |\n| Sprint | Iteration |\n| Program Increment (PI) | Milestone |\n| Value Stream | Top-level Group |\n| Agile Release Train (ART) | Top-level Group |\n\n\u003Cbr>\u003C/br>\n\nWith this mapping as your guide, you can set up GitLab to mirror your SAFe implementation. The group structure lets you organize around your value streams and ARTs, while the work item hierarchy (with up to seven levels of nested epics!) gives you all the depth you need for complex product portfolios. Whether you're working at the portfolio level (with top-level groups), program level (with subgroups), or team level (with projects), GitLab's organizational structure aligns perfectly with SAFe's hierarchy.\n\n## Supporting SAFe ceremonies in GitLab\n\nNow for the fun part - how do you actually run your SAFe ceremonies in GitLab? 
Let's walk through each one.\n\n### PI planning\n\nTo facilitate the cross-team alignment and dependency management that makes PI planning successful, GitLab offers several capabilities:\n\n* Use the [Roadmap](https://docs.gitlab.com/user/group/roadmap/) view to visualize features across teams and time periods\n* Assign features to the PI [milestone](https://docs.gitlab.com/user/project/milestones/)\n* Document and visualize cross-team [dependencies](https://docs.gitlab.com/user/project/issues/related_issues/#blocking-issues) as they're identified\n\nGitLab gives you flexibility for PI planning through both the Epic boards (which can be configured to show team assignments) and the Roadmap view (which shows features over time like a Gantt chart). You can switch between these views during your planning session depending on whether you're focusing on the timeline or team organization.\n\n![Roadmap view and epic board](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097577/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097576746.gif)\n\n\u003Cbr>\u003C/br>\n\n![Roadmap view with Gantt chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097577/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097576747.png)\n\n### Refinement\n\nAs a product manager, running effective refinement sessions means having clear visibility into your feature backlog. You can run your refinement session right inside GitLab. 
No more updating one tool during the meeting and then having to update another tool afterward.\n\nGitLab powers refinement sessions with:\n\n* [Epic boards](https://docs.gitlab.com/user/group/epics/epic_boards/) that group features based on status\n* The ability to view story points directly in the [overview](https://docs.gitlab.com/user/group/epics/epic_boards/#view-count-of-issues-weight-and-progress-of-an-epic)\n* Comprehensive [drawer views](https://docs.gitlab.com/user/group/epics/manage_epics/#open-epics-in-a-drawer) that let you interact with work items without losing context\n* The ability to create and link [child issues](https://docs.gitlab.com/user/group/epics/manage_epics/#add-an-issue-to-an-epic) directly from epics\n\n![SAFe - image 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097577/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097576749.gif)\n\n### Sprint planning\n\nWhen it's time to figure out what your team can tackle in the next sprint, GitLab gives you:\n\n* [Issue boards](https://docs.gitlab.com/user/project/issue_board/) that provide a comprehensive view of your backlog\n* [Total weight](https://docs.gitlab.com/user/project/issue_board/#sum-of-issue-weights) of user stories displayed directly on boards\n* The ability to easily move issues between iterations\n* A collapsible view that simplifies moving stories between sprints\n\nThis means you can keep everything in one place and spend your planning meetings actually planning instead of jumping between tools.\n\n![Sprint planning with GitLab](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097577/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097576751.gif)\n\n*💡 Check out [this tutorial on using GitLab to facilitate Scrum](https://docs.gitlab.com/tutorials/scrum_events/) for a detailed glimpse into the power of GitLab in Agile planning and sprint tracking.*\n\n### Daily stand-ups\n\nYour team can gather around the board 
during daily stand-ups and actually see what everyone's working on, what's stuck, and what's ready for review – all in one view. For your dev team's daily stand-ups, GitLab lets you:\n\n* Create [iteration-scoped](https://docs.gitlab.com/user/project/issue_board/#iteration-lists) boards that show the current sprint's work\n* Display story points/weights directly on cards\n* Use the [drawer view](https://docs.gitlab.com/user/project/issues/managing_issues/#open-issues-in-a-drawer) to access details without leaving the context\n* Highlight tasks at risk through [health status](https://docs.gitlab.com/user/project/issues/managing_issues/#health-status)\n\n![Daily stand-up board](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097577/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097576755.png)\n\n### Sprint review\n\nWant to know how your team is doing over time? GitLab provides comprehensive metrics with:\n\n* [Burndown and burnup charts](https://docs.gitlab.com/user/group/iterations/#iteration-burndown-and-burnup-charts) for iterations\n* Velocity tracking\n* [Lead and cycle time](https://docs.gitlab.com/user/group/value_stream_analytics/#lifecycle-metrics) metrics\n* Dashboards that can be scoped to teams\n\nThese metrics help you understand if your team is getting faster, where they're getting stuck, and what you might want to talk about in your next retrospective.\n\n![Burndown and burnup charts](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097577/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097576758.png)\n\n## 5 reasons a unified platform provides an advantage\n\nI know there are plenty of planning tools that can handle SAFe ceremonies. But there are game-changing reasons why I genuinely believe GitLab is different:\n\n1. **No more context switching** - Your planning, coding, testing, and security all happen in one place.\n2. 
**Everything's connected** - You can trace work from the big epic down to the code and deployment.\n3. **Everyone's on the same page** - Developers, product folks, and security teams all work together in the same tool.\n4. **Total visibility** - Stakeholders have one place to check for updates.\n5. **The full picture** - You see planning and development metrics together, so you know what's really going on.\n\nIf your dev teams already love GitLab, why make them jump to another tool for planning or create some complex, cobbled-together integrations? Bringing your SAFe planning into GitLab creates a much smoother experience for everyone.\n\n## Implementation principles\n\nI've worked with teams transitioning from traditional SAFe tools to GitLab, and here's what I've learned: Focus on **what each ceremony is trying to accomplish**, not on recreating exact replicas of your old tools.\n\nThe teams that get the most out of GitLab are the ones who embrace its native capabilities instead of fighting against them. Yes, it takes some initial work to figure out how to map your SAFe concepts and set up your workflows. But once you do, you'll find your processes actually get simpler rather than more complex.\n\nThe key is defining conventions that everyone follows. Which labels mean what? How will you track teams? What goes in an epic versus an issue? With a little upfront investment in these decisions, you'll end up with an intuitive system that eliminates all that cross-tool coordination overhead.\n\n## Getting started\n\nReady to give this a shot? Here's how to start implementing SAFe in GitLab:\n\n1. **Set up your structure** - Create groups and subgroups that [match your organization](https://about.gitlab.com/blog/best-practices-to-set-up-organizational-hierarchies-that-scale/).\n2. 
**Define your work breakdown** - Decide how you'll use [epics](https://about.gitlab.com/blog/unlocking-agile-excellence-gitlab-epics-for-seamless-portfolio-management/), [issues](https://docs.gitlab.com/user/project/issues/managing_issues/), and [tasks](https://docs.gitlab.com/user/tasks/).\n3. **Create your iterations** - Set up your [sprint schedule](https://docs.gitlab.com/user/group/iterations/#create-an-iteration-cadence).\n4. **Add your milestones** - [Milestones](https://docs.gitlab.com/user/project/milestones/#create-a-milestone) will represent your Program Increments in GitLab.\n5. **Build your boards** - Create different views for different ceremonies.\n6. **Agree on conventions** - Document how you'll use labels and custom fields.\n\nTaking time to think through these decisions upfront will save you many headaches later. And remember, you don't have to perfect it on day one - you can always adjust as you learn.\n\n## Bringing it all together\n\nGitLab gives you a solid foundation for running SAFe, especially if your dev teams are already GitLab fans. When you bring planning and development into the same tool, you eliminate those painful handoffs, make collaboration way easier, and get everything moving faster.\n\nThe beauty of GitLab's planning tools is that they're flexible enough to adapt to your specific flavor of SAFe. You're not locked into rigid workflows - you can evolve your approach as your teams mature and your needs change.\n\n> Ready to see how much better life is without those planning silos? 
[Start your free trial today](https://about.gitlab.com/free-trial/) and experience firsthand how GitLab can transform your SAFe implementation.\n\n*💡 If you liked this topic check out this related post - [GitLab for Agile Software Development](https://about.gitlab.com/blog/gitlab-for-agile-software-development/)*\n",[980,478,680,678,9],{"slug":3731,"featured":90,"template":684},"safe-without-silos-in-gitlab","content:en-us:blog:safe-without-silos-in-gitlab.yml","Safe Without Silos In Gitlab","en-us/blog/safe-without-silos-in-gitlab.yml","en-us/blog/safe-without-silos-in-gitlab",{"_path":3737,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3738,"content":3743,"config":3749,"_id":3751,"_type":13,"title":3752,"_source":15,"_file":3753,"_stem":3754,"_extension":18},"/en-us/blog/second-law-of-complexity-dynamics",{"title":3739,"description":3740,"ogTitle":3739,"ogDescription":3740,"noIndex":6,"ogImage":2205,"ogUrl":3741,"ogSiteName":669,"ogType":670,"canonicalUrls":3741,"schema":3742},"How pursuit of simplicity complicates container-based CI","Simplicity always has a certain player in mind - learn how to avoid antipatterns by ensuring simplicity themes do not compromise your productivity by over-focusing on machine efficiencies.","https://about.gitlab.com/blog/second-law-of-complexity-dynamics","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"When the pursuit of simplicity creates complexity in container-based CI pipelines\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"}],\n        \"datePublished\": \"2022-05-24\"\n      }",{"title":3744,"description":3740,"authors":3745,"heroImage":2205,"date":3746,"body":3747,"category":769,"tags":3748},"When the pursuit of simplicity creates complexity in container-based CI pipelines",[675],"2022-05-24","\n\nIn a GitLab book club, I recently read \"[The Laws of Simplicity](http://lawsofsimplicity.com/),\" a great book 
on a topic that has deeply fascinated me for many years. The book contains an acronym that expresses simplicity generation approaches: SHE, which stands for \"shrink, hide, embody.\" These three approaches for simplicity generation all share a common attribute: They are all creating illusions - not eliminations.\n\nI've seen this illusion repeat across many, many realms of pursuit for many years. Even in human language, vocabulary development, jargon, and acronyms all simply encapsulate worlds of complexity that still exist, but can be more easily referenced in a compact form that performs SHE on the world of concepts.\n\nAny illusion has a boundary or curtain where in front of the curtain the complexity can be dealt with by following simple rules, but, behind the curtain, the complexity must be managed by a stage manager. \n\nFor instance, when the magic show creates the spectre of sawing people in half, what appears to be a simple box is in fact an exceedingly elaborate contraption. Not only that, but the manufacturing process for an actual simple box and the sawing box are markedly different in terms of complexity. The manufacturing of complexity and its result are essentially the tradeoff for what would be the real-world complexity of actually sawing people in half and having them heal and stand up unharmed immediately afterward.\n\nTo bring this into the technical skills realm, consider that when you leverage a third-party component or API to add functionality, you only need to know the parameters to obtain the desired result. The people maintaining that component or API must know the quantum mechanics detail level of how to perform that work in a reliable and complete way.\n\nDocker containers are a mechanism for embodying complexity, and are used in scaled applications and within container-based CI. 
When a [CI/CD](/topics/ci-cd/) automation engineer uses container-based CI, it is possible to make things more complex and more expensive when attempting to do exactly the opposite.\n\nAt its core, this post is concerned with how it can happen that pursuing a simpler world through containers can turn into an antipattern - a reversal of desired outcomes - many times, without us noticing that the reversal is affecting our productivity. The prison of a paradigm is secure indeed.\n\n### The Second Law of Complexity Dynamics\n\nOver the years I have come to believe that the pursuit of reducing complexity has similar characteristics to [The Second Law of Thermodynamics](https://www.grc.nasa.gov/www/k-12/airplane/thermo2.html). The net result of a change between mass and energy results in the same net amount of mass and energy, but their ratio and form have changed. In what I will coin \"The Second Law of Complexity Dynamics,\" complexity is similarly \"conserved,\" it is just reformed.\n\nIf complexity is not eliminated by simplifying efforts, we reduce its impact in a given realm by changing the ratio of complexity and simplicity on each side of one or more curtains. But alas, complexity did not die, it just hid and is now someone else's management challenge. It is important not to think of this as cheating. There is no question that hiding complexity carries the potential for massive efficiency gains when the world behind the hiding mechanisms becomes the realm of specialty skills and specialists. When it truly externalizes the complexity management for one party, the world becomes more simple for that party.\n\nHowever, the devil is in the details. If the hypothesis of \"no net elimination of complexity\" is correct, it is then important where the complexity migrates to. If it migrates to another part of the same process that must also be managed by the same people, then it may not result in a net gain of efficiency. 
If it migrates out of a previously embodied realm, then, in the pursuit of simplicity, we can actually reduce our overall efficiency when the process is considered as a whole.\n\n### Container-based CI pipelines as a useful case in point\n\nI see the potential for efficiency reversals to crop up in my daily work time and again, and an interesting place where I've seen it lately is in the tradeoff of linking together hyper-specialized modules of code in containers for CI versus leveraging more generalized modules.\n\nIn creating container-based pipelines, I experience the potential for an efficiency reversal I have to consciously manage.\n\nContainers make a simplicity tradeoff by design. They create a full runtime environment for a very single purpose but in doing so they strip back the container internals so far that general compute tasks are difficult inside them. If you step behind their \"complexity embodying\" curtain into the container, their simplistic environment can require more complex code to operate within.\n\nIn GitLab CI pipelines that utilize containers, all the scripts of jobs run inside the containers that are specified as their runtime environment. When one selects a specialized container - such as the alpine git container or the skopeo image management container - the code is subject to the limitations of the shell that container employs (if it has one at all).\n\nContainers were devised to be hyper-specialized, purpose-specific runtimes that assure they can always run and run quickly for scaled applications. However, for many containers this means no shell or a very stripped back shell like busybox sh. It frequently also means not including the package manager for the underlying Linux distribution.\n\nTime and again, I've found myself degrading the implementation of my shell code in key ways that make it more complex, so that it can run under these stripped back shells. 
In these cases, I do not benefit from the complexity hiding of newer versions of advanced shells like Bash v5. One of the areas is advanced Bash shell expansions, which embody a huge world of complex parsing and avoid a bunch of extraneous utilities. And another is advanced if and case statement comparison logic that processes regular expressions without external utilities and performs many other abstracted comparisons. There are many other areas of the language where this comes into play, but these two stand out.\n\n![](https://about.gitlab.com/images/blogimages/second-law-of-complexity-dynamics-container-pipeline-tradeoffs.png)\n\nSo by having a simpler shell like busybox sh, the complexities hidden by advanced shell features become *unhidden* and join my side of the curtain. Now I have to manage them in my code. But then, guess what? No package manager means the inability to install other Linux utilities and language extensions that I could also employ to push that same complexity back out of my space. And, of course, it means installing Bash v5 would be difficult as well.\n\nSo the simplicity proposition of a tightly optimized purpose-specific container can reverse the purported efficiency gains in the very important realm of the code I have to write. It also means I frequently have to break up my code into multiple jobs to utilize the specializations of these containers in a sequence or to transport the results of a specialized container into a fuller coding environment. 
This increases the complexity of the pipeline as I now have to pass artifacts and variable data from one job to another with a host of additional YAML directives, and sometimes deploy infrastructure (e.g., [Runner caching](https://docs.gitlab.com/ee/ci/caching/#:~:text=For%20runners%20to%20work%20with,GitLab.com%20behave%20this%20way)).\n\nIn the case of CI using containers, when the simplicity tradeoffs move complexity to things I do not maintain, such as base containers, operating system packages, and full shell environments, into things I do maintain, such as CI YAML and Shell Script code, then I am also inheriting long-term complexity maintenance. In the cloud, we know this as undifferentiated heavy lifting.\n\nInterestingly, the proliferation of specialized containers can also require more machine resources and can lengthen processing time as containers are retrieved from registries and loaded and artifacts and source code are copied in and out of each job-based container.\n\n### Simplicity target: Efficiency\n\nIt's easy to lose sight of the amount of human effort and ingenuity being applied to knowing and managing the coding structure, rather than being applied to solving the real automation problems of the CI pipeline. The net complexity of the pipeline can also mean it is hard to maintain an understanding of it even if you are working in it every day - and for newcomers onboarding, it can be many weeks before they fully understand how the system works.\n\nOf course, I can create my own containers for CI pipelines, but now I've added the complexity of container development and continuous updates of the same in order for my pipeline code to be operational and stay healthy. I am still behind the curtain for that container. For teams whose software is not itself containerized, the prospect of learning to build containers just for CI can create a lot of understandable friction to adopting a container-based CI development process. 
This friction may be unnecessary if we make a key heuristic adaptation.\n\n### Walking the tightwire above the curtain\n\nSo how do I manage the tensions of these multiple worlds of complexity when it comes to container-based pipelines to try to avoid efficiency reversals in the net complexity of the pipeline?\n\nIt is simple. I will describe the method and then the key misapplied heuristic and how to adjust it.\n\n1. I hold that the primary benefits of container-based CI are a) dependency isolation by job (so that you don’t have a massive and brittle CI build machine specification to handle all possible build requirements), and b) clean CI build agent state by obtaining a clean container copy for each job. These benefits do not imply having to abide by microservices container resource planning and doing so is what creates an antipattern in my productivity.\n\n2. I frequently use a Bash 5 container (version pegged if need be) where all the complexity that advanced shell capabilities embody for me stay behind the curtain.\n\n3. Instead of running a hyper-minimalized container for a given utility, I do a runtime install of that utility (gasp!) in a container that has my rich shell. I utilize version pegging during the install if I feel version safety is paramount on the utility. Alternatively, if a very desirable runtime of some type is difficult to setup and does not have a package, I look for a container that has a package manager that matches a packaged version of the runtime and also allows me to install my advanced scripting language if needed.\n\n4. If, and only if, the net time of the needed runtime installs exceeds the net pipeline time to load a string of specialized containers (with artifact handling) plus my time to develop and manage a pipeline dependency in the form of a custom container, then do I consider possibly creating a pipeline specific container.\n\n5. Through this process a balancing principle also emerges. 
Since I have been doing runtime installs as a development practice, I have actually already MVPed what a pipeline specific container would need to have installed. I can literally copy the installation lines into a Docker file if I wish. I can also notice if I have commonality across multiple pipelines where it makes sense to create a multi-pipeline utility container.\n\nIn a recent project, following these principles caused me to avoid the skopeo container and instead install it on the Bash 5 container using a package manager.\n\nIf your team is big into Python or PowerShell as your CI language, it would make sense to start with recent releases of those containers. The point is not advanced Bash - but an advanced version of your general CI scripting language that prevents you from creating workarounds in your code for problems that are well-solved in publicly available runtimes.\n\nKeep in mind that this adjustment is very, very focused on containers **in CI pipelines**, which, by nature, reflect general compute processing requirements where many vastly different operations are required in a pipeline. I am not advocating this approach for true microservices applications where, by design, a given service has very defined purpose and characteristics and, at scale, massively benefits from the machine efficiency of hyper-minimalized, purpose-specific granularity.\n\n### Misapplied heuristics\n\nFrequently when a pattern has an inflection point at which it becomes an antipattern, it is due to misapplying the heuristics of the wrong realm. In this case, I believe, that normal containerization patterns for microservices apps are well founded, but they apply narrowly to \"engineered hyper-specialized compute\" of a granule we call \"a microservice\" (note the word \"micro\" applies to the scope of compute activities). Importantly, they apply because the process itself is designed as hyper-specialized around a very specific task. 
The container contents (included dependencies), immutability principle (no runtime change), and the runtime compute resources can be managed exceedingly minimally because of the small and highly specific scope of computing activities that occur within the process.\n\nThis is essentially the embodiment of the 12 Factor App principle called “[VIII. Concurrency](https://12factor.net/concurrency),” which asserts that scaling should be horizontal scaling of the same minimalized process, not vertical scaling of compute resources inside a given process. If the system experiences 10x work for a particular activity, we create 10 processes, we do not request 10x memory and 10x CPU within one running process. Microservices architecture tightly controls the amount of work in each request so that it is hyper-predictable in its compute resource requirements and, therefore, scalable by adding identical processes.\n\nCI compute, by nature, is the opposite of hyper-specialized. Across build, test, package, deploy, etc., etc., there are many huge variations in required machine resources of memory, CPU, network I/O and high-speed disk access and, importantly, included dependencies. The generalized compute nature also occurs due to varying inputs so the same defined process might need a lot more resources due to the nature of the raw input data. For example, varying input volume (e.g. a lot versus few data items) or varying input density (e.g. processing binary files versus text files). \n\nIt is the process that is being containerized that holds the attribute of generalized compute (bursty on at least some compute resources) or hyper-specialized (narrow definition of work to be done and therefore well-known compute resources per unit of completed work). 
Containerizing a process that exhibits generalized compute requirements is useful, but planning the resources of that container as if containerizing it has transformed the compute requirements into hyper-minimalized is the inflection point at which it becomes an antipattern, actually eroding the sought-after benefits we set out to create.\n\nIn the model I employ for leveraging containers in CI, the loosening of the hyper-specialization, immutability (no-runtime installs), and very narrow compute resources principles of microservices simply reflects the real world in that CI compute as a whole exhibits the nature of generalized, not hyper-specialized, compute characteristics.\n\n> Another realm where this seems true is desired state configuration management technologies - also known as “Configuration as Code”. It is super simple if there are pre-existing components or recipes for all that you need to do but as soon as you have to build some for yourself, you enter a world of creating imperative code against a declarative API boundary (there's the \"embodiment\" curtain - the declarative API boundary). Generally, if you have not had to implement imperative code to process declaratively, this new world takes some significant experience to become proficient.\n\n### Iterating SA: Experimental improvements for your next project\n\n1. In general, favor simplicity boundaries that reduce your work, especially in the realm of undifferentiated heavy lifting. In the realm of container-based CI, this includes having a rich coding language and a package manager to acquire additional complexity embodying utilities quickly and easily.\n\n2. In general, be suspicious of an underlying antipattern if you have to spend an inordinate amount of time coding and maintaining workarounds in the service of simplicity. 
In the realm of container-based CI, this would be containers that are ultra-minimalized around microservices performance characteristics when they don’t hyper-scale as a standing service within CI.\n\n3. In general, stand back and examine the net complexity of the code and frameworks that will have to be maintained by yourself or your team and check if you’ve made tradeoffs that have a net negative tax on your efficiency. When complexity that can be managed by machines enters your workspace at high frequency, then you have a massive antipattern of human efficiency.\n\n4. It is frequent that when the heuristics being applied create negative human efficiency they also create negative machine efficiency. Watch for this effect in your projects. The diagram in the post shows that over-minimalized containers can easily lead to using a lot more of them - all of which has machine overhead as well.\n\nIf the above resonates, CI pipeline engineers might want to consider loosening the \"microservices\" heuristics of hyper-specialization, ultra-minimalization, and immutability (no dynamic installs) for CI pipeline containers in order to ensure that the true net complexity level of the code they have to maintain is in balance and their productivity is preserved.\n\n### Appendix: Working examples of this idea\n\n- [AWS CLI Tools in Containers](https://gitlab.com/guided-explorations/aws/aws-cli-tools) has both Bash and PowerShell Core (on Linux OS) available so that one container set can suit the automation shell preference of both Linux and Windows heritage CI automation engineers.\n\n- CI file [installs yq dynamically](https://gitlab.com/guided-explorations/gl-k8s-agent/gitops/envs/world-greetings-env-1/-/blob/main/.gitlab-ci.yml#L47-48) in the Bash container, but then [only installs the heavier jq and skopeo](https://gitlab.com/guided-explorations/gl-k8s-agent/gitops/envs/world-greetings-env-1/-/blob/main/.gitlab-ci.yml#L63) if needed by the work implied, which demonstrates a 
way to be more efficient even when runtime installs are desired.\n\n- [Bash and PowerShell Script Code Libraries in Pure GitLab CI YAML](https://gitlab.com/guided-explorations/ci-cd-plugin-extensions/script-code-libraries-in-pure-gitlab-ci-yaml) shows how to have libraries of CI script code available to every container in a pipeline without encapsulating the libraries in a container themselves and with minimalized CI YAML complexity compared to YAML anchors, references, or extends. While the method is a little bit challenging to setup, from then on out it pays back by decoupling scripting libraries from any other pipeline artifact.\n\n- [CI/CD Extension Freemarker File Templating](https://gitlab.com/guided-explorations/ci-cd-plugin-extensions/ci-cd-plugin-extension-freemarker-file-templating) shows the install is very quick and only affects one job and still version pegs the installed utility.\n",[771,772,9,793,1000],{"slug":3750,"featured":6,"template":684},"second-law-of-complexity-dynamics","content:en-us:blog:second-law-of-complexity-dynamics.yml","Second Law Of Complexity Dynamics","en-us/blog/second-law-of-complexity-dynamics.yml","en-us/blog/second-law-of-complexity-dynamics",{"_path":3756,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3757,"content":3763,"config":3768,"_id":3770,"_type":13,"title":3771,"_source":15,"_file":3772,"_stem":3773,"_extension":18},"/en-us/blog/secure-and-publish-python-packages-a-guide-to-ci-integration",{"title":3758,"description":3759,"ogTitle":3758,"ogDescription":3759,"noIndex":6,"ogImage":3760,"ogUrl":3761,"ogSiteName":669,"ogType":670,"canonicalUrls":3761,"schema":3762},"Secure and publish Python packages: A guide to CI integration","Learn how to implement a secure CI/CD pipeline across five stages with the GitLab DevSecOps 
platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662080/Blog/Hero%20Images/AdobeStock_1097303277.jpg","https://about.gitlab.com/blog/secure-and-publish-python-packages-a-guide-to-ci-integration","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Secure and publish Python packages: A guide to CI integration\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2025-01-21\"\n      }",{"title":3758,"description":3759,"authors":3764,"heroImage":3760,"date":3765,"body":3766,"category":814,"tags":3767},[916],"2025-01-21","Supply chain security is a critical concern in software development. Organizations need to verify the authenticity and integrity of their software packages. This guide will show you how to implement a secure CI/CD pipeline for Python packages using GitLab CI, incorporating package signing and attestation using Sigstore's Cosign.\n\nYou'll learn:\n\n- [Why sign and attest your Python packages?](#why-sign-and-attest-your-python-packages%3F)\n- [Pipeline overview](#pipeline-overview)\n- [Complete pipeline implementation: Setting up the environment](#complete-pipeline-implementation-setting-up-the-environment)\n   * [Environment configuration](#environment-configuration)\n   * [Configuration breakdown](#configuration-breakdown)\n-  The 6 stages\n\n    1. [Building](#building-crafting-the-package)\n    2. [Signing](#signing-the-digital-notarization)\n    3. [Verification](#verification-the-security-checkpoint)\n    4. [Publishing](#publishing-the-controlled-release)\n    5. [Publishing signatures](#publishing-signatures-making-verification-possible)\n    6. 
[Consumer verification](#consumer-verification-testing-the-user-experience)\n\n## Why sign and attest your Python packages?\n\nHere are four reasons to sign and attest your Python packages:\n\n* **Supply chain security:** Package signing ensures that the code hasn't been tampered with between build and deployment, protecting against supply chain attacks.\n* **Compliance requirements:** Many organizations, especially in regulated industries, require cryptographic signatures and provenance information for all deployed software.\n* **Traceability:** Attestations provide a verifiable record of build conditions, including who built the package and under what circumstances.\n* **Trust verification:** Consumers of your package can cryptographically verify its authenticity before installation.\n\n## Pipeline overview\n\nEnsuring your code's integrity and authenticity is necessary. Imagine a pipeline that doesn't just compile your code but creates a cryptographically verifiable narrative of how, when, and by whom your package was created. Each stage acts as a guardian, checking and documenting the package's provenance.\n\nHere are six stages of a GitLab pipeline that ensure your package is secure and trustworthy:\n\n* Build: Creates a clean, standard package that can be easily shared and installed.\n* Signing: Adds a digital signature that proves the package hasn't been tampered with since it was created.\n* Verification: Double-checks that the signature is valid and the package meets all our security requirements.\n* Publishing: Uploads the verified package to GitLab's package registry, making it available for others to use.\n* Publishing Signatures: Makes signatures available for verification.\n* Consumer Verification: Simulates how end users can verify package authenticity.\n\n## Complete pipeline implementation: Setting up the environment\n\nBefore we build our package, we need to set up a consistent and secure build environment. 
This configuration ensures every package is created with the same tools, settings, and security checks.\n\n### Environment configuration\n\nOur pipeline requires specific tools and settings to work correctly.\n\nPrimary configurations:\n\n* Python 3.10 for consistent builds\n* Cosign 2.2.3 for package signing\n* GitLab package registry integration\n* Hardcoded package version for reproducibility\n\n**Note about versioning:** We've chosen to use a hardcoded version (`\"1.0.0\"`) in this example rather than deriving it from git tags or commits. This approach ensures complete reproducibility and makes the pipeline behavior more predictable. In a production environment, you might want to use semantic versioning based on git tags or another versioning strategy that fits your release process.\n\nTool requirements:\n\n* Basic utilities: `curl`, `wget`\n* Cosign for cryptographic signing\n* Python packaging tools: `build`, `twine`, `setuptools`, `wheel`\n\n### Configuration breakdown\n\n```yaml\nvariables:\n  PYTHON_VERSION: '3.10'\n  PACKAGE_NAME: ${CI_PROJECT_NAME}\n  PACKAGE_VERSION: \"1.0.0\"\n  FULCIO_URL: 'https://fulcio.sigstore.dev'\n  REKOR_URL: 'https://rekor.sigstore.dev'\n  CERTIFICATE_IDENTITY: 'https://gitlab.com/${CI_PROJECT_PATH}//.gitlab-ci.yml@refs/heads/${CI_DEFAULT_BRANCH}'\n  CERTIFICATE_OIDC_ISSUER: 'https://gitlab.com'\n  PIP_CACHE_DIR: \"$CI_PROJECT_DIR/.pip-cache\"\n  COSIGN_YES: \"true\"\n  GENERIC_PACKAGE_BASE_URL: \"${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/${PACKAGE_NAME}/${PACKAGE_VERSION}\"\n```\n\nWe use caching to speed up subsequent builds:\n\n```yaml\ncache:\n  paths:\n    - ${PIP_CACHE_DIR}\n```\n\n## Building: Crafting the package\n\nEvery software journey begins with creation. 
In our pipeline, the build stage is where raw code transforms into a distributable package, ready to travel across different Python environments.\n\nThe build process creates two standardized formats:\n\n* a wheel package (.whl) for quick, efficient installation\n* a source distribution (.tar.gz) that carries the complete code\n\nHere's the build stage implementation:\n\n```yaml\nbuild:\n  extends: .python-job\n  stage: build\n  script:\n    - git init\n    - git config --global init.defaultBranch main\n    - git config --global user.email \"ci@example.com\"\n    - git config --global user.name \"CI\"\n    - git add .\n    - git commit -m \"Initial commit\"\n    - export NORMALIZED_NAME=$(echo \"${CI_PROJECT_NAME}\" | tr '-' '_')\n    - sed -i \"s/name = \\\".*\\\"/name = \\\"${NORMALIZED_NAME}\\\"/\" pyproject.toml\n    - sed -i \"s|\\\"Homepage\\\" = \\\".*\\\"|\\\"Homepage\\\" = \\\"https://gitlab.com/${CI_PROJECT_PATH}\\\"|\" pyproject.toml\n    - python -m build\n  artifacts:\n    paths:\n      - dist/\n      - pyproject.toml\n```\n\nLet's break down what this build stage does:\n\n1. Initializes a Git repository (`git init`) and configures it with basic settings\n2. Normalizes the package name by converting hyphens to underscores, which is required for Python packaging\n3. Updates the package metadata in `pyproject.toml` to match our project settings\n4. Builds both wheel and source distribution packages using `python -m build`\n5. Preserves the built packages and configuration as artifacts for subsequent stages\n\n## Signing: The digital notarization\n\nIf attestation is the package's biography, signing is its cryptographic seal of authenticity. This is where we transform our package from a mere collection of files into a verified, tamper-evident artifact.\n\nThe signing stage uses Cosign to apply a digital signature as an unbreakable seal. 
This isn't just a stamp — it's a complex cryptographic handshake that proves the package's integrity and origin.\n\n```yaml\nsign:\n  extends: .python+cosign-job\n  stage: sign\n  id_tokens:\n    SIGSTORE_ID_TOKEN:\n      aud: sigstore\n  script:\n    - |\n      for file in dist/*.whl dist/*.tar.gz; do\n        if [ -f \"$file\" ]; then\n          filename=$(basename \"$file\")\n          cosign sign-blob --yes \\\n            --fulcio-url=${FULCIO_URL} \\\n            --rekor-url=${REKOR_URL} \\\n            --oidc-issuer $CI_SERVER_URL \\\n            --identity-token $SIGSTORE_ID_TOKEN \\\n            --output-signature \"dist/${filename}.sig\" \\\n            --output-certificate \"dist/${filename}.crt\" \\\n            \"$file\"\n        fi\n      done\n  artifacts:\n    paths:\n      - dist/\n```\n\nThis signing stage performs several crucial operations:\n\n1. Obtains an OIDC token from GitLab for authentication with Sigstore services\n2. Processes each built package (both wheel and source distribution)\n3. Uses Cosign to create a cryptographic signature (`.sig`) for each package\n4. Generates a certificate (`.crt`) that proves the signature's authenticity\n5. Stores both signatures and certificates alongside the packages as artifacts\n\n## Verification: The security checkpoint\n\nVerification is our final quality control gate. It's not just a check — it's a security interrogation where every aspect of the package is scrutinized.\n\n```yaml\nverify:\n  extends: .python+cosign-job\n  stage: verify\n  script:\n    - |\n      failed=0\n      for file in dist/*.whl dist/*.tar.gz; do\n        if [ -f \"$file\" ]; then\n          filename=$(basename \"$file\")\n          if ! 
cosign verify-blob \\\n            --signature \"dist/${filename}.sig\" \\\n            --certificate \"dist/${filename}.crt\" \\\n            --certificate-identity \"${CERTIFICATE_IDENTITY}\" \\\n            --certificate-oidc-issuer \"${CERTIFICATE_OIDC_ISSUER}\" \\\n            \"$file\"; then\n            failed=1\n          fi\n        fi\n      done\n      if [ $failed -eq 1 ]; then\n        exit 1\n      fi\n```\n\nThe verification stage implements several security checks:\n\n1. Examines each package file in the `dist` directory\n2. Uses Cosign to verify the signature matches the package content\n3. Confirms the certificate's identity matches our expected GitLab pipeline identity\n4. Validates our trusted OIDC provider issued the certificate\n5. Fails the entire pipeline if any verification check fails, ensuring only verified packages proceed\n\n## Publishing: The controlled release\n\nPublishing is where we make our verified packages available through GitLab's package registry. It's a carefully choreographed release that ensures only verified, authenticated packages reach their destination.\n\n```yaml\npublish:\n  extends: .python-job\n  stage: publish\n  script:\n    - |\n      cat \u003C\u003C EOF > ~/.pypirc\n      [distutils]\n      index-servers = gitlab\n      [gitlab]\n      repository = ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/pypi\n      username = gitlab-ci-token\n      password = ${CI_JOB_TOKEN}\n      EOF\n      TWINE_PASSWORD=${CI_JOB_TOKEN} TWINE_USERNAME=gitlab-ci-token \\\n        twine upload --repository-url ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/pypi \\\n        dist/*.whl dist/*.tar.gz\n```\n\nThe publishing stage handles several important tasks:\n\n1. Creates a `.pypirc` configuration file with GitLab package registry credentials\n2. Uses the GitLab CI job token for secure authentication\n3. Uploads both wheel and source distribution packages to the GitLab PyPI registry\n4. 
Makes the packages available for installation via pip\n\n## Publishing signatures: Making verification possible\n\nAfter publishing the packages, we must make their signatures and certificates available for verification. We store these in GitLab's generic package registry, making them easily accessible to users who want to verify package authenticity.\n\n```yaml\npublish_signatures:\n  extends: .python+cosign-job\n  stage: publish_signatures\n  script:\n    - |\n      for file in dist/*.whl dist/*.tar.gz; do\n        if [ -f \"$file\" ]; then\n          filename=$(basename \"$file\")\n          curl --header \"JOB-TOKEN: ${CI_JOB_TOKEN}\" \\\n               --fail \\\n               --upload-file \"dist/${filename}.sig\" \\\n               \"${GENERIC_PACKAGE_BASE_URL}/${filename}.sig\"\n\n          curl --header \"JOB-TOKEN: ${CI_JOB_TOKEN}\" \\\n               --fail \\\n               --upload-file \"dist/${filename}.crt\" \\\n               \"${GENERIC_PACKAGE_BASE_URL}/${filename}.crt\"\n        fi\n      done\n```\n\nThe signature publishing stage performs these key operations:\n\n1. Processes each built package to find its corresponding signature files\n2. Uses the GitLab API to upload the signature (`.sig`) file to the generic package registry\n3. Uploads the corresponding certificate (`.crt`) file\n4. Makes these verification artifacts available for downstream package consumers\n5. Uses the same version and package name to maintain the connection between packages and signatures\n\n## Consumer verification: Testing the user experience\n\nThe final stage simulates how end users will verify your package's authenticity. 
This stage acts as a final check and a practical example of the verification process.\n\n```yaml\nconsumer_verification:\n  extends: .python+cosign-job\n  stage: consumer_verification\n  script:\n    - |\n      git init\n      git config --global init.defaultBranch main\n      mkdir -p pkg signatures\n\n      pip download --index-url \"https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.com/api/v4/projects/${CI_PROJECT_ID}/packages/pypi/simple\" \\\n          \"${NORMALIZED_NAME}==${PACKAGE_VERSION}\" --no-deps -d ./pkg\n\n      pip download --no-binary :all: \\\n          --index-url \"https://gitlab-ci-token:${CI_JOB_TOKEN}@gitlab.com/api/v4/projects/${CI_PROJECT_ID}/packages/pypi/simple\" \\\n          \"${NORMALIZED_NAME}==${PACKAGE_VERSION}\" --no-deps -d ./pkg\n\n      failed=0\n      for file in pkg/*.whl pkg/*.tar.gz; do\n        if [ -f \"$file\" ]; then\n          filename=$(basename \"$file\")\n          sig_url=\"${GENERIC_PACKAGE_BASE_URL}/${filename}.sig\"\n          cert_url=\"${GENERIC_PACKAGE_BASE_URL}/${filename}.crt\"\n\n          curl --fail --silent --show-error \\\n               --header \"JOB-TOKEN: ${CI_JOB_TOKEN}\" \\\n               --output \"signatures/${filename}.sig\" \\\n               \"$sig_url\"\n\n          curl --fail --silent --show-error \\\n               --header \"JOB-TOKEN: ${CI_JOB_TOKEN}\" \\\n               --output \"signatures/${filename}.crt\" \\\n               \"$cert_url\"\n\n          if ! cosign verify-blob \\\n            --signature \"signatures/${filename}.sig\" \\\n            --certificate \"signatures/${filename}.crt\" \\\n            --certificate-identity \"${CERTIFICATE_IDENTITY}\" \\\n            --certificate-oidc-issuer \"${CERTIFICATE_OIDC_ISSUER}\" \\\n            \"$file\"; then\n            failed=1\n          fi\n        fi\n      done\n\n      if [ $failed -eq 1 ]; then\n        exit 1\n      fi\n```\n\nThis consumer verification stage simulates the end-user experience by:\n\n1. 
Creating a clean environment to test package installation\n2. Downloading the published packages from the GitLab PyPI registry\n3. Retrieving the corresponding signatures and certificates from the generic package registry\n4. Performing the same verification steps that end users would perform\n5. Ensuring the entire process works from a consumer's perspective\n6. Failing the pipeline if any verification step fails, providing an early warning of any issues\n\n## Summary\n\nThis comprehensive pipeline provides a secure and reliable way to build, sign, and publish Python packages to GitLab's package registry. By following these practices and implementing the suggested security measures, you can ensure your packages are appropriately verified and safely distributed to your users.\n\nThe pipeline combines modern security practices with efficient automation to create a robust software supply chain. Using Sigstore's Cosign for signing and attestation, along with GitLab's built-in security features, you can provide users with trustworthy cryptographically verified packages.\n\n> #### Get started on your security journey today with a [free 60-day trial of GitLab Ultimate](https://gitlab.com/-/trials/new?glm_content=default-saas-trial&glm_source=about.gitlab.com).\n\n## Learn more\n- [Documentation: Use Sigstore for keyless signing and verification](https://docs.gitlab.com/ee/ci/yaml/signing_examples.html)\n- [Streamline security with keyless signing and verification in GitLab](https://about.gitlab.com/blog/keyless-signing-with-cosign/)\n- [Annotate container images with build provenance using Cosign in GitLab CI/CD](https://about.gitlab.com/blog/annotate-container-images-with-build-provenance-using-cosign-in-gitlab-ci-cd/)",[814,230,281,680,771,108,478,9,1000],{"slug":3769,"featured":90,"template":684},"secure-and-publish-python-packages-a-guide-to-ci-integration","content:en-us:blog:secure-and-publish-python-packages-a-guide-to-ci-integration.yml","Secure And Publish 
Python Packages A Guide To Ci Integration","en-us/blog/secure-and-publish-python-packages-a-guide-to-ci-integration.yml","en-us/blog/secure-and-publish-python-packages-a-guide-to-ci-integration",{"_path":3775,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3776,"content":3782,"config":3787,"_id":3789,"_type":13,"title":3790,"_source":15,"_file":3791,"_stem":3792,"_extension":18},"/en-us/blog/secure-and-safe-login-and-commits-with-gitlab-yubico",{"title":3777,"description":3778,"ogTitle":3777,"ogDescription":3778,"noIndex":6,"ogImage":3779,"ogUrl":3780,"ogSiteName":669,"ogType":670,"canonicalUrls":3780,"schema":3781},"Secure and safe login and commits with GitLab + Yubico","Learn how GitLab and Yubico have partnered to strengthen software development security through robust authentication measures.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663259/Blog/Hero%20Images/REFERENCE_-_display_preview_for_blog_images__3_.png","https://about.gitlab.com/blog/secure-and-safe-login-and-commits-with-gitlab-yubico","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Secure and safe login and commits with GitLab + Yubico\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2025-04-02\",\n      }",{"title":3777,"description":3778,"authors":3783,"heroImage":3779,"date":3784,"body":3785,"category":814,"tags":3786},[1767],"2025-04-02","We live in a time where data breaches and phishing attacks make daily headlines. These breaches can cause harm to an organization, such as regulatory fines, business downtime, or even worse, reputational damage. 
In terms of authentication, passwords have been the backbone of online security for decades, however, they're increasingly proving inadequate against sophisticated cyber threats.\n\nGitLab and [Yubico](https://www.yubico.com/) have partnered to strengthen software development security through robust authentication measures. Yubico is the inventor of the YubiKey, a hardware security key that delivers phishing-resistant multi-factor authentication (MFA). By implementing FIDO Universal 2nd Factor (U2F) and YubiKey hardware protection, GitLab offers developers a powerful defense against phishing attacks and other cyber threats, ensuring their code and projects remain secure. This collaboration expands enterprise-grade authentication in the GitLab platform, allowing programmers to focus on creating software while maintaining confidence in their account's integrity.\n\nThis article explains how to configure GitLab to use YubiKeys to protect developers from online threats. You’ll also learn how to further prevent tampering with GitLab verified commits.\n\n## How YubiKeys work\n\nAt their core, YubiKeys function as cryptographic hardware tokens that generate and store private keys in a secure element. These keys implement FIDO2/WebAuthn authentication protocols, which can be used as an additional factor to login to GitLab.\n\nHere's how it works when logging in:\n\n1. You enter your username and password.  \n2. GitLab sends a cryptographic challenge to your browser.  \n3. Your browser requests the YubiKey to sign this challenge.  \n4. You physically touch the YubiKey to approve.\n5. The YubiKey creates a unique cryptographic signature for that specific service and challenge.  \n6. GitLab verifies the signature using your public key stored during setup.\n\nMost major security breaches involve compromised passwords. Adding a YubiKey secures your account from a remote breach, even if your password is stolen, so you can rest assured that your GitLab account is secure. 
Additional key security benefits of using YubiKey for authentication with GitLab include:\n\n* **Phishing protection:** Fake sites won't have the correct cryptographic keys to verify the response. \n* **No secrets to steal:** The private key never leaves the YubiKey.  \n* **Physical security:** Physical presence is required to use it (you must touch the YubiKey).\n\n## Setting up YubiKey multifactor authentication in GitLab\n\nNow let’s go over how to set up a Yubikey for multifactor authentication in GitLab. Make sure you're using a [supported browser and operating system](https://support.yubico.com/hc/en-us/articles/360016615020-Operating-system-and-web-browser-support-for-FIDO2-and-U2F) as they have better WebAuthn support for hardware security keys.\n\n1. First, log in to your GitLab account and go to your user settings (click your avatar in the top left corner and select **Preferences**). \n2. In the left sidebar, click on **Account** and navigate to the **Two-factor Authentication** section.\n3. If you haven't already enabled 2FA, you'll need to do that first.\n\n    a. Click **Enable two-factor authentication**.\n\n    b. Scan the QR code with your authenticator app.\n\n    c. Enter the code from your authenticator app.\n\n    d. Enter your GitLab password. If you ever need to access your GitLab account without using Google authentication, you may need to:\n    * Use the **Forgot password** option on the GitLab login page to set up a separate GitLab password.\n    * Contact your GitLab administrator to help you set up alternative login methods.\n\n   e. Save your recovery codes in a safe place.\n\n4. Once 2FA is enabled, go back to the previous screen by pressing **Manage two-factor authentication** and scroll down to the **Register hardware token** section.  \n5. Press the **Set up new device** button.  \n    a. A popup from your browser should appear. **Note:** This image may look different depending on your browser. 
You may also get popups from password managers; feel free to ignore them. \n\n![Browser (Brave) Auth Request](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674606/Blog/Content%20Images/browser_auth_request.png)\n\n&nbsp; &nbsp; b. Select **Use a phone, tablet, or security key**.\n\n6. A new popup will appear.\n\n![browser security key request](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674607/Blog/Content%20Images/browser_security_key_request.png)\n\n&nbsp; &nbsp; a. Insert your YubiKey into your computer's USB port.\n\n&nbsp; &nbsp; b. Touch the metal contact/button on your YubiKey when prompted. The field will automatically fill with a one-time code.\n\n7. Enter your GitLab Password and provide a name for your Hardware Key.  \n8. Click **Register** to add the YubiKey to your account.\n\nCongratulations, your YubiKey is now registered and can be used as a second factor when logging into GitLab! You can register multiple YubiKeys to your account for backup purposes. **Note:** The process may vary slightly among browsers.\n\n![yubikey registered](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674607/Blog/Content%20Images/yubikey_registered.png)\n\n\u003Ccenter>\u003Ci>YubiKey registered successfully\u003C/i>\u003C/center>\n\n## Signing in with a YubiKey\n\nNow that we have our YubiKey configured, we can log in as follows:\n\n1. Go to GitLab.com.\n\n![GitLab login](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674607/Blog/Content%20Images/gitlab_login.png)\n\n2. Provide your username and password and then press the **Sign in** button.\n3. You will be sent to the following screen.\n\n![GitLab 2fa login](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674606/Blog/Content%20Images/2fa_login.png)\n\n&nbsp; &nbsp; a. A popup, like the one below, should come up. **Note:** This image may look different depending on your browser. 
You may also get popups from password managers; feel free to ignore them.\n\n![Browser security key request](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674607/Blog/Content%20Images/browser_security_key_request.png)\n\n&nbsp; &nbsp; b. Insert your YubiKey into your computer's USB port.\n\n&nbsp; &nbsp; c. Touch the metal contact/button on your YubiKey when prompted. The field will automatically fill with a one-time code.\n\nNow, you should be logged in and taken to your GitLab page. **Note:** The process may vary slightly among browsers.\n\n## What happens if I lose my YubiKey?\n\nYubico recommends that you use and keep a backup YubiKey. When considering your home, car, or office, you wouldn’t think twice about having a backup key to keep in a safe place. Your digital self should get the same level of consideration. A backup YubiKey kept in a safe place provides a quick and safe backup if your primary YubiKey is lost. Keeping a backup will also easily enable you to deactivate the lost YubiKey and add a new primary or secondary YubiKey.\n\nIf you do not have an additional YubiKey added, it is recommended to have another form of 2FA added to your accounts. In either case, you should be able to get access to your account and remove the lost key from the account. Please note that if a spare key or another authentication method hasn’t been added, you will need to contact the service/website for help with recovering your account.\n\n## GitLab verified commits\n\nTo further prevent tampering, you can also configure verified commits. Verified commits in GitLab use GPG (GNU Privacy Guard) signatures to prove that a commit actually came from you. This adds another layer of security on top of authentication by ensuring that not only is your account secure, but every code change can be cryptographically verified as coming from you.\n\nYour YubiKey can store GPG keys:\n\n* The private key is stored securely on the YubiKey.  
\n* The public key is shared with GitLab.\n* The key pair is used to sign your commits.\n\nOnce the GPG keys have been set up:\n\n* When you make a commit, Git uses your private key to create a signature.  \n* The GPG key is accessed from the attached YubiKey.  \n* The signature is stored with the commit metadata.  \n* GitLab verifies the signature using your public key.\n\n## Setting up verified commits\n\nLet’s go over how to configure verified commits. In this example, the GPG key will live inside your YubiKey, providing an extra layer of security.\n\n1. Install required software.\n\n```bash\n# On macOS\nbrew install --cask yubico-yubikey-manager\nbrew install gnupg gpg yubikey-manager\n\n# On Ubuntu/Debian\nsudo apt install gnupg gpg yubikey-personalization\n\n# On Windows\n# Download and install Gpg4win from https://gpg4win.org\n```\n\n2. Check YubiKey GPG status.\n\n```bash\ngpg --card-status\n```\n3. Generate GPG keys directly on YubiKey (more secure).\n\n```bash\n# Start GPG edit mode\ngpg --card-edit\n\n# Enter admin mode\nadmin\n\n# Generate key directly on card\n# PIN = '123456' | Admin PIN = '12345678'\ngenerate\n\n# Follow prompts\n# See documentation for more info \n# https://support.yubico.com/hc/en-us/articles/360013790259-Using-Your-YubiKey-with-OpenPGP\n```\n\n4. Export your public key.\n\n```bash\n# Get your key ID\ngpg --list-secret-keys --keyid-format LONG\n\n# Export the public key\ngpg --armor --export YOUR_KEY_ID\n```\n\n5. Add the public key to GitLab.\n\n    a. Click on your GitLab Avatar and select **Preferences**.\n\n    b. On the side tab select **GPG Keys**.\n\n    c. Click **Add new key**.\n\n    d. Paste your public key.\n\n    e. Click **Add key**.\n\n6. Configure Git.\n\n```bash\n# Set signing key\ngit config --global user.signingkey YOUR_KEY_ID\n\n# Enable automatic signing\ngit config --global commit.gpgsign true\n\n# Tell GPG which key to use\necho \"default-key YOUR_KEY_ID\" >> ~/.gnupg/gpg.conf\n```\n\n7. 
Now let’s test the configuration by creating a test commit in a project:\n\n```bash\n# Make a change in the project\n# Add changes\ngit add .\n\n# Make a test commit\ngit commit -S -m \"Test signed commit\"\n\n# Verify signature\ngit verify-commit HEAD\n\n# Push the change\ngit push\n```\n\nThe `git verify-commit HEAD` command should show the GPG key used:\n\n```bash\ngpg: Signature made Wed Feb 26 11:45:00 2025 CST\ngpg:                using RSA key YOUR_KEY_ID\ngpg: Good signature from “NAME (DESCRIPTION) \u003CEMAIL>\" [ultimate]\n```\n\nThen, when viewing the commit in GitLab, you should now see that the commit is verified as follows:\n\n![Commit is verified](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674607/Blog/Content%20Images/verified.png)\n\n\u003Ccenter>\u003Ci>Commit verified with GPG key\u003C/i>\u003C/center>\n\u003Cbr>\u003C/br>\n\nYou can also use the [commits API](https://docs.gitlab.com/api/commits/#get-signature-of-a-commit) to check a commit’s signature allowing you to further operationalize the verification workflow.\n\n## Learn more\n\nTo learn more about GitLab, Yubico, and the solutions each provides, check out these resources:\n\n* [Why GitLab](https://about.gitlab.com/why-gitlab/)  \n* [Why Yubico](https://www.yubico.com/why-yubico/)  \n* [GitLab Security and Compliance Solutions](https://about.gitlab.com/solutions/security-compliance/)  \n* [GitLab listing in the \"Works with YubiKey\" catalog](https://www.yubico.com/works-with-yubikey/catalog/gitlab/)  \n* [Verified Commits - GitLab documentation](https://docs.gitlab.com/ee/user/project/repository/signed_commits/)  \n* [Push Rules in GitLab](https://docs.gitlab.com/user/project/repository/push_rules/)  \n* [Sign Commit with GPG Keys 
documentation](https://docs.gitlab.com/user/project/repository/signed_commits/gpg/)\n",[230,814,9,478,678,680],{"slug":3788,"featured":90,"template":684},"secure-and-safe-login-and-commits-with-gitlab-yubico","content:en-us:blog:secure-and-safe-login-and-commits-with-gitlab-yubico.yml","Secure And Safe Login And Commits With Gitlab Yubico","en-us/blog/secure-and-safe-login-and-commits-with-gitlab-yubico.yml","en-us/blog/secure-and-safe-login-and-commits-with-gitlab-yubico",{"_path":3794,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3795,"content":3800,"config":3806,"_id":3808,"_type":13,"title":3809,"_source":15,"_file":3810,"_stem":3811,"_extension":18},"/en-us/blog/securing-your-code-on-gitlab",{"title":3796,"description":3797,"ogTitle":3796,"ogDescription":3797,"noIndex":6,"ogImage":3049,"ogUrl":3798,"ogSiteName":669,"ogType":670,"canonicalUrls":3798,"schema":3799},"The ultimate guide to securing your code on GitLab.com","This in-depth tutorial, complete with best practices, will help you secure your development environment.","https://about.gitlab.com/blog/securing-your-code-on-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The ultimate guide to securing your code on GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Steve Grossman\"}],\n        \"datePublished\": \"2023-05-31\",\n      }",{"title":3796,"description":3797,"authors":3801,"heroImage":3049,"date":3803,"body":3804,"category":814,"tags":3805},[3802],"Steve Grossman","2023-05-31","\n\nA critical aspect of a DevSecOps methodology is to apply best practices to secure your development environment. Your software should be protected from malicious and accidental exposure or modification. 
This blog explains how to control and manage access to GitLab.com and, in turn, source code, build pipelines, dependency and package repositories, and deployment keys involved in the [software supply chain](/blog/the-ultimate-guide-to-software-supply-chain-security/). The best practices specifically address the capabilities for end users on multi-tenant GitLab.com and are written for the Ultimate license tier. Not all of these capabilities are available at the Premium tier.\n\n## 1. Group settings\nMany security-related settings can be set on the top-level group and will cascade down into all subgroups and projects. They are the easiest and most important in securing your GitLab.com instance.\n\n### General settings\nIn the top-level group, the following settings should be applied to provide the best security for the code within that group:\n\n#### Make the group visibility level private\nThis is likely the most important setting among general settings. By marking the group “private\", anyone who is not explicitly a member of the group will not be able to access it. Additionally, by making the top-level group private, all subgroups and projects will also be private and cannot be exposed.  \n\n#### Permissions and group features\nUnder permissions:\n - Set “Prevent members from sending invitations to outside groups”. This will prevent accidentally adding people who should not belong to the group.\n - Set “Prevent sharing a project with other groups”. This prevents accidental or malicious exfiltration of code by sharing or moving a project to another group outside the control of the top-level group owner.\n - Allow project and group access token creation. 
Project and group access tokens are much like [personal access tokens](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html) with the following improvements:\n    - They are visible to and manageable by group owners and maintainers, which means they can be revoked and have expiration dates set by an administrator to limit the opportunity for abuse.\n    - They create a virtual “bot” user that does not count against your license count.\n - Enable [delayed project deletion](/blog/delayed-deletion/). This will give you a seven-day grace period to catch and prevent accidental or malicious removal of a repo. GitLab.com, like self-managed GitLab, does not have the ability to restore an individual project without significant expense for professional services.\n - Allowlist the Classless Inter-Domain Routing (CIDR) or supernetting from which users should be accessing the code.\n - Restrict membership to only those email domains belonging to your organization and contractors.\n - Restrict creation of subgroups to Owners. This will help keep the structure of the top-level group within your policies and make [SAML Group Sync](https://docs.gitlab.com/ee/user/group/saml_sso/group_sync.html) for membership easier to manage.\n - Block forking projects outside of this group hierarchy. This will help prevent code exfiltration.\n - Require [two-factor authentication](https://docs.gitlab.com/ee/user/profile/account/two_factor_authentication.html). This disables the ability to use password authentication with Git over HTTPS.\n - Disallow adding new members to projects within this group. All members must be inherited from the group.  \n\n#### Merge request approvals\n[Merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/) help prevent injection of malicious code into the repository by having people other than the author review them. 
Enable merge request approvals for all projects in your group to:  \n - Prevent approval by authors.\n - Prevent approvals by users who add commits.\n - Prevent editing approval rules in projects and on individual merge requests.  \n\n### SAML SSO\nTo more tightly control who can access your code in GitLab.com, set up [SAML SSO](https://docs.gitlab.com/ee/user/group/saml_sso/). This will ensure that everyone who accesses it is approved by someone in authority.\n\n#### To configure SAML SSO:  \n - Enable SAML authentication for this group.\n - Enforce SSO-only authentication for web activity for this group.\n - Enforce SSO-only authentication for Git and Dependency Proxy activity for this group.\n - Set the Default membership role to Minimal Access. Roles can be increased as needed in subgroups or individual projects, minimal access prevents any visibility to projects or subgroups where the user is not explicitly granted another role.\n - Tightly control access to the Maintainer and Owner roles; every developer does not need to have a Maintainer role.\n\n## 2. Group auditing and compliance\nRegularly and periodically review the [compliance reports](https://docs.gitlab.com/ee/user/compliance/compliance_report/) to verify who is approving merge requests and what MRs are getting approved.\n\nSet up streaming group audit events to your corporate security information and event management (SIEM) system and monitor them for unusual activity. This needs to be repeated for each group and project in the hierarchy to get the maximum number of audit events.\n\n## 3. Group-level push rules\n Setting restrictive push rules at the group level will help ensure malicious code is not injected into the repository:\n - Require committers be verified.\n - Reject unsigned commits.\n - Ensure the commit author is a GitLab user.\n - Prevent pushing secret files.\n - Require commit author’s email to be from your email domain.  
\n\n## CI/CD  \nThe following settings can help ensure the integrity of [CI/CD](/topics/ci-cd/) pipelines and reduce the opportunities for abuse and malice:\n - Register runners at the lowest practical level to reduce the blast radius of any malicious use.\n - Require tags to use all runners to reduce the opportunity for abuse.\n - Define CI/CD variables – especially if they contain secrets – at the lowest practical level to reduce the blast radius of any malicious use.\n - Use protected runners with protected variables and protected branches to significantly limit who can deploy into production environments or misuse cloud resources.\n - Access to change the .gitlab-ci.yml pipeline definition file should be tightly controlled in all repos through the CODEOWNERS file to prevent malicious use of the CI/CD system.\n\n## 4. Project settings  \nSome settings do not cascade down from the group or are not available at the group level and must be set on individual projects instead. These include some repo-specific settings.\n\n### Repository\nSet up protected branches and protected tags to go along with the protected runners and protected variables defined above.\n\n### CI/CD  \n#### General \n - Disable public pipelines.\n - Use separate caches for protected branches.  \n\n### Protected environments\nUse protected environments and tightly limit who can deploy and require approvals for deploying.\n\n#### Token access\nRestrict access to this project’s CI_JOB_TOKEN to only individual projects to ensure malicious projects do not retrieve the token and use it to access the API.\n\n#### Secure files\nStore keystores, provisioning profiles and signing certificates in the Secure Files storage rather than the repository.  \n\n## 5. 
Project-level security testing and compliance\n### Configuration\n#### Security testing\n - Enable static application security testing [SAST](https://docs.gitlab.com/ee/user/application_security/sast/) to help prevent insertion of malicious code into the application.\n - Enable dependency scanning and regularly review the dependency list of software, or software bill of materials ([SBOM](/blog/the-ultimate-guide-to-sboms/)), generated by dependency scanning for vulnerabilities and malicious components.\n - Enable [container scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/) and cluster image scanning.  \n\n#### Policies\nAs an alternative to the security testing section above, you may choose to enable scan execution policies.\nEnable [test scan result policies](https://docs.gitlab.com/ee/user/application_security/policies/scan-result-policies.html) to prevent merging code with critical vulnerabilities.\n\n\nFollowing these best practices will help ensure that your code hosted on GitLab.com is safe from tampering and \n[public exposure](https://www.engadget.com/okta-stolen-source-code-205601214.html) and that your \nsoftware supply chain is secure and only authorized users \nare accessing your software assets.\n\n## More resources\n- [Group level settings documentation](https://docs.gitlab.com/ee/user/group/)\n- [Project level settings documentation](https://docs.gitlab.com/ee/user/project/settings/)\n",[9,478],{"slug":3807,"featured":6,"template":684},"securing-your-code-on-gitlab","content:en-us:blog:securing-your-code-on-gitlab.yml","Securing Your Code On 
Gitlab","en-us/blog/securing-your-code-on-gitlab.yml","en-us/blog/securing-your-code-on-gitlab",{"_path":3813,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3814,"content":3819,"config":3826,"_id":3828,"_type":13,"title":3829,"_source":15,"_file":3830,"_stem":3831,"_extension":18},"/en-us/blog/self-service-security-alert-handling-with-gitlabs-uam",{"title":3815,"description":3816,"ogTitle":3815,"ogDescription":3816,"noIndex":6,"ogImage":3760,"ogUrl":3817,"ogSiteName":669,"ogType":670,"canonicalUrls":3817,"schema":3818},"Self-service security alert handling with GitLab's UAM","The User Attestation Module automates security alerts by routing them directly to team members for verification, reducing manual SecOps work and enhancing audit trails.","https://about.gitlab.com/blog/self-service-security-alert-handling-with-gitlabs-uam","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Self-service security alert handling with GitLab's UAM\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Bala Allam\"},{\"@type\":\"Person\",\"name\":\"Matt Coons\"}],\n        \"datePublished\": \"2025-03-18\",\n      }",{"title":3815,"description":3816,"authors":3820,"heroImage":3760,"date":3823,"body":3824,"category":814,"tags":3825},[3821,3822],"Bala Allam","Matt Coons","2025-03-18","The [GitLab Security Operations team](https://handbook.gitlab.com/handbook/security/security-operations/) prioritizes automation that enables security engineers to focus on high-impact work rather than routine tasks that can be automated. A key innovation in this automation strategy is creation of the User Attestation Module (UAM), which allows GitLab team members to directly respond to and verify security alerts flagged as potentially malicious. 
When the [GUARD framework](https://about.gitlab.com/blog/automating-cybersecurity-threat-detections-with-gitlab-ci-cd/) detects suspicious activity, it routes the alert to the relevant team member for review. The team member can then attest whether they recognize and authorize the activity. Their response is recorded for audit purposes, and, based on their input, the system either closes the alert or escalates it to the Security Incident Response Team (SIRT).\n\nIn this article, you'll learn about the UAM and how it can benefit your DevSecOps environment.\n\n## How the User Attestation Module works\n\nThe UAM streamlines security alert handling through a comprehensive workflow that includes:\n\n* Alert verification by team members  \n* Collection and documentation of supporting evidence  \n* Option to request additional support from GitLab SecOps  \n* Secure storage of team member responses  \n* Automated alert resolution or incident escalation  \n* Team member feedback collection for continuous improvement\n\nWe created UAM to help us: \n\n1. Route low priority alerts (such as administrative activities) to the relevant team members who performed them.  \n2. Reduce alert fatigue by first checking with the team member who completed the activity before routing to SIRT if necessary.   \n3. Collect and store team member responses to maintain an audit trail and rich metrics.   \n4. Create a response tier between **SIRT needs to triage this alert** and **This is an informational signal that does not need to be reviewed directly**.\n\n## UAM's design principles\n\nThe UAM is a Slack-first automation that reaches out to team members to validate activity directly in Slack, reducing effort and increasing participation. Today, 40% of all security alerts are delivered to team members through the UAM, saving SIRT valuable time to focus on higher importance alerts and incidents. 
\n\nA robust escalation workflow in the UAM ensures that all alerts are validated by team members or escalated to SIRT. When a UAM alert reaches a team member, they have a period of time to respond attesting to the activity or stating they do not recognize the activity. If no response is recorded, the UAM alert is auto-escalated to SIRT for handling. \n\nComprehensive metrics collection is a core GUARD design principle, which extends to how we designed UAM. All user interactivity with triggered UAM alerts is logged in a metrics database, which enables comprehensive measurement to identify problematic alerts, opportunities for process improvement, and overall UAM health.\n\nUAM enables a third alert tier, bridging the gap between alerts that always needed to be investigated, and lower importance informational signals that are grouped by entity for escalation and correlation. \n\n- Stable alerts (must be triaged and investigated by SIRT)  \n- UAM alerts (routed to team members to attest to the activity)  \n- Informational signal (low-importance events that are interesting and correlated by entity grouping)\n\n## UAM components \n\nThe UAM framework consists of multiple components:\n\n- GitLab: Fetches a user email address based on user_id via user’s API and stores user's responses  \n- Slack: Searches each user by email using Slack API and posts a UAM notification to the end user as well as collects responses from users using Slack modals  \n- Tines: Processes and orchestrates user responses and alerts     \n- Devo: Receives alert payload and alert notifications  \n- Metrics DB: Records metrics for triggered UAM alerts\n\nThe workflow integrates with the following modules:\n\n- GitLab API for user identification  \n- Slack API for user communication  \n- Webhook configuration for alert reception  \n- Audit trail storage in GitLab\n\n## UAM workflow\n\nThe diagram below illustrates the workflow of the UAM module:\n\n![UAM - flow 
chart](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674678/Blog/Content%20Images/UAM_detection_edited.png)\n\n## Following along with GUARD\n\nWe are still unveiling parts of GUARD and how it works, so [follow along](https://gitlab.com/gitlab-security-oss/guard) to learn how we automate our security detections from end to end.\n\n## Read more about the GUARD framework\n\n- [Unveiling the GUARD framework to automate security detections at GitLab](https://about.gitlab.com/blog/unveiling-the-guard-framework-to-automate-security-detections-at-gitlab/)  \n- [Automating cybersecurity threat detections with GitLab CI/CD](https://about.gitlab.com/blog/automating-cybersecurity-threat-detections-with-gitlab-ci-cd/)\n- [Open Source Security at GitLab](https://about.gitlab.com/security/open-source-resources/)",[814,9,478,678,727],{"slug":3827,"featured":6,"template":684},"self-service-security-alert-handling-with-gitlabs-uam","content:en-us:blog:self-service-security-alert-handling-with-gitlabs-uam.yml","Self Service Security Alert Handling With Gitlabs Uam","en-us/blog/self-service-security-alert-handling-with-gitlabs-uam.yml","en-us/blog/self-service-security-alert-handling-with-gitlabs-uam",{"_path":3833,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3834,"content":3840,"config":3846,"_id":3848,"_type":13,"title":3849,"_source":15,"_file":3850,"_stem":3851,"_extension":18},"/en-us/blog/set-up-flux-for-gitops-on-openshift",{"title":3835,"description":3836,"ogTitle":3835,"ogDescription":3836,"noIndex":6,"ogImage":3837,"ogUrl":3838,"ogSiteName":669,"ogType":670,"canonicalUrls":3838,"schema":3839},"Set up Flux for GitOps to deploy workloads on OpenShift","Learn how to set up a sample project, complete a bootstrap Flux installation, and authenticate your installation with a project deploy 
token.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682825/Blog/Hero%20Images/genericworkflow.jpg","https://about.gitlab.com/blog/set-up-flux-for-gitops-on-openshift","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Set up Flux for GitOps to deploy workloads on OpenShift\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Bart Zhang\"}],\n        \"datePublished\": \"2023-07-05\",\n      }",{"title":3835,"description":3836,"authors":3841,"heroImage":3837,"date":3843,"body":3844,"category":769,"tags":3845},[3842],"Bart Zhang","2023-07-05","\n\nIn February, we announced that [Flux CD would be our recommended approach to do GitOps with GitLab](https://about.gitlab.com/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab/). This tutorial explains how to set up GitLab and Flux to deploy workloads on Red Hat OpenShift. You’ll set up a sample project, complete a bootstrap Flux installation, and authenticate your installation with a project deploy token. By the end of this tutorial, you should be able to deploy an example NGINX workload to OpenShift from a GitLab Repo via Flux.\n\nYou can find the fully configured tutorial project in [this GitLab repository](https://gitlab.com/gitlab-partner-demos/red-hat-demos/flux). It works in conjunction with [this repository](https://gitlab.com/gitlab-partner-demos/red-hat-demos/web-app-manifests), which contains the example OpenShift manifest. \n\n### To set up Flux for GitOps:\n1. Create a personal access token\n2. Create the Flux repository\n3. Create the OpenShift manifest repository\n4. Configure Flux to sync your manifests\n5. Verify your configuration\n\n### Prerequisites:\nYou must have an OpenShift cluster running. 
Cluster-admin privileges are required to install Flux on OpenShift, which can either be installed via OperatorHub or the CLI.\n\nWhen installing Flux with CLI, you need to set the nonroot SCC for all controllers in the flux-system namespace like this:\n\n```\nNS=\"flux-system\"\noc adm policy add-scc-to-user nonroot system:serviceaccount:${NS}:kustomize-controller\noc adm policy add-scc-to-user nonroot system:serviceaccount:${NS}:helm-controller\noc adm policy add-scc-to-user nonroot system:serviceaccount:${NS}:source-controller\noc adm policy add-scc-to-user nonroot system:serviceaccount:${NS}:notification-controller\noc adm policy add-scc-to-user nonroot system:serviceaccount:${NS}:image-automation-controller\noc adm policy add-scc-to-user nonroot system:serviceaccount:${NS}:image-reflector-controller\n```\nExpected output:\n```\nclusterrole.rbac.authorization.k8s.io/system:openshift:scc:nonroot added: \"kustomize-controller\"\nclusterrole.rbac.authorization.k8s.io/system:openshift:scc:nonroot added: \"helm-controller\"\nclusterrole.rbac.authorization.k8s.io/system:openshift:scc:nonroot added: \"source-controller\"\nclusterrole.rbac.authorization.k8s.io/system:openshift:scc:nonroot added: \"notification-controller\"\nclusterrole.rbac.authorization.k8s.io/system:openshift:scc:nonroot added: \"image-automation-controller\"\nclusterrole.rbac.authorization.k8s.io/system:openshift:scc:nonroot added: \"image-reflector-controller\"\n```\n\nAlso, you'll need to [patch your Kustomization](https://access.redhat.com/documentation/en-us/openshift_container_platform/4.8/html/security_and_compliance/seccomp-profiles) to remove the SecComp Profile and enforce runUserAs to the same UID provided by the images to prevent OpenShift to alter the user expected by our controllers, prior to bootstrapping the cluster.\n\nYou’ll need to create a Git repository and clone it locally. 
I chose to create [the web-app-manifests repository](https://gitlab.com/gitlab-partner-demos/red-hat-demos/web-app-manifests) to store my manifest file once it is created through the following steps.\n\nCreate the file structure required by bootstrap using the following command:\n\n```\ngit clone https://gitlab.com/gitlab-partner-demos/red-hat-demos/flux/\ncd flux\nmkdir -p clusters/my-cluster/flux-system\ntouch clusters/my-cluster/flux-system/gotk-components.yaml \\\n    clusters/my-cluster/flux-system/gotk-sync.yaml \\\n    clusters/my-cluster/flux-system/kustomization.yaml\n```\n\nAdd the following YAML snippet and its patches section to flux/clusters/my-cluster/flux-system/kustomization.yaml:\n\n```\napiVersion: kustomize.config.k8s.io/v1beta1\nkind: Kustomization\nresources:\n  - gotk-components.yaml\n  - gotk-sync.yaml\npatches:\n  - patch: |\n      apiVersion: apps/v1\n      kind: Deployment\n      metadata:\n        name: all\n      spec:\n        template:\n          spec:\n            containers:\n              - name: manager\n                securityContext:\n                  runAsUser: 65534\n                  seccompProfile:\n                    $patch: delete      \n    target:\n      kind: Deployment\n      labelSelector: app.kubernetes.io/part-of=flux\n```\n\nCommit and push the changes to main branch:\n\n```\ncd ~/flux\ngit add -A && git commit -m \"init flux for openshift\" && git push\n```\n\n### Create a personal access token\n\nTo authenticate with the Flux CLI, you must create a GitLab personal access token ([PAT](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html)) with the api scope:\n1. In the upper-right corner, select your avatar.\n2. Select Edit profile.\n3. On the left sidebar, select Access Tokens.\n4. Enter a name and expiry date for the token.\n5. Select the api scope.\n6. Select Create personal access token.\n7. 
Copy the new token to your clipboard.\n\nNote: You can also use a project or group access token with the api scope.\n\n### Create the Flux repository\nCreate a Git repository, install Flux, and authenticate Flux with your repo in RedHat OpenShift:\n1. Make sure you are logged in as an OpenShift user in your CLI to access your cluster. `oc login` command is useful here.\n2. [Install the Flux CLI](https://fluxcd.io/flux/installation/#bootstrap). You must install Flux v2 or higher. `brew install fluxcd/tap/flux` on Mac OSX. Check your flux version with `flux -v`. Mine is `flux version 2.0.0-rc.1`.\n3. In GitLab, create a new empty project called `flux`. I chose to use [the repository in this readme](https://gitlab.com/gitlab-partner-demos/red-hat-demos/flux/)\n4. From your shell, export a GITLAB_TOKEN environment variable with the value of your personal access token. For example, `export GITLAB_TOKEN=\u003Cpersonal-access-token>`.\n5. Run the bootstrap command. The exact command depends on whether you are creating the Flux repository under a GitLab user, group, or subgroup. For more information, see the Flux bootstrap documentation.\n\nIn this tutorial, you’re working with a public project in a subgroup. 
The bootstrap command looks like this:\n\n```\ncd ~/flux\nflux bootstrap gitlab \\\n  --owner=gitlab-partner-demos/red-hat-demos \\\n  --repository=flux \\\n  --branch=master \\\n  --path=clusters/my-cluster \\\n  --token-auth\n```\nExpected output:\n```\n► connecting to https://gitlab.com\n► cloning branch \"master\" from Git repository \"https://gitlab.com/gitlab-partner-demos/red-hat-demos/flux.git\"\n✔ cloned repository\n► generating component manifests\n✔ generated component manifests\n✔ component manifests are up to date\n► installing components in \"flux-system\" namespace\n✔ installed components\n✔ reconciled components\n► determining if source secret \"flux-system/flux-system\" exists\n✔ source secret up to date\n► generating sync manifests\n✔ generated sync manifests\n✔ sync manifests are up to date\n► applying sync manifests\n✔ reconciled sync configuration\n◎ waiting for Kustomization \"flux-system/flux-system\" to be reconciled\n```\n\nThis command installs the Flux agent on the OpenShift cluster and configures it to manage itself from the repository flux-config. The command also automatically creates the project deploy token required to access the flux-config repository.\n\nGreat work! You now have a repository bootstrapped with a Flux configuration. Any updates to your repository are automatically synced to the cluster.\n\n### Create the OpenShift manifest repository\nNext, create a repository for your Flux manifest files. These are stateful files that track the current running configuration by\nthe Flux agent. I chose to use [web-app-manifests](https://gitlab.com/gitlab-partner-demos/red-hat-demos/web-app-manifests) project to track my manifest files.\n1. In GitLab, create a new repository called `web-app-manifests`.\n1. 
Add a file to web-app-manifests named `nginx-deployment.yaml` with the following contents:\n\n```\napiVersion: apps/v1\n\nkind: Deployment\n\nmetadata:\n  name: nginx-deployment\n  labels:\n    app: nginx\nspec:\n  replicas: 3\n  selector:\n    matchLabels:\n      app: nginx\n  template:\n    metadata:\n      labels:\n        app: nginx\n    spec:\n      containers:\n      - name: nginx-unprivileged\n        image: nginxinc/nginx-unprivileged:latest\n        ports:\n        - containerPort: 80\n```\n\nIn the new `web-app-manifests` repository, create a [GitLab deploy token](https://docs.gitlab.com/ee/user/project/deploy_tokens/) with only the `read_repository` scope.\n\nStore your deploy token username and password somewhere safe. I used environmental variables to save mine:\n\n```\nexport GITLAB_DEPLOY_TOKEN_USER=\u003Cmy-gitlab-deployment-token-username>\nexport GITLAB_DEPLOY_TOKEN_PASS=\u003Cmy-gitlab-deployment-token-password>\nenv |grep GITLAB_DEPLOY_TOKEN\n```\nExpected output:\n```\nGITLAB_DEPLOY_TOKEN_USER=myGitLabUserName\nGITLAB_DEPLOY_TOKEN_PASS=MySecretToken\n```\n\nIn Flux CLI, create a secret with your deploy token and point the secret to the new repository. 
For example:\n\n```\nflux create secret git flux-deploy-authentication \\\n         --url=https://gitlab.com/gitlab-partner-demos/red-hat-demos/web-app-manifests \\\n         --namespace=default \\\n         --username=$GITLAB_DEPLOY_TOKEN_USER \\\n         --password=$GITLAB_DEPLOY_TOKEN_PASS\n```\nExpected output:\n```\n► git secret 'flux-deploy-authentication' created in 'default' namespace\n```\n\nTo check if your secret was generated successfully, run:\n\n```\noc -n default get secrets flux-deploy-authentication -o yaml\n```\nExpected output:\n```\napiVersion: v1\ndata:\n  password: Base64EncodedPassword=\n  username: Base64EncodedUsername\nkind: Secret\nmetadata:\n  creationTimestamp: \"2023-04-20T18:22:33Z\"\n  name: flux-deploy-authentication\n  namespace: default\n  resourceVersion: \"8168670\"\n  uid: 16292254-83cd-4df2-8a9c-bc4c718e4b4a\ntype: Opaque\n```\n\nUnder data, you should see base64-encoded values associated with your token username and password.\n\nCongratulations! You now have a manifest repository, a deploy token, and a secret generated directly on your cluster.\n\n### Configure Flux to sync your manifests\nNext, tell flux-config to sync with the web-app-manifests repository.\n\nTo do so, create a [GitRepository resource](https://docs.openshift.com/container-platform/3.11/dev_guide/application_lifecycle/new_app.html) in OpenShift:\n\n1. Clone the flux repo to your machine.\n```\n# Remember that we already have the flux repo cloned into our home dir.\ncd ~/flux\ngit pull\n```\n\n2. 
In your local clone of flux, add the GitRepository file `clusters/my-cluster/web-app-manifests-source.yaml`:\n  \n```\n",[9,533],{"slug":3847,"featured":6,"template":684},"set-up-flux-for-gitops-on-openshift","content:en-us:blog:set-up-flux-for-gitops-on-openshift.yml","Set Up Flux For Gitops On Openshift","en-us/blog/set-up-flux-for-gitops-on-openshift.yml","en-us/blog/set-up-flux-for-gitops-on-openshift",{"_path":3853,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3854,"content":3860,"config":3865,"_id":3867,"_type":13,"title":3868,"_source":15,"_file":3869,"_stem":3870,"_extension":18},"/en-us/blog/simple-kubernetes-management-with-gitlab",{"title":3855,"description":3856,"ogTitle":3855,"ogDescription":3856,"noIndex":6,"ogImage":3857,"ogUrl":3858,"ogSiteName":669,"ogType":670,"canonicalUrls":3858,"schema":3859},"Simple Kubernetes management with GitLab","Follow our tutorial to provision a Kubernetes cluster and manage it with IAC using Terraform and Helm in 20 minutes or less.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749670037/Blog/Hero%20Images/auto-deploy-google-cloud.jpg","https://about.gitlab.com/blog/simple-kubernetes-management-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Simple Kubernetes management with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2022-11-15\",\n      }",{"title":3855,"description":3856,"authors":3861,"heroImage":3857,"date":3862,"body":3863,"category":769,"tags":3864},[831],"2022-11-15","\n\nKubernetes can be very complex and has dozens of tutorials out there on how to provision and manage a cluster. 
This tutorial aims to provide a simple, lightweight solution to provision a Kubernetes cluster and manage it with infrastructure as code (IaC) using Terraform and Helm in 20 minutes or less.\n\n**The final product of this tutorial will be two IaC repositories with fully functional CI/CD pipelines:**\n\n1. [gitlab-terraform-k8s](https://gitlab.com/gitlab-org/configure/examples/gitlab-terraform-eks) - A single source of truth to provision, configure, and manage your Kubernetes infrastructure using Terraform\n1. [cluster-management](https://gitlab.com/gitlab-org/project-templates/cluster-management) - A single source of truth to define the desired state of your Kubernetes cluster using the GitLab Agent for Kubernetes and Helm\n\n![Final Product](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/final-product.png){: .shadow}\n\n\n### Prerequisites\n- AWS or GCP account with permissions to provision resources\n- GitLab account \n- Access to a GitLab Runner\n- 20 minutes\n\n### An overview of this tutorial is as follows:\n\n1. Set up the GitLab Terraform Kubernetes Template 🏗️\n2. Register the GitLab Agent 🕵️\n3. Add in Cloud Credentials ☁️🔑\n4. Set up the Kubernetes Cluster Management Template 🚧\n5. Enjoy your Kubernetes Cluster completely managed in code! 👏\n\n## Set up the GitLab Terraform Kubernetes Template\n\nStart by importing the example project by URL - [https://gitlab.com/projects/new#import_project](https://gitlab.com/projects/new#import_project)\n\nTo import the project:\n\n1. In GitLab, on the top bar, select **Main menu > Projects > View all projects**.\n2. On the right of the page, select **New project**.\n3. Select **Import project**.\n4. Select **Repository by URL**.\n5. 
For the Git repository URL:\n- [GCP Google Kubernetes Engine](https://cloud.google.com/kubernetes-engine): https://gitlab.com/gitlab-org/configure/examples/gitlab-terraform-gke.git\n- [AWS Elastic Kubernetes Service](https://aws.amazon.com/eks/): https://gitlab.com/gitlab-org/configure/examples/gitlab-terraform-eks.git\n6. Complete the fields and select **Create project**.\n\n## Register the GitLab Agent\n\nWith your newly created **gitlab-terraform-k8s** repo, create a GitLab Agent for Kubernetes:\n\n1. On the left sidebar, select **Infrastructure > Kubernetes clusters**. Select **Connect a cluster (agent).**\n2. From the **Select an agent** dropdown list, select **eks-agent/gke-agent** and select **Register an agent**.\n3. GitLab generates a registration token for the agent. **Securely store this secret token, as you will need it later.**\n4. GitLab provides an address for the agent server (KAS). Securely store this as you will also need it later.\n5. Add this to the **gitlab-terraform-eks/.gitlab/agents/eks-agent/config.yaml** in order to allow the GitLab Agent to have access to your entire group.\n\n```yaml\nci_access:\n  groups:\n    - id: your-namespace-here\n```\n\n![Register GitLab Agent](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/register-gitlab-agent.png){: .shadow}\n\n\n## Add in your Cloud Credentials to CI/CD variables\n\n### [AWS EKS](https://aws.amazon.com/eks/)\n\nOn the left sidebar, select **Settings > CI/CD. Expand Variables**.\n1. Set the variable **AWS_ACCESS_KEY_ID** to your AWS access key ID.\n2. Set the variable **AWS_SECRET_ACCESS_KEY** to your AWS secret access key.\n3. Set the variable **TF_VAR_agent_token** to the agent token displayed in the previous task.\n4. 
Set the variable **TF_VAR_kas_address** to the agent server address displayed in the previous task.\n\n![Add in CI/CD variables](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/cicd-variables.png){: .shadow}\n\n\n### [GCP GKE](https://cloud.google.com/kubernetes-engine)\n\n1. To authenticate GCP with GitLab, create a GCP service account with the following roles: **Compute Network Viewer, Kubernetes Engine Admin, Service Account User, and Service Account Admin**. Both User and Admin service accounts are necessary. The User role impersonates the default service account when creating the node pool. The Admin role creates a service account in the kube-system namespace.\n2. **Download the JSON file** with the service account key you created in the previous step.\n3. On your computer, encode the JSON file to base64 (replace /path/to/sa-key.json with the path to your key):\n\n```\nbase64 -i /path/to/sa-key.json | tr -d '\\n'\n```\n\n- Use the output of this command as the **BASE64_GOOGLE_CREDENTIALS** environment variable in the next step.\n\n4. On the left sidebar, select **Settings > CI/CD. Expand Variables**.\n5. Set the variable **BASE64_GOOGLE_CREDENTIALS** to the base64 encoded JSON file you just created.\n6. Set the variable **TF_VAR_gcp_project** to your GCP’s project name.\n7. Set the variable **TF_VAR_agent_token** to the agent token displayed in the previous task.\n8. Set the variable **TF_VAR_kas_address** to the agent server address displayed in the previous task.\n\n## Run GitLab CI to deploy your Kubernetes cluster!\n\n![Deploy Kubernetes cluster](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/pipeline-view.png){: .shadow}\n\nWhen successfully completed, view the cluster in the AWS/GCP console!\n\n![AWS EKS](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/aws-eks.png){: .shadow}\n\n### You are halfway done! 
👏 Keep it up!\n\n## Set up the Kubernetes Cluster Management Project\n\nCreate a project from the cluster management project template - [https://gitlab.com/projects/new#create_from_template](https://gitlab.com/projects/new#create_from_template)\n\n1. In GitLab, on the top bar, select **Main menu > Projects > View all projects**.\n2. On the right of the page, select **New project**.\n3. Select **Create from template**.\n4. From the list of templates, next to **GitLab Cluster Management**, select **Use template**.\n5. Enter the project details. Ensure this project is created in the same namespace as the gitlab-terraform-k8s project.\n6. Select **Create project**.\n7. Once the project is created on the left sidebar, select **Settings > CI/CD. Expand Variables**.\n8. Set the variable KUBE_CONTEXT to point to the GitLab Agent. For example, `noah-ing-demos/infrastructure/gitlab-terraform-eks:eks-agent`.\n\n![Set Kube Context](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/kube-config.png){: .shadow}\n\n\n- **Uncomment the applications you'd like to be installed** into your Kubernetes cluster in the **helmfile.yaml**. In this instance I chose ingress, cert-manager, prometheus, and Vault. \n\n![Uncomment Applications in helmfile](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/helmfile.png){: .shadow}\n\nThat will trigger your **CI/CD pipeline** and it should look like this.\n\n![Cluster Management CI/CD](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/cluster-management-cicd.png){: .shadow}\n\nOnce completed, **go to the AWS/GCP console** and check out all the deployed resources!\n\n![Deployed EKS applications](https://about.gitlab.com/images/blogimages/2022-11-11-simple-kubernetes-management-with-gitlab/deployed-eks-applications.png){: .shadow}\n\n### Voila! 🎉\n\n## Enjoy your Kubernetes cluster completely defined in code! 
👏👏👏\n\nNow with these two repositories you can **manage a Kubernetes cluster entirely through code**:\n\n- For managing the Kubernetes cluster's infrastructure and configuring its resources you can make changes to the [gitlab-terraform-eks](https://gitlab.com/gitlab-org/configure/examples/gitlab-terraform-eks) repository you have setup. This project has a **Terraform CI/CD pipeline** that will allow you to **review, provision, configure, and manage your Kubernetes** infrastructure with ease.\n\n- For managing the desired state of the Kubernetes cluster, the [cluster-management](https://gitlab.com/gitlab-org/project-templates/cluster-management) repository has a **GitLab Agent** set up and will **deploy any Kubernetes objects defined in the helm files**.\n\n➡️ Bonus: If you'd like to deploy your own application to the Kubernetes cluster, then add to your **cluster-management** `helmfile` and see the GitLab Agent for Kubernetes roll it out with ease!\n\n\n## References\n- [Create a New GKE Cluster](https://docs.gitlab.com/ee/user/infrastructure/clusters/connect/new_gke_cluster.html)\n- [Create a New EKS Cluster](https://docs.gitlab.com/ee/user/infrastructure/clusters/connect/new_eks_cluster.html)\n- [Cluster Management Project](https://docs.gitlab.com/ee/user/clusters/management_project.html)\n\n\n## Related posts\n- [The ultimate guide to GitOps with GitLab](https://about.gitlab.com/blog/the-ultimate-guide-to-gitops-with-gitlab/)\n- [GitOps with GitLab: Infrastructure provisioning with GitLab and Terraform](https://about.gitlab.com/blog/gitops-with-gitlab-infrastructure-provisioning/)\n- [GitOps with GitLab: Connect with a Kubernetes cluster](https://about.gitlab.com/blog/gitops-with-gitlab-connecting-the-cluster/)\n",[9,1225,533,771,772,773],{"slug":3866,"featured":6,"template":684},"simple-kubernetes-management-with-gitlab","content:en-us:blog:simple-kubernetes-management-with-gitlab.yml","Simple Kubernetes Management With 
Gitlab","en-us/blog/simple-kubernetes-management-with-gitlab.yml","en-us/blog/simple-kubernetes-management-with-gitlab",{"_path":3872,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3873,"content":3878,"config":3883,"_id":3885,"_type":13,"title":3886,"_source":15,"_file":3887,"_stem":3888,"_extension":18},"/en-us/blog/simplify-your-cloud-account-management-for-kubernetes-access",{"title":3874,"description":3875,"ogTitle":3874,"ogDescription":3875,"noIndex":6,"ogImage":1842,"ogUrl":3876,"ogSiteName":669,"ogType":670,"canonicalUrls":3876,"schema":3877},"Simplify your cloud account management for Kubernetes access","In this tutorial, learn how to use the GitLab agent for Kubernetes and its user impersonation features for secure cluster access.\n\n","https://about.gitlab.com/blog/simplify-your-cloud-account-management-for-kubernetes-access","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Simplify your cloud account management for Kubernetes access\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2024-03-19\",\n      }",{"title":3874,"description":3875,"authors":3879,"heroImage":1842,"date":3880,"body":3881,"category":814,"tags":3882},[2211],"2024-03-19","We hear you: Managing cloud accounts is risky, tedious, and time-consuming, but also a must-have in many situations. You might run your Kubernetes clusters with one of the hyperclouds, and your engineers need to access at least the non-production cluster to troubleshoot issues quickly and efficiently. Sometimes, you also need to give special, temporary access to engineers on a production cluster.\n\nYou have also told us that access requests might not come very often, but when they do, they are urgent, and given the high security requirements around the process, they can take close to a week to fulfill. 
\n\nBy giving access to your cloud infrastructure, you automatically expose yourself to risks. As a result, it's a best practice to restrict access only to the resources the given user must have access to. However, cloud identity and access management (IAM) is complex by nature. \n\nIf you are using Kubernetes and you need to give access specifically to your clusters only, GitLab can help. Your user will be able to identify with your cluster, so you can configure the Kubernetes role-based access controls (RBAC) to restrict their access within the cluster. With GitLab, and specifically the GitLab agent for Kubernetes, you can start at the last step and focus only on the RBAC aspect.\n\n## What is the GitLab agent for Kubernetes?\n\nThe GitLab agent for Kubernetes is a set of GitLab components that allows a permanent, bi-directional streaming channel between your GitLab instance and your Kubernetes cluster (one agent per cluster). Once the agent connection is configured, you can share it across projects and groups within your GitLab instance, allowing a single agent to serve all the access needs of a cluster.\n\nCurrently, the agent has several features to simplify your Kubernetes management tasks:\n\n* [Integrates with GitLab CI/CD](https://docs.gitlab.com/ee/user/clusters/agent/ci_cd_workflow.html) for push-based deployments or regular cluster management jobs. The integration exposes a Kubernetes context per available agent in the Runner environment, and any tool that can take a context as an input (e.g. kubectl or helm CLI) can reach your cluster from the CI/CD jobs.\n* Integrates with the GitLab GUI, specifically the environment pages. 
Users can configure [an environment to show the Kubernetes resources](https://docs.gitlab.com/ee/ci/environments/kubernetes_dashboard.html) available in a specific namespace, and even set up a Flux resource to track the reconciliation of your applications.\n* Enables users to use the GitLab-managed channel to [connect to the cluster from their local laptop](https://docs.gitlab.com/ee/user/clusters/agent/user_access.html#access-a-cluster-with-the-kubernetes-api), without giving them cloud-specific Kubernetes access tokens.\n* Supports [Flux GitRepository reconciliations](https://docs.gitlab.com/ee/user/clusters/agent/gitops.html#immediate-git-repository-reconciliation) by triggering a reconciliation automatically on new commits in repositories the agent can access.\n* [Runs operational container scans](https://docs.gitlab.com/ee/user/clusters/agent/vulnerabilities.html) and shows the reports in the GitLab UI.\n* Enables you to enrich the [remote development](https://docs.gitlab.com/ee/user/project/remote_development/) offering with [workspaces](https://docs.gitlab.com/ee/user/workspace/).\n\n> Try simplifying your cloud account management for Kubernetes access today with [a free trial of GitLab Ultimate](https://gitlab.com/-/trials/new).\n\n## The agent and access management\n\nThe GitLab agent for Kubernetes, which is available for GitLab Ultimate and Premium, impersonates various GitLab-specific users when it acts on behalf of GitLab in the cluster.\n\n* For the GitLab CI/CD integration, the agent impersonates the CI job as the user, and enriches the user with group specific metadata that describe the project and the group.\n\n* For the environment and local connections, the agent impersonates the GitLab user using the connection, and similarly to the CI/CD integration, the impersonated Kubernetes user is enriched with group specific metadata, like roles in configured groups.\n\nAs this article is about using the agent instead of cloud accounts for cluster access, 
let’s focus on the environment and local connections setup.\n\n## An example setup\n\nTo offer a realistic setup, let’s assume that in our GitLab instance we have the following groups and projects:\n\n* `/app-dev-group/team-a/service-1`\n* `/app-dev-group/team-a/service-2`\n* `/app-dev-group/team-b/service-3`\n* `/platform-group/clusters-project`\n\nIn the above setup, the agents are registered against the `clusters-project` project and, in addition to other code, the project contains the agent configuration files:\n\n* `.gitlab/agents/dev-cluster/config.yaml`\n* `.gitlab/agents/prod-cluster/config.yaml`\n\nThe `dev-cluster` and `prod-cluster` directory names are actually the agent names as well, and registered agents and related events can be seen within the projects “Operations/Kubernetes clusters” menu item. The agent offers some minimal features by default, without a configuration file. To benefit from the user access features and to share the agent connection across projects and groups, a configuration file is required.\n\nLet’s assume that we want to configure the agents in the following way:\n\n* For the development cluster connection:\n\n    * Everyone with at least developer role in team-a should be able to read-write their team specific namespace `team-a` only.\n    * Everyone with group owner role in team-a should have namespace admin rights on the `team-a` namespace only.\n    * Members of `team-b` should not be able to access the cluster.\n\n* For the production cluster connection:\n\n    * Everyone with at least developer role in team-a should be able to read-only their team specific namespace `team-a` only.\n    * Members of `team-b` should not be able to access the cluster.\n\nFor the development cluster, the above setup requires an agent configuration file in `.gitlab/agents/dev-cluster/config.yaml` as follows:\n\n```yaml\nuser_access:\n  access_as:\n    user: {}\n  groups:\n    - id: app-dev-group/team-a # group_id=1\n    - id: 
app-dev-group/team-b # group_id=2\n```\n\nIn this code snippet we added the group ID of the specific groups in a comment. We will need these IDs in the following Kubernetes RBAC definitions:\n\n```yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: team-a-dev-can-edit\n  namespace: team-a\nroleRef:\n  name: edit\n  kind: ClusterRole\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - name: gitlab:group_role:1:developer\n    kind: Group\n```\n\nand...\n\n```yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: team-a-owner-can-admin\n  namespace: team-a\nroleRef:\n  name: admin\n  kind: ClusterRole\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - name: gitlab:group_role:1:owner\n    kind: Group\n```    \n\nThe above two code snippets can be applied to the cluster with the GitLab Flux integration or manually via `kubectl`. They describe role bindings for the `team-a` group members. It’s important to note that only the groups and projects from the agent configuration file can be targeted as RBAC groups. 
Therefore, the following RBAC will not work as the impersonated user resources don’t know about the referenced projects:\n\n```yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: team-a-dev-can-edit\n  namespace: team-a\nroleRef:\n  name: edit\n  kind: ClusterRole\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - name: gitlab:project_role:3:developer # app-dev-group/team-a/service-1 project ID is 3\n    kind: Group\n```\n\nFor the production cluster we need the same agent configuration under `.gitlab/agents/prod-cluster/config.yaml` and the following RBAC definitions:\n\n```yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: team-a-dev-can-read\n  namespace: team-a\nroleRef:\n  name: view\n  kind: ClusterRole\n  apiGroup: rbac.authorization.k8s.io\nsubjects:\n  - name: gitlab:group_role:1:developer\n    kind: Group\n```\n\nThese configurations allow project owners to set up the environment pages so members of `team-a` will be able to see the status of their cluster workloads in real-time and they should be able to access the cluster from their local computers using their favorite Kubernetes tools.\n\n## Explaining the magic\n\nIn the previous section, you learned how to set up role bindings for group members with specific roles. In this section, let's dive into the impersonated user and their attributes.\n\nWhile Kubernetes does not have a User or Group resource, its authentication and authorization scheme pretends to have it. Users have a username, can belong to groups, and can have other extra attributes.\n\nThe impersonated GitLab user carries the `gitlab:username:\u003Cusername>` in the cluster. For example, if our imaginary user Béla has the GitLab username `bela`, then in the cluster the impersonated user will be called `gitlab:username:bela`. This allows targeting of a specific user in the cluster.\n\nEvery impersonated user belongs to the `gitlab:user` group. 
Moreover, for every project and group listed in the agent configuration, we check the current user’s role and add it as a group. This is more easily understood through an example, so let’s modify a little bit the agent configuration we used above.\n\n```yaml\nuser_access:\n  access_as:\n    user: {}\n  projects:\n    - id: platform-group/clusters-project # project_id=1\n  groups:\n    - id: app-dev-group/team-a # group_id=1\n    - id: app-dev-group/team-b # group_id=2\n```\n\nFor the sake of example, let’s assume the contrived setup that our user Béla is a maintainer in the `platform-group/clusters-project` project, is a developer in `app-dev-group/team-a` group, and an owner of the `app-dev-group/team-a/service-1` project. In this case, the impersonated Kubernetes user `gitlab:username:bela` will belong to the following groups:\n\n* `gitlab:user`\n* `gitlab:project_role:1:developer`\n* `gitlab:project_role:1:maintainer`\n* `gitlab:group_role:1:developer`\n\nWhat happens is that we check Béla’s role in every project and group listed in the agent configuration, and set up all the roles that Béla has there. As Béla is a maintainer in `platform-group/clusters-project` (project ID 1), we add him to both the `gitlab:project_role:1:developer` and `gitlab:project_role:1:maintainer` groups. Note as well, that we did not add any groups for the `app-dev-group/team-a/service-1` project, only its parent group that appears in the agent configuration.\n\n## Simplifying cluster management\n\nSetting up the agent and configuring the cluster as presented above is everything you need to model the presented access requirements in the cluster. You don’t have to manage cloud accounts or add in-cluster account management tools like Dex. The agent for Kubernetes and its user impersonation features can simplify your infrastructure management work.\n\nWhen new people join your company, once they become members of the `team-a` they immediately get access to the clusters as configured above. 
Similarly, as someone leaves your company, you just have to remove them from the group and their access will be disabled. As we mentioned, the agent supports local access to the clusters, too. As that local access runs through the GitLab-side agent component, it will be disabled as well when users are removed from the `team-a` group.\n\nSetting up the agent takes around two-to-five minutes per cluster. Setting up the required RBAC might take another five minutes. In 10 minutes, users can get controlled access to a cluster, saving days of work and decreasing the risks associated with cloud accounts.\n\n## Get started today\n\nIf you want to try this approach and allow access to your colleagues to some of your clusters without managing cloud accounts, the following documentation pages should help you to get started:\n\n- On self-managed GitLab instances, you might need to [configure the GitLab-side component (called KAS)](https://docs.gitlab.com/ee/administration/clusters/kas.html) of the agent for Kubernetes first.\n\n- You can learn more about [all the Kubernetes management features here](https://docs.gitlab.com/ee/user/clusters/agent/), or you can immediately dive in by [installing an agent](https://docs.gitlab.com/ee/user/clusters/agent/install/), and [granting users access to Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/user_access.html).\n\n- You’ll likely want to [configure a Kubernetes dashboard](https://docs.gitlab.com/ee/ci/environments/kubernetes_dashboard.html) for your deployed application.\n\n> Try simplifying your cloud account management for Kubernetes access today with [a free trial of GitLab Ultimate](https://gitlab.com/-/trials/new).",[1865,814,1225,9],{"slug":3884,"featured":90,"template":684},"simplify-your-cloud-account-management-for-kubernetes-access","content:en-us:blog:simplify-your-cloud-account-management-for-kubernetes-access.yml","Simplify Your Cloud Account Management For Kubernetes 
Access","en-us/blog/simplify-your-cloud-account-management-for-kubernetes-access.yml","en-us/blog/simplify-your-cloud-account-management-for-kubernetes-access",{"_path":3890,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3891,"content":3896,"config":3900,"_id":3902,"_type":13,"title":3903,"_source":15,"_file":3904,"_stem":3905,"_extension":18},"/en-us/blog/speed-up-code-reviews-let-ai-handle-the-feedback-implementation",{"title":3892,"description":3893,"ogTitle":3892,"ogDescription":3893,"noIndex":6,"ogImage":1747,"ogUrl":3894,"ogSiteName":669,"ogType":670,"canonicalUrls":3894,"schema":3895},"Speed up code reviews: Let AI handle the feedback implementation","Discover how GitLab Duo with Amazon Q automates the implementation of code review feedback through AI, transforming a time-consuming manual process into a streamlined workflow.","https://about.gitlab.com/blog/speed-up-code-reviews-let-ai-handle-the-feedback-implementation","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Speed up code reviews: Let AI handle the feedback implementation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2025-06-10\",\n      }",{"title":3892,"description":3893,"authors":3897,"heroImage":1747,"date":856,"body":3898,"category":702,"tags":3899},[699],"You know that feeling when you've just submitted a merge request and the code review comments start rolling in? One reviewer wants the labels updated, another asks for side-by-side layouts, someone else requests bold formatting, and don't forget about that button color change. Before you know it, you're spending hours implementing feedback that, while important, takes you away from building new features. 
It's a time-consuming process that every developer faces, yet it feels like there should be a better way.\n\nWhat if you could have an AI assistant that understands code review feedback and automatically implements the changes for you? That's exactly what [GitLab Duo with Amazon Q](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/) brings to your development workflow. This seamless integration combines GitLab's comprehensive DevSecOps platform with Amazon Q's advanced AI capabilities, creating an intelligent assistant that can read reviewer comments and converts them directly into code changes. Instead of manually addressing each piece of feedback, you can let AI handle the implementation while you focus on the bigger picture.\n\n## How GitLab Duo with Amazon Q works\n\nWhen you're viewing a merge request with reviewer comments, you'll see feedback scattered throughout your code. Let's take the examples from earlier in this article: maybe you've received a request to update a form label here, a suggestion to display fields side-by-side there, or a note about making certain text bold. Each comment represents a task that normally you'd need to handle manually.\n\n![feedback on an MR](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673634/Blog/Content%20Images/1-show-comment.png)\n\nWith GitLab Duo with Amazon Q, you can simply enter the `/q dev` quick action in a comment. This prompts Amazon Q to analyze all the feedback and start modifying your code automatically. The AI agent understands the context of each comment and implements the requested changes directly in your codebase.\n\n![/q dev function prompting Amazon Q to analyze feedback](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673634/Blog/Content%20Images/2-invoke-q-dev.png)\n\nOnce Amazon Q processes the feedback, you can view all the updates in the \"Changes\" tab of your merge request. 
Every modification is clearly visible, so you can verify that the AI agent correctly interpreted and implemented each piece of feedback. You can then run your updated application to confirm that all the changes work as expected — that form label is updated, the fields are displayed side-by-side, the text is bold, and yes, that button is now blue.\n\nWatch the code review feedback process in action:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/31E9X9BrK5s?si=ThFywR34V3Bfj1Z-\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nProcessing code review feedback is a necessary but time-intensive part of software development.  GitLab Duo with Amazon Q evolves this manual process into an automated workflow, dramatically reducing the time between receiving feedback and implementing changes. By letting AI handle these routine modifications, you're free to focus on what really matters — building innovative features and solving complex problems.\n\nWith GitLab Duo with Amazon Q, you can:\n- Eliminate hours of manual feedback implementation\n- Accelerate your code review cycles\n- Maintain consistency in how feedback is addressed\n- Reduce context switching between reviewing comments and writing code\n- Ship features faster with streamlined deployment times\n\n> #### To learn more about GitLab Duo with Amazon Q visit us at an upcoming [AWS Summit in a city near you](https://about.gitlab.com/events/aws-summits/) or [reach out to your GitLab representative](https://about.gitlab.com/partners/technology-partners/aws/#form).\n\n## GitLab Duo with Amazon Q resources\n\n- [GitLab Duo with Amazon Q: Agentic AI optimized for AWS generally available](https://about.gitlab.com/blog/gitlab-duo-with-amazon-q-agentic-ai-optimized-for-aws/)\n- [GitLab and AWS partner page](https://about.gitlab.com/partners/technology-partners/aws/)\n- [GitLab Duo with Amazon Q 
documentation](https://docs.gitlab.com/user/duo_amazon_q/)\n- [What is agentic AI?](https://about.gitlab.com/topics/agentic-ai/)\n- [Agentic AI guides and resources](https://about.gitlab.com/blog/agentic-ai-guides-and-resources/)",[678,680,478,794,793,9],{"slug":3901,"featured":90,"template":684},"speed-up-code-reviews-let-ai-handle-the-feedback-implementation","content:en-us:blog:speed-up-code-reviews-let-ai-handle-the-feedback-implementation.yml","Speed Up Code Reviews Let Ai Handle The Feedback Implementation","en-us/blog/speed-up-code-reviews-let-ai-handle-the-feedback-implementation.yml","en-us/blog/speed-up-code-reviews-let-ai-handle-the-feedback-implementation",{"_path":3907,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3908,"content":3914,"config":3920,"_id":3922,"_type":13,"title":3923,"_source":15,"_file":3924,"_stem":3925,"_extension":18},"/en-us/blog/speed-up-your-monorepo-workflow-in-git",{"title":3909,"description":3910,"ogTitle":3909,"ogDescription":3910,"noIndex":6,"ogImage":3911,"ogUrl":3912,"ogSiteName":669,"ogType":670,"canonicalUrls":3912,"schema":3913},"Speed up your monorepo workflow in Git","Tap into the features that can reap huge savings in the long run for any developer team.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749665560/Blog/Hero%20Images/speedmonorepo.jpg","https://about.gitlab.com/blog/speed-up-your-monorepo-workflow-in-git","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Speed up your monorepo workflow in Git\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"John Cai\"}],\n        \"datePublished\": \"2022-09-06\",\n      }",{"title":3909,"description":3910,"authors":3915,"heroImage":3911,"date":3917,"body":3918,"category":769,"tags":3919},[3916],"John Cai","2022-09-06","\n\nMonorepos have grown in popularity in recent years. For many of us, they are a\npart of our daily Git workflows. 
The trouble is working with them can be slow. Speeding up\na developer's workflow can reap huge savings in the long run for any team.\n\nFirst, a word about monorepos. What does it mean for a repository to be a\nmonorepo anyway? Well, it depends who you ask and the definition has become\nmore flexible over time, but here are a few.\n\n## Characteristics of monorepos\n\nMonorepos have the following characteristics.\n\n### Multiple sub-projects\n\nThe typical definition of \"monorepo\" is a repository that contains multiple sub-projects. For instance, let's imagine a repository with a web-facing front end,\na backend, an iOS app directory, and an android app directory:\n\n```\nawesome-app/\n|\n|--backend/\n|\n|--web-frontend/\n|\n|--app-ios/\n|\n|--app-android/\n\n```\n\n`awesome-app` is a single repository:\n\n```\ngit clone https://my-favorite-git-hosting-service.com/awesome-app.git\n```\n\nThe [Chromium](https://github.com/chromium/chromium) repository is a good\nexample of this.\n\n### Large files\n\nRepositories can also grow to be very large if large files are checked in. In\nsome cases, binaries or other large assets such as images are checked into the\nrepository to have their history tracked. Other times, large files are inadvertently \nintroduced into the repository. The way Git history works, even if these files are\nimmediately removed, the single version that was checked in remains.\n\n### Old projects with deep histories\n\nWhile Git is very good at compressing text files, when a Git repository has a deep history,\nthe need to keep all versions of a file around can cause the size of the repository to be huge.\n\nThe [Linux](https://github.com/torvalds/linux) repository is a good example of this.\n\nFor instance, the Linux project's first Git commit is from [April 2005](https://github.com/torvalds/linux/commit/1da177e4c3f41524e886b7f1b8a0c1fc7321cac2).\n\nAnd a `git rev-list --all --count` gives us 1,120,826 commits! That's a lot of\nhistory! 
Getting into Git internals a little bit, Git keeps a commit object, and a\ntree object for each commit, as well as a copy of the files at that snapshot\nin history. This means a deep Git history means a lot of Git data.\n\n## Speeding up your Git workflow\n\nHere are some features to help speed up your Git workflow.\n\n### Sparse checkout\n\n[git sparse checkout](https://git-scm.com/docs/git-sparse-checkout) reduces the\nnumber of files you check out to a subset of the repository. (NOTE: This feature\nin Git is still marked experimental.) This is especially useful in the case of\n[many sub-projects in a repository](#multiple-sub-projects).\n\nTaking our [example](#multiple-sub-projects) of a monorepo with multiple\nsub-projects, let's say that as a front-end web developer I only need to make\nchanges to `web-frontend/`.\n\n```sh\n> git clone --no-checkout https://my-favorite-git-hosting-service.com/awesome-app.git\n> cd awesome-app\n> git sparse-checkout set web-frontend\n> git checkout\nYour branch is up to date with 'origin/master'.\n> ls\n> web-frontend README.md\n```\n\nOr, if you've already checked out a worktree, sparse checkout can be used to remove\nfiles from the worktree.\n\n\n```sh\n> git clone https://my-favorite-git-hosting-service.com/awesome-app.git\n> cd awesome-app\n> ls\n> backend web-frontend app-ios app-android README.md\n> git sparse-checkout set web-frontend\nUpdating files: 100% (103452/103452), done.\n> ls\n> web-frontend README.md\n```\n\nSparse checkout will only include the directories indicated, plus all files\ndirectly under the root repository directory.\n\nThis way, we only checkout the directories that we need, saving both space locally\nand time since each time `git pull` is done, only files that are checked out will\nneed to be updated.\n\nMore information can be found in the [docs](https://git-scm.com/docs/git-sparse-checkout)\nfor sparse checkout.\n\n### Partial clone\n\n[git partial 
clone](https://docs.gitlab.com/ee/topics/git/partial_clone.html#:~:text=Partial%20clone%20is%20a%20performance,0%20or%20later%20is%20required) has a similar goal to sparse checkout in reducing the number\nof files in your local Git repository. It provides the option to filter out\ncertain types of files when cloning.\n\nPartial clone is used by passing the `--filter` option to `git-clone`.\n\n```sh\ngit clone --filter=blob:limit=10m\n```\n\nThis will exclude any files over 10 megabytes from being copied to the local\nrepository. A full list of supported filters are included in the\n[docs for git-rev-list](https://git-scm.com/docs/git-rev-list#Documentation/git-rev-list.txt",[726,940,9],{"slug":3921,"featured":6,"template":684},"speed-up-your-monorepo-workflow-in-git","content:en-us:blog:speed-up-your-monorepo-workflow-in-git.yml","Speed Up Your Monorepo Workflow In Git","en-us/blog/speed-up-your-monorepo-workflow-in-git.yml","en-us/blog/speed-up-your-monorepo-workflow-in-git",{"_path":3927,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3928,"content":3933,"config":3937,"_id":3939,"_type":13,"title":3940,"_source":15,"_file":3941,"_stem":3942,"_extension":18},"/en-us/blog/streamline-devsecops-engineering-workflows-with-gitlab-duo",{"title":3929,"description":3930,"ogTitle":3929,"ogDescription":3930,"noIndex":6,"ogImage":847,"ogUrl":3931,"ogSiteName":669,"ogType":670,"canonicalUrls":3931,"schema":3932},"Streamline DevSecOps engineering workflows with GitLab Duo","Learn all the ways GitLab Duo's AI capabilities can improve the efficiency of development workflows. 
Includes in-depth tutorials and demos.","https://about.gitlab.com/blog/streamline-devsecops-engineering-workflows-with-gitlab-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Streamline DevSecOps engineering workflows with GitLab Duo\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2024-12-05\",\n      }",{"title":3929,"description":3930,"authors":3934,"heroImage":847,"date":2919,"body":3935,"category":702,"tags":3936},[1570],"It's 9 a.m. somewhere, and a DevOps engineer is starting their day. They check their [GitLab todo list](https://docs.gitlab.com/ee/user/todos.html) to see any mentions or tasks assigned to them, collaborating with other stakeholders in their organization. These tasks can include:\n\n- managing infrastructure\n- maintaining the configuration of resources\n- maintaining CI/CD pipelines\n- automating processes for efficiency\n- maintaining monitoring and alerting systems\n- ensuring applications are securely built and deployed\n- modernizing applications with containerization\n\nTo carry out these tasks, DevOps engineers spend a lot of time reading documentation, writing configuration files, and searching for help in forums, issues boards, and blogs. Time is spent studying and understanding concepts, and how tools and technologies work. When they don't work as expected, a lot more time is spent investigating why. 
New tools are released regularly to solve niche or existing problems differently, which introduces more things to learn and maintain context for.\n\n[GitLab Duo](https://about.gitlab.com/gitlab-duo/), our AI-powered suite of capabilities, fits into the workflow of DevSecOps engineers, enabling them to reduce time spent solving problems while increasing their efficiency.\n\nLet's explore how GitLab Duo helps streamline workflows.\n\n## Collaboration and communication\n\nDiscussions or requests for code reviews require spending time reading comments from everyone and carefully reviewing the work shared. GitLab Duo capabilities like Discussion Summary, Code Review Summary, and Merge Request Summary increase the effectiveness of collaboration by reducing the time required to get caught up on activities and comments, with more time spent getting the actual work done.\n\n### Merge Request Summary  \n\nWriting a detailed and clear summary of the change a merge request introduces is crucial for every stakeholder to understand what, why, and how a change was made. It's more difficult than it sounds to effectively articulate every change made, especially in a large merge request. [Merge Request Summary](https://docs.gitlab.com/ee/user/project/merge_requests/duo_in_merge_requests.html#generate-a-description-by-summarizing-code-changes) analyzes the change's diff and provides a detailed summary of the changes made.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/4muvSFuWWL4?si=1i2pkyqXZGn2dSbd\" title=\"GitLab Duo Chat is now aware of Merge Requests\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Discussion Summary\n\nImagine getting pulled into an issue with more than 100 comments and a lengthy description, with different perspectives and opinions shared. 
GitLab Duo [Discussion Summary](https://docs.gitlab.com/ee/user/discussions/index.html#summarize-issue-discussions-with-duo-chat) summarizes all the conversations in the issue and identifies tasks that need to be done, reducing time spent. \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/IcdxLfTIUgc?si=WXlINow3pLoKHBVM\" title=\"GitLab Duo Dicussion Summary\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n#### Code Review Summary\n\nA merge request has been assigned to a DevOps engineer for review in preparation for deployment, and they have spent time reviewing several parts of the change with multiple comments and suggestions. When [submitting a review](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/index.html#submit-a-review), a text box is presented to summarize the review, which often requires taking a pause and articulating the review. With [Code Review Summary](https://docs.gitlab.com/ee/user/project/merge_requests/reviews/index.html#submit-a-review), they get a concise summary automatically drafted leading to efficiency.\n\n## Manage infrastructure changes\n\nPart of a DevOps engineer's workflow is managing infrastructure changes. Infrastructure as code ([IaC](https://docs.gitlab.com/ee/user/infrastructure/iac/)) revolutionized this process, allowing for documentation, consistency, faster recovery, accountability, and collaboration. A challenge with IaC is understanding the requirements and syntax of the chosen tool and provider where the infrastructure will be created. A lot of time is then spent reviewing documentation and tweaking configuration files until they meet expectations. 
\n\nWith GitLab Duo [Code Explanation](https://docs.gitlab.com/ee/user/gitlab_duo/index.html#code-explanation) and [Code Suggestions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/index.html), you can prompt GitLab Duo to create configuration files in your tool of choice and learn about the syntax of those tools. With Code Suggestions, you can either leverage [code generation](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/index.html#code-generation), where you prompt GitLab Duo to generate the configuration, or code completion, which provides suggestions as you type while maintaining the context of your existing configurations.\n\nAs of the time this article was published, Terraform is [supported by default](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/supported_extensions.html#supported-languages) with the right extensions for your IDEs. Other technologies can be supported with [additional language support configuration](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/supported_extensions.html#add-support-for-more-languages) for the [GitLab Workflow extension](https://docs.gitlab.com/ee/editor_extensions/visual_studio_code/index.html).\n\nWhere a technology is not officially supported, [GitLab Duo Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html) is the powerful AI assistant that can help generate, explain, clarify, and troubleshoot your configuration, while maintaining context from selected text or opened files. 
Here are two demos where GitLab Duo helped create IaC with Terraform and AWS CloudFormation.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/saa2JJ57UaQ?si=Bu9jyQWwuSUcw8vr\" title=\"Manage your Infrastructure with Terraform and AI using GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cbr>\u003C/br>\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/KSLk2twXqiI?si=QDdERjbM0f7X2p23\" title=\"Deploying AWS Lambda function using AWS Cloudformation with help from GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Configuration management\n\nOnce your infrastructure is up, GitLab Duo Chat can also help create configuration files and refactor existing ones. These can be Ansible configurations for infrastructure or cloud-native configurations using Docker, Kubernetes, or Helm resource files. 
In the videos below, I demonstrate how GitLab Duo helps with Ansible, containerization, and application deployment to Kubernetes.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/t6ZCq_jkBwY?si=awCUdu1wCgOO21XR\" title=\"Configuring your Infrastructure with Ansible & GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cbr>\u003C/br>\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/KSLk2twXqiI?si=QDdERjbM0f7X2p23\" title=\"Containerizing your application with GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cbr>\u003C/br>\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/uroSxvMFqPU?si=GMNC7f2b7i_cjn6F\" title=\"Deploying your application to Kubernetes with Help from GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cbr>\u003C/br>\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/9yGDM00RlUA?si=kE5JZD_OEFcxeR7E\" title=\"Deploying to Kubernetes using Helm with help from GitLab Duo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Test, test, test\n\nWriting tests is an important part of building secure software, but it can be a chore and often becomes an afterthought. 
You can leverage the power of GitLab Duo to [generate tests for your code](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#write-tests-in-the-ide) by highlighting your code and typing `/tests` in the Chat panel of your IDE.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/zWhwuixUkYU?si=wI93j90PIiUMyGcV\" title=\"GitLab Duo Test Generation\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### CI/CD pipeline troubleshooting\n\nAutomation is an essential part of the DevOps engineer's workflow, and Continuous Integration/Deployment ([CI/CD](https://about.gitlab.com/topics/ci-cd/)) is central to this. You can trigger CI jobs on code push, merge, or on schedule. But, when jobs fail, you spend a lot of time reading through the logs to identify why, and for cryptic errors, it can take more time to figure out. [GitLab Duo Root Cause Analysis](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/) analyzes your failed job log and errors, and then recommends possible fixes. This reduces the time spent investigating the errors and finding a fix.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Sa0UBpMqXgs?si=IyR-skz9wJMBSicE\" title=\"GitLab Duo Root Cause Analysis\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Building secure applications\n\nPart of software development includes discovering vulnerabilities, either in the application or its dependencies. Some vulnerabilities are easy to fix, while others require creating a milestone with planning. 
GitLab Duo [Vulnerability Explanation](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/index.html#explaining-a-vulnerability) and [Vulnerability Resolution](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/index.html#vulnerability-resolution) reduce the time spent researching and fixing vulnerabilities. Vulnerability Explanation explains why a vulnerability is happening, its impact, and how to fix it, helping the DevOps engineer to upskill. Vulnerability Resolution takes it further – instead of just suggesting a fix, it creates a merge request with a fix for the vulnerability for you to review. \n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/MMVFvGrmMzw?si=Fxc4SeOkCBKwUk_k\" title=\"GitLab Duo Vulnerability Explanation\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n\u003Cbr>\u003C/br>\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/VJmsw_C125E?si=XT3Qz5SsX-ISfCyq\" title=\"GitLab Duo Vulnerability resolution\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## More work done with less stress\n\nWith GitLab Duo, DevOps engineers can do more work deploying and maintaining secure applications, while acquiring more skills with the detailed responses from GitLab Duo Chat.\n\n> [Sign up for a free 60-day trial of GitLab Duo](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/) to get started today!",[704,108,835,478,9,678],{"slug":3938,"featured":90,"template":684},"streamline-devsecops-engineering-workflows-with-gitlab-duo","content:en-us:blog:streamline-devsecops-engineering-workflows-with-gitlab-duo.yml","Streamline Devsecops Engineering Workflows With Gitlab 
Duo","en-us/blog/streamline-devsecops-engineering-workflows-with-gitlab-duo.yml","en-us/blog/streamline-devsecops-engineering-workflows-with-gitlab-duo",{"_path":3944,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3945,"content":3951,"config":3955,"_id":3957,"_type":13,"title":3958,"_source":15,"_file":3959,"_stem":3960,"_extension":18},"/en-us/blog/structuring-the-gitlab-package-registry-for-enterprise-scale",{"title":3946,"description":3947,"ogTitle":3946,"ogDescription":3947,"noIndex":6,"ogImage":3948,"ogUrl":3949,"ogSiteName":669,"ogType":670,"canonicalUrls":3949,"schema":3950},"Structuring the GitLab Package Registry for enterprise scale","Learn how to leverage GitLab's unique project-based publishing model alongside root-group-level consumption to create a secure, flexible package management strategy.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749662332/Blog/Hero%20Images/blog-image-template-1800x945__23_.png","https://about.gitlab.com/blog/structuring-the-gitlab-package-registry-for-enterprise-scale","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Structuring the GitLab Package Registry for enterprise scale\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tim Rizzi\"}],\n        \"datePublished\": \"2025-02-19\",\n      }",{"title":3946,"description":3947,"authors":3952,"heroImage":3948,"date":2881,"body":3953,"category":678,"tags":3954},[916],"As organizations grow, managing internal packages becomes increasingly complex. While traditional package managers, like JFrog Artifactory and Sonatype Nexus, use a centralized repository approach, GitLab takes a different path that aligns with modern development teams' work. 
In this post, we'll explore how to effectively structure your GitLab Package Registry for enterprise scale, focusing on Maven and npm packages as examples.\n\n## Understanding the GitLab Package Registry model\n\nIf you're coming from a traditional package manager, GitLab's approach might initially seem different. Instead of a single centralized repository, GitLab integrates package management directly into your existing project and group structure. This means:\n\n- Teams publish packages to specific projects where the code lives\n- Teams consume packages from root group registries that aggregate all packages below them\n- Access control inherits from your existing GitLab permissions\n\nThis model offers several advantages:\n\n- Clear ownership of packages alongside their source code\n- Granular access control without additional configuration\n- Simplified CI/CD integration\n- Natural alignment with team structures\n- Single URL for accessing all company packages through root group consumption\n\n### The power of root group package registry\n\nWhile GitLab supports package consumption at various group levels, using the root group level has emerged as a best practice among our users. 
Here's why:\n\n- **Single access point:** One URL provides access to all private packages across your organization\n- **Consistent package naming:** Group-level endpoints allow teams to maintain their preferred naming conventions without conflicts\n- **Simplified configuration:** All developers can use the same configuration to access packages\n- **Secure access management:** Combines with deploy tokens for easy rotation and access control\n- **Hierarchical organization**: Naturally maps to your organizational structure while maintaining unified access\n\n## Real-world example: Enterprise structure\n\nLet's look at how this works in practice with a large enterprise:\n\n```\ncompany/ (root group)\n├── retail-division/\n│   ├── shared-libraries/     # Division-specific shared code\n│   └── teams/\n│       ├── checkout/        # Team publishes packages here\n│       └── inventory/       # Team publishes packages here\n├── banking-division/\n│   ├── shared-libraries/    # Division-specific shared code\n│   └── teams/\n│       ├── payments/       # Team publishes packages here\n│       └── fraud/         # Team publishes packages here\n└── shared-platform/        # Enterprise-wide shared code\n    ├── java-commons/      # Shared Java libraries\n    └── ui-components/     # Shared UI components\n```\n\n### Publishing configuration\n\nTeams publish packages to their specific project registries, maintaining clear ownership:\n\n1. Maven example\n\n```xml\n\u003C!-- checkout/pom.xml -->\n\u003CdistributionManagement>\n    \u003Crepository>\n        \u003Cid>gitlab-maven\u003C/id>\n        \u003Curl>${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/maven\u003C/url>\n    \u003C/repository>\n\u003C/distributionManagement>\n```\n\n2. 
npm example\n\n```json\n// ui-components/package.json\n{\n  \"name\": \"@company/ui-components\",\n  \"publishConfig\": {\n    \"registry\": \"${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/npm/\"\n  }\n}\n```\n\n### Consuming configuration\n\nThe power of root group consumption comes into play here. All teams configure a single endpoint for package access:\n\n1. Maven example\n\n```xml\n\u003C!-- Any project's pom.xml -->\n\u003Crepositories>\n    \u003Crepository>\n        \u003Cid>gitlab-maven\u003C/id>\n        \u003Curl>https://gitlab.example.com/api/v4/groups/company/-/packages/maven\u003C/url>\n    \u003C/repository>\n\u003C/repositories>\n```\n\n2. npm example\n\n```\n# Any project's .npmrc\n@company:registry=https://gitlab.example.com/api/v4/groups/company/-/packages/npm/\n```\n\nThis configuration automatically provides access to all packages across your organization while maintaining the benefits of project-based publishing.\n\n## Authentication and access control\n\nGitLab's model simplifies authentication through deploy tokens and CI/CD integration.\n\n### For CI/CD pipelines\n\nGitLab automatically handles authentication in pipelines using `CI_JOB_TOKEN`:\n\n```yaml\n# .gitlab-ci.yml\npublish:\n  script:\n    - mvn deploy  # or npm publish\n  # CI_JOB_TOKEN provides automatic authentication\n```\n\n### For development\n\nUse group deploy tokens for package consumption:\n\n- Create read-only deploy tokens at the root group level\n- Rotate tokens periodically for security\n- Share a single configuration across all developers\n\n## Benefits of root group package registry\n\n1. Simplified configuration\n   - One URL for all package access\n   - Consistent setup across teams\n   - Easy token rotation\n2. Clear ownership\n   - Packages stay with their source code\n   - Teams maintain control over publishing\n   - Version history tied to project activity\n3. 
Natural organization\n   - Matches your company structure\n   - Supports team autonomy\n   - Enables cross-team collaboration\n\n## Getting started\n\n1. Set up your root group\n   - Create a clear group structure\n   - Configure appropriate access controls\n   - Create group deploy tokens\n2. Configure team projects\n   - Set up project-level publishing\n   - Implement CI/CD pipelines\n   - Document package naming conventions\n3. Standardize consumption\n   - Configure root group registry access\n   - Share deploy tokens securely\n   - Document package discovery process\n\n## Summary\n\nGitLab's package registry model, particularly when leveraging root group consumption, offers a powerful solution for enterprise package management. By combining project-based publishing with root group consumption, organizations get the best of both worlds: clear ownership and simplified access. This approach scales naturally with your organization while maintaining security and ease of use.\n\nStart by implementing this model with a single team or division, and expand as you see the benefits of this integrated approach. Remember that while this post focused on Maven and npm, the same principles apply to all package types supported by GitLab.\n\n> Get started with package registries today! 
Sign up for a [free, 60-day trial of GitLab Ultimate](https://about.gitlab.com/free-trial/).\n",[835,9,1000],{"slug":3956,"featured":90,"template":684},"structuring-the-gitlab-package-registry-for-enterprise-scale","content:en-us:blog:structuring-the-gitlab-package-registry-for-enterprise-scale.yml","Structuring The Gitlab Package Registry For Enterprise Scale","en-us/blog/structuring-the-gitlab-package-registry-for-enterprise-scale.yml","en-us/blog/structuring-the-gitlab-package-registry-for-enterprise-scale",{"_path":3962,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3963,"content":3968,"config":3974,"_id":3976,"_type":13,"title":3977,"_source":15,"_file":3978,"_stem":3979,"_extension":18},"/en-us/blog/take-advantage-of-git-rebase",{"title":3964,"description":3965,"ogTitle":3964,"ogDescription":3965,"noIndex":6,"ogImage":3911,"ogUrl":3966,"ogSiteName":669,"ogType":670,"canonicalUrls":3966,"schema":3967},"Take advantage of Git rebase","Tap into the Git rebase features to improve your workflow.","https://about.gitlab.com/blog/take-advantage-of-git-rebase","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Take advantage of Git rebase\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Christian Couder\"}],\n        \"datePublished\": \"2022-10-06\",\n      }",{"title":3964,"description":3965,"authors":3969,"heroImage":3911,"date":3971,"body":3972,"category":769,"tags":3973},[3970],"Christian Couder","2022-10-06","\n\nThese days, developers spend a lot of time reviewing merge requests\nand taking these reviews into account to improve the code. We'll discuss how\n[Git rebase](https://git-scm.com/docs/git-rebase) can help in\nspeeding up these review cycles. 
But first, let's take a look at some\nworkflow considerations.\n\n## Different ways to rework a merge request\n\nA developer who worked on some code changes and created a merge\nrequest with these changes will often have to rework them. Why does\nthis happen? Tests can fail, bugs are found, or reviewers suggest\nimprovements and find shortcomings.\n\n### Simple but messy method: add more commits\n\nOne way to rework the code changes is to make more changes in some new\ncommits on top of the branch that was used to create the merge\nrequest, and then push the branch again to update the merge\nrequest.\n\nWhen a number of commits have been added in this way, the merge\nrequest becomes problematic:\n\n- It's difficult to review by looking at all the changes together.\n- It's difficult to review the commits separately as they may contain different unrelated changes, or even multiple reworks of the same code.\n\nReviewers find it easier to review changes split into a number of small,\nself-contained commits that can be reviewed individually.\n\n### Pro method: rebase!\n\nA better method to prepare or rework a merge request is to always\nensure that each commit contains small, self-contained, easy-to-review\nchanges.\n\nThis means that all the commits in the branch may need reworking\ninstead of stacking on yet more commits. This approach might seem much\nmore complex and tedious, but `git rebase` comes to the rescue!\n\n## Rework your commits with `git rebase`\n\nIf your goal is to build a merge request from a series of small,\nself-contained commits, your branch may need significant rework before its\ncommits are good enough. 
When the commits are ready, you can push the branch\nand update or create a merge request with this branch.\n\n### Start an interactive rebase\n\nIf your branch is based on `main`, the command to rework your branch\nis:\n\n```plaintext\ngit rebase -i main\n```\n\nI encourage you to create [a Git alias](https://git-scm.com/book/en/v2/Git-Basics-Git-Aliases),\nor a shell alias or function for this command right away, as you will\nuse it very often.\n\nThe `-i` option passed to `git rebase` is an alias for\n`--interactive`. It starts\n[an 'interactive' rebase](https://git-scm.com/docs/git-rebase#Documentation/git-rebase.txt---interactive)\nwhich will open your editor. In it, you will find a list of the\ncommits in your branch followed by commented-out lines beginning with\n`#`. The list of commits looks like this:\n\n```plaintext\npick 1aac632db2 first commit subject\npick a385014ad4 second commit subject\npick 6af12a88cf other commit subject\npick 5cd121e2a1 last commit subject\n```\n\nThese lines are instructions for how `git rebase` should handle these\ncommits. The commits are listed in chronological order, with the\noldest commit at the top. (This order is the opposite of the default\n`git log` order.) What do these lines contain?\n\n- An instruction (here, `pick`) that tells Git what action to take\n- An abbreviated commit ID\n- A commit subject to help you identify the commit contents\n\n### Edit the instruction list\n\nYou can edit these instructions! 
When you quit your text editor, `git rebase`\nreads the instructions you've just edited, and performs them\nin sequence to recreate your branch the way you want.\n\nAfter the instructions for all commits, a set of commented-out lines\nexplain how to edit the instruction lines, and how each instruction\nwill change your branch:\n\n- If you **delete a commit's entire instruction line** from the list,\n  that commit won't be recreated.\n- If you **reorder the instruction lines**, the commits will be\n  recreated in the order you specify.\n- If you **change the action** from `pick` to something else, such as\n  `squash` or `reword`, Git performs the action you specify on that\n  commit.\n- You can even **add new instruction lines** before, after, or between\n  existing lines.\n\nIf the comment lines aren't enough, more information about what you\ncan do and how it works is available in:\n\n- The [Git Tools - Rewriting History](https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History)\n  section of the \"Pro Git\" book\n- The [Interactive mode](https://git-scm.com/docs/git-rebase#_interactive_mode)\n  section of the `git rebase` documentation\n\n### Continue or abort the rebase\n\nAn interactive rebase can stop if there is a conflict (as a regular\nrebase would) or if you used an instruction like `edit` in the\ninstruction line. This allows you to make some changes, like splitting\nthe current commit into two commits, or fixing the rebase conflict if\nthere is one. You can then either:\n\n- Continue the interactive rebase with `git rebase --continue`.\n- Abort the rebase altogether with `git rebase --abort`.\n\n(These `git rebase` options also work when a regular, non-interactive\nrebase stops.)\n\n## Further tips and benefits\n\n### Try different instructions\n\nI recommend you try out the different instructions you can use in\neach instruction line, especially `reword`, `edit`, `squash`, and `fixup`. 
You'll\nsoon want to use the abbreviated versions of these instructions: `r`,\n`e`, `s`, and `f`.\n\n### Run shell commands in your rebase\n\nYou might also have noticed an `exec \u003Ccommand>` instruction that\nallows you to run any shell command at any point in the interactive rebase.\nI've found it more useful for non-interactive rebases, such as:\n\n```plaintext\ngit rebase --exec 'make test' main\n```\n\n(It's not an interactive rebase because it doesn't contain the `-i` flag.)\n\nThe `--exec \u003Ccommand>` flag allows you to run any shell command after\neach rebased commit, stopping if the shell command fails (which is\nsignaled by a non-zero exit code).\n\n### Test all your commits\n\nPassing a command to build your software and run its tests, like\n`make test`, to `--exec` will check that each commit in your branch\nbuilds correctly and passes your tests.\n\nIf `make test` fails, the rebase stops. You can then fix the current\ncommit right away, and continue the rebase to test the next\ncommits.\n\nChecking that each commit builds cleanly and passes all the tests ensures\nyour code base is always in a good state. It's especially useful if\nyou want to take advantage of\n[Git bisect](https://git-scm.com/docs/git-bisect) when you encounter\nregressions.\n\n## Conclusion\n\nIn Git, a rebase is a very versatile and useful tool to rework\ncommits. Use it to achieve a workflow with high-quality changes\nproposed in high-quality commits and merge requests. It makes your\ndevelopers and reviewers more efficient. 
Code reviews and debugging also become easier and more effective.\n\n**EDIT:** Check out our [follow-up post on how you can apply this in real life](/blog/rebase-in-real-life/).\n",[726,940,877,9],{"slug":3975,"featured":6,"template":684},"take-advantage-of-git-rebase","content:en-us:blog:take-advantage-of-git-rebase.yml","Take Advantage Of Git Rebase","en-us/blog/take-advantage-of-git-rebase.yml","en-us/blog/take-advantage-of-git-rebase",{"_path":3981,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":3982,"content":3988,"config":3993,"_id":3995,"_type":13,"title":3996,"_source":15,"_file":3997,"_stem":3998,"_extension":18},"/en-us/blog/the-ultimate-guide-to-enabling-saml",{"title":3983,"description":3984,"ogTitle":3983,"ogDescription":3984,"noIndex":6,"ogImage":3985,"ogUrl":3986,"ogSiteName":669,"ogType":670,"canonicalUrls":3986,"schema":3987},"The ultimate guide to enabling SAML and SSO on GitLab.com","Learn how to make full use of SAML and SSO security features on the GitLab DevSecOps platform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666495/Blog/Hero%20Images/cover-1800x945.png","https://about.gitlab.com/blog/the-ultimate-guide-to-enabling-saml","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The ultimate guide to enabling SAML and SSO on GitLab.com\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Bradley Lee\"}],\n        \"datePublished\": \"2023-09-14\",\n      }",{"title":3983,"description":3984,"authors":3989,"heroImage":3985,"date":1825,"body":3991,"category":814,"tags":3992},[3990],"Bradley Lee","\nAs a follow-on to the recent blog, [The ultimate guide to securing your code on\nGitLab.com](https://about.gitlab.com/blog/securing-your-code-on-gitlab/),\nwe recommended enabling SAML (Security Assertion Markup Language) and SSO (single\nsign-on) to enable tighter control over code access. 
Let’s take a deep dive into\nhow to enable SAML and SSO on GitLab.com.\n\n## What are SAML and SSO?\nSAML is an open standard, which service providers (like GitLab.com) and\nidentity providers (commonly referred to as IdPs) use to communicate\nauthentication data. SSO is provided by IdPs, such as Okta and Entra ID\n(formerly Azure AD), and enables users to log into multiple systems or service\nproviders through a single interface with a single set of credentials.\n\nAs with any configuration, there should be thoughtful and careful planning when\nenabling SSO.\n\n### What are the benefits of SSO?\nIn general, enabling SSO streamlines the user experience by unifying the login\nprocess and reducing the account and password bloat required for multiple\nenterprise applications. Enabling SSO also adds an extra layer of security and\nmanagement efficiency for identity management teams by providing a single\nsource of truth for authentication. Below, you’ll learn how SAML SSO applies\nspecifically to GitLab.com.\n\n## Configuring SSO and SAML for GitLab.com\nPremium and Ultimate tiers can enable SSO in the settings available at the\nnamespace or top level group.\n\n### Enabling SSO at the group level\nBefore getting started, you’ll need a few key\npieces of information from your chosen IdP:\n- The IdP SSO URL\n- The certificate fingerprint provided by the IdP application\n\nOnce these key pieces are entered, check the “Enable SAML\nauthentication for this group” box. \n\n### How user accounts are linked\nBefore we proceed further into configuration, let’s take a look at how GitLab\nauthenticates against the IdP.\n\nFor GitLab.com, each user who requires access to\nthe system must have an account on GitLab.com. By default, when a user first\nattempts logging into GitLab via SSO, GitLab will receive the SAML assertion\nand validate if the identity (specifically the email address) is linked to a\nGitLab.com account. 
If not, GitLab will request the user either login to an\nexisting account or create a new account. In most instances, this may not be\ndesired behavior; however, we will address this later in the process. We’ve\nprovided a flowchart below to help you navigate the provisioning flow.\n\n![image of saml group links](https://about.gitlab.com/images/blogimages/2023-09-14-ultimate-guide-to-enabling-saml/saml-provisioning.png)\n\n### Enforcing SSO\nTo further increase security, there are two options available for enforcing\nSSO. Assuming neither are checked, users with access to the namespace can log\nin with either the SSO credentials or the GitLab.com credentials.\n\nHere is a working example that we can use to follow along as we discuss how the\nconfiguration options affect our baseline. Let’s consider a user in the IdP\nwhere the username is `idpusername` and contains a super secret password:\n`idppassword` (apologies, security professionals). Taking into account the\ninformation we just learned about account linking, let us also assume our demo\nuser created a new account following the prompt from an SSO login with a\nusername of `gitlabusername` and `gitlabpassword` as an even more secure\npassword.\n\n#### Enforcing SSO only for web\nWhen enabling the “Enforce SSO-only authentication for web activity for this\ngroup” setting, all members must now access all groups and projects under the\nhierarchy using the configured SSO login regardless of whether they have an\nexisting SAML identity. As we mentioned prior, with this flag disabled, our\n`idpusername` user will be able to log into the GitLab namespace with either\nthe `idpusername` or `gitlabusername` credential sets. 
When we enable this\nsetting for web-based activity ([further details in\ndocs](https://docs.gitlab.com/ee/user/group/saml_sso/#sso-only-for-web-activity-enforcement)),\nour group is now only accessible by the `idpusername` credential set.\n\n#### Enforcing SSO only for Git proxy\nVery similar to enforcing SSO for web, when the “Enforce SSO-only\nauthentication for Git and Dependency Proxy” activity for this group option is\nenabled, a few things happen:\n- Calling an API endpoint that involves Git activity requires SSO.\n- For Git activity over SSH and HTTPS, users must have at least one active session signed-in through SSO before they can push to or pull.\n\nThere is a strong recommendation to enable both of these settings to take full\nadvantage of the benefits of SSO for users and administrators through\ncentralized authentication.\n\n### Enterprise user support\nNow that we know how some of the configuration options can help secure access,\nlet’s take a deeper dive into user management. Consider the following scenario:\nOur `idpusername` user has decided to pursue another opportunity outside of the\ndomain. Based on what we have configured now, once the account has been\ndeprovisioned from the IdP, it should no longer have access to anything secured\nbehind it on GitLab.com. However, while the user will not have access, the\nassociated user ID and roles still remain until manually removed. This is where\nEnterprise users come in.\n\n#### What are Enterprise users in GitLab?\nIf you look closely, any user that has a linked SSO account will carry a `SAML`\nbadge in the member list. GitLab also has an associated `Enterprise` badge\nthat grants additional management functionality through SSO. 
For a user to\ncarry the `Enterprise` badge, the user must either have the initial GitLab.com account creation initiated by a SAML SSO login or have the initial GitLab.com account created by SCIM.\n\n#### What is SCIM?\nSCIM, or System for Cross-domain Identity Management, is another standard\nused in conjunction with SAML, primarily for provisioning and deprovisioning\nacross multiple systems. By enabling SCIM for your GitLab.com group (which is\ncurrently supported with Entra ID and Okta), you can enable automatic\nprovisioning and deprovisioning of accounts.\n\nIf we look back at some of our scenarios, without SCIM, our `idpusername` user\nwas prompted to create or link a GitLab.com account on first login. With SCIM\nenabled, this process is handled automatically based on information provided\nand managed by the IdP and is completely transparent to the end user. The\nsecond half of our scenario, where our `idpusername` user is deprovisioned from\nthe IdP, also is solved with automation via SCIM. In this instance, when the\nuser is removed on the IdP side, SCIM automatically disconnects the SAML\nidentity from the GitLab.com account and removes the user from the GitLab.com\ngroup.\n\n#### Protecting your intellectual property\nAnother important feature of Enterprise users is the ability to control two\nvery important user settings that are not accessible to group administrators on\nGitLab.com. Since all users require an account on GitLab.com, they are also\ngranted access to a personal user namespace. For example, our `idpusername` will have access to our Acme Corp. group at `.com/acmecorp`, and will also have\naccess to their own personal space at `.com/idpusername`. One common concern with this is the ability for users to take code out of the organization\nnamespace and commit to their own personal namespace.\n\nWith Enterprise users, we have two settings that we can control based on attributes received in the SAML\nresponse. 
These keys are `projects_limit` and `can_create_group`. The\n`projects_limit` is an integer value that sets the amount of projects a user\ncan create in their personal namespace. When set to `0`, this effectively\ndisables project creation in that space. Similarly, `can_create_group` is a\nboolean `true` or `false` value that indicates whether a user can create new\ngroups.\n\n### Managing roles with SAML\nNow that we know the ins and outs of creating and removing users with SAML and\nSCIM, how can we leverage our work to help manage our active users? In this\nfinal section, we’ll take a look at why we recommend setting default membership\nto \"Minimal Access\" and how to leverage group memberships in the IdP.\n\n#### Why Minimal Access?\nIn the [Ultimate guide to securing your code on GitLab](https://about.gitlab.com/blog/securing-your-code-on-gitlab/),\nwe recommend setting the default membership role to Minimal Access, and\noperating with the concept of least privilege. Roles can be elevated as needed\nin subgroups or individual projects while preventing visibility to projects or\nsubgroups where the user is not explicitly granted another role. By default,\nthis option is set to Guest, which will allow all provisioned users guest\naccess to the repositories. Default membership controls are available at the\ntop-level group, along with the SAML and SSO settings. For automation at the\nsubgroup level, we can leverage SAML Group Sync.\n\n#### Configuring SAML Group Sync with SAML Group Links\nBefore we dive into the configuration, there is one very important step we need\nto take. The configured SAML assertion that is sent MUST include an attribute\nnamed `Groups` or `groups`. 
If SAML Group Links are present without the\nattribute in the assertion, users may be removed from the group or reverted to\nMinimal Access.\n\nAfter we ensure our assertions contain the necessary information, we can start\nusing SAML Group Links to automatically assign membership roles to GitLab\ngroups based on group membership in the IdP. Let’s build on our demo user\n`idpusername` by considering the following:\n- `idpusername` is a maintainer on the acme-web project.\n- The `acme-web` project exists under the `acme-corp` namespace, under subgroup `acme-com`.\n- The full path to the project would be `.com/acme-corp/acme-com/acme-web`.\n- `idpusername` should also be granted developer access for the `acme-db` project, which is also under the `acme-com` group.\n- In our IdP, `idpusername` is a member of the IdP group `idp-acme-com`.\n\nSAML group links allow us to map IdP group memberships to role assignments at\nthe GitLab group level. In this scenario, we can create a group link at the\n`acme-com` group in GitLab that maps the IdP group `idp-acme-com` to the\ndeveloper role at the `acme-com` group.\n\nDue to inheritance, our `idpusername`\nuser will be granted developer access and associated visibility to every\nproject and group that falls under the GitLab `acme-com` group automatically by\nvirtue of the IdP group membership, because we’re working under the concept of\nleast privilege for the `acme-web` project.\n\nThe `idpusername` user’s role can\nbe elevated to maintainer directly in the project. From a user perspective,\n`idpusername` would still carry the Minimal Access role at the `acme-corp`\ngroup as well. 
This allows a separation of access management between\nengineering and identity management teams and allows role management to be\nflexible with guardrails.\n\n![image of saml group links](https://about.gitlab.com/images/blogimages/2023-09-14-ultimate-guide-to-enabling-saml/saml-group-links.png)\n\nWith this approach, it’s important to find that balance between what is managed\nin the IdP and what is managed in GitLab. It’s possible to have hundreds of\ngroup mappings to roles in the IdP and almost completely remove role management\nwithin GitLab and vice versa. The flexibility that GitLab allows enables you to\nfind the best solution that works for you. Building on our example, if we hire\nanother engineer for the `acme-com` project, they can be added to the GitLab\napplication in the IdP, and added to the `idp-acme-com` group. This\nautomatically assigns them the developer role at the `acme-com` group and for\nall projects under it, while limiting access to any other groups outside of\n`acme-com` in the namespace.\n\n## Learn more\nWe’ve covered how to get started with enabling SAML and SSO on your GitLab.com\ngroup, along with how to leverage the features to programmatically manage users\nand roles with real examples. 
For more information, see the full [SAML SSO for\nGitLab.com groups](https://docs.gitlab.com/ee/user/group/saml_sso/)\ndocumentation.\n\nCover image by [Towfiqu barbhuiya](https://unsplash.com/photos/FnA5pAzqhMM) on [Unsplash](https://unsplash.com)\n{: .note}\n",[814,9,478],{"slug":3994,"featured":6,"template":684},"the-ultimate-guide-to-enabling-saml","content:en-us:blog:the-ultimate-guide-to-enabling-saml.yml","The Ultimate Guide To Enabling Saml","en-us/blog/the-ultimate-guide-to-enabling-saml.yml","en-us/blog/the-ultimate-guide-to-enabling-saml",{"_path":4000,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4001,"content":4006,"config":4011,"_id":4013,"_type":13,"title":4014,"_source":15,"_file":4015,"_stem":4016,"_extension":18},"/en-us/blog/the-ultimate-guide-to-gitops-with-gitlab",{"title":4002,"description":4003,"ogTitle":4002,"ogDescription":4003,"noIndex":6,"ogImage":2226,"ogUrl":4004,"ogSiteName":669,"ogType":670,"canonicalUrls":4004,"schema":4005},"The ultimate guide to GitOps with GitLab","This eight-part tutorial series demonstrates how to use GitLab as a best-in-class GitOps tool.","https://about.gitlab.com/blog/the-ultimate-guide-to-gitops-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The ultimate guide to GitOps with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Viktor Nagy\"}],\n        \"datePublished\": \"2022-04-07\"\n      }",{"title":4002,"description":4003,"authors":4007,"heroImage":2226,"date":4008,"body":4009,"category":769,"tags":4010},[2211],"2022-04-07","\n\nIt is possible to use GitLab as a best-in-class GitOps tool, and this blog post series is going to show you how. [GitOps](/topics/gitops/) is an operational framework that takes DevOps best practices used for application development such as version control, collaboration, compliance, and CI/CD tooling, and applies them to infrastructure automation. 
This series of easy-to-follow tutorials will focus on different user problems, including provisioning, managing a base infrastructure, and deploying various third-party or custom applications on top of them, that can be solved by pairing GitOps with GitLab.\n\nHere are 8 tutorials on how to do GitOps with GitLab:\n\n**1. [Here's how to do GitOps with GitLab](/blog/gitops-with-gitlab/)**\n\nThis tutorial sets the stage for what you will learn throughout the series, including the tech concepts you'll need to know.\n\n**2. [Infrastructure provisioning with GitLab and Terraform](/blog/gitops-with-gitlab-infrastructure-provisioning/)**\n\nThis tutorial walks you through setting up the underlying infrastructure using GitLab and Terraform.\n\n**3. [Connect with a Kubernetes cluster](/blog/gitops-with-gitlab-connecting-the-cluster/)**\n\nThis tutorial demonstrates how to connect a Kubernetes cluster with GitLab for pull- and push-based deployments and easy security integrations.\n\n**4. [How to tackle secrets management](/blog/gitops-with-gitlab-secrets-management/)**\n\nThis tutorial builds on the previous tutorial to show you how to use a Kubernetes cluster connection to manage secrets within a cluster.\n\n**5. [The CI/CD tunnel](/blog/gitops-with-gitlab-using-ci-cd/)**\n\nThis tutorial introduces you to CI/CD tunnels and shows step-by-step how to access a Kubernetes cluster using GitLab CI/CD.\n\n**6. [Connecting GitLab with a Kubernetes cluster - Auto DevOps](/blog/gitops-with-gitlab-auto-devops/)**\n\nThis tutorial looks at how you can use Auto DevOps with all its bells and whistles to easily manage deployments.\n\n**7. [Connecting GitLab with a Kubernetes cluster for GitOps-style application delivery](/blog/gitops-with-gitlab/)**\n\nThis tutorial shows you how to connect an application project to a manifest project for controlled, GitOps-style deployments.\n\n**8. 
[Turn a GitLab agent for Kubernetes installation to manage itself](/blog/gitops-with-gitlab-manage-the-agent/)**\n\nThis tutorial is the culmination of the previous tutorials and will teach you how to turn a GitLab agent for Kubernetes installation to manage itself.\n\n\n**Read more about GitOps:**\n- [GitLab for GitOps](/solutions/gitops/)\n- [What is GitOps](/topics/gitops/)\n- [GitOps viewed as part of the Ops evolution](/blog/gitops-as-the-evolution-of-operations/)\n- [How to use a push-based approach for GitOps with GitLab scripting and variables](/blog/how-to-agentless-gitops-vars/)\n\n\n\n\n",[773,9,533],{"slug":4012,"featured":6,"template":684},"the-ultimate-guide-to-gitops-with-gitlab","content:en-us:blog:the-ultimate-guide-to-gitops-with-gitlab.yml","The Ultimate Guide To Gitops With Gitlab","en-us/blog/the-ultimate-guide-to-gitops-with-gitlab.yml","en-us/blog/the-ultimate-guide-to-gitops-with-gitlab",{"_path":4018,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4019,"content":4025,"config":4030,"_id":4032,"_type":13,"title":4033,"_source":15,"_file":4034,"_stem":4035,"_extension":18},"/en-us/blog/the-ultimate-guide-to-least-privilege-access-with-gitlab",{"title":4020,"description":4021,"ogTitle":4020,"ogDescription":4021,"noIndex":6,"ogImage":4022,"ogUrl":4023,"ogSiteName":669,"ogType":670,"canonicalUrls":4023,"schema":4024},"The ultimate guide to least privilege access with GitLab","This tutorial demonstrates how to achieve least privilege access using custom roles, security policies, compliance pipelines, branch protections, and more.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099438/Blog/Hero%20Images/Blog/Hero%20Images/built-in-security_built-in-security.jpeg_1750099438377.jpg","https://about.gitlab.com/blog/the-ultimate-guide-to-least-privilege-access-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The ultimate guide to least 
privilege access with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2024-03-06\"\n      }",{"title":4020,"description":4021,"authors":4026,"heroImage":4022,"date":4027,"body":4028,"category":814,"tags":4029},[1767],"2024-03-06","The principle of least privilege ([PoLP](https://csrc.nist.gov/glossary/term/least_privilege)) is a concept in which a user's access rights should be limited to the bare minimum needed for them to complete the tasks required within their respective roles. By implementing PoLP you can enhance your organization's [security posture](https://csrc.nist.gov/glossary/term/security_posture), complementing zero trust, in the following ways:\n\n- **Reduction of attack surface:** If credentials are compromised, the breach will be limited to only the paths where the compromised account has access.\n- **Protection against human error:** Users will not be able to perform actions that are not required for their role.\n- **Adherence to compliance:** Separation of duties and least privilege best practices are required for several compliance mandates such as SOC2 and HIPAA.\n- **Reduced system downtime:** By preventing everyone from accessing critical parts of the software development lifecycle (SDLC), there is less likelihood of downtime.\n\nGitLab provides a variety of different features that allow you to customize the actions a user can perform which assist in the achievement of PoLP. 
These features include:\n\n- **[Custom roles and granular security permissions](#custom-roles-and-granular-security-permissions):** Allows creation of roles with permissions that are specific to particular functions required by the organization.\n- **[Security policies](#security-policies):** Allows policies to be created that prevent insecure code from being merged into production branches without approval, and run security scanners regardless of your pipeline definition.\n- **[Branch protections and Code Owners](#branch-protections-and-code-owners):** Imposes further restrictions on certain branches to control permissions such as who can merge, push, etc. to defined branches.\n- **[Compliance pipelines and frameworks](#compliance-pipelines-and-frameworks):** Identifies that your project has certain compliance requirements or needs additional oversight, enforcing a pipeline configuration to the projects on which it is applied.\n\nIn this blog post, you'll learn each of the features mentioned, how they improve your organization's security posture, as well as how to implement them.\n\nWatch my video, which introduces you to achieving PoLP with GitLab:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/jvZ3eqWMeSY?si=DedSYiBNy2kTLJKo\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Custom roles and granular security permissions\n\nGitLab allows you to create [custom roles](https://docs.gitlab.com/ee/user/custom_roles.html), which apply additional permissions to base roles to meet the security needs of your organization. The available [base roles](https://docs.gitlab.com/ee/user/permissions.html#roles) are as follows:\n\n- Guest\n- Reporter\n- Developer\n- Maintainer\n- Owner\n\nEach base role applies a particular set of permissions to a user. 
Base roles apply different permissions for [group members](https://docs.gitlab.com/ee/user/permissions.html#group-members-permissions), [project members](https://docs.gitlab.com/ee/user/permissions.html#project-members-permissions), and in [project features](https://docs.gitlab.com/ee/user/permissions.html#project-features-permissions). For example, the table below shows which roles can view the project [dependency list](https://docs.gitlab.com/ee/user/application_security/dependency_list/):\n\n| Base role    | Can view project dependency list     |\n| ---------- | ---------- |\n| Guest      | ❌       |\n| Reporter      | ❌       |\n| Developer      | ✅       |\n| Maintainer      | ✅       |\n| Owner       | ✅     |\n\n\u003Cbr>\u003C/br>\nThe dependency list also known as a software bill of materials ([SBOM](https://about.gitlab.com/blog/the-ultimate-guide-to-sboms/)), displays your project's dependencies\nand key details about those dependencies. It makes sense that only those actively working on a project should\nbe able to see what dependencies are present to limit any exploitation of your application using its dependencies.\n\nHowever, there are cases in which a Guest may need to see the SBOM to assist the organization in\n[achieving compliance](https://www.whitehouse.gov/briefing-room/presidential-actions/2021/05/12/executive-order-on-improving-the-nations-cybersecurity/). By using custom roles, a new role can be created with all the limited permissions of the Guest role, and additionally, the ability to view the project dependency list can be added. 
Therefore, we have a Guest assisting us with compliance with the least privileged access required for their job.\n\nWatch my video on custom roles and granular security permissions with GitLab:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/WyrhkpO5WkI?si=4B4mNYNK9UyNrru8\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Granular permissions\n\nAs of the GitLab 16.8 release, the following granular permissions can be added to any base role:\n\n- Viewing project code\n- Viewing vulnerability reports\n- Changing the status of vulnerabilities\n- Viewing SBOMs\n- Approving merge requests\n- Managing project/group access tokens\n- Adding/removing group members\n- Archiving/unarchiving/removing projects\n- Admin Terraform state\n\nWe will continue to add [more granular permissions](https://docs.gitlab.com/ee/user/custom_roles/abilities.html) with each GitLab release. You can learn more about our roadmap for this feature by referring to the [Granular Security Permissions Epic](https://gitlab.com/groups/gitlab-org/-/epics/10684) and provide feedback in the [customer feedback Issue](https://gitlab.com/gitlab-org/gitlab/-/issues/391760). 
You also have the ability to contribute to GitLab and [develop your own granular permissions](https://docs.gitlab.com/ee/development/permissions/custom_roles.html).\n\n### Implementation prerequisites\nThe requirements for implementing custom roles are as follows: \n- Owner role in the top-level group in which you are creating the custom role\n- Administrator for the self-managed instance in which you are creating the custom role\n- GitLab Ultimate tier in the top-level group\n- A [personal access token with the API scope](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-personal-access-token)\n\nTo see custom roles in action requires:\n- a private project within the top-level group or its subgroups\n- a guest user within the private project\n\nWhen you enable a custom role for a user with the Guest role, that user has access to elevated permissions, and therefore:\n- is considered a billable user on self-managed GitLab\n- uses a seat on GitLab.com\n\n### Creating the custom role with granular permissions\n\nNow that you know the benefits of implementing custom roles with granular permissions, let's implement them within our GitLab instance:\n\n1. On the left sidebar, select **Search or go to**.\n    - In GitLab SaaS find and select the top-level group in which you want to create a custom role.\n    - In GitLab Self-Managed find and select **Admin Area**.\n2. Select **Settings > Roles and Permissions**.\n    - In GitLab Self-Managed use the top dropdown list to find and select the top-level group in which you want to create a custom role.\n3. Select **Add new role**.\n4. Under Base role to use as a template, select **Guest** for this tutorial.\n5. Under Role name, enter the custom role’s title.\n6. Under Permissions for the custom role, select **Read Vulnerability** for this tutorial.\n7. 
Select **Create a new role**.\n\n![Create new role screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099455072.png)\n\n\u003Ccenter>\u003Ci>Interface for creating a custom role\u003C/i>\u003C/center>\n\u003Cp>\u003C/p>\n\nAfter creating the role you should be able to see the new custom role along with its ID, Base role, and Permissions. Be sure to save the ID as it will be used when we assign the custom role to a guest user.\n\n![Custom role screen](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099455073.png)\n\n\u003Ccenter>\u003Ci>Security Auditor role created\u003C/i>\u003C/center>\n\u003Cp>\u003C/p>\n\nNow we must assign the custom role to a group or project member. This can be done as follows:\n1. Invite a user as a direct member with the Guest role to your top-level group where the custom role was created.\n2. You can invite them to a sub-group or private project within the top-level group as well.\n* The guest user should not be able to see any code within the project they have been assigned to.\n* Open your terminal.\n3. Export the required environment variables:\n* Your [personal access token with API scope](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-personal-access-token)\n\n```\n$ export TOKEN=glpat-XXXXXXXXXXXX\n$ echo $TOKEN\nglpat-XXXXXXXXXXXX\n\n```\n\n* The ID of the user we will be granting a custom role to. You can obtain the user id by providing the username to the [User API](https://docs.gitlab.com/ee/api/users.html#list-users). 
For more information on using the GitLab API, see the [REST API documentation](https://docs.gitlab.com/ee/api/rest/).\n\n```\n$ curl \"https://gitlab.example.com/api/v4/users?username=fjdiaz\"\n[{\"id\":4710074,\"username\":\"fjdiaz\",\"name\":\"Fern\",\"state\":\"active\",\"locked\":false,\"avatar_url\":\"https://gitlab.com/uploads/-/system/user/avatar/4710074/avatar.png\",\"web_url\":\"https://gitlab.com/fjdiaz\"}]\n\n$ export USER_ID=4710074\n$ echo $USER_ID\n4710074\n```\n\n* The ID of the custom role. You can obtain the custom role ID from the ID column in the [custom roles UI](https://docs.gitlab.com/ee/user/custom_roles.html#gitlab-saas) or the [member roles API](https://docs.gitlab.com/ee/api/member_roles.html#add-a-member-role-to-a-group).\n\n```\n$ export CUSTOM_ROLE_ID=1000782\n$ echo $CUSTOM_ROLE_ID\n1000782\n```\n\n* The ID of your group or project. You can obtain the group id from the [group UI](https://docs.gitlab.com/ee/user/group/#get-the-group-id) or using the [groups API](https://docs.gitlab.com/ee/api/groups.html). You can obtain the project ID from the [project UI](https://docs.gitlab.com/ee/user/project/working_with_projects.html#access-the-project-overview-page-by-using-the-project-id) or using the [projects API](https://docs.gitlab.com/ee/api/projects.html).\n\n```\n$ export GROUP_ID=10087220\n$ echo $GROUP_ID\n10087220\n\n$ export PROJECT_ID=45738177\n$ echo $PROJECT_ID\n45738177\n```\n\n4. 
Associate the guest user with the custom role using the appropriate [group or project APIs](https://docs.gitlab.com/ee/api/members.html#edit-a-member-of-a-group-or-project).\n\n* If the user just needs the role in a project, update the project membership:\n\n```\n$ curl --request PUT --header \"Content-Type: application/json\" --header \"Authorization: Bearer $TOKEN\" --data '{\"member_role_id\": $CUSTOM_ROLE_ID, \"access_level\": 10}' \"https://gitlab.example.com/api/v4/projects/$PROJECT_ID/members/$USER_ID\"\n```\n\n* If the user just needs the role in a group, update the group membership:\n\n```\n$ curl --request PUT --header \"Content-Type: application/json\" --header \"Authorization: Bearer $TOKEN\" --data '{\"member_role_id\": $CUSTOM_ROLE_ID, \"access_level\": 10}' \"https://gitlab.example.com/api/v4/groups/$GROUP_ID/members/$USER_ID\"\n```\n\nNow that the custom role has been applied to a guest user, when they log in, they can see the Vulnerability dashboard present in the Secure tab. Notice, however, that they are still not allowed to see the source code. \n\nThis is useful because it allows users to audit the system without being able to make changes to the code base, which applies the PoLP for those auditing the system for vulnerabilities.\n\n## Security policies\nGitLab provides [security policies](https://docs.gitlab.com/ee/user/application_security/policies/) to help you achieve least privilege access. 
There are two different types of security policies provided by GitLab:\n- [Scan Execution policies](https://docs.gitlab.com/ee/user/application_security/policies/scan-execution-policies.html) allow project maintainers and administrators the confidence of knowing that the scans they set up have not been changed, altered, or disabled.\n- [Merge Request Approval policies](https://docs.gitlab.com/ee/user/application_security/policies/scan-result-policies.html) prevent insecure code from being merged into production without appropriate approval.\n\nSome examples of how both policy types can be used in unison to provide least privilege access are as follows:\n- remove the ability for developers to disable security scanners\n- remove the ability for developers to merge insecure code\n\nPolicies are stored in a separate repo from the project they are being applied to called the Security Policy Project (SPP). This allows for separate permissions to be set to the SPP vs. the application repo, thus strengthening your ability to separate duties and apply PoLP.\n\n![Security policy hierarchy](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image19_aHR0cHM6_1750099455074.png)\n\n\u003Ccenter>\u003Ci>Security policy hierarchy\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\nTo enforce the policies contained in an SPP you link it to a project, subgroup, group, or multiples of each. An SPP can contain multiple policies but they are enforced together. An SPP enforced on a group or subgroup applies to everything below the hierarchy, including all subgroups and their projects.\n\nSecurity policies can be managed via the policy management UI as well as via yaml. 
Using the policy editor you can create, edit, and delete policies.\n\n![Policy management interface](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image20_aHR0cHM6_1750099455076.png)\n\n\u003Ccenter>\u003Ci>Policy management interface\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\nFeel free to leverage the [Simple Notes demo environment](https://gitlab.com/gitlab-de/tutorials/security-and-governance/devsecops/simply-vulnerable-notes) to try this yourself by following the provided [DevSecOps tutorial](https://gitlab-de.gitlab.io/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/).\n\n### Creating a Scan Execution policy\nNow let's take a look at how to create a Scan Execution policy. Before getting started make sure you have met the following criteria:\n- GitLab Ultimate tier in the top-level group\n- Owner role to create/assign an SPP\n- Developer role or greater to create/edit/delete individual security policies\n\nWe will be creating a policy that automatically runs a SAST scan with each pipeline, regardless of whether the SAST template is defined within the gitlab-ci.yml:\n\n1. On the left sidebar, select **Search or go to** and search for the project to which you wish to add a policy.\n2. On the project left sidebar, go to **Secure > Policies**.\n3. Select **New policy**.\n4. In the **Scan Execution Policy** section, select **Select policy**.\n5. 
Complete the fields:\n    - **Name:** The name of the policy\n    - **Description:** The description of the Policy\n    - **Policy status:** Whether it is enabled or not\n    - **Actions:** What actions to take when the defined conditions are met\n\n![Scan Execution policy actions](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image15_aHR0cHM6_1750099455077.png)\n\n \u003Ccenter>\u003Ci>Scan Execution policy actions\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\n  - **Conditions:** Conditions which must be met (a pipeline is triggered or on a set schedule) in order for an action to take place.\n\n    ![Scan Execution policy conditions](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750099455078.png)\n \u003Ccenter>\u003Ci>Scan Execution policy conditions\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\n- Press the **Configure with a merge request** button.\n\nNow that the policy has been created, all we need to do is run a pipeline to see that SAST will be present even if it is not defined in the .gitlab-ci.yml.\n\n### Creating a Merge Request Approval policy\n\nNow let's take a look at how to create a Merge Request Approval policy. Before getting started make sure you have met the following criteria:\n- GitLab Ultimate tier in the top-level group\n- Owner role to create/assign an SPP\n- Developer role or greater to create/edit/delete individual security policies\n- Security scanners added to project\n\nWe will be creating a policy that requires approval from project maintainers if any security scanner detects a vulnerability when compared with any branch:\n\n1. On the left sidebar, select **Search or go to** and search for the project to which you wish to add a policy.\n2. On the project left sidebar, go to **Secure > Policies**\n3. Select **New policy**\n4. 
In the **Merge Request Approval policy** section, select **Select policy**.\n5. Complete the fields:\n    - **Name:** The name of the policy\n    - **Description:** The description of the policy\n    - **Policy status:** Whether it is enabled or not\n    - **Rules:** The conditions which must be met for an action (require approval) to take place.\n\n![Merge Request Approval policy rules](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image17_aHR0cHM6_1750099455079.png)\n\n\u003Ccenter>\u003Ci>Merge Request Approval policy rules\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\n   - **Actions:** The action to be taken whenever the conditions in the rules (defined vulnerabilities/licenses detected) are met.\n\n![Merge Request Approval  policy actions](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750099455080.png)\n\u003Ccenter>\u003Ci>Merge Request Approval  policy actions\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\n   - **Override project approval settings:** If selected, the following choices will overwrite project settings but only affect the branches selected in the policy.\n\n![Merge Request Approval policy approval settings](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image21_aHR0cHM6_1750099455081.png)\n\n\u003Ccenter>\u003Ci>Merge Request Approval policy approval settings\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\n6. Press the **Configure with a merge request** button.\n\nNow that the policy has been created, all we need to do is run a pipeline and if SAST detects any vulnerabilities then approvals will be required from the selected approver before the code change can be merged. 
Merge Request Approval policies can be used with all GitLab security scanners, including license scanning.\n\n![Merge Request Approval policies blocking code from being merged in an MR](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099455082.png)\n\n\u003Ccenter>\u003Ci>Merge Request Approval policies blocking code from being merged in an MR\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\n## Branch protections and Code Owners\n[Branch protections](https://docs.gitlab.com/ee/user/project/protected_branches.html) allow you to impose additional restrictions on particular branches within your repository. This further strengthens the PoLP for the interactions on a particular set of branches. \n\nFor example, a protected branch can control:\n- which users can merge into the branch\n- which users can push to the branch\n- if users can force push to the branch\n- if changes to files listed in the CODEOWNERS file can be pushed directly to the branch\n- which users can unprotect the branch\n\n### Applying branch protections\n\nBranch protections are available in all tiers and offerings of GitLab. Branch protections can be applied to a single project or a group of projects. You can apply branch protections for required roles to push and merge as follows:\n\n1. On the left sidebar, select **Search or go to** and find your project or group.\n2. Select **Settings > Repository**.\n3. Expand **Protected branches**.\n4. Select **Add protected branch**.\n    - For groups, from the **Branch** text box, type the branch name or a wildcard.\n    - For projects, from the **Branch** dropdown list, select the branch you want to protect.\n5. From the **Allowed to merge** list, select a role that can merge into this branch.\n6. From the **Allowed to push and merge** list, select a role that can push to this branch.\n7. 
Select **Protect**.\n\nYou should now see the protected branch added to the list.\n\n![Protected branches settings](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image14_aHR0cHM6_1750099455082.png)\n\n\u003Ccenter>\u003Ci>Protected branches settings\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\nThe Owner role is required to add branch protections to a group and the Maintainer role or greater is required to add branch protections to a project.\n\n### Code Owners\nIf you want to further limit what files developers can perform changes on, one of the best features to implement is [Code Owners](https://docs.gitlab.com/ee/user/project/codeowners/). Code Owners allows you to define who has the expertise for specific parts of your project’s codebase. Defining the owners of files and directories in Code Owners will:\n\n- require owners to approve changes as well as merge requests before they merge into a protected branch\n- identify owners by displaying the Code Owner names on the files and directories they own\n\nTo set up Code Owners, follow these steps:\n1. Create a CODEOWNERS file in your preferred location.\n2. Define some rules in the file following the Code Owners syntax reference. You can configure all eligible approvers' approval rules and require Code Owner approval on a protected branch.\n3. 
Commit your changes, and push them up to GitLab.\n\nNow, when looking at files, you can see who the Code Owners are for a particular file.\n\n![Code Owners displayed for file](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750099455083.png)\n\n\u003Ccenter>\u003Ci>Code Owners displayed for file\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\nIf you implement Code Owner approvals, then when creating a merge request, the Code Owners must approve before the code can be merged.\n\n![Code Owners approvals](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750099455084.png)\n\n\u003Ccenter>\u003Ci>Code Owners approvals\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\n### Additional approval settings\nThere are additional approval settings that can be applied before code can be committed with a merge request. These additional approval settings are as follows:\n- prevent approval by author\n- prevent approvals by users who add commits\n- prevent editing approval rules in merge requests\n- require user re-authentication (password or SAML) to approve\n\nAdditionally, whenever a commit is added, you can:\n- keep approvals\n- remove all approvals\n- remove approvals by Code Owners if their files changed\n\n![Additional Approval settings](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image12_aHR0cHM6_1750099455084.png)\n\n\u003Ccenter>\u003Ci>Additional Approval settings\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\nTo configure additional approval settings you can perform the following steps:\n1. On the left sidebar, select **Search or go to** and find your project.\n2. Select **Settings > Merge requests**.\n3. Scroll down to the **Merge request approvals** section.\n4. Under **Approval settings** select the approval settings you would like to apply.\n5. 
Press the **Save changes** button.\n\nThese can also be applied to your top-level group by performing the following steps:\n1. On the left sidebar, select **Search or go to** and find your top-level group.\n2. Select **Settings > General**.\n3. Expand the **Merge request approvals** section.\n4. Under **Approval settings** select the approval settings you would like to apply.\n5. Press the **Save changes** button.\n\nBy leveraging these approval settings you can make sure that code always obtains oversight by a person who was not involved in creating the code, thereby preventing a conflict of interest.\n\n## Compliance pipelines and frameworks\nYou can create a compliance framework that is a label to identify that your project has certain compliance requirements or needs additional oversight. The label can optionally enforce compliance pipeline configuration to the projects on which it is applied.\n\nFeel free to leverage the [Compliance Frameworks Demo](https://gitlab.com/gitlab-de/tutorials/security-and-governance/compliance-frameworks) group to see an example of compliance frameworks and their usage.\n\n### Create a compliance pipeline\nTo create a compliance pipeline, all you need to do is create a new project which will store a `.gitlab-ci.yml` file that we wish to use in another project. The new compliance pipeline project can have separate permissions from the project to which you will apply it. This is beneficial because it prevents developers from making changes to pipelines that must run.\n\nYou can see I have created the following [pipeline definition](https://gitlab.com/gitlab-de/tutorials/security-and-governance/compliance-frameworks) which:\n- runs the SAST security scanner\n- runs the secret detection scanner\n- runs a SOC2 compliance job\n- runs the original pipeline defined in the project to which we will apply this pipeline. 
This allows developers to focus on the actual application development and the compliance team to focus on defining the SOC2 rules.\n\n### Create and apply a compliance framework\nNow that the compliance pipeline for SOC2 has been defined, we must define a compliance framework and apply it to our project. In this case, I will apply it to my Accounting Department project.\n\nTo create a compliance framework label, follow these steps:\n1. On the left sidebar, select **Search or go to** and find your group.\n2. Select **Settings > General**.\n3. Expand the **Compliance frameworks** section.\n4. Click the **Add framework** button.\n5. Create a new compliance framework and populate the following sections:\n    - **Name:** The name of your compliance framework\n    - **Description:** A description of your compliance framework\n    - **Compliance pipeline configuration:** The location of the compliance pipeline to run. \n    - **Background color:** A color for the compliance framework label\n\n![PoLP - image 15](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image11_aHR0cHM6_1750099455085.png)\n\n   \u003Ccenter>\u003Ci>Creating a compliance framework\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\n6. Press the **Add framework** button.\n\nAnd now you should see your newly added framework under active compliance frameworks.\n\n![Active compliance frameworks](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750099455085.png)\n\n\u003Ccenter>\u003Ci>Active compliance frameworks\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\nNow let’s go ahead and assign this compliance label to our Accounting Department project:\n\n1. On the left sidebar, select **Search or go to** and find your project.\n2. Select **Settings > General**.\n3. Expand **Compliance frameworks**.\n4. 
Select the compliance framework created above.\n\n![Adding a compliance framework](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099455086.png)\n\n\u003Ccenter>\u003Ci>Adding a compliance framework\u003C/i>\u003C/center>\n\n5. Select **Save changes**.\n\nThe project should now have the compliance framework label applied. \n\n![Project running a compliance pipeline](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750099455086.png)\n\n\u003Ccenter>\u003Ci>Project running a compliance pipeline\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\nThis enables separation of duties and prevents compliance pipelines from being altered by those without permissions.\n\nSecurity Policy Scope and Pipeline Execution\nOver the past several releases, GitLab has introduced two experimental features, Security Policy Scope and Pipeline Execution, to make it even easier to adhere to PoLP. These features are very similar to Compliance Pipelines and Compliance Frameworks and can be managed from GitLab’s security policy UI.\n\n**Note:** These features are currently considered experimental. An experiment is a feature that is in the process of being developed. It is not production ready. We encourage users to try experimental features and provide feedback.\n\nThe [pipeline execution policy action](https://docs.gitlab.com/ee/user/application_security/policies/scan-execution-policies.html#pipeline-execution-policy-action) introduces a new scan action type into Scan Execution policies for creating and enforcing custom CI in your target development projects. You can execute a custom pipeline along with your current pipeline. 
This allows you to enforce compliance by always forcing particular actions to run that are not just security scanners and that cannot be overwritten by those without permissions.\n\n![Pipeline Execution policy scope selection](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image18_aHR0cHM6_1750099455087.png)\n\u003Ccenter>\u003Ci>Pipeline Execution policy scope selection - insert code block\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\n![Pipeline Execution policy scope selection](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099455/Blog/Content%20Images/Blog/Content%20Images/image13_aHR0cHM6_1750099455087.png)\n\u003Ccenter>\u003Ci>Pipeline Execution policy scope selection - link existing CI file\u003C/i>\u003C/center>\u003Cp>\u003C/p>\n\nThe [Security policy scope](https://docs.gitlab.com/ee/user/application_security/policies/scan-result-policies.html#security-policy-scopes) can be applied to either Merge Request Approval or Scan Execution policies. Scopes enable you to administer policies with a particular scope, meaning you can:\n\n- Include only projects containing a compliance framework label\n- Include or exclude selected projects from enforcement\n\nTo enable these experimental features, follow these steps:\n1. On the left sidebar, select **Search or go to** and find your top-level group.\n2. Select **Settings > General**.\n3. Expand **Permissions and group features**.\n4. Scroll down to the **Security policy management** section.\n5. Select the following checkboxes\n**Security policy pipeline execution action:** Create and enforce custom CI jobs and scripts using this new policy action.\n6. **Security policy scopes:** Granularly scope each policy you create to projects containing a compliance framework label, or a list of projects.\n7. **Enforce for all subgroups (optional):** Subgroups cannot change these settings.\n8. 
Scroll down to the **Experiment and Beta features** section.\n9. Select the **Use Experiment and Beta features** checkbox.\n10. Scroll down and press the **Save changes** button.\n\nNow, whenever you are creating a security policy, the following options will be available:\n\n- Inserting a CI code block (Scan Execution policy only)\n- Loading CI/CD code from file (Scan Execution policy only)\n- Linking an existing CI file from another project (Scan Execution policy only)\n- Scoping a policy to projects with selected compliance framework (Group Level only)\n- Scoping a policy towards specific projects (Group Level only)\n- Scoping a policy towards all projects in group (Group Level only)\n\nTo learn more about these features, check out the following documentation:\n- [Pipeline Execution Policy action (Scan Execution policy)](https://docs.gitlab.com/ee/user/application_security/policies/scan-execution-policies.html#pipeline-execution-policy-action)\n- [Security Policy Scopes (Scan Execution policy)](https://docs.gitlab.com/ee/user/application_security/policies/scan-execution-policies.html#security-policy-scopes)\n- [Security Policy Scopes (Merge Request Approval policy)](https://docs.gitlab.com/ee/user/application_security/policies/scan-result-policies.html#security-policy-scopes)\n\n## Additional resources\n\nThanks for reading! These are some of the ways that GitLab allows you to strengthen your organization's security posture through the enablement of PoLP. 
To learn more about GitLab and the other ways we can strengthen your organization's security throughout all parts of the SDLC, check out the following links:\n\n- [GitLab Security and Compliance](https://about.gitlab.com/solutions/security-compliance/)\n- [GitLab Application Security Documentation](https://docs.gitlab.com/ee/user/application_security/)\n- [GitLab DevSecOps Demo Project](https://gitlab.com/gitlab-de/tutorials/security-and-governance/devsecops/simply-vulnerable-notes)\n- [GitLab DevSecOps Tutorial](https://gitlab-de.gitlab.io/tutorials/security-and-governance/devsecops/simply-vulnerable-notes/)\n- [GitLab Roles and Permissions Documentation](https://docs.gitlab.com/ee/user/permissions.html)\n- [GitLab Custom Roles Documentation](https://docs.gitlab.com/ee/user/custom_roles.html)\n- [GitLab Security Policies Documentation](https://docs.gitlab.com/ee/user/application_security/policies/)\n- [GitLab Compliance Frameworks Documentation](https://docs.gitlab.com/ee/user/group/compliance_frameworks.html)\n- [GitLab Code Owners Documentation](https://docs.gitlab.com/ee/user/project/codeowners/)\n- [GitLab Branch Protections Documentation](https://docs.gitlab.com/ee/user/project/protected_branches.html)",[3345,9,814,680],{"slug":4031,"featured":90,"template":684},"the-ultimate-guide-to-least-privilege-access-with-gitlab","content:en-us:blog:the-ultimate-guide-to-least-privilege-access-with-gitlab.yml","The Ultimate Guide To Least Privilege Access With 
Gitlab","en-us/blog/the-ultimate-guide-to-least-privilege-access-with-gitlab.yml","en-us/blog/the-ultimate-guide-to-least-privilege-access-with-gitlab",{"_path":4037,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4038,"content":4044,"config":4050,"_id":4052,"_type":13,"title":4053,"_source":15,"_file":4054,"_stem":4055,"_extension":18},"/en-us/blog/the-ultimate-guide-to-token-management-at-gitlab",{"title":4039,"description":4040,"ogTitle":4039,"ogDescription":4040,"noIndex":6,"ogImage":4041,"ogUrl":4042,"ogSiteName":669,"ogType":670,"canonicalUrls":4042,"schema":4043},"The ultimate guide to token management at GitLab","Learn all the steps in the end-to-end process of identifying, managing, and securing tokens for improved security across the software development lifecycle.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097408/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_1097303277_6gTk7M1DNx0tFuovupVFB1_1750097407860.jpg","https://about.gitlab.com/blog/the-ultimate-guide-to-token-management-at-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"The ultimate guide to token management at GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Hakeem Abdul-Razak\"}],\n        \"datePublished\": \"2025-02-25\",\n      }",{"title":4039,"description":4040,"authors":4045,"heroImage":4041,"date":4047,"body":4048,"category":814,"tags":4049},[4046],"Hakeem Abdul-Razak","2025-02-25","Imagine this: You are an engineer at a growing tech company, and it’s 2 a.m. when you get an urgent call. A critical deployment pipeline has failed, and your team is scrambling to figure out why. After hours of digging, you realize someone revoked a personal access token belonging to an engineer who left the company a week ago. This token was tied to several key automation processes, and now your entire system is in chaos. 
How do you make sure it does not happen again?\n\nFollow this guide, which takes GitLab customers through the end-to-end process of identifying, managing, and securing their tokens. It is meant to be a handy supplement to the extensive [token overview documentation](https://docs.gitlab.com/ee/security/tokens) for GitLab administrators, developers, and security teams who need to ensure proper token management within their projects.\n\nHere's what is covered in this guide:\n- [How to select the right token for the job](#how-to-select-the-right-token-for-the-job)\n- [Token types](#token-types)\n- [Discovering your tokens](#discovering-your-tokens)\n    - [Credentials inventory](#credentials-inventory)\n- [Managing tokens in the GitLab UI and API](#managing-tokens-in-the-gitlab-ui-and-api)\n- [Token rotation and expiration management](#token-rotation-and-expiration-management)\n- [Token management best practices](#token-management-best-practices)\n    - [Service accounts](#service-accounts)\n\n## How to select the right token for the job\n\nChoosing the right token ensures optimal security and functionality based on your use case. \nTokens can be used for authenticating API requests, automating CI/CD pipelines, integrating third-party tools, managing deployments and repositories, and more.\n\n![Token management guide - flow chart for tokens](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097435/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097434869.png)\n\nFor the sake of simplicity, the chart illustrates a straightforward use case tied to single user ownership. For more information, check out our documentation of user roles and permissions at each [namespace](https://docs.gitlab.com/ee/user/permissions.html) (user/group) in your instance or top-level group. 
Example use cases could be as follows: \n\n- **Personal access tokens** ([PAT](https://docs.gitlab.com/user/profile/personal_access_tokens/#personal-access-token-scopes)) can be used by developers when a user's personal access and permissions are required. In this case, the credentials follow the status and permissions of the user, including the removal of access if the account loses access to a specific project or group (or is blocked entirely).   \n- **Project/group access tokens** ([PrAT](https://docs.gitlab.com/user/project/settings/project_access_tokens/#scopes-for-a-project-access-token)/[GrAT](https://docs.gitlab.com/user/group/settings/group_access_tokens/#scopes-for-a-group-access-token)) are recommended when access should be scoped to resources within a specific project/group, allowing anyone with a PrAT/GrAT to access those resources through mechanisms managed by assigned scopes.\n\n## Token types\n\nBelow is a list of GitLab tokens with their default prefixes and use cases. For more information, please visit the [GitLab Token overview page](https://docs.gitlab.com/ee/security/tokens/#available-scopes). 
\n\n| Tokens | Prefix  | Description |\n| :---: | :---: | :---: |\n| Personal access token | glpat | Access user-specific data |\n| OAuth 2.0 token |  gloas | Integrate with third-party applications using OAuth2.0 authentication protocol |\n| Impersonation token | glpat | Act on behalf of another user for administrative purposes |\n| Project access token | glpat | Access data from a specific project |\n| Group access token | glpat |  Access data from a specific group |\n| Deploy token | gldt |  Clone, push, and pull container registry images of a project without a user and a password |\n| Deploy keys | N/A | Allow read-only or read-write access to your repositories |\n| Runner authentication token | glrt | Authenticate GitLab Runners |\n| CI/CD job token  | glcbt | Automate CI/CD processes |\n| Trigger token | glptt | Trigger pipelines manually or programmatically |\n| Feed token | glft | Authenticate access to package/RSS feeds |\n| Incoming mail token  | glimt | Process incoming emails |\n| GitLab agent for Kubernetes token | glagent | Manage Kubernetes clusters via the GitLab agent |\n| SCIM tokens | glsoat | Enable SCIM integrations for user provisioning |\n| Feature flags client token | glffct | Enable feature flags programmatically |\n| Webhook token | N/A | User set secret token to secure webhook payloads and ensure that the requests are from GitLab |\n\n## Discovering your tokens\n\n### Credentials inventory\n\nOn GitLab Ultimate, administrators (GitLab Self-Managed) and top-level group owners of an enterprise organization (GitLab.com as of Version 17.5) can monitor the credentials in their namespace.\n\nThis inventory tracks token details such as:\n\n* Token type  \n  * Available tokens on [GitLab.com](https://docs.gitlab.com/ee/user/group/credentials_inventory.html)  \n  * Available tokens on [GitLab Self-Managed](https://docs.gitlab.com/ee/administration/credentials_inventory.html)  \n* Associated user accounts  \n* Token scopes, and creation and 
expiration dates  \n* Token last used IP addresses (as of GitLab 17.10)  \n* Token filtration based on the above user-defined parameters  \n* Ability to revoke and rotate those tokens\n\nA well-maintained credentials inventory helps identify over-permissioned tokens, and gives insight into credentials that may need to be rotated, ensuring a secure and efficient workflow.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/A9ONfnwswd0?si=4VIEUgJaD4daj81b&amp;start=105\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n#### Credentials inventory API\n\nAs a complement to the UI, there is [ongoing development](https://gitlab.com/groups/gitlab-org/-/epics/16343) to release a credentials inventory API through the new /group/:id/manage [endpoint](https://docs.gitlab.com/ee/api/members.html#list-all-members-of-a-group-or-project). The credentials accessible under this endpoint are limited to enterprise [users](https://docs.gitlab.com/ee/user/enterprise_user/), and can be accessed by the top-level group owner of an enterprise organization. An example of the future API call would be:\n\n```console\ncurl --header \"PRIVATE-TOKEN: \u003Cpat>\" \"https://verified_domain.com/api/v4/groups/\u003Cgroup_id>/manage/personal_access_tokens\"           \n```\n### GitLab API\n\nThe GitLab API allows you to programmatically list and manage tokens within your organization. Key authentication-related endpoints support [various token types](https://docs.gitlab.com/ee/api/rest/authentication.html)), including personal, group, CI/CD tokens, and more. 
An example of using a personal access token to list all visible projects across GitLab for the authenticated user is:\n\n```console\ncurl --header \"PRIVATE-TOKEN: \u003Cyour_access_token>\" \\\n     \"https://gitlab.example.com/api/v4/projects\"\n\n```\n\nWatch this video to learn how to make API calls to the GitLab API.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/0LsMC3ZiXkA?si=vj871YH610jwQdFc\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Finding where tokens are used\n\nCustomers can find where tokens are used in different ways:\n* under **User Profile > [Access Tokens](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#view-the-time-at-and-ips-where-a-token-was-last-used)**\n* in credentials inventory\n* in audit events\n* via the API \n\nInformation on token usage is updated every 10 minutes for **last_used** and every minute for **last_used_ip**. \n\nThe ability to view IP addresses was introduced in GitLab 17.9, and is controlled by the **:pat_ip** feature flag. 
Follow these [steps to view the last time a token was used](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#view-the-time-at-and-ips-where-a-token-was-last-used), along with its last five distinct IP addresses.\n\n![Token management guide - personal access tokens settings](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097435/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097434870.png)\n\n## Managing tokens in the GitLab UI and API\nThe following table includes videos detailing a few token creations in the UI and demonstrates their usage via the API.\n\n| Tokens     | GitLab UI    | GitLab API    |\n| ---------- | ---------- | ---------- |\n| Personal access token | [Documentation](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-personal-access-token) and [video](https://youtu.be/v5Nj3Jy4vaI?t=3)  | [Documentation](https://docs.gitlab.com/ee/api/personal_access_tokens.html) and [video](https://youtu.be/v5Nj3Jy4vaI?t=43)  |\n| Group access token | [Documentation](https://docs.gitlab.com/ee/user/group/settings/group_access_tokens.html#group-access-tokens) and [video](https://youtu.be/v5Nj3Jy4vaI?t=120)  | [Documentation](https://docs.gitlab.com/ee/api/group_access_tokens.html) and [video](https://youtu.be/v5Nj3Jy4vaI?t=157)  |\n| Project access token | [Documentation](https://docs.gitlab.com/ee/user/project/settings/project_access_tokens.html#project-access-tokens) and [video](https://youtu.be/v5Nj3Jy4vaI?t=254)  | [Documentation](https://docs.gitlab.com/ee/api/project_access_tokens.html) and [video](https://youtu.be/v5Nj3Jy4vaI?t=285)  |\n\n## Token rotation and expiration management\n\nImplementing token rotation and strict expiration policies reduces the risk of compromise and ensures compliance with security standards. 
Regular rotation and enforced expirations prevent stale credentials from becoming security vulnerabilities.\n\nPreviously, expired group and project access tokens were automatically deleted upon expiration, which made auditing and security reviews more challenging due to the lack of a record of inactive tokens. To address this, a [recent feature](https://gitlab.com/gitlab-org/gitlab/-/issues/462217) introduced the retention of inactive group and project access token records in the UI for 30 days after they became inactive. This enhancement aims to allow teams to track token usage, expiration, and revocation for better compliance and monitoring.\n\nTo be more proactive in your token rotation and expiration management, do the following: \n\n* Actively rotate your tokens via the UI or API. If you use the latter, be mindful of the [automatic token reuse detection](https://docs.gitlab.com/ee/api/personal_access_tokens.html#automatic-reuse-detection) security mechanism.  \n* Set an instance-wide [maximum lifetime limit](https://docs.gitlab.com/ee/administration/settings/account_and_limit_settings.html#limit-the-lifetime-of-access-tokens) for access tokens. \n\n### Token rotation API\n\nUntil GitLab 17.7, customers had to programmatically rotate access tokens with the API. Its counterpart is now available on the UI. Check out the video in the table below or follow the [documentation](https://docs.gitlab.com/ee/user/project/settings/project_access_tokens.html#use-the-ui) for guidance.\n\n### Token rotation snippets\n\nThe following table includes videos detailing the rotation of GitLab tokens. 
\n\n| Tokens | Prerequisites | GitLab UI | GitLab API |\n| :---: | :---: | ----- | ----- |\n| Personal access token | Scope: api\u000b | [Documentation](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-personal-access-token) and [video](https://youtu.be/v5Nj3Jy4vaI?t=76)  | [Documentation](https://docs.gitlab.com/ee/api/personal_access_tokens.html#rotate-a-personal-access-token) and [video](https://youtu.be/v5Nj3Jy4vaI?t=92)  |\n| Group access token | Scope: api and Role(s): owner | [Documentation](https://docs.gitlab.com/ee/user/group/settings/group_access_tokens.html#create-a-group-access-token-using-ui) and [video](https://youtu.be/v5Nj3Jy4vaI?t=203)  | [Documentation](https://docs.gitlab.com/ee/api/group_access_tokens.html) and [video](https://youtu.be/v5Nj3Jy4vaI?t=214)  |\n| Project access token | Scope: api and Role(s): owner, maintainer | [Documentation](https://docs.gitlab.com/ee/user/project/settings/project_access_tokens.html#create-a-project-access-token) and [video](https://youtu.be/v5Nj3Jy4vaI?t=335)  | [Documentation](https://docs.gitlab.com/ee/api/project_access_tokens.html) and [video](https://youtu.be/v5Nj3Jy4vaI?t=349)  |\n\n## Token management best practices\n\n### Principle of least privilege\n\nMitigate risk by restricting assigned permissions to tokens required for their respective tasks. This allows you to proactively predict and troubleshoot points of failure in your systems. You can do this by: \n\n* Selecting the right token for the right job. See the flowchart.  \n* Assign only the required scopes when creating a token. For example, use read-only scopes for tokens with auditor-like jobs. See [roles](https://docs.gitlab.com/ee/user/permissions.html#roles).  \n* Avoid granting administrative privileges unless specifically required.  \n* Enforce instance-wide default token [lifetimes](https://docs.gitlab.com/ee/administration/settings/account_and_limit_settings.html#set-a-lifetime-1).  
\n* Regularly review and audit token permissions to ensure they align with current operational needs.  \n* Revoke tokens once the task is complete.\n\n### Service accounts\n\n[Service accounts](https://docs.gitlab.com/ee/user/profile/service_accounts.html) ensure tokens are tied to non-human entities, separating them from individual user accounts and reducing dependency on specific users. Instead of using personal accounts to generate tokens for automation, create service accounts with limited scopes. Benefits include:\n\n* Usage of service account tokens in CI/CD pipelines to avoid disruptions caused by user account changes  \n* Programmatically automate rotation processes, as personal accounts remain unaffected  \n* Clearer monitoring and auditing trail of actions taken by service accounts  \n* Service accounts with [no expiration](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-service-account-personal-access-token-with-no-expiry-date) date  \n* Does not consume [a license seat](https://docs.gitlab.com/user/profile/service_accounts/#create-a-service-account)\n\nGitLab plans to release a new [Service Accounts UI](https://gitlab.com/groups/gitlab-org/-/epics/9965) as a counterpart to its [API-based creation](https://docs.gitlab.com/ee/api/user_service_accounts.html#create-a-service-account-user), designed to simplify the management of service accounts and their associated tokens. Check out the demo below on the programmatic usage of service accounts.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/oZvjg0SCsqY?si=cj-0LjfeonLGXv9u\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Vulnerability tools\n\nLeverage GitLab’s built-in security tools to identify and mitigate vulnerabilities associated with token usage. 
For maximum coverage, it is recommended to use them all in tandem.\n\n* [Secret Detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/): Scans your repository for hardcoded secrets like API tokens, passwords, and other sensitive information. View the [list of detected secrets](https://docs.gitlab.com/ee/user/application_security/secret_detection/detected_secrets.html).  \n* [Static Application Security Testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/): Analyzes your source code for security vulnerabilities and [provides reports with UI findings in merge requests](https://docs.gitlab.com/ee/user/application_security/sast/#features), among other features.  \n* [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/): Ensures that third-party libraries used in your project do not expose token-related vulnerabilities. \n\n### Audit logs and monitoring\n\nMaintain token health by regularly reviewing audit logs and token usage, instance- and/or group-wide.\n\n* [Audit events](https://docs.gitlab.com/ee/user/compliance/audit_events.html): Enable audit event logging in GitLab to track token-related activities such as creation, usage, deletion and unusual API calls (unpermitted parameters in logs, and consistent triggers of the rate limiter). \n* [IP allowlisting](https://docs.gitlab.com/ee/administration/reporting/ip_addr_restrictions.html#configure-ip-address-restrictions): Helps prevent malicious users from hiding their activities behind multiple IP addresses.  \n* [Alerts](https://docs.gitlab.com/ee/operations/incident_management/alerts.html): Set up alerts for unusual activities (trigger paging for on-call rotations or be used to create incidents).  \n* [Credentials inventory](https://docs.gitlab.com/ee/administration/credentials_inventory.html): Complete control of all available access tokens with the ability to revoke as needed.  
\n* [Notifications](https://docs.gitlab.com/ee/user/profile/notifications.html): Proactively handle any token (group, project, and personal) expiration notification emails you receive. Based on customer demand, this feature was recently extended to include 30-day and 60-day notifications from the seven-day default.   \n* [Webhooks](https://docs.gitlab.com/ee/user/project/integrations/webhooks.html#create-a-webhook): Access token webhooks can be configured on groups and projects to send seven-day token expiry events. This feature was also recently extended to include 30-day and 60-day notifications behind the **:extended_expiry_webhook_execution_setting** feature flag (disabled by default).\n\n## What's next\n\nWith GitLab’s large token catalog, there are ongoing [plans](https://gitlab.com/gitlab-org/gitlab/-/issues/502630) for consolidation with a focus on the lifetime, fine-grained scopes, consistent management, and usage. Our current prioritized token-related features include a complete UI for service accounts, additional credential types in the credentials inventory, and improved auditing for tokens and service accounts.\n\n> Sign up for a [free 60-day trial of GitLab Ultimate](https://about.gitlab.com/free-trial/) to start using token management.",[9,814,478,680,678],{"slug":4051,"featured":90,"template":684},"the-ultimate-guide-to-token-management-at-gitlab","content:en-us:blog:the-ultimate-guide-to-token-management-at-gitlab.yml","The Ultimate Guide To Token Management At 
Gitlab","en-us/blog/the-ultimate-guide-to-token-management-at-gitlab.yml","en-us/blog/the-ultimate-guide-to-token-management-at-gitlab",{"_path":4057,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4058,"content":4063,"config":4069,"_id":4071,"_type":13,"title":4072,"_source":15,"_file":4073,"_stem":4074,"_extension":18},"/en-us/blog/there-is-no-mlops-without-devsecops",{"title":4059,"description":4060,"ogTitle":4059,"ogDescription":4060,"noIndex":6,"ogImage":1260,"ogUrl":4061,"ogSiteName":669,"ogType":670,"canonicalUrls":4061,"schema":4062},"Building GitLab with GitLab: Why there is no MLOps without DevSecOps","Follow along as data scientists adopt DevSecOps practices and enjoy the benefits of automation, repeatable workflows, standardization, and automatic provisioning of infrastructure.","https://about.gitlab.com/blog/there-is-no-mlops-without-devsecops","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Building GitLab with GitLab: Why there is no MLOps without DevSecOps\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"William Arias\"}],\n        \"datePublished\": \"2023-10-05\",\n      }",{"title":4059,"description":4060,"authors":4064,"heroImage":1260,"date":4066,"body":4067,"category":702,"tags":4068},[4065],"William Arias","2023-10-05","\nBuilding predictive models requires a good amount of experimentation and iterations. Data scientists building those models usually implement workflows involving several steps such as data loading, processing, training, testing, and deployment. 
Such workflows or data science pipelines come with a set of challenges on their own; some of these common challenges are:\n- prone to error due to manual steps\n- experimentation results that are hard to replicate\n- long training time of machine learning (ML) models \n\nWhen there is a challenge, there is also an opportunity; in this case, those challenges represent an opportunity for data scientists to adopt DevSecOps practices and enjoy the benefits of automation, repeatable workflows, standardization, and automatic provisioning of infrastructure needed for data-driven applications at scale.\n\nThe [Data Science team at GitLab](https://about.gitlab.com/handbook/business-technology/data-team/organization/data-science/) is now utilizing the GitLab DevSecOps Platform in their workflows, specifically to:\n- enhance experiment reproducibility by ensuring code and data execute in a standardized container image\n- automate training and re-training of ML models with GPU-enabled CI/CD\n- leverage ML experiment tracking, storing the most relevant metadata and artifacts produced by data science pipelines automated with CI\n\nAt GitLab, we are proponents of \"dogfooding\" our platform and sharing how we use GitLab to build GitLab. What follows is a detailed look at the Data Science team's experience.\n\n### Enhancing experiment reproducibility \nA baseline step to enhance reproducibility is having a common and standard experiment environment for all data scientists to run experiments in their Jupyter Notebooks. A standard data science environment ensures that all team members use the same software dependencies. A way to achieve this is by building a container image with all the respective dependencies under version control and re-pulling it every time a new version of the code is run. 
This process is illustrated in the figure below:\n\n![build](https://about.gitlab.com/images/blogimages/2023-10-04-there-is-no-mlops-without-devsecops/build-2.png)\nData science image of automatic build using GitLab CI \n{: .note.text-center}\n\nYou might wonder if the image gets built every time there is a new commit. The answer is \"no\" since that would result in longer execution times, and the image dependencies versions don’t change frequently, rendering it unnecessary to build it every time there is a new commit. Therefore, once the standard image is automatically built by the pipeline, it is pushed to the GitLab Container Registry, where it is stored and ready to be pulled every time changes to the model code are introduced, and re-training is necessary.\n\n![registry](https://about.gitlab.com/images/blogimages/2023-10-04-there-is-no-mlops-without-devsecops/registry.png)\nGitLab Container Registry with image automatically built and pushed by a CI pipeline\n{: .note.text-center}\n\nChanges to the image dependencies or Dockerfile require a [merge request](https://docs.gitlab.com/ee/user/project/merge_requests/) and an approval process.\n\n### How to build the data science image using GitLab CI/CD\nConsider this project structure:\n\n```\nnotebooks/\n.gitlab-ci.yml\nDockerfile\nconfig.yml\nrequirements.txt\n```\nGitLab's Data Science team already had a pre-configured JupyterLab image with packages such as [gitlabds](https://pypi.org/project/gitlabds/1.0.0/) for common data preparation tasks and modules to enable Snowflake connectivity for loading raw data. All these dependencies are reflected in the Dockerfile at the root of the project, plus all the steps necessary to build the image: \n\n```\nFROM nvcr.io/nvidia/cuda:12.1.1-base-ubuntu22.04\nCOPY .    
/app/\nWORKDIR /app\nRUN apt-get update\nRUN apt-get install -y python3.9\nRUN apt-get install -y python3-pip\nRUN pip install -r requirements.txt\n```\n\nThe instructions to build the data science image start with using Ubuntu with CUDA drivers as a base image. We are using this baseline image because, moving forward, we will use GPU hardware to train models. The rest of the steps include installing Python 3.9 and the dependencies listed in `requirements.txt` with their respective versions. \n\nAutomatically building the data science image using [GitLab CI/CD](https://about.gitlab.com/topics/ci-cd/) requires us to create the `.gitlab-ci.yml ` at the root of the project and use it to describe the jobs we want to automate. For the time being, let’s focus only on the `build-ds-image`job:\n\n```\nvariables:\n  DOCKER_HOST: tcp://docker:2375\n  MOUNT_POINT: \"/builds/$CI_PROJECT_PATH/mnt\"\n  CONTAINER_IMAGE: \"$CI_REGISTRY_IMAGE/main-image:latest\"\n\nstages:\n    - build\n    - train\n    - notify\ninclude:\n  - template: 'Workflows/MergeRequest-Pipelines.gitlab-ci.yml'\nworkflow:\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n    - if: $CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS\n      when: never\n\nbuild-ds-image:\n  tags: [ saas-linux-large-amd64 ]\n  stage: build\n  services:\n    - docker:20.10.16-dind\n  image:\n    name: docker:20.10.16\n  script:\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n    - docker build -t $CONTAINER_IMAGE .\n    - docker push $CONTAINER_IMAGE\n  rules:\n    - if: '$CI_PIPELINE_SOURCE == \"merge_request_event\" && $CI_MERGE_REQUEST_TARGET_BRANCH_NAME == $CI_DEFAULT_BRANCH'\n      changes:\n       - Dockerfile\n       - requirements.txt\n\n  allow_failure: true\n```\n\nAt a high level, the job `build-ds-image`:\n- uses a docker-in-docker service (dind) necessary to create docker images in GitLab CI/CD.\n- uses [predefined variables](link) to log into the GitLab Container 
Registry, build the image, tag it using $CONTAINER_IMAGE variable, and push it to the registry. These steps are declared in the script section lines.\n- leverages a  `rules` section to evaluate conditions to determine if the job should be created. In this case, this job runs only if there are changes to the Dockerfile and requirements.txt file and if those changes are created using a merge request.\n\nThe conditions declared in `rules` helps us optimize the pipeline running time since the image gets rebuilt only when necessary.\n\nA complete pipeline can be found in this example project, along with instructions to trigger the automatic creation of the data science image: [Data Science CI pipeline](https://gitlab.com/gitlab-data/data-science-ci-example/-/blob/main/.gitlab-ci.yml?ref_type=heads).\n\n### Automate training and re-training of ML models with GPU-enabled CI/CD\nGitLab offers the ability to leverage GPU hardware and, even better, to get this hardware automatically provisioned to run jobs declared in the .gitlab-ci.yml file. We took advantage of this capability to train our ML models faster without spending time setting up or configuring graphics card drivers. 
Using GPU hardware ([GitLab Runners](https://docs.gitlab.com/ee/ci/runners/saas/gpu_saas_runner.html)) requires us to add this line to the training job: \n\n```\ntags:\n        - saas-linux-medium-amd64-gpu-standard\n```\n\nThe tag above will ensure that a GPU GitLab Runner automatically picks up every training job.\nLet’s take a look at the entire training job in the .gitlab-ci.yml file and break down what it does:\n\n```\ntrain-commit-activated:\n    stage: train\n    image: $CONTAINER_IMAGE\n    tags:\n        - saas-linux-medium-amd64-gpu-standard\n    script:\n        - echo \"GPU training activated by commit message\"\n        - echo \"message passed is $CI_COMMIT_MESSAGE\"\n        - notebookName=$(echo ${CI_COMMIT_MESSAGE/train})\n        - echo \"Notebook name $notebookName\"\n        - papermill -p is_local_development False -p tree_method 'gpu_hist' $notebookName -\n    rules:\n        - if: '$CI_COMMIT_BRANCH == \"staging\"'\n          when: never\n        - if: $CI_COMMIT_MESSAGE =~ /\\w+\\.ipynb/\n          when: always\n          allow_failure: true\n    artifacts:\n      paths:\n        - ./model_metrics.md\n````\n\nLet’s start with this block:\n\n```\ntrain-commit-activated:\n    stage: train\n    image: $CONTAINER_IMAGE\n    tags:\n        - saas-linux-medium-amd64-gpu-standard\n```\n\n- **train-commit-activated** This is the name of the job. Since the model training gets activated given a specific pattern in the commit message, we use a descriptive name to easily identify it in the larger pipeline.\n- **stage: train** This specifies the pipeline stage where this job belongs. In the first part of the CI/CD configuration, we defined three stages for this pipeline: `build`, `train`,  and `notify`. This job comes after building the data science container image. 
The order is essential since we first need the image built to run our training code in it.\n- **image: $CONTAINER_IMAGE** Here, we specify the Docker image built in the first job that contains the CUDA drivers and necessary Python dependencies to run this job. $CONTAINER_IMAGE is a user-defined variable specified in the variables section of the .gitlab-ci.yml file. \n- **tags: saas-linux-medium-amd64-gpu-standard** As mentioned earlier, using this line, we ask GitLab to automatically provision a GPU-enabled Runner to execute this job.\n\nThe second block of the job:\n\n```\nscript:\n        - echo \"GPU training activated by commit message\"\n        - echo \"message passed is $CI_COMMIT_MESSAGE\"\n        - notebookName=$(echo ${CI_COMMIT_MESSAGE/train})\n        - echo \"Notebook name $notebookName\"\n        - papermill -p is_local_development False -p tree_method 'gpu_hist' $notebookName -\n```\n\n- **script** This section contains the commands in charge of running the model training. The execution of this job is conditioned to the contents of the  commit message. The commit message must have the name of the Jupyter Notebook that contains the actual model training code.\n\nThe rationale behind this approach is that we wanted to keep the data scientist workflow as simple as possible. The team had already adopted the [modeling templates](https://gitlab.com/gitlab-data/data-science/-/tree/main/templates) to start building predictive models quickly. Plugging the CI pipeline into their modeling workflow was a priority to ensure productivity would remain intact. 
With these steps:\n\n```\nnotebookName=$(echo ${CI_COMMIT_MESSAGE/train})\n        - echo \"Notebook name $notebookName\"\n        - papermill -p is_local_development False -p tree_method 'gpu_hist' $notebookName -\n```\n\nThe CI pipeline captures the name of the Jupyter Notebook with the training modeling template and passes parameters to ensure [XGBoost](https://xgboost.readthedocs.io/en/stable/) uses the provisioned GPU. You can find an example of the Jupyter modeling template that is executed in this job [here](https://gitlab.com/gitlab-data/data-science-ci-example/-/blob/main/notebooks/training_example.ipynb?ref_type=heads).\n\nOnce the data science image is built, it can be reutilized in further model training jobs. The `train-commit-activated` job pulls the image from the GitLab Container Registry and utilizes it to run the ML pipeline defined in the training notebook. This is illustrated in the `CI Job - Train model` in the figure below:\n\n![training](https://about.gitlab.com/images/blogimages/2023-10-04-there-is-no-mlops-without-devsecops/training_job.png)\nTraining job executes ML pipeline defined in the modeling notebook\n{: .note.text-center}\n\nSince our image contains CUDA drivers and GitLab automatically provisions GPU-enabled hardware, the training job runs significantly faster with respect to standard hardware.\n\n### Using GitLab ML experiment tracker\nEach model training execution triggered using GitLab CI is an experiment that needs tracking. 
Using Experiment tracking in GitLab helps us to record metadata that comes in handy to compare model performance and collaborate with other data scientists by making result experiments available for everyone and providing a detailed history of the model development.\n\n![experiments](https://about.gitlab.com/images/blogimages/2023-10-04-there-is-no-mlops-without-devsecops/experiments.png)\nExperiments automatically logged on every CI pipeline GPU training run \n{: .note.text-center}\n\nEach model artifact created can be traced back to the pipeline that generated it, along with its dependencies:\n\n![traceability](https://about.gitlab.com/images/blogimages/2023-10-04-there-is-no-mlops-without-devsecops/traceability_small.png)\nModel traceability from pipeline run to candidate details\n{: .note.text-center}\n\n### Putting it all together\nWhat is machine learning without data to learn from? We also leveraged the [Snowflake](https://www.snowflake.com/en/) connector in the model training notebook and automated the data extraction whenever the respective commit triggers a training job. 
Here is an architecture of the current solution with all the parts described in this blog post:\n\n![process](https://about.gitlab.com/images/blogimages/2023-10-04-there-is-no-mlops-without-devsecops/training_fixed.png)\nData Science pipelines automated using GitLab DevSecops Platform\n{: .note.text-center}\n\n| Challenge | Solution |\n| ------ | ------ | \n|Prone to error due to manual steps | Automate steps with [GitLab CI/CD](https://docs.gitlab.com/ee/ci/) |\n|Experimentation results that are hard to replicate    |  Record metadata and model artifacts with [GitLab Experiment Tracker](https://docs.gitlab.com/ee/user/project/ml/experiment_tracking/)    |\n|The long training time of machine learning models     |  Train models with [GitLab SaaS GPU Runners](https://docs.gitlab.com/ee/ci/runners/saas/gpu_saas_runner.html)  |\n\nIterating on these challenges is a first step towards MLOps, and we are at the tip of the iceberg; in coming iterations, we will adopt security features to ensure model provenance (software bill of materials) and code quality, and to monitor our ML workflow development with value stream dashboards. 
But so far, one thing is sure: **There is no MLOps without DevSecOps**.\n\nGet started automating your data science pipelines, follow this [tutorial](https://about.gitlab.com/handbook/business-technology/data-team/platform/ci-for-ds-pipelines/) and clone this [data-science-project](https://gitlab.com/gitlab-data/data-science-ci-example) to follow along and watch this demo of using GPU Runners to train [XGBoost](https://xgboost.readthedocs.io/en/stable/) model.\n\nSee how data scientists can train ML models with GitLab GPU-enabled Runners (XGBoost 5-minute demo):\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/tElegG4NCZ0?si=L1IZfx_UGv6u81Gk\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## More \"Building GitLab with GitLab\" blogs\nRead more of our \"Building GitLab with GitLab\" series:\n- [How we use Web API fuzz testing](https://about.gitlab.com/blog/building-gitlab-with-gitlab-api-fuzzing-workflow/)\n- [How GitLab.com inspired GitLab Dedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/)\n",[9,835,478,704],{"slug":4070,"featured":6,"template":684},"there-is-no-mlops-without-devsecops","content:en-us:blog:there-is-no-mlops-without-devsecops.yml","There Is No Mlops Without Devsecops","en-us/blog/there-is-no-mlops-without-devsecops.yml","en-us/blog/there-is-no-mlops-without-devsecops",{"_path":4076,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4077,"content":4083,"config":4089,"_id":4091,"_type":13,"title":4092,"_source":15,"_file":4093,"_stem":4094,"_extension":18},"/en-us/blog/tips-to-configure-browser-based-dast-scans",{"title":4078,"description":4079,"ogTitle":4078,"ogDescription":4079,"noIndex":6,"ogImage":4080,"ogUrl":4081,"ogSiteName":669,"ogType":670,"canonicalUrls":4081,"schema":4082},"Tips to configure browser-based DAST scans","Learn how to use the browser-based 
analyzer with common dynamic application security testing settings, based on web application attributes, to ensure successful scans.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659561/Blog/Hero%20Images/securitycheck.png","https://about.gitlab.com/blog/tips-to-configure-browser-based-dast-scans","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tips to configure browser-based DAST scans\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Julie Byrne\"},{\"@type\":\"Person\",\"name\":\"Jerez Solis\"}],\n        \"datePublished\": \"2023-11-14\",\n      }",{"title":4078,"description":4079,"authors":4084,"heroImage":4080,"date":4086,"body":4087,"category":814,"tags":4088},[4085,1861],"Julie Byrne","2023-11-14","The GitLab Dynamic Application Security Testing ([DAST](https://docs.gitlab.com/ee/user/application_security/dast/)) scan uses an actively running environment to crawl the application and find misconfigurations of your application server or incorrect assumptions about security controls that may not be visible from the source code. GitLab now provides a proprietary [browser-based analyzer](https://docs.gitlab.com/ee/user/application_security/dast/browser_based.html) for scanning applications that make heavy use of JavaScript, including single-page web applications. The DAST scan needs to be configured properly to account for various web application attributes, including authentication mechanism, authenticated landing page, and page load times. In this tutorial, you will learn common configurations that have helped our customers use the browser-based analyzer to successfully implement DAST scans. \n\n## General considerations\n\nThe browser-based DAST scan takes the URL of the application it's supposed to scan from the `DAST_WEBSITE` environment variable. 
This should point to a test environment - you should not run a DAST scan against a production environment, even if you are only running a passive scan. For ephemeral environments that are deployed as part of the CI/CD pipeline, you can save the URL of the environment as an artifact `environment_url.txt`, which will then be used by the [DAST scan template job](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Security/DAST.gitlab-ci.yml) to set the `DAST_WEBSITE` variable.  The [GitLab Auto DevOps deploy template job](https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Deploy.gitlab-ci.yml) has an example of this. \n\nDepending on the size of the web application, a DAST scan may take an hour or more to complete. You will want to ensure that whatever runner is used to perform the DAST scan has a [job timeout value](https://docs.gitlab.com/ee/ci/runners/configure_runners.html#set-maximum-job-timeout-for-a-runner) set to be long enough to allow the scan to complete. Similarly, you should ensure that the [project level CI/CD timeout](https://docs.gitlab.com/ee/ci/pipelines/settings.html#set-a-limit-for-how-long-jobs-can-run) is sufficient to allow the job to complete. **Note:** Shared runners on gitlab.com have a runner timeout of 180 minutes, regardless of the project CI/CD timeout set.\n\n## Configuration options for websites requiring authentication\n\nMany web applications require a user to log in to access the site. Logins can be implemented as basic http authentication or, more commonly, as form authentication.  For form authentication, the login form might be implemented in one of several ways:\n\n1. username and password fields on the main website landing page\n2. 
a login button that opens a modal (also called a modal window or lightbox) that displays in front of the page and disables all other page content until the login is completed or the modal is closed; there is not a separate URL associated with a modal window\n3. a login button that opens a new window, with its own URL \n\nAdditionally, the form may be either a single-step form, where the username and password fields are on the same page, or a multi-step form, where the username and password fields are on separate pages.  \n\nWhen running a DAST scan, the analyzer must know how to [authenticate](https://docs.gitlab.com/ee/user/application_security/dast/authentication.html).  We need to specify these details via the appropriate variables.  \n\nThe `DAST_USERNAME` and `DAST_PASSWORD` variables specify the login credentials to be used. The variable values should be set via masked variables at the project level, not included within the `.gitlab-ci.yml` file.\n\n### URL variable values\n\nVarious URL values must also be specified: \n   - `DAST_AUTH_URL` - the URL of the login page\n   - `DAST_WEBSITE` - specifies the URL of the page the user is redirected to after logging in\n\n**Note:** If your website uses authentication with a login button that opens a new window with its own URL, you should specify the URL of that new window as the `DAST_AUTH_URL` value.  \n\nGitLab enhancements are currently being implemented to support cases where additional actions must be taken post-login prior to being brought to the main site. See [this epic](https://gitlab.com/groups/gitlab-org/-/epics/11585)  for details: DAST browser-based analyzer multi-step login form does not support 'keep me signed in' workflow (AzureAD).\n\n### Field variable values\n\n`FIELD` variables specify the page elements used. These values can typically be identified by inspecting the page source. 
For single-step login pages, you will need to specify:\n - `DAST_USERNAME_FIELD`\n - `DAST_PASSWORD_FIELD`\n - `DAST_SUBMIT_FIELD` \n\nFor multi-step logins, you would instead specify:\n\n - `DAST_FIRST_SUBMIT_FIELD` - the button clicked after entering the username\n - `DAST_SUBMIT_FIELD` - the button clicked after entering the password\n\nIf your login button opens a modal, you should also specify `DAST_BROWSER_PATH_TO_LOGIN_FORM`, which provides the path of elements to click to get from the initial login URL to the login fields.\n\n#### Examples\n\n![username email field](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683113/Blog/Content%20Images/username_email_field.png)\n\nIn this example, you can target the input element for the username field in different ways. Keep in mind that the selector you chose should be resilient to the application changing. We recommend to use the `id` and `name` attributes, as these are generally unique on a page and rarely change.\n\n```\nDAST_USERNAME_FIELD: \"id:user_login\"\nDAST_USERNAME_FIELD: \"name:user[login]\"\nDAST_USERNAME_FIELD: \"css:input[type=text]\"\n```\n\nThe same process can be followed for the password field. 
For example:\n\n```\nDAST_PASSWORD_FIELD: \"id:user_password\"\nDAST_PASSWORD_FIELD: \"name:user[password]\"\nDAST_PASSWORD_FIELD: \"css:input[type=password]\"\n```\n\n**Submit/Sign in/Login button**\n\n![login](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749683114/Blog/Content%20Images/login.png)\n\nYou can target the submit/sign in/login button using this selector:\n\n```\nDAST_SUBMIT_FIELD: \"css:button[type=submit]\"\n```\n\n**Note:** If the submit button is not a `\u003Cbutton>`, but an input element in the form of `\u003Cinput type=\"submit\" name=\"login\">`, you can use one of the following selectors:\n\n```\nDAST_SUBMIT_FIELD: \"css:input[type=submit]\"\nDAST_SUBMIT_FIELD: \"css:input[name=login]\"\n```\n\n### Other variables to set\n\nIf the username and password fields are on separate pages, DAST has to wait after submitting the username before looking for the password field.\n\nThe `DAST_BROWSER_ACTION_STABILITY_TIMEOUT` variable, with a default value of 800ms, specifies the wait time. This time can be increased if the login response time is slow.\n\nIf the website has a large JavaScript file that is required to load the target application, it is recommended that you use the variable `DAST_BROWSER_MAX_RESPONSE_SIZE_MB` to increase the limit for response sizes. The default is 10MB but can be increased for 50MB or more, if necessary.\n\n## Tools for troubleshooting\n\nSeveral tools will help with DAST scan troubleshooting:\n\n- [authentication report](https://docs.gitlab.com/ee/user/application_security/dast/authentication.html#configure-the-authentication-report) - This report can be produced during the scan and saved as a CI/CD job artifact to assist with understanding the cause of an authentication failure. The report contains steps performed during the login process, HTTP requests and responses, the Document Object Model (DOM), and screenshots. 
To configure the report, set `DAST_AUTH_REPORT` to `true` and configure an artifacts attribute for the DAST job, e.g.:\n\n```\ndast:\n   variables:\n      DAST_WEBSITE: \"https://example.com\"\n      DAST_AUTH_REPORT: \"true\"\n    artifacts:\n      paths: [gl-dast-debug-auth-report.html]\n      when: always\n```\n\n- [analyzer logs](https://docs.gitlab.com/ee/user/application_security/dast/browser_based_troubleshooting.html#browser-based-analyzer-logging) - Setting `DAST_BROWSER_LOG` to `auth:debug` or `auth:trace` will provide additional logging that may help identify an issue with the scan. \n\nThe browser-based DAST scan configuration depends on the specific attributes of the web application you're testing, including how authentication is implemented to access the web site, what buttons are used, and how fast your browser loads once the user has authenticated. Using the appropriate variables to guide the analyzer through the authentication process will ensure that you are able to run a successful scan. 
And robust error logging and the authentication report will provide additional pointers to where the configuration might be incorrect and need to be adjusted.\n\nTry DAST scanning with [a free trial of GitLab Ultimate](https://about.gitlab.com/free-trial/).\n\n## Read more\n- [How to configure DAST full scans for complex web applications](https://about.gitlab.com/blog/how-to-configure-dast-full-scans-for-complex-web-applications/)\n- [How we're using DAST 2 for easier scan configuration](https://about.gitlab.com/blog/how-were-using-dast2-for-easier-scan-configuration/)\n",[814,1041,1041,9],{"slug":4090,"featured":6,"template":684},"tips-to-configure-browser-based-dast-scans","content:en-us:blog:tips-to-configure-browser-based-dast-scans.yml","Tips To Configure Browser Based Dast Scans","en-us/blog/tips-to-configure-browser-based-dast-scans.yml","en-us/blog/tips-to-configure-browser-based-dast-scans",{"_path":4096,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4097,"content":4103,"config":4108,"_id":4110,"_type":13,"title":4111,"_source":15,"_file":4112,"_stem":4113,"_extension":18},"/en-us/blog/top-10-gitlab-hacks",{"title":4098,"description":4099,"ogTitle":4098,"ogDescription":4099,"noIndex":6,"ogImage":4100,"ogUrl":4101,"ogSiteName":669,"ogType":670,"canonicalUrls":4101,"schema":4102},"Top ten GitLab hacks for all stages of the DevOps Platform","Get the most out of the GitLab DevOps Platform with our ten best tips for enhanced productivity.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749667482/Blog/Hero%20Images/cover-image-unsplash.jpg","https://about.gitlab.com/blog/top-10-gitlab-hacks","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top ten GitLab hacks for all stages of the DevOps Platform\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2021-10-19\",\n      
}",{"title":4098,"description":4099,"authors":4104,"heroImage":4100,"date":4105,"body":4106,"category":1103,"tags":4107},[1612],"2021-10-19","\nIt's been ten years since the first commit to GitLab, so we are sharing our ten favorite GitLab hacks to help you get the most out of our DevOps Platform. These are tips for all stages of the development lifecycle, so roll up your sleeves and let's get started.\n\n## Manage faster with quick actions\n\nYou might have adopted keyboard shortcuts for faster navigation and workflows already - if not, check out the GitLab documentation for [platform specific shortcuts](https://docs.gitlab.com/ee/user/shortcuts.html). The knowledge of pressing `r` to land in the reply to comment in text form can be combined with other quick actions, including:\n\n```\n/assign_reviewer @ \u003Csearch username>\n\n/label ~ \u003Csearch label>\n/label ~enhancement ~workflow::indev\n\n/due Oct 8\n\n/rebase\n\n/approve\n\n/merge \n```\n\nQuick actions are also helpful if you have to manage many issues, merge requests and epics at the same time. There are specific actions which allow you to duplicate existing issues, as one example. \n\nTake a deeper dive into [Quick Actions](/blog/improve-your-gitlab-productivity-with-these-10-tips/). \n\n## Plan instructions with templates\n\nDon’t fall into the trap of back-and-forth with empty issue descriptions that leave out details your development teams need to reproduce the error in the best way possible. \n\nGitLab provides the possibility to use so-called [description templates](https://docs.gitlab.com/ee/user/project/description_templates.html) in issues and merge requests. Next to providing a structured template with headings, you can also add [task lists](https://docs.gitlab.com/ee/user/markdown.html#task-lists) which can later be ticked off by the assignee. 
Basically everything is possible and is supported in GitLab-flavored markdown and HTML.\n\nIn addition to that, you can combine the static description templates with quick actions. This allows you to automatically set labels, assignees, define due dates, and more to level up your productivity with GitLab. \n\n```\n\u003C!-- \nThis is a comment, it will not be rendered by the Markdown engine. You can use it to provide instructions how to fill in the template.\n--> \n\n### Summary \n\n\u003C!-- Summarize the bug encountered concisely. -->\n\n### Steps to reproduce\n\n\u003C!-- Describe how one can reproduce the issue - this is very important. -->\n\n### Output of checks\n\n\u003C!-- If you are reporting a bug on GitLab.com, write: This bug happens on GitLab.com -->\n\n#### Results of GitLab environment info\n\n\u003C!--  Input any relevant GitLab environment information if needed. -->\n\n\u003Cdetails>\n\u003Csummary>Expand for output related to app info\u003C/summary>\n\n\u003Cpre>\n\n(Paste the version details of your app here)\n\n\u003C/pre>\n\u003C/details>\n\n### Possible fixes\n\n\u003C!-- If you can, link to the line of code and suggest actions. →\n\n## Maintainer tasks\n\n- [ ] Problem reproduced\n- [ ] Weight added\n- [ ] Fix in test\n- [ ] Docs update needed\n\n/label ~\"type::bug\"\n```\n\nWhen you manage different types of templates, you can pass along the name of the template in the `issuable_template` parameter, for example `https://gitlab.com/gitlab-org/gitlab/-/issues/new?issuable_template=Feature%20proposal%20%23%20lean`. 
\n\nAt GitLab, we use description and merge request templates in many ways: [GitLab the project](https://gitlab.com/gitlab-org/gitlab/-/tree/master/.gitlab/issue_templates), [GitLab Corporate Marketing team](https://gitlab.com/gitlab-com/marketing/corporate_marketing/corporate-marketing/-/tree/master/.gitlab/issue_templates), [GitLab team member onboarding](https://gitlab.com/gitlab-com/people-group/people-operations/employment-templates/-/tree/master/.gitlab/issue_templates) and [GitLab product team](https://gitlab.com/gitlab-com/Product/-/tree/main/.gitlab/issue_templates) are just a few examples.\n\n## Create with confidence \n\nWhen reading GitLab issues and merge requests, you may see the abbreviation `MWPS` which means `Merge When Pipeline Succeeds`. This is an efficient way to merge the MRs when the pipeline passes all jobs and stages - you can even combine this workflow with [automatically closing issues](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically) with keywords from the MR.\n\n`Merge When Pipeline Succeeds` also works on the CLI with the `git` command and [push options](https://docs.gitlab.com/ee/user/project/push_options.html). That way you can create a merge request from a local Git branch, and set it to merge when the pipeline succeeds.\n\n```shell\n# mwps BRANCHNAME\nalias mwps='git push -u origin -o merge_request.create -o merge_request.target=main -o merge_request.merge_when_pipeline_succeeds'\n```\n\nCheckout [this ZSH alias example](https://gitlab.com/sytses/dotfiles/-/blob/745ef9725a859dd759059f6ce283e2a8132c9b00/git/aliases.zsh#L24) in our CEO [Sid Sijbrandij](/company/team/#sytses)’s dotfiles repository. There are more push options available, and even more Git CLI tips in [our tools & tips handbook](https://handbook.gitlab.com/handbook/tools-and-tips/#terminal). 
The pipeline editor also uses the
It also allows you to unfold included files, and possible job overrides (for example changing the stage of an [included SAST security template](https://docs.gitlab.com/ee/user/application_security/sast/#overriding-sast-jobs)).\n\nLet’s try a quick example – create a new project and new file called `server.c` with the following content: \n\n```\n#include \u003Cstdio.h>\n#include \u003Cstring.h>\n#include \u003Csys/mman.h>\n#include \u003Csys/stat.h>\n#include \u003Cunistd.h>\n\nint main(void) {\n    size_t pagesize = getpagesize();\n    char * region = mmap(\n        (void*) (pagesize * (1 \u003C\u003C 20)),\n        pagesize,\n        PROT_READ|PROT_WRITE|PROT_EXEC,\n        MAP_ANON|MAP_PRIVATE, 0, 0);\n\n    strcpy(region, \"Hello GitLab SAST!\");\n    printf(\"Contents of region: %s\\n\", region);\n\n    FILE *fp;\n    fp = fopen(\"devops.platform\", \"r\");\n    fprintf(fp, \"10 years of GitLab 🦊 🥳\");\n    fclose(fp);\n    chmod(\"devops.platform\", S_IRWXU|S_IRWXG|S_IRWXO);\n\n    return 0;\n}\n```\n\nOpen the CI/CD pipeline editor and add the following configuration, with an extra `secure` stage assigned to the `semgrep-sast` job for SAST and the C code. \n\n```yaml\nstages:\n    - build\n    - secure\n    - test\n    - deploy\n\ninclude:\n    - template: Security/SAST.gitlab-ci.yml\n\nsemgrep-sast:\n    stage: secure\n```\n\nInspect the `Merged YAML tab` to see the fully compiled CI/CD configuration. You can commit the changes and check the found vulnerabilities too as an async practice :). 
The examples are available in [this project](https://gitlab.com/gitlab-de/playground/sast-10y-example).\n\n![CI/CD Pipeline editor - Merged YAML](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_pipeline_editor_view_merged_yaml.png)\nVerify the stage attribute for the job by opening the `view merged YAML` tab in the CI/CD pipeline editor.\n{: .note.text-center}\n\n## Package your applications\n\nThe [package registry](https://docs.gitlab.com/ee/user/packages/) possibilities are huge and there are more languages and package managers to come. Describing why Terraform, Helm, and containers (for infrastructure) and Maven, npm, NuGet, PyPI, Composer, Conan, Debian, Go and Ruby Gems (for applications) are so awesome would take too long, but it's clear there are plenty of choices. \n\nOne of my favourite workflows is to use existing CI/CD templates to publish container images in the GitLab container registry. This makes continuous delivery much more efficient, such as when deploying the application into your Kubernetes cluster or AWS instances. \n\n```yaml\ninclude:\n  - template: 'Docker.gitlab-ci.yml'\n```\n\nIn addition to including the CI/CD template, you can also override the job attributes and define a specific stage and manual non-blocking rules.\n\n```yaml\nstages:\n  - build\n  - docker-build\n  - test\n\ninclude:\n  - template: 'Docker.gitlab-ci.yml'\n\n# Change Docker build to manual non-blocking\ndocker-build:\n  stage: docker-build\n  rules:\n    - if: '$CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH'\n      when: manual \n      allow_failure: true\n```\n\nFor celebrating #10YearsOfGitLab, we have created a [C++ example](https://gitlab.com/gitlab-de/cicd-tanuki-cpp) with an Easter egg on time calculations. This project also uses a Docker builder image to showcase a more efficient pipeline. 
Our recommendation is to learn using the templates in a test repository, and then create a dedicated group/project for managing all required container images. You can think of builder images which include the compiler tool chain, or specific scripts to run end-to-end tests, etc. \n\n## Secure your secrets\n\nIt is easy to leak a secret by making choices that uncomplicate a unit test by running it directly with the production database. The secret persists in git history, and someone with bad intentions gains access to private data, or finds ways to exploit your supply chain even further. \n\nTo help prevent that, include the CI/CD template for secret detection. \n\n```yaml\nstages:\n    - test\n\ninclude:\n  - template: Security/Secret-Detection.gitlab-ci.yml  \n```\n\nA known way to leak secrets is committing the `.env` file which stores settings and secrets in the repository. Try the following snippet by adding a new file `.env` and create a merge request.\n\n```\nexport AWS_KEY=\"AKIA1318109798ABCDEF\"\n```\n\nInspect the reports JSON to see the raw reports structure. GitLab Ultimate provides an MR integration, a security dashboard overview, and more features to take immediate action. The example can be found in [this project](https://gitlab.com/gitlab-de/playground/secret-scanning-10y-example).\n\n![Secrets Scanning in MR](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_secrets_scanning.png)\nMR detail view with detected AWS secret from security scanning\n{: .note.text-center}\n\n## Release and continuously deliver (CD)\n\nGitLab’s release stage provides many [features](https://about.gitlab.com/handbook/product/categories/features/#release), including [canary deployments](https://docs.gitlab.com/ee/user/project/canary_deployments.html) and [GitLab pages](https://docs.gitlab.com/ee/user/project/pages/). 
There are also infrastructure deployments with Terraform and cloud native (protected) [environments](https://docs.gitlab.com/ee/ci/environments/). \n\nWhile working on a CI/CD pipeline efficiency workshop, I got enthusiastic about [parent-child pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html#parent-child-pipelines) allowing non-blocking child pipelines into production, with micro services in Kubernetes as one example. \n\nLet’s try it! Create a new project, and add 2 child pipeline configuration files: `child-deploy-staging.yml` and `child-deploy-prod.yml`. The naming is important as the files will be referenced in the main `.gitlab-ci.yml` configuration file later. The jobs in the child pipelines will sleep for 60 seconds to simulate a deployment. \n\nchild-deploy-staging.yml:\n\n```yaml\ndeploy-staging:\n    stage: deploy\n    script:\n        - echo \"Deploying microservices to staging\" && sleep 60\n```\n\nchild-deploy-prod.yml\n\n```yaml\ndeploy-prod:\n    stage: deploy\n    script:\n        - echo \"Deploying microservices to prod\" && sleep 60\n\nmonitor-prod:\n    stage: deploy\n    script:\n        - echo \"Monitoring production SLOs\" && sleep 60\n```\n\nNow edit the `.gitlab-ci.yml` configuration file and create a build-test-deploy stage workflow.\n\n```yaml\nstages:\n  - build\n  - test\n  - deploy\n\nbuild:\n  stage: build\n  script: echo \"Build\"\n\ntest:\n  stage: test \n  script: echo \"Test\"\n\ndeploy-staging-trigger:\n  stage: deploy\n  trigger:\n    include: child-deploy-staging.yml\n  #rules:\n  #  - if: $CI_MERGE_REQUEST_ID\n\ndeploy-prod-trigger:\n  stage: deploy\n  trigger:\n    include: child-deploy-prod.yml\n    #strategy: depend\n  #rules:\n  #  - if: $CI_COMMIT_REF_NAME == $CI_DEFAULT_BRANCH   \n```\n\nCommit the changes and inspect the CI/CD pipelines. 
`strategy: depend` allows you to make the child pipelines blocking again, so the parent pipeline waits for the child pipeline to finish.
Configure them as CI/CD variables in `Settings > CI/CD > Variables`: `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY`.\n\nNext, create the `backend.tf` file and specify the http backend and AWS module dependency.\n\n```terraform\nterraform {\n  backend \"http\" {\n  }\n\n  required_providers {\n    aws = {\n      source = \"hashicorp/aws\"\n      version = \"~> 3.0\"\n    }\n  }\n}\n```\n\nCreate `provider.tf` to specify the AWS region.\n\n```terraform\nprovider \"aws\" {\n  region = \"us-east-1\"\n}\n```\n\nThe `main.tf` describes the S3 bucket resources.\n\n```terraform\nresource \"aws_s3_bucket_public_access_block\" \"publicaccess\" {\n  bucket = aws_s3_bucket.demobucket.id\n  block_public_acls = false\n  block_public_policy = false\n}\n\nresource \"aws_s3_bucket\" \"demobucket\" {\n  bucket = \"terraformdemobucket\"\n  acl = \"private\"\n}\n```\n\nTip: You can verify the configuration locally on your CLI by commenting out the HTTP backend above.\n\nFor GitLab CI/CD, open the pipeline editor and use the following configuration: (Note that it is important to specify the `TF_ROOT` and `TF_ADDRESS` variables since you can [manage multiple Terraform state files](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform_state.html#configure-the-backend)). \n\n```yaml\nvariables:\n  TF_ROOT: ${CI_PROJECT_DIR}\n  TF_ADDRESS: ${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/terraform/state/${CI_PROJECT_NAME}\n\ninclude:\n    - template: Terraform.latest.gitlab-ci.yml\n\nstages:\n  - init\n  - validate\n  - build\n  - deploy\n  - cleanup\n\ndestroy:\n    stage: cleanup\n    extends: .terraform:destroy \n    when: manual\n    allow_failure: true\n```\n\nCommit the configuration and inspect the pipeline jobs. 
bringing up a demo setup with Docker Compose
An easier way is to enable
Inspect `Security & Compliance > Security Dashboard`\n\n# For demo purposes, scan the latest tagged image from 'main'\nvariables:\n    DOCKER_IMAGE: $CI_REGISTRY_IMAGE:latest    \n\ninclude:\n    - template: Docker.gitlab-ci.yml\n    - template: Security/Container-Scanning.gitlab-ci.yml\n```\n\nThe full example is located in [this project](https://gitlab.com/gitlab-de/playground/container-scanning-10y-example).\n\nTip: Learn more about [scanning container images in a deployed Kubernetes cluster](https://docs.gitlab.com/ee/user/application_security/container_scanning/) to stay even more safe. \n\n![Container Scanning Vulnerability Report](https://about.gitlab.com/images/blogimages/top-10-gitlab-hacks/gitlab_10y_container_scanning_vulnerability_report.png)\nView the container scanning vulnerability report\n{: .note.text-center}\n\n## What’s next?\n\nWe have tried to find a great “hack” for each stage of the DevOps lifecycle. There are more hacks and hidden gems inside GitLab - share yours and be ready to explore more stages of the DevOps Platform.\n\nCover image by [Alin Andersen](https://unsplash.com/photos/diUGN5N5Rrs) on [Unsplash](https://unsplash.com)\n",[9,773,940],{"slug":4109,"featured":6,"template":684},"top-10-gitlab-hacks","content:en-us:blog:top-10-gitlab-hacks.yml","Top 10 Gitlab Hacks","en-us/blog/top-10-gitlab-hacks.yml","en-us/blog/top-10-gitlab-hacks",{"_path":4115,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4116,"content":4121,"config":4127,"_id":4129,"_type":13,"title":4130,"_source":15,"_file":4131,"_stem":4132,"_extension":18},"/en-us/blog/top-10-gitlab-technical-blogs-of-2023",{"title":4117,"description":4118,"ogTitle":4117,"ogDescription":4118,"noIndex":6,"ogImage":3090,"ogUrl":4119,"ogSiteName":669,"ogType":670,"canonicalUrls":4119,"schema":4120},"Top 10 GitLab technical blogs of 2023","2023 was a big year! 
Catch up on expert insights into DevSecOps, AI, CI/CD, and more.","https://about.gitlab.com/blog/top-10-gitlab-technical-blogs-of-2023","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top 10 GitLab technical blogs of 2023\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2024-01-09\",\n      }",{"title":4117,"description":4118,"authors":4122,"heroImage":3090,"date":4124,"body":4125,"category":1103,"tags":4126},[4123],"Sandra Gittlen","2024-01-09","2023 brought fresh insights from experts across GitLab and beyond —  all of them focused on the challenges and opportunities facing DevSecOps teams. From Lockheed Martin to CARFAX, organizations are trying to understand and unlock the power of technologies such as artificial intelligence (AI), CI/CD, security automation, and more. Our experts provided tips, best practices, and tutorials to use throughout the software development lifecycle.\n\nHere are the top 10 technical blogs from what was an incredible year in DevSecOps innovation.\n\n**1. [Jenkins to GitLab: The ultimate guide to modernizing your CI/CD environment](https://about.gitlab.com/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment/)**\nLooking for a smooth transition from Jenkins to GitLab? Follow this step-by-step tutorial to learn how GitLab's integrated CI/CD capabilities help deliver high-quality software faster.\n\n**2. [U.S. Navy Black Pearl: Lessons in championing DevSecOps](https://about.gitlab.com/blog/u-s-navy-black-pearl-lessons-in-championing-devsecops/)**\nSigma Defense's director of engineering details what it's like to manage the U.S. Navy's Black Pearl, which uses GitLab as its DevSecOps platform. The DevSecOps champion relays his experience implementing DevSecOps and the benefits of that decision.\n\n**3. 
[Quickstart guide for GitLab Remote Development workspaces](https://about.gitlab.com/blog/quick-start-guide-for-gitlab-workspaces/)**\nEnabling developers to work in their preferred environments empowers DevSecOps teams to build and deliver software more efficiently. With these quickstart instructions, developers can create a workspace, use the Web IDE Terminal to install dependencies or start their server, and view their running application.\n\n**4. [Introducing the GitLab CI/CD Catalog Beta](https://about.gitlab.com/blog/introducing-the-gitlab-ci-cd-catalog-beta/)**\nCI/CD catalogs are a game-changer, allowing developers to discover, integrate, and share pre-existing CI/CD components with ease. This tutorial shows how to get the most from this new DevSecOps platform feature.\n\n**5. [Combine GitLab Flow and GitLab Duo for a workflow powerhouse](https://about.gitlab.com/blog/gitlab-flow-duo/)**\nGitLab Flow and GitLab Duo can help organizations achieve significant improvements in end-to-end workflow efficiency that can lead to higher levels of productivity, deployment frequency, code quality and overall security, and production resiliency and availability. Find out how with this step-by-step guide.\n\n**6. [Efficient DevSecOps workflows: Hands-on python-gitlab API automation](https://about.gitlab.com/blog/efficient-devsecops-workflows-hands-on-python-gitlab-api-automation/)**\nThe python-gitlab library is a useful abstraction layer for the GitLab API. Dive into hands-on examples and best practices in this tutorial.\n\n**7. [Building GitLab with GitLab: Why there is no MLOps without DevSecOps](https://about.gitlab.com/blog/there-is-no-mlops-without-devsecops/)**\nAt GitLab, we believe in the power of MLOps, especially when combined with DevSecOps. So follow along as our data scientists adopt DevSecOps practices and enjoy the benefits of automation, repeatable workflows, standardization, and automatic provisioning of infrastructure.\n\n**8. 
[Explore the Dragon Realm: Build a C++ adventure game with a little help from AI](https://about.gitlab.com/blog/building-a-text-adventure-using-cplusplus-and-code-suggestions/)**\nReaders are invited to create a mystical world while learning how to integrate AI into their coding environment. This tutorial demonstrates how to use GitLab Duo Code Suggestions to create a text-based adventure game, including magical locations to visit and items to procure, using C++. \n\n**9. [How GitLab's Red Team automates C2 testing](https://about.gitlab.com/blog/how-gitlabs-red-team-automates-c2-testing/)**\nThe GitLab Red Team conducts security exercises that simulate real-world threats. They apply professional development practices to using the same open source C2 tools as threat actors. In this tutorial, the GitLab Red Team shares how they implement continuous testing for the Mythic framework, their design philosophy, and a public project that can be forked for use by other Red Teams.\n\n**10. [Building GitLab with GitLab: How GitLab.com inspired Dedicated](https://about.gitlab.com/blog/building-gitlab-with-gitlabcom-how-gitlab-inspired-dedicated/)**\nThe design of GitLab Dedicated, our single-tenancy SaaS version of the DevSecOps platform, came from the lessons learned while building GitLab.com. 
In this peek behind the curtains, learn the considerations that sparked different decisions regarding automation, databases, monitoring, availability, and more – and what the outcome was.\n\nSign up for the GitLab newsletter using the form to the right to receive the latest blogs right in your inbox.\n",[704,108,478,835,814,9],{"slug":4128,"featured":90,"template":684},"top-10-gitlab-technical-blogs-of-2023","content:en-us:blog:top-10-gitlab-technical-blogs-of-2023.yml","Top 10 Gitlab Technical Blogs Of 2023","en-us/blog/top-10-gitlab-technical-blogs-of-2023.yml","en-us/blog/top-10-gitlab-technical-blogs-of-2023",{"_path":4134,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4135,"content":4141,"config":4146,"_id":4148,"_type":13,"title":4149,"_source":15,"_file":4150,"_stem":4151,"_extension":18},"/en-us/blog/top-10-gitlab-workflow-hacks-you-need-to-know",{"title":4136,"description":4137,"ogTitle":4136,"ogDescription":4137,"noIndex":6,"ogImage":4138,"ogUrl":4139,"ogSiteName":669,"ogType":670,"canonicalUrls":4139,"schema":4140},"Top 10 GitLab workflow hacks you need to know","A GitLab product manager shares her favorite tricks to navigate quickly and efficiently around the GitLab DevSecOps Platform and to boost team collaboration.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099361/Blog/Hero%20Images/Blog/Hero%20Images/lightvisibility_lightvisibility.png_1750099361252.png","https://about.gitlab.com/blog/top-10-gitlab-workflow-hacks-you-need-to-know","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top 10 GitLab workflow hacks you need to know\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Amanda Rueda\"}],\n        \"datePublished\": \"2024-04-09\",\n      }",{"title":4136,"description":4137,"authors":4142,"heroImage":4138,"date":4143,"body":4144,"category":1103,"tags":4145},[2666],"2024-04-09","In the world of software development, efficiency 
isn't just about moving fast – it's about smart navigation. As a GitLab product manager, I truly understand the value of efficiency when working within the DevSecOps platform. These are my top 10 favorite GitLab features and they might be the workflow hacks you never knew you needed.\n\nLet's dive into these hidden gems to unlock a new level of productivity and collaboration within your team.\n\n## 1. Resolve comments\n\nNot just for merge requests! Resolving comments on issues can significantly reduce noise and streamline task management. It's particularly handy for managing feedback efficiently.\n\n> **Why do I love it?** Not only does resolving comments reduce the noise on an issue, but it’s also a great way to manage tasks.\n>\n> **Use case.** Resolving comments is a great tool for issues where you are collecting feedback – respond to the feedback and provide a link, resolve the comment, and move on to the next one.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/user/discussions/#resolve-a-thread)__\n\n![example of resolve comments - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750099376147.gif)\n\n\u003Cp>\u003C/p>\n\n## 2. Internal comments\n\nSpeak directly to your team without an external audience. Keep discussions private within an issue or merge request with comments visible only to your team members. It's the perfect balance between transparency and privacy.\n\n> **Why do I love it?** It balances privacy with transparency, while keeping the broader discussion open for the community.\n>\n> **Use case.** When coordinating a product launch, your marketing team can use internal comments to discuss and refine messaging and strategy. 
This keeps your discussions centralized and easily accessible to the team while in draft mode.\n>\n> **[How-to documentation](https://docs.gitlab.com/ee/user/discussions/#add-an-internal-note)**\n\n![internal comments example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099376148.png)\n\n\u003Cp>\u003C/p>\n\n## 3. And/or in filters\n\nWhen searching records on a listing page, using and/or filters can help you slice through the noise and find exactly what you're looking for quickly and efficiently.\n\n> **Why do I love it?** Perfect for finding exactly what you need, powering efficient and streamlined workflows.\n>\n>**Use case.** Search for feature issues related to a specific initiative that are assigned to specific groups.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#filter-with-the-or-operator)__\n\n![and/or filter example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/and_or__1__aHR0cHM6_1750099376152.gif)\n\n\u003Cp>\u003C/p>\n\n## 4. Auto expand URLs\n\nAppending '+' or '+s' to the end of a GitLab URL transforms it into an informative snippet, allowing you to share progress without forcing your teammates to leave the page.\n\n> **Why do I love it?** It's like having x-ray vision for URLs – see the important stuff without even clicking!\n>\n> **Use case.** Sharing progress in comments? Just add '+s' to the link, and boom – everyone's instantly on the same page.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/user/markdown.html#show-the-issue-merge-request-or-epic-title-in-the-reference)__\n\n![auto expand URLs example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750099376154.gif)\n\n\u003Cp>\u003C/p>\n\n## 5. 
Quick actions\n\nWith simple text commands, quick actions let you perform tasks like assigning users, adding labels, and more, directly from the description or comment box, saving you clicks and time.\n\n> **Why do I love it?** Saves clicks and time.\n>\n> **Use case.** When creating a new issue I use quick actions to automatically add labels, a milestone, and connect to the epic upon saving the record.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/user/project/quick_actions.html)__\n\n![quick actions example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750099376156.gif)\n\n\u003Cp>\u003C/p>\n\n## 6. Bulk edit\n\nApply labels, change assignees, or update milestones for multiple issues at once. This feature turns potentially tedious updates into a breeze, allowing for quick adjustments across numerous issues.\n\n> **Why do I love it?** Because it turns tedious updates into quick updates!\n>\n> **Use case.** Need to tag the whole sprint's issues as Review needed? Just filter, select all, and add that label in bulk – easy peasy.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#bulk-edit-issues-from-a-project)__\n\n![bulk edit example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750099376157.gif)\n\n\u003Cp>\u003C/p>\n\n## 7. Epic swimlanes\n\nGroup issues under epics on your board to visually track and discuss progress. 
It's a powerful way to contextualize work during reviews or standups.\n\n> **Why do I love it?** Easily understand the context of work as you’re walking the board.\n>\n> **Use case.** Group by epic during standup reviews to easily piece together work with its parent initiative.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/user/project/issue_board.html#group-issues-in-swimlanes)__\n\n![epic swimlanes example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750099376158.gif)\n\n\u003Cp>\u003C/p>\n\n## 8. Wiki diagrams\n\nIllustrate ideas and workflows directly in your wiki pages with easy-to-create diagrams. This feature supports visual learning and simplifies complex concepts.\n\n> **Why do I love it?** It’s incredibly user-friendly and flexible.\n>\n> **Use case.** When outlining a new feature workflow, draw it directly in the wiki page, making it crystal clear for everyone on the team.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/administration/integration/diagrams_net.html)__\n\n![wiki diagrams example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750099376159.gif)\n\n\u003Cp>\u003C/p>\n\n## 9. Table creation\n\nForget about wrestling with markdown for table creation. The rich text editor lets you effortlessly insert and format tables, making documentation cleaner and more structured.\n\n> **Why do I love it?** It turns the table creation ordeal into a breeze, making updates clean and structured with just a few clicks.\n>\n> **Use case.** Compiling a sprint retro? 
Quickly insert a table to organize feedback, action items, and owners, making the review process smoother for everyone.\n>\n> __[How-to documentation](https://docs.gitlab.com/ee/user/rich_text_editor.html#tables)__\n\n![table creation example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750099376160.gif)\n\n\u003Cp>\u003C/p>\n\n## 10. Video and GIF embeds\n\nEnhance your issues and epic descriptions or comments with embedded GIFs and YouTube videos, adding a dynamic layer to your communication.\n\n> **Why do I love it?** Sometimes a GIF or video speaks better than words.\n>\n> **Use case.** Trying to explain a UI bug? Embed a YouTube video for a quick walkthrough of the proposed feature enhancement.\n\n![video and gif embed example](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099376/Blog/Content%20Images/Blog/Content%20Images/gif__1__aHR0cHM6_1750099376161.gif)\n\n\u003Cp>\u003C/p>\n\n## Explore these features\n\nThese features represent just the tip of the iceberg in GitLab's comprehensive toolkit designed to boost efficiency and foster better collaboration. While they may be underutilized, their impact on your workflow could be substantial. I encourage you to explore these features further and integrate them into your daily routines.\n\n> Are you excited to power your DevSecOps workflow using GitLab? 
[Try GitLab Ultimate for free for 30 days](https://gitlab.com/-/trial_registrations/new).\n",[9,478,680,940],{"slug":4147,"featured":6,"template":684},"top-10-gitlab-workflow-hacks-you-need-to-know","content:en-us:blog:top-10-gitlab-workflow-hacks-you-need-to-know.yml","Top 10 Gitlab Workflow Hacks You Need To Know","en-us/blog/top-10-gitlab-workflow-hacks-you-need-to-know.yml","en-us/blog/top-10-gitlab-workflow-hacks-you-need-to-know",{"_path":4153,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4154,"content":4160,"config":4165,"_id":4167,"_type":13,"title":4168,"_source":15,"_file":4169,"_stem":4170,"_extension":18},"/en-us/blog/top-10-technical-articles-of-2022",{"title":4155,"description":4156,"ogTitle":4155,"ogDescription":4156,"noIndex":6,"ogImage":4157,"ogUrl":4158,"ogSiteName":669,"ogType":670,"canonicalUrls":4158,"schema":4159},"Top 10 technical articles of 2022","Let’s review our fantastic year of how-to guides. From fixing failed pipelines to making the best use of GitOps, we have you covered with our in-depth tutorials.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749663736/Blog/Hero%20Images/a-deep-dive-into-the-security-analyst-persona.jpg","https://about.gitlab.com/blog/top-10-technical-articles-of-2022","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top 10 technical articles of 2022\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Valerie Silverthorne\"}],\n        \"datePublished\": \"2022-12-08\",\n      }",{"title":4155,"description":4156,"authors":4161,"heroImage":4157,"date":4162,"body":4163,"category":769,"tags":4164},[3706],"2022-12-08","\nWith 2022 coming to a close, we wanted to ensure everyone gets one more chance to explore our top 10 technical blog posts of the year. Roll up your sleeves and enjoy our most-viewed how-to articles and don’t forget to bookmark them for next year!\n\n## 1. Failed pipeline? 
\n\nWe have *all* been there, and not much is more frustrating than that red X. Staff Developer Evangelist [Brendan O’Leary](/company/team/#brendan) offers his best advice on troubleshooting the “why?” of a GitLab failed pipeline – it starts with keeping the right perspective. So many factors are involved in code development that it’s critical to ask all of the questions: Is it the code? Is it the test? Is it a vulnerability, etc.?\n\n[How to troubleshoot a GitLab pipeline failure](/blog/how-to-troubleshoot-a-gitlab-pipeline-failure/)\n\n## 2. Why Git Rebase is your BFF\n\nWith code review increasingly important to successful DevOps, Senior Backend Engineer (Gitaly) [Christian Couder](/company/team/#chriscool) thinks devs might be forgetting a secret weapon in their IDE: Git Rebase. Learn how to rework commits with Git Rebase, including expert tips to try different instructions like ‘reword’, ‘edit’, and ‘squash’.\n\n[Take advantage of Git Rebase](/blog/take-advantage-of-git-rebase/)\n\n## 3. Alert fatigue is real\n\nFollow along with Senior Site Reliability Engineer [Steve Azzopardi](/company/team/#steveazz) as he lays out a GitLab investigation into annoying, time-consuming (and customer-facing) 502 errors in the GitLab Pages logs. To uncover the problem, Azzopardi and team had to unearth some red herrings along the way, but ultimately discovered the importance of PID 1 in a container.\n\n[How we reduced 502 errors by caring about PID 1 in containers](/blog/how-we-removed-all-502-errors-by-caring-about-pid-1-in-kubernetes/)\n\n## 4. More pipelines = less complexity\n\nCI/CD is at the heart of most modern DevOps practices, but that doesn’t mean it’s a “set it and forget it.” Staff Backend Engineer Fabio Pittino acknowledges the complexity challenges of CI/CD and suggests the solution is choosing the right pipelines for the job. 
Understand the differences between parent-child and multi-project pipelines to streamline your CI/CD efforts.\n\n[Breaking down CI/CD complexity with parent-child and multi-project pipelines](/blog/parent-child-vs-multi-project-pipelines/)\n\n## 5. Hacking and bug bounties\n\nHow did a Swedish web developer go from zero to number seven on our HackerOne Top 10 list in just over a year? Johan Carlsson offers a detailed look at how and why he started looking for bugs in GitLab in his spare time, and how others can jump into hacking, too.\n\n[Want to start hacking? Here’s how to quickly dive in](/blog/cracking-our-bug-bounty-top-10/)\n\n## 6. GitLab… on an iPad\n\nYes, you can code on an M1-chip-based iPad, and Staff Developer Evangelist Brendan O’Leary walks through all the necessary steps to get GitLab running using GitPod.\n\n[How to code, build, and deploy from an iPad using GitLab and GitPod](/blog/how-to-code-build-and-deploy-from-an-ipad-using-gitlab-and-gitpod/)\n\n## 7. Speed up database changes\n\nMany DevOps teams have mastered speedy application code changes but have struggled to make database updates equally streamlined. In this step-by-step guide, you’ll learn how to apply DevOps principles to database change management.\n\n[How to bring DevOps to the database with GitLab and Liquibase](/blog/how-to-bring-devops-to-the-database-with-gitlab-and-liquibase/)\n\n## 8. A primer on IaC security\n\nInfrastructure as Code (IaC) is an increasingly popular solution for DevOps teams, and with good reason: It’s an efficient and low-resource solution. But, as Senior Developer Evangelist [Michael Friedrich](/company/team/#dnsmichi) explains, it’s also ripe with potential security vulnerabilities. 
Friedrich takes an exhaustive look at the threats, tools, integrations, and strategies that make IaC a safer choice.\n\n[Fantastic Infrastructure as Code security attacks and how to find them](/blog/fantastic-infrastructure-as-code-security-attacks-and-how-to-find-them/)\n\n## 9. Everything you need to know about GitOps \n\nWant to know how to make GitLab work with GitOps? Senior Product Manager (Configure) [Viktor Nagy](/company/team/#nagyv-gitlab) created an eight-part tutorial covering everything GitLab and GitOps, culminating in how to make a GitLab agent for Kubernetes self-managing. \n\n[The ultimate guide to GitOps with GitLab](/blog/the-ultimate-guide-to-gitops-with-gitlab/)\n\n## 10. The skinny on static site generators\n\nDevs will get the most out of GitLab Pages by choosing the right static site generator (SSG). Developer Evangelist [Fatima Sarah Khalid](/company/team/#sugaroverflow) reviews six options and has also created a toolkit to help make the SSG evaluation process easier.\n\n[How to choose the right static site generator](/blog/comparing-static-site-generators/)\n\n",[773,9,726],{"slug":4166,"featured":6,"template":684},"top-10-technical-articles-of-2022","content:en-us:blog:top-10-technical-articles-of-2022.yml","Top 10 Technical Articles Of 2022","en-us/blog/top-10-technical-articles-of-2022.yml","en-us/blog/top-10-technical-articles-of-2022",{"_path":4172,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4173,"content":4179,"config":4184,"_id":4186,"_type":13,"title":4187,"_source":15,"_file":4188,"_stem":4189,"_extension":18},"/en-us/blog/top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo",{"title":4174,"description":4175,"ogTitle":4174,"ogDescription":4175,"noIndex":6,"ogImage":4176,"ogUrl":4177,"ogSiteName":669,"ogType":670,"canonicalUrls":4177,"schema":4178},"Top tips for efficient AI-powered Code Suggestions with GitLab Duo","Explore best practices  for using Code Suggestions and how to combine it with our other AI 
features to greatly improve the developer experience (includes real-world exercises).","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749669095/Blog/Hero%20Images/gitlabduo.png","https://about.gitlab.com/blog/top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Top tips for efficient AI-powered Code Suggestions with GitLab Duo\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2024-06-11\",\n      }",{"title":4174,"description":4175,"authors":4180,"heroImage":4176,"date":4181,"body":4182,"category":702,"tags":4183},[1612],"2024-06-11","[GitLab Duo](https://about.gitlab.com/gitlab-duo/), our suite of AI-powered features, provides a unique opportunity to make your DevSecOps workflows more efficient. To make the most of GitLab Duo requires hands-on practice and learning in public together. This tutorial centers on GitLab Duo Code Suggestions and provides tips and tricks, learned best practices, and some hidden gems (including how to pair Code Suggestions with our other AI features for even more efficiency). You'll also discover how AI greatly improves the developer experience.\n\nThe best practices, tips, and examples in this article have been created from scratch and are included in the [GitLab Duo documentation](https://docs.gitlab.com/ee/user/gitlab_duo/index.html) and [GitLab Duo prompts project](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts), maintained by the GitLab Developer Relations team. Bookmark this page, and navigate into the respective chapters at your convenience.\n\nWhat you'll learn:\n\n1. [Why use GitLab Duo Code Suggestions?](#why-use-gitlab-duo-code-suggestions%3F)\n1. [Start simple, refine prompts](#start-simple-refine-prompts)\n1. 
[Practice, practice, practice](#practice-practice%2C-practice)\n    - [Fix missing dependencies](#fix-missing-dependencies)\n    - [Boilerplate code: Optimized logging](#boilerplate-code-optimized-logging)\n    - [Utility helper functions, well tested](#utility-helper-functions-well-tested)\n    - [Generate regular expressions](#generate-regular-expressions)\n1. [Re-trigger Code Suggestions](#re-trigger-code-suggestions)\n    - [Common keyboard combinations to re-trigger Code Suggestions](#common-keyboard-combinations-to-re-trigger-code-suggestions)\n    - [Stuck in the middle of suggestions](#stuck-in-the-middle-of-suggestions)\n    - [Code Suggestions stopped](#code-suggestions-stopped)\n1. [Code Suggestions vs. code generation](#code-suggestions-vs.-code-generation)\n    - [Start with a comment on top for code generation](#start-with-a-comment-on-top-for-code-generation)\n    - [Intent detection for code suggestions and generation](#intent-detection-for-code-suggestions-and-generation)\n    - [Tell a story for efficient code generation](#tell-a-story-for-efficient-code-generation)\n    - [Generate regular expressions](#generate-regular-expressions)\n    - [Iterate faster with code generation](#iterate-faster-with-code-generation)\n    - [Practical code generation: Cloud-native observability](#practical-code-generation-cloud-native-observability)\n1. [Take advantage of all GitLab Duo features](#take-advantage-of-all-gitlab-duo-features)\n    - [Combine Chat with Code Suggestions](#combine-chat-with-code-suggestions)\n    - [Use Chat to generate build configuration](#use-chat-to-generate-build-configuration)\n    - [Use Chat to explain potential vulnerabilities](#use-chat-to-explain-potential-vulnerabilities)\n    - [Combine vulnerability resolution with Code Suggestions](#combine-vulnerability-resolution-with-code-suggestions)\n1. 
[More tips](#more-tips)\n    - [Verify code quality and security](#verify-code-quality-and-security)\n    - [Learn as a team, and understand AI's impact](#learn-as-a-team-and-understand-ai-impact)    \n    - [Development is a marathon, not a sprint](#development-is-a-marathon-not-a-sprint)\n    - [Contribute using GitLab Duo](#contribute-using-gitlab-duo)\n1. [Share your feedback](#share-your-feedback)\n\n## Why use GitLab Duo Code Suggestions?\n\nConsider these two scenarios:\n\n1. As a senior developer, you have the confidence in your ability with various programming languages to write new source code, review existing code, design resilient architectures, and implement new projects. However, getting familiar with the latest programming language features requires time, research, and a change of habits. So how can you quickly learn about new language feature additions that could make your code even more robust or use resources more sustainably?\n\n    - As a personal example, I learned the C++03 standard, later C++11 and never really touched base on C++14/17/20/23 standards. Additionally, new languages such as [Rust](https://about.gitlab.com/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/) came around and offered better developer experiences. What now?\n\n2. As a new engineer, it can be challenging to navigate new projects, get familiar with a new programming language, understand specific algorithms, and find the documentation for structures, interfaces, and other technical components. New engineers are also learning under pressure, which often leads to errors and roadblocks along the way. There is no time for digging into best practices.\n\n    - I, myself, never really learned frontend engineering, just some self-taught HTML, CSS, and JavaScript. 
Adapting into frontend frameworks such as VueJS after a decade feels overwhelming, and I have little time to learn.\n\nThese scenarios show how hard it can be to keep up with the latest programming languages, best practices, and other key information. [GitLab Duo Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/) predictively completes code blocks, defines function logic, generates tests, and proposes common code like regex patterns – all in your coding environment. Code Suggestions provides the AI assistance necessary to learn what you need to know while staying in your development flow.\n\n> Live demo! Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Register today](https://about.gitlab.com/seventeen/)!\n\n## Start simple, refine prompts\n\nMy own GitLab Duo adoption journey started with single-line code comments, leading to not-so-great results at first. \n\n```\n# Generate a webserver\n\n// Create a database backend\n\n/* Use multi-threaded data access here */\n```\n\nAfter experimenting with different contexts, and writing styles, I found that code generation out of refined comments worked better. \n\n```\n# Generate a webserver, using the Flask framework. Implement the / URL endpoint with example output.\n\n// Create a database backend. Abstract data handlers and SQL queries into function calls.\n\n/* Use multi-threaded data access here. Create a shared locked resource, and focus on supporting Linux pthreads. */\n```\n\nCode comments alone won't do the trick, though. Let's explore more best practices.\n\n## Practice, practice, practice \n\nFind use cases and challenges for your daily workflows, and exclusively use GitLab Duo. It can be tempting to open browser search tabs, but you can also solve the challenge in your IDE by using GitLab Duo. Here are some examples:\n\n1. Fix missing dependencies (which always cause build/execution failures).\n1. 
If you're missing logging context, let Code Suggestions auto-complete started function calls, including `print` statements.\n1. Generate common methods and attributes for object-oriented design patterns (e.g. getter/setter methods, `toString()` and object comparison operators, object inheritance, etc.).\n1. Identify the function that generates random crashes. Use Code Suggestions to implement a new function with a different algorithm.\n1. If you encounter a cryptic error where the application cannot be compiled or executed, ask GitLab Duo Chat about it.\n1. Learn about existing (legacy) code, and strategies to document and refactor code into modern libraries. Start a v2 of an application with a new framework or programming language, helping solve technical debt.\n1. Prevent operations and security issues in Git history by detecting them before they occur (e.g. performance, crashes, security vulnerabilities).\n\nThink of the most boring - or most hated - coding task, and add it to the list above. My least favorite tasks are attribute getter/setter methods in C++ classes (as can be seen in the video below), immediately followed by regular expressions for email address format.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Z9EJh0J9358?si=QGvQ6mXxPPz4WpM0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nIt can also help to use Code Suggestions in different programming languages, for example focusing on backend and frontend languages. If you are experienced in many languages, take a look into languages that you have not used in a while, or look into learning a new programming language such as [Python](https://about.gitlab.com/blog/learning-python-with-a-little-help-from-ai-code-suggestions/) or [Rust](https://about.gitlab.com/blog/learning-rust-with-a-little-help-from-ai-code-suggestions-getting-started/). 
\n\nWhen you adopt Code Suggestions into a fast auto-completion workflow, it can happen without any interruption. The suggested code is greyed out and optional, depending on the user interface – for example, VS Code. This means that it will not distract you from continuing to write source code. Try using Code Suggestions on your own by familiarizing yourself with how suggestions are shown, how you can fully or partially accept them, and soon they will become optional help to write better code. \n\n![Image with code suggestions greyed out](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_java_springboot_class_methods_tostring.png)\n\n### Fix missing dependencies\n\nAfter building or running source code, missing dependency errors might be logged and prevent further execution and testing. The following example in Go shows an error from `go build`, where the source code did not import any dependencies yet. A manual approach can be collecting all listed dependencies, running a unique sort on them, and adding them into the source code file, as shown below.\n\n![Go build failed - missing dependencies](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_go_build_failed_missing_deps.png)\n\nBut what if GitLab Duo knows about the file context and missing dependencies already? 
Navigate into the top section and add a comment, saying `// add missing imports` and wait for Code Suggestions.\n\n![GitLab Duo Code Suggestions - go build failed missing dependencies suggested fix](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_go_build_failed_missing_deps_suggested_fix.png)\n\nRunning `go build` again results in success, and the source code can be tested and run.\n\n![Go build failed - missing dependencies fixed](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_go_build_failed_missing_deps_fixed.png)\n\n### Boilerplate code: Optimized logging\n\n*Q: Logging – and more observability data with metrics and traces – can be hard and tedious to implement. What is the most efficient way to implement them that does not impact the application performance or cause bugs?*\n\n*A: Use Code Suggestions to generate logging function calls, and refactor the code into robust observability instrumentation library abstractions. 
This method can help to prepare the code for later integration with [OpenTelemetry](https://docs.gitlab.com/ee/development/stage_group_observability/gitlab_instrumentation_for_opentelemetry.html), for example.*\n\nExample for a logging class in Ruby:\n\n```ruby\n# Create Logging utility class\n# Define default log level as attribute\n# Add method for logging, inputs: level, app, message\n# Print the data with formatted date and time in syslog format\n\n# Potential AI-generated code below\nclass Logging\n  attr_accessor :log_level\n\n  def log(level, app, message)\n    time = Time.now.strftime(\"%b %d %H:%M:%S\")\n    puts \"#{time} #{app}[#{Process.pid}]: #{level} - #{message}\"\n  end\nend\n\n# Instantiate class and test the log method\n\n# Potential AI-generated code below\nlogger = Logging.new\nlogger.log_level = :debug\n\nlogger.log(:info, 'MyApp', 'Application started - info')\nlogger.log(:debug, 'MyApp', 'Application started - debug')\nlogger.log(:error, 'MyApp', 'Application started - error')\nlogger.log(:fatal, 'MyApp', 'Application started - fatal')\nlogger.log(:warn, 'MyApp', 'Application started - warn')\n```\n\n### Utility helper functions, well tested\n\n*Q: The programming language does not provide basic functions in the standard library. I'm tempted to open my browser to add string manipulation and regular expression parser functions.*\n\n*A: Create a new file called `utility.{language-suffix}` and add a code comment on top. 
Instruct Code Suggestions to generate a string manipulation function ().*\n\nExample for a string manipulation method in Python:\n\n```python\n# Create a function to search for a string in a string, return the index\n# Optionally remove search string, and return modified string\n# Test the function on the string \"Cool programming languages: C++, Rust, Python\" - Remove the string \"C++\"\n\n# Potential AI-generated code below\ndef search_and_remove(text, search):\n    index = text.find(search)\n    if index != -1:\n        return index, text[:index] + text[index+len(search):]\n    else:\n        return None, text\n\ntext = \"Cool programming languages: C++, Rust, Python\"\nindex, new_text = search_and_remove(text, \"C++\")\nprint(index, new_text)\n```\n\n**Async exercise:** Ask GitLab Duo Chat how to add tests in Python, select the source code, and use the `/tests` slash command. \n\nA similar example can be implemented in Go, creating utility functions for unique integer values in an array, or the sum of all even values. Start with defining the project structure through `go mod init gitlab.com/group/project` and create the `array.go` file. 
Define the `package` and start with the first code comment to generate the functions.\n\n```go\npackage array \n\n// Create a function that returns unique values from an integer array\n\n// Possible AI-generated code\nfunc unique(ints []int) []int {\n    occurred := map[int]bool{}\n    result := []int{}\n    for _, i := range ints {\n        if !occurred[i] {\n            occurred[i] = true\n            result = append(result, i)\n        }\n    }\n    return result\n}\n\n// Create a function that returns the sum of all even numbers in an integer array\n\n// Possible AI-generated code\nfunc sumEvens(ints []int) int {\n    var sum int\n    for _, i := range ints {\n        if i%2 == 0 {\n            sum += i\n        }\n    }\n    return sum\n}\n```\n\n**Async exercise**: Create more utility helper functions in dedicated libraries, and use Chat to select and generate `/tests`. For the Go example, you can inspect potential solutions in the `go/utility/array_test.go` file in the [GitLab Duo Prompts project](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts). Build and test the code using `go build && go test`.\n\n### Generate regular expressions\n\nDevelopers' favorite one liners, never touched again. `git blame` knows very well but might not be able to provide enough context. GitLab Duo can help with regular expressions creation, explanation, and refactoring, in the following example:\n\n*Q: My regular expressions for parsing IPv6 and IPv4 addresses do not work. What's the best approach to solve this?*\n\n*A: Use Code Suggestions comments to generate examples using these regex types. Combine the questions with Chat, and ask for more examples in different languages. You can also select the existing source, and use a refined prompt with `/refactor using regular expressions` in the Chat prompt.*\n\n**Async exercise**: Choose your favorite language, create a function stub that checks IPv6 and IPv4 address strings for their valid format. 
Trigger Code Suggestions to generate a parsing regular expression code for you. Optionally, ask Chat how to refine and refactor the regex for greater performance.\n\nI chose TypeScript, a language on my personal learning list for 2024: `// Generate a TypeScript function which parses IPv6 and IPv4 address formats. Use regular expressions`.\n\n![Code Suggestions - typescript utility parse ip address regex](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_typescript_utility_parse_ip_address_regex.png)\n\n![Code Suggestions typescript - utility parse ip address regex tests](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_typescript_utility_parse_ip_address_regex_tests.png)\n\n## Re-trigger Code Suggestions\n\nYou can trigger Code Suggestions by pressing the `enter` or `space` key, depending on the context. In VS Code and the GitLab Web IDE, the GitLab Duo icon will appear in the same line, and at the bottom of the window.\n\nIf you accepted a suggestion, but actually want to try a different suggestion path, select the code, delete the line(s) and start over.\n\n> **Tip:** Different keystrokes and strategies for Code Suggestions are recorded in this video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ORpRqp-A9hQ?si=CmA7PBJ9ckWsvjO3\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Common keyboard combinations to re-trigger Code Suggestions\n\nEspecially in the early adoption phase of Code Suggestions, you'll need to practice to get the best results from comments, existing code style, etc., put into context.\n\nA common keystroke pattern for triggering suggestions can be\n\n1. Press `Enter` and wait for the suggestion.\n1. Press `Space` followed by `Backspace` to immediately delete the whitespace again, or\n1. 
Press `Enter` to re-trigger the suggestion. `Backspace` to delete any leftover new lines.\n\nWhen a suggestion makes sense, or you want to see how far you can get:\n\n1. Continue pressing `Tab` to accept the suggestion.\n1. Add a space or press `Enter` to open a new scope for triggering a new suggestion.\n1. Continue accepting suggestions with `Tab`. \n\nNote that generative AI sometimes ends up in a loop of suggesting similar code paths over and over again. You can trigger this behavior by inserting test data into an array, using strings and numbers in a sorted order or by generating different API endpoints, as it tries to guess which other endpoints could be helpful. When this happens, break the acceptance flow, and continue writing code as normal.\n\n### Stuck in the middle of suggestions\n\nSometimes, the code suggestions may stop in the middle of a variable, function, etc. definition. If you are unsure about the syntax, or want to restart the code suggestions:\n\n1. Delete the last character(s) or the entire line, using `Backspace`.\n1. Alternatively, use `shift cursor left` (select characters) or `cmd shift cursor left` (select entire line), followed by `Backspace`.\n1. Move the cursor into the line above, and press `Enter` to force a Code Suggestions trigger again.\n\n### Code Suggestions stopped\n\nWhen Code Suggestions stops, there can be multiple reasons:\n\n1. The current file scope ends – for example, a `main()` function has been generated and closed.\n1. There could be connection problems to the GitLab instance (self-managed) or GitLab.com (SaaS, [Dedicated](https://about.gitlab.com/dedicated/)). Follow the [troubleshooting documentation](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/troubleshooting.html).\n\n## Code suggestions vs. code generation\n\nCode suggestions \"come as you go\" while writing code, and help with completing line(s). 
Code generation on the other hand requires more context to create entire code blocks, consisting of functions, algorithms, classes, etc. \n\nThe following sections discuss both methods, and how to get started with a practical example. \n\n### Code suggestions flow with comments\n\nUse your natural programming flow, and stop to adapt to adding code comments when helpful for context and better suggestions. You can accept code suggestions using the `Tab` key, or selectively accept words using the `cmd cursor right` keyboard shortcut.\n\nThe following new challenge implements a simple Linux statistics tool in C, mimicking the functionality of `iostat`, `vmstat` and `du` CLI commands on Linux. Sometimes, these low-level metrics come in handy for presenting application metrics, or otherwise help with debugging when requesting support data from customers.\n\nCreate a new application directory called `linux_stat` and `main.c` file, and let Code Suggestions complete everything else. First, start with a simple code comment block:\n\n```c\n// Create a Linux statistic tool\n// Collect IO, performance, disk usage, TCP latency\n// Print summaries\n```\n\nNext, press `Enter` to trigger the creation of the `main()` method. If not automatically created, force it through a new code comment.\n\n```c\n// Create main function to collect data\n```\n\nIn my attempts, generative AI did not immediately implement all functionality. 
Instead, it added a `TODO` comment into the following code block.\n\n```c\nint main() {\n    struct stat statbuf;\n\n    if (stat(\"/etc\", &statbuf) != 0) {\n        perror(\"stat\");\n        return 1;\n    }\n\n    printf(\"IO Block Size: %ld\\n\", statbuf.st_blksize);\n\n    // TODO: add CPU usage, disk usage, network latency measurements\n\n    return 0;\n}\n```\n\nMove the cursor after the `TODO` comment, and continue with a new comment to collect CPU usage:\n\n```c\n    // Collect CPU usage\n\n    // Example for AI-generated code below\n    double cpu_usage;\n\n    FILE* stat_file = fopen(\"/proc/stat\", \"r\");\n    if (stat_file == NULL) {\n        perror(\"fopen\");\n        return 1;\n    }\n\n    // Parse /proc/stat to get CPU usage\n    fscanf(stat_file, \"cpu %lf\", &cpu_usage);\n\n    printf(\"CPU Usage: %.2f%%\\n\", cpu_usage * 100);\n\n    fclose(stat_file);\n```\n\nThe generated code might not be 100% accurate, since measuring CPU usage on Linux requires taking samples over a defined time range. Thus, the caller needs to calculate the real CPU utilization from all values in `/proc/stat`, including [`system, nice, user, idle, iowait`, etc.](https://www.kernel.org/doc/Documentation/filesystems/proc.txt).\n\n**Async challenge:** Optimize the collection process with average calculation.\n\nRepeat the steps for disk usage and network latency. The code comment is written by you, the C source code is generated with GitLab Duo.\n\n```c\n    // Collect disk usage\n\n    // Collect network latency\n\n    // Example for AI-generated code below\n    int sockfd = socket(AF_INET, SOCK_STREAM, 0);\n\n    struct sockaddr_in servaddr;\n    servaddr.sin_family = AF_INET;\n    servaddr.sin_port = htons(80);\n    inet_pton(AF_INET, \"8.8.8.8\", &servaddr.sin_addr);\n```\n\nThe generated code may use public IP addresses to test network latency. Change the IPv4 address to GitLab.com's current address. 
\n\n```c\n    inet_pton(AF_INET, \"172.65.251.78\", &servaddr.sin_addr);\n```\n\nThe generated source code added more header dependencies. Navigate into the top section of the file, and add a comment to include all required headers:\n\n```c\n// Import required headers\n```\n\nBuild and run the application on Linux, example on Ubuntu 22 LTS:\n\n```shell\n# Install compiler\napt install gcc\n\n# Build\ngcc main.c -o linux_stat\n\n# Build: If the math.h header is included, linking against the library is needed, too\ngcc linux_stat.c -o linux_stat -lm\n\n# Run the tool\n./linux_stat\n```\n\n**Bonus:** Change the code to use IPv6, and check the GitLab.com IPv6 address again (`dig gitlab.com AAAA +short`).\n\n```c\n    // Collect network latency\n\n    // Use IPv6 \n\n    // Example for AI-generated code below\n    struct sockaddr_in6 servaddr;\n    servaddr.sin6_family = AF_INET6;\n    servaddr.sin6_port = htons(80);\n    //inet_pton(AF_INET6, \"2001:db8::1\", &servaddr.sin6_addr);\n    inet_pton(AF_INET6, \"2606:4700:90:0:f22e:fbec:5bed:a9b9\", &servaddr.sin6_addr);\n\n    int sockfd = socket(AF_INET6, SOCK_STREAM, 0);\n```\n\n![C Linux stat tests](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_c_linux_stat_tests.png)\n\nThe full working source code is available in the [GitLab Duo Prompts project](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts) in the directory for C code.\n\n**Async exercise:** Refactor the C code into Rust, using only GitLab Duo. Start by selecting the source code, and use the Duo Chat prompt `/refactor into Rust`. \n\n> **Tip:** Thoughtful code comments make the source code more readable, too. 
This helps new team members with onboarding, site reliability engineers with debugging production incidents, and open source contributors with [merging their first MRs](https://handbook.gitlab.com/handbook/marketing/developer-relations/contributor-success/community-contributors-workflows/#first-time-contributors).\n\n### Start with a comment on top for code generation\n\nSource code can be organized in multiple files. Whether you start with a new application architecture, or refactor existing source code, you can take advantage of code generation with GitLab Duo.\n\nStart with a comment block on top, and make it a step-by-step description. You can also break longer comments into multiple lines, revisiting the examples in this article. This pattern also helps to think about the requirements, and can help refining the prompts. \n\n```diff\n# Generate a webserver, using the Flask framework. \n# Implement the / URL endpoint with example output.\n+# Add an endpoint for Prometheus metrics\n\n// Create a database backend. \n// Abstract data handlers and SQL queries into function calls.\n+// Use PostgreSQL as default backend, and SQLite for developers as fallback.\n\n/* \nUse multi-threaded data access here.\nCreate a shared locked resource, and focus on supporting Linux pthreads. 
\n+Abstract the thread creation/wait procedures into object-oriented classes and methods.\n*/\n```\n\nMore code generation prompts for [supported programming languages](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/supported_extensions.html) are available in the [GitLab Duo Use Cases documentation](https://docs.gitlab.com/ee/user/gitlab_duo/use_cases.html#code-generation-prompts).\n\n### Intent detection for code suggestions and generation\n\nCode Suggestions, depending on the GitLab Language Server in your IDE, will parse and detect the intent and offer code completion suggestions in the same line or code generation.\n\nThe technology in the background uses TreeSitter to parse the code into an [AST](https://en.wikipedia.org/wiki/Abstract_syntax_tree), and determine whether the scope is inside a code comment block (generation), or inside the source code (completion). This detection needs to be executed fast on the client IDE, and proves to be a great use case for [WebAssembly](https://webassembly.org/). You can learn more in [this epic](https://gitlab.com/groups/gitlab-org/-/epics/11568), and the following video, which provides a look into the GitLab Language Server powering Code Suggestions:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/VQlWz6GZhrs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Tell a story for efficient code generation\n\nCode generation is art. Tell the story, and AI-powered GitLab Duo can assist you. \n\nThe following example aims to implement an in-memory key-value store in Go, similar to Redis. 
Start with a description comment, and trigger Code Suggestions by continuing with a new line and pressing `Enter`.\n\n```golang\n// Create an in-memory key value store, similar to Redis \n// Provide methods to\n// set/unset keys\n// update values\n// list/print with filters\n```\n\nWe can be more specific – which methods are required for data manipulation? Instruct Code Suggestions to generate methods for setting keys, updating values, and listing all contained data.\n\n```golang\n// Create an in-memory key value store, similar to Redis \n// Provide methods to\n// set/unset keys\n// update values\n// list/print with filters\n```\n\nAccept all suggestions using the `Tab` key. As a next step, instruct Code Suggestions to create a `main` function with test code.\n\n```golang\n// Create a main function and show how the code works\n```\n\nIf the test data is not enough, refine the generated code with a focus on extreme test cases.\n\n> **Tip:** You can use the same method for refined [Chat prompts and test generation](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#write-tests-in-the-ide), `/tests focus on extreme test cases`.\n\n```golang\n// Add more random test data, focus on extreme test cases\n```\n\n![Code Suggestions - go kv more test data](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_go_kv_more_test_data.png)\n\nThe full example, including fixed dependencies, is located in the [gitlab-duo-prompts project](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts) in the `code-suggestions/go/key-value-store` directory. Update the `main.go` file, and build and run the code using the following command: \n\n```shell\ngo build\n./key-value-store\n```\n\nThe first iteration was to create a standalone binary and test different implementation strategies for key-value stores. 
Commit the working code and continue with your GitLab Duo adoption journey in the next step.\n\n> **Tip:** New projects can benefit from Code Generation, and require practice and more advanced techniques to use code comments for prompt engineering. This method can also make experienced development workflows more efficient. Proof of concepts, new library introductions, or otherwise fresh iterations might not always be possible in the existing project and framework. Experienced developers seek to create temporary projects, and isolate or scope down the functionality. For example, introducing a database backend layer, and benchmarking it for production performance. Or, a library causing security vulnerabilities or license incompatibilities should be replaced with a different library, or embedded code functionality.\n\n### Iterate faster with code generation\n\nExperienced developers will say, \"There must be a key-value library in Go, let us not reinvent the wheel.\" Fortunately, Go is a mature language with a rich ecosystem, and awesome-go collection projects, for example [avelino/awesome-go](https://github.com/avelino/awesome-go), provide plenty of example libraries. 
Note: This possibility might not be the case for other programming languages, and requires a case-by-case review.\n\nWe can also ask GitLab Duo Chat first, `Which Go libraries can I use for key-value storage?`:\n\n![Chat - ask golang libraries kv](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_chat_ask_golang_libs_kv.png)\n\nAnd then refine the Code Suggestions prompt to specifically use the suggested libraries, for example, BoltDB.\n\n```diff\n// Create an in-memory key value store, similar to Redis \n// Provide methods to\n// set/unset keys\n// update values\n// list/print with filters\n+// Use BoltDB as external library\n```\n\nRepeat the pattern from above: Generate the source code functions, then ask GitLab Duo to create a main function with test data, and build the code. The main difference is external libraries, which need to be pulled with the `go get` command first. \n\n```shell\ngo get\ngo build\n```\n\nIf the source code build fails with missing dependencies such as `fmt`, practice using GitLab Duo again: Move the cursor into the `import` statement, and wait for the suggestion to add the missing dependencies. Alternatively, add a comment saying `Import all libraries`.\n\n![Code Suggestions - go kv external lib boltdb fix dependencies](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_go_kv_external_lib_boltdb_fix_deps.png)\n\nYou can also add more test data again, and verify how the functions behave: `// Add more random test data, focus on extreme test cases`. 
In the following example, an empty key causes the program to panic.\n\n![Code Suggestions - Go kv external lib boltdb test extreme cases panic](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_go_kv_external_lib_boltdb_test_extreme_cases_panic.png)\n\nThis example is a great preparation for test cases later on.\n\n### Practical code generation: Cloud-native observability\n\nThink of a client application in Go, which lists the current state of containers, pods, and services in a Kubernetes cluster, similar to the `kubectl get pods` command line. The Kubernetes project provides [Go libraries](https://pkg.go.dev/k8s.io/client-go/kubernetes) to programmatically interact with the Kubernetes APIs, interfaces, and object structures.\n\nOpen your IDE, and create a new Go project.\n\n> **Tip:** You can ask Chat how to do it - `How to start a Go project? Please show CLI command examples`. \n\nStart with a single comment on top of the `main.go` file, and describe the application purpose: Observability in Kubernetes.\n\n```golang\n// Create a client for Kubernetes observability\n```\n\nThink about the main requirements: Get access to Kubernetes, create context, namespace, and inspect the state. 
Additionally, instruct Code Suggestions to import packages and create a main package in the `main.go` file.\n\nFirst iteration:\n\n```golang\n// Create a client for Kubernetes observability\n// Inspect container, pod, service status and print an overview\n```\n\nThis might do unexpected things with hardcoding the access credentials, missing contexts, failing builds.\n\nSecond iteration:\n\n```golang\n// Create a client for Kubernetes observability\n// Create a function that\n// Read the kubernetes configuration file from the KUBECONFIG env var\n// Inspect container, pod, service status and print an overview\n```\n\nThis might not know about Kubernetes contexts and namespaces, thus leading to build errors or unexpected results.\n\nThird iteration:\n\n```golang\n// Create a client for Kubernetes observability\n// Create a function that\n// Read the kubernetes configuration file from the KUBECONFIG env var\n// Create kubernetes context, namespace default\n// Inspect container, pod, service status and print an overview\n```\n\nThis example hardcodes the Kubernetes context and default namespace to generate an initial foundation. Later iterations can read the namespace value from a command line parameter, or configuration file.\n\nThe final example can look like the following. 
In addition to the application functionality, it also instructs Code Suggestions to import all dependencies, and create a `main` package in `main.go`.\n\n```golang\n// Create a client for Kubernetes observability\n// Create a function that\n// Read the kubernetes configuration file from the KUBECONFIG env var\n// Create kubernetes context, namespace default\n// Inspect container, pod, service status and print an overview\n// Import necessary packages\n// Create main package\n```\n\n\u003Cdetails> Solution \n\u003Csummary>\n\n```golang\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/tools/clientcmd\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n)\n\nfunc main() {\n\tkubeconfig := os.Getenv(\"KUBECONFIG\")\n\tconfig, err := clientcmd.BuildConfigFromFlags(\"\", kubeconfig)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tclientset, err := kubernetes.NewForConfig(config)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tpods, err := clientset.CoreV1().Pods(\"\").List(context.TODO(), metav1.ListOptions{})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfmt.Printf(\"There are %d pods in the cluster\\n\", len(pods.Items))\n\n\t// Additional code to inspect services, containers, etc\n}\n```\n\n\u003C/summary>\n\u003C/details>\n\nExample output:\n\n![duo code suggestions - go k8s o11y output](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_go_k8s_o11y_output.png)\n\n**Async exercise:** Complete the project with code for inspecting services, containers, etc., and export the findings to [OpenTelemetry](https://opentelemetry.io/).\n\n> **Tip:** Practice with the [GitLab Duo use cases: Code generation prompts](https://docs.gitlab.com/ee/user/gitlab_duo/use_cases.html#code-generation-prompts) in the documentation, and/or send merge requests with your working prompts.\n\nWhile recording a short video to highlight how code generation is working, another 
more refined source code was generated. You can inspect the differences in [this commit](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts/-/commit/a1a46de9789d4791f04b4df9f1a35d05b8e67568), and benefit from both solutions.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/ORpRqp-A9hQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Take advantage of all GitLab Duo features \n\n### Combine Chat with Code Suggestions\n\nIn combination with [GitLab Duo Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat/index.html), Code Suggestions becomes even more powerful. The following workflow illustrates the intersection of AI efficiency:\n\nWrite and generate new code using Code Suggestions. The source code will be verified through CI/CD automation, code quality tests, and security scanning. But what about the developer's knowledge?\n\n1. In your IDE, select the generated code portions and use the [`/explain` slash command](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#explain-code-in-the-ide) in the Chat prompt. You can even refine the prompt to `/explain with focus on algorithms`, or otherwise helpful scopes such as potential security or performance problems, etc.\n\n    - Continue writing and maintaining source code, but at some point code quality decreases and refactoring gets challenging. Ask GitLab Duo Chat for help.\n\n2. In your IDE, select the source code, and use the [`/refactor` slash command](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#refactor-code-in-the-ide) in the Chat prompt. You can refine the prompt to focus on specific design patterns (functions, object-oriented classes, etc.), `/refactor into testable functions` for example._\n\n    - After ensuring more readable code, tests need to be written. What are potential extreme cases, or random data examples for unit tests? 
Research and implementation in various frameworks can take time.\n\n3. In your IDE, select the source code, and use the [`/tests` slash command](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#write-tests-in-the-ide) in the Chat prompt. You can also refine the prompt to focus on specific test frameworks, scenarios, input methods, etc. \n\n    - Code quality and test coverage reports are green again. Focus on efficient DevSecOps workflows with Code Suggestions again. \n\nMore scenarios are described in the [GitLab Duo use cases documentation](https://docs.gitlab.com/ee/user/gitlab_duo/use_cases.html).\n\n### Use Chat to generate build configuration\n\nThe time-intensive research on getting started with a new project can be exhausting. Especially with different paths to do it right, or alternative frameworks, this can lead to more work than anticipated. Newer programming languages like Rust propose one way (Cargo), while Java, C++, etc. offer multiple ways and additional configuration languages on top (Kotlin DSL, CMake DSL, etc.).\n\nTake advantage of asking GitLab Duo how to start a project, generate specific configuration examples for build tools (e.g. `Please show a build.gradle example for Spring Boot`), and reduce the time to start developing, building, and testing source code.\n\n1. Java, Gradle, Spring Boot: `Please show a build.gradle example for Spring Boot`\n1. C++, CMake, clang: `Please show a basic CMake configuration file for C++17, using clang as compiler.`\n1. Python: `Please show how to initialize and configure a Python project on the CLI`\n1. Rust: `Please show how to initialize and configure a Rust project.`, followed by a refinement question: `Explain the structure of Cargo.toml`.\n1. Go: `Please show how to initialize and configure a Go project`. \n\n### Use Chat to explain potential vulnerabilities\n\nLet us assume that some PHP code was generated to create a web form. 
The code might be vulnerable to security issues.\n\n```php\n\u003C?php \n// Create a feedback form for user name, email, and comments\n// Render a HTML form\n\n$name = $_POST['name'];\n$email = $_POST['email'];\n$comments = $_POST['comments'];\n\necho '\u003Cform method=\"post\">';\necho '\u003Clabel for=\"name\">Name:\u003C/label>';\necho '\u003Cinput type=\"text\" id=\"name\" name=\"name\">';\n\necho '\u003Clabel for=\"email\">Email:\u003C/label>';\necho '\u003Cinput type=\"email\" id=\"email\" name=\"email\">';\n\necho '\u003Clabel for=\"comments\">Comments:\u003C/label>';\necho '\u003Ctextarea id=\"comments\" name=\"comments\">\u003C/textarea>';\n\necho '\u003Cinput type=\"submit\" value=\"Submit\">'; \necho '\u003C/form>';\n\n?>\n```\n\nSelect the source code, and [ask Chat to explain](https://docs.gitlab.com/ee/user/gitlab_duo_chat/examples.html#explain-code-in-the-ide), using a refined prompt with `/explain why this code is vulnerable to bad security actors`. \n\n![Code Suggestions - Chat explains potential vulnerability](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_chat_explain_potential_vulnerability.png)\n\n> **Tip**: We are investigating and learning in the local developer environment. The vulnerable source code can be fixed before it reaches a Git push and merge request that trigger security scanning, which will unveil and track the problems, too. Learning about security vulnerabilities helps improve the developer experience.\n\n### Combine vulnerability resolution with Code Suggestions\n\nLets look into another example with an intentional [vulnerability resolution challenge](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/vulnerability-resolution/challenge-resolve-vulnerabilities), and see if we can use Code Suggestions in combination with vulnerability resolution. 
The linked project has been preconfigured with static application security testing (SAST) scanning. You can follow these steps to configure GitLab SAST by using the [SAST CI/CD component](https://gitlab.com/explore/catalog/components/sast) in the `.gitlab-ci.yml` CI/CD configuration file.\n\n```yaml\ninclude:\n  # Security: SAST (for vulnerability resolution)\n  - component: gitlab.com/components/sast/sast@1.1.0\n```\n\nAfter inspecting the vulnerability dashboard and details, you can use [vulnerability explanation](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/#vulnerability-explanation) to better understand the context and potential problems. [Vulnerability resolution](https://docs.gitlab.com/ee/user/application_security/vulnerabilities/index.html#vulnerability-resolution) creates a merge request with a proposed source code fix for a detected security vulnerability. \n\nSometimes, it can be necessary to refine the suggested code. Navigate into the [created MR](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-challenges/vulnerability-resolution/challenge-resolve-vulnerabilities/-/merge_requests/1), and either copy the Git branch path for local Git fetch, or open the Web IDE from the `Edit` button to continue in the browser. 
Navigate into the source code sections with the fixed code portions, and modify the code with a comment:\n\n```\n// refactor using safe buffers, null byte termination\n```\n\n![duo code suggestions - with vulnerability resolution proposal](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_with_vulnerability_resolution_proposal.png)\n\nAlternatively, you can also open Chat, select the source code and use the `/refactor` slash command.\n\n![duo code suggestions - with vulnerability resolution add duo chat refactor](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749677063/Blog/Content%20Images/duo_code_suggestions_with_vulnerability_resolution_add_duo_chat_refactor.png)\n\nA full example is available in the [GitLab Duo use cases documentation](https://docs.gitlab.com/ee/user/gitlab_duo/use_cases.html#explain-and-resolve-vulnerabilities). \n\nHere is a recording of that example:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Ypwx4lFnHP0\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## More tips \n\n### Verify code quality and security\n\nMore generated code requires quality assurance, testing, and security measures. Benefit from all features on a DevSecOps platform:\n\n1. [CI/CD components](https://docs.gitlab.com/ee/ci/components/) and [pipeline efficiency](https://docs.gitlab.com/ee/ci/pipelines/pipeline_efficiency.html)\n1. [Code quality](https://docs.gitlab.com/ee/ci/testing/code_quality.html)\n1. [Code test coverage](https://docs.gitlab.com/ee/ci/testing/code_coverage.html)\n1. [Application security](https://docs.gitlab.com/ee/user/application_security/)\n1. 
[Observability](https://docs.gitlab.com/ee/operations/tracing.html)\n\n### Learn as a team, and understand AI impact\n\nAdapt and explore with dedicated team collaboration sessions, and record them for other teams to benefit from later. You can also follow the [GitLab Duo Coffee Chat playlist on YouTube](https://www.youtube.com/playlist?list=PL05JrBw4t0Kp5uj_JgQiSvHw1jQu0mSVZ).\n\nRead about AI impact metrics, including [How to put generative AI to work in your DevSecOps environment](https://about.gitlab.com/the-source/ai/how-to-put-generative-ai-to-work-in-your-devsecops-environment/) and the [Developing GitLab Duo: AI Impact analytics dashboard measures the ROI of AI](https://about.gitlab.com/blog/developing-gitlab-duo-ai-impact-analytics-dashboard-measures-the-roi-of-ai/). Visit the [AI Transparency Center](https://about.gitlab.com/ai-transparency-center/) to learn more about data usage, transparency, and AI ethics at GitLab.\n\n### Development is a marathon, not a sprint\n\nSometimes, code suggestions might take longer to load, compared to local auto-completion features. Take this time as an advantage, and think about the current algorithm or problem you are trying to solve. Often, a secondary thought can lead to more refined ideas. Or you can take a short break to take a sip from your preferred drink, and continue refreshed when the suggestions arrive.\n\nSome algorithms are super complex, or require code dependencies which cannot be resolved through auto-completion help. Proprietary and confidential code may provide less context to the large language models, and, therefore, require more context in the comments for Code Suggestions. Follow your own pace and strategy, and leverage Code Suggestions in situations where they help with boilerplate code, or helper functions. 
\n\n> **Tip:** Explore [Repository X-Ray](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/repository_xray.html) for more Code Suggestions context, and test experimental features, for example, [support for more languages in VS Code](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/supported_extensions.html#add-support-for-more-languages-for-code-suggestions-in-vs-code). More insights can be found in the epic to [improve acceptance rate for Code Suggestions](https://gitlab.com/groups/gitlab-org/-/epics/13085).\n\n### Contribute using GitLab Duo\n\nYou can use GitLab Duo to contribute to open source projects, using Code Suggestions, code refactoring, documentation through explanations, or test generation.\n\nGitLab customers can [co-create GitLab using GitLab Duo](https://docs.gitlab.com/ee/user/gitlab_duo/use_cases.html#use-gitlab-duo-to-contribute-to-gitlab), too. Follow the updated guidelines for [AI-generated contributions](https://about.gitlab.com/community/contribute/dco-cla/#ai-generated-contributions), and watch an example recording from the GitLab Duo Coffee Chat: Contribute to GitLab using Code Suggestions and Chat:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/TauP7soXj-E\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## Share your feedback\n\nGitLab Duo Code Suggestions enables more efficient development workflows. It requires hands-on practice and exercise through tutorials, team workshops, and guided training. Automated workflows with code quality, security scanning, and observability help tackle challenges with newly introduced source code at a much higher frequency. 
Taking advantage of all GitLab Duo features, including Chat, greatly improves the developer experience on the most comprehensive AI-powered DevSecOps platform.\n\nUse the best practices in this tutorial to kickstart your journey, follow the [GitLab Duo documentation](https://docs.gitlab.com/ee/user/gitlab_duo/index.html), and [ask our teams for GitLab Duo AI workshops](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/) (I have already shadowed customer workshops, they are great!). Please share your Code Suggestions feedback in [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/435783), including screenshots and videos (when possible).\n\n> [Try GitLab Duo for free today!](https://about.gitlab.com/gitlab-duo/#free-trial)",[704,680,9],{"slug":4185,"featured":90,"template":684},"top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo","content:en-us:blog:top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo.yml","Top Tips For Efficient Ai Powered Code Suggestions With Gitlab Duo","en-us/blog/top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo.yml","en-us/blog/top-tips-for-efficient-ai-powered-code-suggestions-with-gitlab-duo",{"_path":4191,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4192,"content":4198,"config":4203,"_id":4205,"_type":13,"title":4206,"_source":15,"_file":4207,"_stem":4208,"_extension":18},"/en-us/blog/tutorial-advanced-use-case-for-gitlab-pipeline-execution-policies",{"title":4193,"description":4194,"ogTitle":4193,"ogDescription":4194,"noIndex":6,"ogImage":4195,"ogUrl":4196,"ogSiteName":669,"ogType":670,"canonicalUrls":4196,"schema":4197},"Tutorial: Advanced use case for GitLab Pipeline Execution Policies","Learn how new GitLab Ultimate functionality can enforce a standardized pipeline across an organization for improved 
compliance.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098083/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_397632156_3Ldy1urjMStQCl4qnOBvE0_1750098083312.jpg","https://about.gitlab.com/blog/tutorial-advanced-use-case-for-gitlab-pipeline-execution-policies","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Advanced use case for GitLab Pipeline Execution Policies\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Dan Rabinovitz\"}],\n        \"datePublished\": \"2025-01-22\",\n      }",{"title":4193,"description":4194,"authors":4199,"heroImage":4195,"date":2293,"body":4201,"category":814,"tags":4202},[4200],"Dan Rabinovitz","[Pipeline execution policies](https://docs.gitlab.com/ee/user/application_security/policies/pipeline_execution_policies.html) are a newer addition to the GitLab DevSecOps platform and a powerful mechanism to enforce CI/CD jobs across applicable projects. They enable platform engineering or security teams to inject jobs into developers’ YAML pipeline definition files, guaranteeing that certain CI/CD jobs will execute no matter what a developer defines in their \\`.gitlab-ci.yml\\` file. \n\nThis article will explain how to utilize pipeline execution policies to create guardrails around the stages or jobs that a developer can use in their pipeline definition. In regulated environments, this may be necessary to ensure developers adhere to a standard set of jobs or stages in their GitLab pipeline. Any job or stage that a developer adds to their pipeline that does not adhere to a corporate standard will cause the pipeline to fail. \n\nOne example use case for pipeline execution policies is ensuring a security scanner job runs. Let’s say an organization has made an investment in a third-party security scanner and they have a requirement that the external scan runs before any merge is made into the main branch. 
Without a pipeline execution policy, a developer could easily skip this step by not including the required code in their `.gitlab-ci.yml` file.  With a pipeline execution policy in place, a security team can guarantee the external security scanning job executes regardless of how a developer defines their pipeline.\n\nTo use pipeline execution policies to enforce these restrictions requires two parts: a shell script to make calls to the GitLab API and the policy itself. This tutorial uses a bash script; if your runner uses a different scripting language, it is easy to adapt to other languages.\n\nHere is the example shell script I will use for this exercise:\n\n``` \n#!/bin/bash\n\necho \"Checking pipeline stages and jobs...\"\n\n# Pull the group access token from the environment variable\nGROUP_ACCESS_TOKEN=\"$PIPELINE_TOKEN\"\n\necho \"PROJECT_ID: $PROJECT_ID\"\necho \"PIPELINE_ID: $PIPELINE_ID\"\n\nif [ -z \"$GROUP_ACCESS_TOKEN\" ]; then  \n  echo \"GROUP_ACCESS_TOKEN (MR_GENERATOR) is not set\"\n  exit 1\nfi\n\nif [ -z \"$PROJECT_ID\" ]; then\n  echo \"PROJECT_ID is not set\"\n  exit 1\nfi\n\nif [ -z \"$PIPELINE_ID\" ]; then\n  echo \"PIPELINE_ID is not set\"\n  exit 1\nfi\n\n# Use the group access token for the API request\napi_url=\"$GITLAB_API_URL/projects/$PROJECT_ID/pipelines/$PIPELINE_ID/jobs\"\necho \"API URL: $api_url\"\n\n# Fetch pipeline jobs using the group access token\njobs=$(curl --silent --header \"PRIVATE-TOKEN: $GROUP_ACCESS_TOKEN\" \"$api_url\")\necho \"Fetched Jobs: $jobs\"\n\nif [[ \"$jobs\" == *\"404 Project Not Found\"* ]]; then\n  echo \"Failed to authenticate with GitLab API: Project not found\"\n  exit 1\nfi\n\n# Extract stages and jobs\npipeline_stages=$(echo \"$jobs\" | grep -o '\"stage\":\"[^\"]*\"' | cut -d '\"' -f 4 | sort | uniq | tr '\\n' ',')\npipeline_jobs=$(echo \"$jobs\" | grep -o '\"name\":\"[^\"]*\"' | cut -d '\"' -f 4 | sort | uniq | tr '\\n' ',')\n\necho \"Pipeline Stages: $pipeline_stages\"  \necho \"Pipeline Jobs: 
$pipeline_jobs\"\n\n# Check if pipeline stages are approved\nfor stage in $(echo $pipeline_stages | tr ',' ' '); do \n  echo \"Checking stage: $stage\"\n  if ! [[ \",$APPROVED_STAGES,\" =~ \",$stage,\" ]]; then\n    echo \"Stage $stage is not approved.\"\n    exit 1\n  fi\ndone\n\n# Check if pipeline jobs are approved \nfor job in $(echo $pipeline_jobs | tr ',' ' '); do\n  echo \"Checking job: $job\"\n  if ! [[ \",$APPROVED_JOBS,\" =~ \",$job,\" ]]; then\n    echo \"Job $job is not approved.\"\n    exit 1\n  fi\ndone\n```\n\nLet’s break this down a bit. \n\nThe first few lines of this code perform some sanity checks, ensuring that a pipeline ID, project ID, and group access token exist.\n\n* A GitLab pipeline ID is a unique numerical identifier that GitLab automatically assigns to each pipeline run.\n* A GitLab project ID is a unique numerical identifier assigned to each project in GitLab.\n* A GitLab group access token is a token that authenticates and authorizes access to resources at the group level in GitLab. This is in contrast to a GitLab personal access token (PAT), which is unique to each user.  \n\nThe bulk of the work comes from the [GitLab Projects API](https://docs.gitlab.com/ee/api/projects.html) call where the script requests the jobs for the specified pipeline. Once you have job information for the currently running pipeline, you can use a simple grep command to parse out stage and job names, and store them in variables for comparison. The last portion of the script checks to see if pipeline stages and jobs are on the approved list. Where do these parameters come from?\n\nThis is where [GitLab Pipeline Execution Policies](https://docs.gitlab.com/ee/user/application_security/policies/pipeline_execution_policies.html) come into play. They enable injection of YAML code into a pipeline. How can we leverage injected YAML to execute this shell script?  
Here’s a code snippet showing how to do this.\n\n```\n## With this config, the goal is to create a pre-check job that evaluates the pipeline and fails the job/pipeline if any checks do not pass\n\nvariables:\n  GITLAB_API_URL: \"https://gitlab.com/api/v4\"\n  PROJECT_ID: $CI_PROJECT_ID\n  PIPELINE_ID: $CI_PIPELINE_ID\n  APPROVED_STAGES: \".pipeline-policy-pre,pre_check,build,test,deploy\"\n  APPROVED_JOBS: \"pre_check,build_job,test_job,deploy_job\"\n\npre_check:\n  stage: .pipeline-policy-pre\n  script:\n    - curl -H \"PRIVATE-TOKEN:${REPO_ACCESS_TOKEN}\" --url \"https://\u003Cgitlab_URL>/api/v4/projects/\u003Cproject_id>/repository/files/check_settings.sh/raw\" -o pre-check.sh\n    - ls -l\n    - chmod +x pre-check.sh\n    - DEBUG_MODE=false ./pre-check.sh  # Set DEBUG_MODE to true or false\n  allow_failure: true\n```\n\nIn this YAML snippet, we set a few variables used in the shell script. Most importantly, this is where approved stages and approved jobs are defined. After the `variables` section, we then add a new job to the `.pipeline-policy-pre` stage. This is a reserved stage for pipeline execution policies and is guaranteed to execute before any stages defined in a `.gitlab-ci.yml` file.  There is a corresponding `.pipeline-policy-post` stage as well, though we will not be using it in this scenario.  \n\nThe script portion of the job does the actual work. Here, we leverage a curl command to execute the shell script defined above. This example includes authentication if it’s located in a private repository. However, if it’s publicly accessible, you can forgo this authentication. The last line controls whether or not the pipeline will fail. In this example, the pipeline will continue. This is useful for testing – in practice, you would likely set `allow_failure: false` to cause the pipeline to fail. 
This is desired as the goal of this exercise is to not allow pipelines to continue execution if a developer adds a rogue job or stage.\n\nTo utilize this YAML, save it to a `.yml` file in a repository of your choice. We’ll see how to connect it to a policy shortly.\n\nNow, we have our script and our YAML to inject into a developer’s pipeline. Next, let’s see how to put this together using a pipeline execution policy.\n\nLike creating other policies in GitLab, start by creating a new Pipeline Execution Policy by navigating to **Secure > Policies** in the left hand navigation menu. Then, choose **New Policy** at the top right, and select **Pipeline Execution Policy** from the policy creation options.  \n\nFor this exercise, you can leave the **Policy Scope** set to the default options. In the **Actions** section, be sure to choose **Inject** and select the project and file where you’ve saved your YAML code snippet. Click on **Update via Merge Request** at the very bottom to create an MR that you can then merge into your project.\n\nIf this is your first security policy, clicking on **Merge** in the MR will create a [Security Policy Project](https://docs.gitlab.com/ee/user/application_security/policies/vulnerability_management_policy.html), which is a project to store all security policies. When implementing any type of security policy in a production environment, [access to this project should be restricted](https://docs.gitlab.com/ee/user/project/members/) so developers cannot make changes to security policies. In fact, you may also want to consider storing YAML code that’s used by pipeline execution policies in this project to restrict access as well, though this is not a requirement.  
\nExecuting a pipeline where this pipeline execution policy is enabled should result in the following output when you attempt to add an invalid stage to the project `.gitlab-ci.yml` file.\n\n![Output of attempting an invalid stage to project gitlab-ci.yml file](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098102/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098102394.png)\n\nWhile this use case is very focused on one aspect of security and compliance in your organization, this opens the door to other use cases. For example, you may want to make group-level variables accessible to every project within a group; this is possible with pipeline execution policies. Or, you may want to create a golden pipeline and have developers add to it. The possibilities are endless. GitLab customers are finding new and exciting ways to use this new functionality every day.\n\nIf you’re a GitLab Ultimate customer, try this out today and let us know how you’re using pipeline execution policies. Not a GitLab Ultimate customer? 
[Sign up for a free 60-day trial](https://about.gitlab.com/free-trial/devsecops/) to get started.\n\n## Read more\n- [How to integrate custom security scanners into GitLab](https://about.gitlab.com/blog/how-to-integrate-custom-security-scanners-into-gitlab/)\n- [Integrate external security scanners into your DevSecOps workflow](https://about.gitlab.com/blog/integrate-external-security-scanners-into-your-devsecops-workflow/)\n- [Why GitLab is deprecating compliance pipelines in favor of security policies](https://about.gitlab.com/blog/why-gitlab-is-deprecating-compliance-pipelines-in-favor-of-security-policies/)\n",[814,9,183,478,108,680],{"slug":4204,"featured":6,"template":684},"tutorial-advanced-use-case-for-gitlab-pipeline-execution-policies","content:en-us:blog:tutorial-advanced-use-case-for-gitlab-pipeline-execution-policies.yml","Tutorial Advanced Use Case For Gitlab Pipeline Execution Policies","en-us/blog/tutorial-advanced-use-case-for-gitlab-pipeline-execution-policies.yml","en-us/blog/tutorial-advanced-use-case-for-gitlab-pipeline-execution-policies",{"_path":4210,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4211,"content":4217,"config":4222,"_id":4224,"_type":13,"title":4225,"_source":15,"_file":4226,"_stem":4227,"_extension":18},"/en-us/blog/tutorial-automated-release-and-release-notes-with-gitlab",{"title":4212,"description":4213,"ogTitle":4212,"ogDescription":4213,"noIndex":6,"ogImage":4214,"ogUrl":4215,"ogSiteName":669,"ogType":670,"canonicalUrls":4215,"schema":4216},"Tutorial: Automate releases and release notes with GitLab","With the GitLab Changelog API, you can automate the generation of release artifacts, release notes, and a comprehensive changelog detailing all user-centric software modifications.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749659978/Blog/Hero%20Images/automation.png","https://about.gitlab.com/blog/tutorial-automated-release-and-release-notes-with-gitlab","\n                        {\n        
\"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Automate releases and release notes with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ben Ridley\"}],\n        \"datePublished\": \"2023-11-01\",\n      }",{"title":4212,"description":4213,"authors":4218,"heroImage":4214,"date":3095,"body":4220,"category":678,"tags":4221,"updatedDate":676},[4219],"Ben Ridley","***2025 update** - The Changelog API has continued to evolve and now has some great new capabilities we don’t cover in this blog, such as the ability to provide custom changelogs with templated values from your commit history. [Discover more in the official Changelogs docs.](https://docs.gitlab.com/user/project/changelogs/)*\n\nWhen you develop software that users rely on, effective communication about changes with each release is essential. By keeping users informed about new features and any modifications or removals, you ensure they maximize the software's benefits and avoid encountering unpleasant surprises during upgrades.\n\nHistorically, creating release notes and maintaining a changelog has been a laborious task, requiring developers to monitor changes externally or release managers to sift through merge histories. 
With the GitLab Changelog API, you can use the rich history provided in our git repository to easily create release notes and maintain a changelog.\n\nIn this tutorial, we'll delve into automating releases with GitLab, covering the generation of release artifacts, release notes, and a comprehensive changelog detailing all user-centric software modifications.\n\n## Releases in GitLab\nFirst, let's explore how releases work in GitLab.\n\nIn GitLab, a release is a specific version of your code, identified by a git tag, that includes details about changes since the last release (and release notes) and any related artifacts built from that version of the code, such as Docker images, installation packages, and documentation.\n\nYou can create and track releases in GitLab using the UI by calling our Release API or by defining a special `release` job inside a CI pipeline. In this tutorial, we'll use the `release` job in a CI/CD pipeline, which allows us to extend the automation we're using in our pipelines for testing, code scanning, etc. to also perform automated releases.\n\nTo automate our releases, we first need to answer this question: Where are we going to get the information on changes made for our release notes and our changelog? The answer: Our git repository, which provides us with a rich history of development activity through commit messages and merge commit history. Let's see if we can leverage this rich history to automatically create our notes and changelogs.\n\n## Introducing commit trailers\n[Commit trailers](https://git-scm.com/docs/git-interpret-trailers) are structured entries in your git commits, created by adding simple `\u003CHEADER>:\u003CBODY>` format messages to the end of your commit. The `git` CLI tool can then parse and extract these for use in other systems. An example you might have already used is `git commit --sign-off` to sign off on a commit. This is implemented by adding a `Signed-off-by: \u003CYour Name>` trailer to the commit. 
We can add any arbitrary structured data here, which makes it a great place to store information that could be useful for our changelog.\n\nIn fact, if we use a `Changelog: \u003Cadded/changed/removed>` trailer in our commits, the GitLab Changelog API will parse these and use them to create a changelog for us automatically!\n\nLet's see this in action by making some changes to a real codebase and performing a release, and generating release notes and changelog entries.\n\n## Our example project\nFor the purposes of this blog, I'm using a simple Python web app repository. Let's pretend Version 1.0.0 of the application was just released and is the current version of the code. I've also created a 1.0.0 release in GitLab, which I did manually because we haven't created our automated release pipeline yet:\n\n![A screenshot of the GitLab UI showing a release for Version 1.0.0](https://about.gitlab.com/images/blogimages/2023-08-22-automated-release-and-release-notes-with-gitlab/1-0-release.png)\n\n## Making our changes\nWe're in rapid development mode, so we're going to be working on releasing Version 2.0.0 of our application today. As part of our 2.0.0 release, we're going to be adding a new feature to our app: A chatbot! And we're also going to be removing the quantum blockchain feature, because we only needed that for our first venture capital funding round. Also, we're going to be adding an automated release job to our CI/CD pipeline for our 2.0.0 release.\n\nFirst, let's remove unneeded features. I've created a merge request that contains the necessary removals. Importantly, we need to ensure we have a commit message that includes the `Changelog: removed` trailer. There's a few ways to do this, such as including it directly in a commit, or performing an interactive rebase and adding it using the CLI. 
But I think the easiest way in our situation is to leave it until the end and then use the `Edit commit message` button in GitLab to add the trailer to the merge commit like so:\n\n![A screenshot the GitLab UI showing a merge request removing unused features](https://about.gitlab.com/images/blogimages/2023-08-22-automated-release-and-release-notes-with-gitlab/remove-unused-features-mr.png)\n\nIf you use this method, you can also change the merge commit title to something more succinct. I've changed the title of my merge commit to 'Remove Unused Features', as this is what will appear in the changelog entry.\n\nNext, let's add some new functionality for the 2.0.0 release. Again, all we need to do is open another merge request that includes our new features and then edit the merge commit to include the `Changelog: added` trailer and edit the commit title to be more succinct:\n\n![A screenshot of the GitLab UI showing a merge request to add new functionality](https://about.gitlab.com/images/blogimages/2023-08-22-automated-release-and-release-notes-with-gitlab/add-chatbot-mr.png)\n\nNow we're pretty much ready to release 2.0.0. But we don't want to create our release manually this time. So before our release we're going to add some jobs to our `.gitlab-ci.yml` file that will perform the release for us automatically, and generate the respective release notes and changelog entries, when we tag our code with a new version like `2.0.0`.\n\n**Note:** If you want to enforce changelog trailers, consider using something like [Danger to perform automated checks for MR conventions](https://docs.gitlab.com/ee/development/dangerbot.html).\n\n## Building an automated release pipeline\nFor our pipeline to work, we need to create a project access token that will allow us to call GitLab's API to generate changelog entries. 
[Create a project access token with the API scope](https://docs.gitlab.com/ee/user/project/settings/project_access_tokens.html#create-a-project-access-token), and then [store the token as a CI/CD variable](https://docs.gitlab.com/ee/ci/variables/#define-a-cicd-variable-in-the-ui) called `CI_API_TOKEN`. We'll reference this variable to authenticate to the API.\n\nNext, we're going to add two new jobs to our `gitlab-ci.yml` file:\n```yaml\nprepare_job:\n  stage: prepare\n  image: alpine:latest\n  rules:\n  - if: '$CI_COMMIT_TAG =~ /^v?\\d+\\.\\d+\\.\\d+$/'\n  script:\n    - apk add curl jq\n    - 'curl -H \"PRIVATE-TOKEN: $CI_API_TOKEN\" \"$CI_API_V4_URL/projects/$CI_PROJECT_ID/repository/changelog?version=$CI_COMMIT_TAG\" | jq -r .notes > release_notes.md'\n  artifacts:\n    paths:\n    - release_notes.md\n\nrelease_job:\n  stage: release\n  image: registry.gitlab.com/gitlab-org/release-cli:latest\n  needs:\n    - job: prepare_job\n      artifacts: true\n  rules:\n  - if: '$CI_COMMIT_TAG =~ /^v?\\d+\\.\\d+\\.\\d+$/'\n  script:\n    - echo \"Creating release\"\n  release:\n    name: 'Release $CI_COMMIT_TAG'\n    description: release_notes.md\n    tag_name: '$CI_COMMIT_TAG'\n    ref: '$CI_COMMIT_SHA'\n    assets:\n      links:\n        - name: 'Container Image $CI_COMMIT_TAG'\n          url: \"https://$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG:$CI_COMMIT_SHA\"\n```\n\nIn the above configuration, the `prepare_job` uses `curl` and `jq` to call the GitLab Changelog API endpoint and then passes this to our `release_job` to actually create the release. To break it down further:\n- We use the project access token created earlier to call the GitLab Changelog API, which performs the generation of the release notes and we store this as an artifact.\n- We're using the `$CI_COMMIT_TAG` variable as the version. 
For this to work, we need to be using semantic versioning for our tags (something like `2.0.0` for example), so you'll notice I've also restricted the release job using a `rules` section that checks for a semantic version tag.\n\t- Semantic versioning is required for the GitLab Changelog API to work. It uses this format to find the most recent release to compare to our current release.\n- We use the official `release-cli` image from GitLab. The release-cli is required to use the `release` keyword in a job.\n- We use the `release` keyword to create a release in GitLab. This is a special job keyword reserved for creating a release and populating the required fields.\n- We can pass a file as an argument to the `description` of the release. In our case, it's the file we generated in the `prepare_job`, which was passed to this job as an artifact.\n- We've also included our container image that is being built earlier in the pipeline as a release asset. You can attach any assets you'd like from your build process, such as binaries or documentation by providing a URL to wherever you've uploaded them earlier in the pipeline.\n\n## Performing an automated release\nWith this setup, all we need to do to perform a release is push a tag to our repository that follows our versioning scheme. You can simply push a tag using the CLI, this example uses GitLab's UI to create a tag on the main branch. Create a tag by selecting Code -> Tags -> New Tag on the sidebar:\n![A screenshot of the GitLab UI illustrating how to create a tag](https://about.gitlab.com/images/blogimages/2023-08-22-automated-release-and-release-notes-with-gitlab/create-2-tag.png)\n\nOn creation, our pipelines will start to execute. The GitLab Changelog API will automatically generate release notes for us as markdown, which contains all the changes between this release and the previous release. 
Here's the resulting markdown generated in our example:\n\n```md\n## 2.0.0 (2023-08-25)\n\n### added (1 change)\n\n- [Add ChatBot](gl-demo-ultimate-bridley/super-devsecops-incorporated/simply-notes-release-demo@0c3601a45af617c5481322bfce4d71db1f911b02) ([merge request](gl-demo-ultimate-bridley/super-devsecops-incorporated/simply-notes-release-demo!4))\n\n### removed (1 change)\n\n- [Remove Unused Features](gl-demo-ultimate-bridley/super-devsecops-incorporated/simply-notes-release-demo@463d453c5ae0f4fc611ea969e5442e3298bf0d8a) ([merge request](gl-demo-ultimate-bridley/super-devsecops-incorporated/simply-notes-release-demo!3))\n```\n\nAs you can see, GitLab has extracted the entries for our release notes automatically using our git commit trailers. In addition, it's helpfully provided links back to the merge request so readers can see more details and discussion around the changes.\n\nAnd now, our final release:\n![The GitLab release UI showing a release for version 2.0.0](https://about.gitlab.com/images/blogimages/2023-08-22-automated-release-and-release-notes-with-gitlab/2-0-release.png)\n\n## Creating the changelog\nNext, we want to update our changelog (which is basically a collated history of all your release notes). You can use a `POST` request to the changelog API endpoint we used earlier to do this.\n\nYou can do this as part of your release pipeline if you like, for example by adding this to the `script` section of your prepare job:\n```sh\n'curl -H \"PRIVATE-TOKEN: $CI_API_TOKEN\" -X POST \"$CI_API_V4_URL/projects/$CI_PROJECT_ID/repository/changelog?version=$CI_COMMIT_TAG\"\n```\n\n**Note that this will actually modify the repository.** It will create a commit to add the latest notes to a `CHANGELOG.md` file:\n![A screenshot of the repository which shows a commit updating the changelog file](https://about.gitlab.com/images/blogimages/2023-08-22-automated-release-and-release-notes-with-gitlab/changelog-api-commit.png)\n\nAnd we are done! 
By utilizing the rich history provided by `git` with some handy commit trailers, we can leverage GitLab's powerful API and CI/CD pipelines to automate our release process and generate release notes for us.\n\n> If you’d like to explore the project we used for this article, [you can find the project at this link](https://gitlab.com/gitlab-learn-labs/sample-projects/release-automation-demo).\n",[9,771,108,773,835,726],{"slug":4223,"featured":6,"template":684},"tutorial-automated-release-and-release-notes-with-gitlab","content:en-us:blog:tutorial-automated-release-and-release-notes-with-gitlab.yml","Tutorial Automated Release And Release Notes With Gitlab","en-us/blog/tutorial-automated-release-and-release-notes-with-gitlab.yml","en-us/blog/tutorial-automated-release-and-release-notes-with-gitlab",{"_path":4229,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4230,"content":4236,"config":4241,"_id":4243,"_type":13,"title":4244,"_source":15,"_file":4245,"_stem":4246,"_extension":18},"/en-us/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component",{"title":4231,"description":4232,"ogTitle":4231,"ogDescription":4232,"noIndex":6,"ogImage":4233,"ogUrl":4234,"ogSiteName":669,"ogType":670,"canonicalUrls":4234,"schema":4235},"Tutorial: How to set up your first GitLab CI/CD component","Use Python scripts in your GitLab CI/CD pipelines to improve usability. 
In this step-by-step guide, you'll learn how to get started building your own CI/CD component.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098410/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2826%29_3lH4gZFVIGCndksN6Rlg85_1750098409928.png","https://about.gitlab.com/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: How to set up your first GitLab CI/CD component\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sophia Manicor\"},{\"@type\":\"Person\",\"name\":\"Noah Ing\"}],\n        \"datePublished\": \"2024-11-12\",\n      }",{"title":4231,"description":4232,"authors":4237,"heroImage":4233,"date":4238,"body":4239,"category":769,"tags":4240},[2312,831],"2024-11-12","Do you use Python scripts in your GitLab CI pipelines? Do you want to create pipelines at scale? This tutorial shows how to set up your first [GitLab CI/CD component](https://docs.gitlab.com/ee/ci/components/) to deploy Python scripts. \n\nA [CI/CD component is a reusable single pipeline configuration unit](https://about.gitlab.com/blog/introducing-ci-components/). Use components to create a small part of a larger pipeline, or even to compose a complete pipeline configuration.\n\n# Prerequisites\n- Basic Python knowledge\n- Working knowledge of GitLab CI\n- 8 minutes\n\n## Python script \n\n* **[The demo Python script](https://gitlab.com/demos/templates/gitlab-python-cicd-component/-/blob/main/src/script.py?ref_type=heads)**\n\nThis Python script utilizes a library called [ArgParse](https://docs.python.org/3/library/argparse.html) . ArgParse allows you to pass variables to script through the command line. 
This script takes in three arguments:\n[Python_container_image](https://docs.gitlab.com/ee/ci/yaml/#image): This is the Python container image you wish to use.\n[Stage](https://docs.gitlab.com/ee/ci/yaml/#stage): This is the GitLab CI stage in which you job will run in. \nName: This is your name.\n\n```python\nimport argparse\n\nparser = argparse.ArgumentParser(description='Python CICD Component Boilerplate')\nparser.add_argument('python_container_image', type=str, help='python:3.10-slim')\nparser.add_argument('stage', type=str, help='Build')\nparser.add_argument('persons_name', type=str, help='Noah')\nargs = parser.parse_args()\n\npython_container_image = args.python_container_image\nstage = args.stage\npersons_name = args.persons_name\n```\n\nThis will take in these three variables and print out simple statements:\n\n```python\nprint(\"You have chosen \" + python_container_image + \" as the container image\")\nprint(\"You have chosen \" + stage + \" as the stage to run this job\")\nprint(\"Thank you \" + persons_name + \"! you are succesfully using GitLab CI with a Python script.\")\n```\n\nTo test this script locally, you can call on the script by utilizing the following command:\n\n```bash\npython3 src/script.py python_container_image stage name\n```\n\nModify this script accordingly if you’d like to add in your own arguments!\n\n## Template \n\n* **[Demo of template](https://gitlab.com/demos/templates/gitlab-python-cicd-component/-/blob/main/templates/template.yml?ref_type=heads)**\n\n**Note:** As long as the `gitlab-ci.yml` is placed in the templates/directory, the CI/CD component will know to pick it up. We named our template `templates.yml`, but any name would work for this YAML file.\n\nNow, getting into the fun part of CI/CD components, inputs!  [Inputs](https://docs.gitlab.com/ee/ci/yaml/inputs.html) allow you to pass through variables into your pipeline. 
\n\n```yml\nspec:\n  inputs:\n    python_container_image:\n      default: python:3.10-slim\n      description: \"Define any python container image\"\n    stage:\n      default: build\n      description: \"Define the stage this job will run in\"\n    persons_name:\n      default: Noah\n      description: \"Put your name here\"\n```\nHere we have defined the three inputs that are our arguments in our Python script. You can see for each input we have added in a default value – this will be what the input is set to if not overridden. If we took out this default keyword the input would become mandatory when we use our component. As it is written now, adding in these inputs when we use our component is optional due to our default values.\n\nWe can also set descriptions to ensure that other developers can understand what to input when they use our component. Descriptions are optional but they provide self documentation within the code itself, which is always nice.\n\nAfter we set up our inputs, let’s write the rest of our component:\n\n```yml\ncomponent:\n  image: $[[ inputs.python_container_image ]]\n  stage: $[[ inputs.stage ]]\n  before_script:\n    - pip3 install -r src/requirements.txt\n  script: python3 src/script.py $[[ inputs.python_container_image ]] $[[ inputs.stage ]] $[[ inputs.persons_name ]]\n```\n\nTo use inputs in our component, we need to use the syntax `$[[ inputs.$VARIABLE ]]`. In the above code, you can see that we use inputs to define our image and stage with  `$[[ inputs.python_container_image ]]` and   `$[[ inputs.stage ]] `.\n\n```\nscript: python3 src/script.py $[[ inputs.python_container_image ]] $[[ inputs.stage ]] $[[ inputs.persons_name ]]\n```\nDiving into the script section, you can see we call upon our Python script.. 
We are able to pass our inputs in with the help of the ArgParse.\n\nNow that you have reviewed how the Python script works and the template has been set up, it is time to use the component!\n\n## Using the component \n\n* **[A demo of including the component](https://gitlab.com/demos/templates/gitlab-python-cicd-component/-/blob/main/.gitlab-ci.yml?ref_type=heads)\n\nIn order to utilize the CI/CD component we just created, we need to include it in the `.gitlab-ci.yml` file that is in the root of our directory. \n\n```\ninclude:\n  # include the component located in the current project from the current SHA\n  - component: $CI_SERVER_FQDN/$CI_PROJECT_PATH/template@$CI_COMMIT_SHA\n    inputs:\n      python_container_image: python:3.11-slim\n      stage: test\n      persons_name: Tanuki\n```\n\nOne way to include it is to call upon it locally in the current project from the current `Commit SHA`. You can find other ways to [reference a component in our documentation](https://docs.gitlab.com/ee/ci/components/#use-a-component).\n\nTo override the defaults, we have passed in other inputs so we get the correct image, stage, and name for our job. \n\nTry and change the `persons_names` to your own and watch the pipeline run!\n\n![ci/cd component tutorial - pipeline running](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098419/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098418901.png)\n\nVoila! You have learned how to set up a basic C/ICD component utilizing a Python ArgParse script!\n\n## What's next?\nIn the Python script, there is a commented out GitLab Python library and OS library. 
If you would like to interact with the GitLab API, you can uncomment these and add in a [GitLab personal access token](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html) to the [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/) named `GLPAT`.\n\n```\nimport gitlab\nimport os\n```\nAfterwards you can then interact with the GitLab API.\n\n```\nglpat = os.environ['GLPAT']\n\ngl = gitlab.Gitlab(private_token=glpat)\n# SELF_HOSTED gl = gitlab.Gitlab(url='https://gitlab.example.com', private_token='xxxxxxxxxxxxxx')\ntry:\n   projects = gl.projects.list(get_all=True)\n   print(projects)\nexcept Exception as error:\n   print(\"Error:\", error)\n```\n\n> Learn more about CI/CD components and how to avoid building pipelines from scratch with the [GitLab CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/). \n\n## Read more\n\n- [FAQ: GitLab CI/CD Catalog](https://about.gitlab.com/blog/faq-gitlab-ci-cd-catalog/)\n- [Introducing CI/CD Steps, a programming language for DevSecOps automation](https://about.gitlab.com/blog/introducing-ci-cd-steps-a-programming-language-for-devsecops-automation/)\n- [A CI/CD component builder's journey](https://about.gitlab.com/blog/a-ci-component-builders-journey/)\n",[108,9,835,1000],{"slug":4242,"featured":90,"template":684},"tutorial-how-to-set-up-your-first-gitlab-ci-cd-component","content:en-us:blog:tutorial-how-to-set-up-your-first-gitlab-ci-cd-component.yml","Tutorial How To Set Up Your First Gitlab Ci Cd 
Component","en-us/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component.yml","en-us/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component",{"_path":4248,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4249,"content":4254,"config":4259,"_id":4261,"_type":13,"title":4262,"_source":15,"_file":4263,"_stem":4264,"_extension":18},"/en-us/blog/tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access",{"title":4250,"description":4251,"ogTitle":4250,"ogDescription":4251,"noIndex":6,"ogImage":1842,"ogUrl":4252,"ogSiteName":669,"ogType":670,"canonicalUrls":4252,"schema":4253},"Tutorial: Install VS Code on a cloud provider VM and set up remote access","Learn how to automate the installation of VS Code on a VM running on a cloud provider and how to access it from your local laptop.","https://about.gitlab.com/blog/tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Install VS Code on a cloud provider VM and set up remote access\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2024-05-06\",\n      }",{"title":4250,"description":4251,"authors":4255,"heroImage":1842,"date":4256,"body":4257,"category":769,"tags":4258},[699],"2024-05-06","DevSecOps teams can sometimes find they need to run an instance of Visual Studio Code (VS Code) remotely for team members to share when they don't have enough local resources. However, installing, running, and using VS Code on a remote virtual machine (VM) via a cloud provider can be a complex process full of pitfalls and false starts. This tutorial covers how to automate the installation of VS Code on a VM running on a cloud provider.\n\nThis approach involves two separate GitLab projects, each with its own pipeline. 
The first one uses Terraform to instantiate a virtual machine in GCP running Linux Debian. The second one installs VS Code on the newly instantiated VM. Lastly, we provide a procedure on how to set up your local Mac laptop to connect and use the VS Code instance installed on the remote VM.\n\n## Create a Debian Linux distribution VM on GCP\n\nHere are the steps to create a Debian Linux distribution VM on GCP.\n\n### Prerequisites\n\n1. A GCP account. If you don't have one, please [create one](https://cloud.google.com/free?hl=en).\n2. A GitLab account on [gitlab.com](https://gitlab.com/users/sign_in)\n\n**Note:** This installation uses:\n\n- Debian 5.10.205-2 (2023-12-31) x86_64 GNU/Linux, a.k.a Debian 11\n\n### Create a service account and download its key\n\nBefore you create the first GitLab project, you need to create a service account in GCP and then generate and download a key. You will need this key so that your GitLab pipelines can communicate to GCP and the GitLab API.\n\n1. To authenticate GCP with GitLab, sign in to your GCP account and create a [GCP service account](https://cloud.google.com/docs/authentication#service-accounts) with the following roles:\n- `Compute Network Admin`\n- `Compute Admin`\n- `Service Account User`\n- `Service Account Admin`\n- `Security Admin`\n\n3. Download the JSON file with the service account key you created in the previous step.\n4. On your computer, encode the JSON file to `base64` (replace `/path/to/sa-key.json` to the path where your key is located):\n\n   ```shell\n   base64 -i /path/to/sa-key.json | tr -d \\\\n\n   ```\n\n**NOTE:** Save the output of this command. You will use it later as the value for the `BASE64_GOOGLE_CREDENTIALS` environment variable.\n\n### Configure your GitLab project\n\nNext, you need to create and configure the first GitLab project.\n\n1. Create a group in your GitLab workspace and name it `gcpvmlinuxvscode`.\n\n1. 
Inside your newly created group, clone the following project:\n\n   ```shell\n   git@gitlab.com:tech-marketing/sandbox/gcpvmlinuxvscode/gcpvmlnxsetup.git\n   ```\n\n1. Drill into your newly cloned project, `gcpvmlnxsetup`, and set up the following CI/CD variables to configure it:\n   1. On the left sidebar, select **Settings > CI/CD**.\n   1. Expand **Variables**.\n   1. Set the variable `BASE64_GOOGLE_CREDENTIALS` to the `base64` encoded JSON file you created in the previous section.\n   1. Set the variable `TF_VAR_gcp_project` to your GCP `project` ID.\n   1. Set the variable `TF_VAR_gcp_region` to your GCP `region` ID, e.g. us-east1, which is also its default value.\n   1. Set the variable `TF_VAR_gcp_zone` to your GCP `zone` ID, e.g. us-east1-d, which is also its default value.\n   1. Set the variable `TF_VAR_machine_type` to the GCP `machine type` ID, e.g. e2-standard-2, which is also its default value.\n   1. Set the variable `TF_VAR_gcp_vmname` to the GCP `vm name` you want to give the VM, e.g. my-test-vm, which is also its default value.\n\n**Note:** We have followed a minimalist approach to set up this VM. If you would like to customize the VM further, please refer to the [Google Terraform provider](https://registry.terraform.io/providers/hashicorp/google/latest/docs/guides/provider_reference) and the [Google Compute Instance Terraform provider](https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/compute_instance) documentation for additional resource options.\n\n### Provision your VM\n\nAfter configuring your project, manually trigger the provisioning of your VM as follows:\n\n1. On the left sidebar, go to **Build > Pipelines**.\n1. Next to **Play** (**{play}**), select the dropdown list icon (**{chevron-lg-down}**).\n1. 
Select **Deploy** to manually trigger the deployment job.\n\nWhen the pipeline finishes successfully, you can see your new VM on GCP:\n\n- Check it on your [GCP console's VM instances list](https://console.cloud.google.com/compute/instances).\n\n### Remove the VM\n\n**Important note:** Only run the cleanup job when you no longer need the GCP VM and/or the VS Code that you installed in it.\n\nA manual cleanup job is included in your pipeline by default. To remove all created resources:\n\n1. On the left sidebar, select **Build > Pipelines** and select the most recent pipeline.\n1. For the `destroy` job, select **Play** (**{play}**).\n\n## Install and set up VS Code on a GCP VM\n\nPerform the steps in this section only after you have successfully finished the previous sections above. In this section, you will create the second GitLab project that will install VS Code and its dependencies on the running VM on GCP.\n\n### Prerequisites\n\n1. A provisioned GCP VM. We covered this in the previous sections.\n\n**Note:** This installation uses:\n\n- VS Code Version 1.85.2\n\n### Configure your project\n\n**Note:** Since you will be using the `ssh` command multiple times on your laptop, we strongly suggest that you make a backup copy of your laptop local directory `$HOME/.ssh` before continuing.\n\nNext, you need to create and configure the second GitLab project.\n\n1. Head over to your GitLab group `gcpvmlinuxvscode`, which you created at the beginning of this post.\n\n1. Inside group, `gcpvmlinuxvscode`, clone the following project:\n\n   ```shell\n   git@gitlab.com:tech-marketing/sandbox/gcpvmlinuxvscode/vscvmsetup.git\n   ```\n\n1. Drill into your newly cloned project, `vscvmsetup` and set up the following CI/CD variables to configure it:\n   1. On the left sidebar, select **Settings > CI/CD**.\n   1. Expand **Variables**.\n   1. Set the variable `BASE64_GOOGLE_CREDENTIALS` to the `base64` encoded JSON file you created in project `gcpvmlnxsetup`. 
You can copy this value from the variable with the same name in project `gcpvmlnxsetup`.\n   1. Set the variable `gcp_project` to your GCP `project` ID.\n   1. Set the variable `gcp_vmname` to the GCP `vm name` you gave the VM, e.g. my-test-vm.\n   1. Set the variable `gcp_zone` to your GCP `zone` ID, e.g. us-east1-d.\n   1. Set the variable `vm_pwd` to the password that you will use to ssh to the VM.\n   1. Set the variable `gcp_vm_username` to the first portion (before the \"@\" sign) of the email associated to your GCP account, which should be your GitLab email.\n\n### Run the project pipeline\n\nAfter configuring the second GitLab project, manually trigger the provisioning of VS Code and its dependencies to the GCP VM as follows:\n\n1. On the left sidebar, select **Build > Pipelines** and click on the button **Run Pipeline**. On the next screen, click on the button **Run pipeline**.\n\n    The pipeline will:\n\n    - install `xauth` on the virtual machine. This is needed for effective X11 communication between your local desktop and the VM \n    - install `git` on the VM\n    - install `Visual Studio Code` on the VM.\n\n2. At this point, you can wait until the pipeline successfully completes. If you don't want to wait, you can continue to do the first step of the next section. However, you must ensure the pipeline has successfully completed before you can perform Step 2 of the next section.\n\n### Connect to your VM from your local Mac laptop\n\nNow that you have an instance of VS Code running on a Linux VM on GCP, you need to configure your Mac laptop to be able to act as a client to the remote VM. Follow these steps:\n\n1. To connect to the remote VS Code from your Mac, you must first install `XQuartz` on your Mac. 
You can execute the following command on your Mac to install it:\n\n```\nbrew install xquartz\n```\nOr, you can follow the instructions from the following [tutorial](https://und.edu/research/computational-research-center/tutorials/mac-x11.html) from the University of North Dakota.\n\nAfter the pipeline for project `vscvmsetup` successfully executes to completion (pipeline you manually executed in the previous section), you can connect to the remote VS Code as follows:\n\n2. Launch `XQuartz` on your Mac (it should be located in your Applications folder). Its launching should open up an `xterm` on your Mac. If it does not, then you can select **Applications > Terminal** from the `XQuartz` top menu. \n3. On the `xterm`, enter the following command:\n\n```\ngcloud compute ssh --zone \"[GCP zone]\" \"[name of your VM]\" --project \"[GCP project]\" --ssh-flag=\"-Y\"\n```\nWhere:\n\n- `[name of your VM]` is the name of the VM you created in project `gcpvmlnxsetup`. Its value should be the same as the `gcp_vmname` variable.\n- `[GCP zone]` is the zone where the VM is running. Its value should be the same as the `gcp_zone` variable.\n- `[GCP project]` is the assigned name of your GCP project. Its value should be the same as the `gcp_project` variable.\n\n***Note: If you have not installed the Google Cloud CLI, please do so by following the [Google documentation](https://cloud.google.com/sdk/docs/install).***\n\n4. If you have not used SSH on your Mac before, you may not have a `.ssh` in your `HOME` directory. If this is the case, you will be asked if you would like to continue with the creation of this directory. Answer **Y**.\n\n5. Next, you will be asked to enter the same password twice to generate a public/private key. Enter the same password you used when defining the variable `vm_pwd` in the required configuration above.\n\n6. Once the SSH key is done propagating, you will need to enter the password again two times to log in to the VM.\n\n7. 
You should now be logged in to the VM.\n\n### Create a personal access token\n\nThe assumption here is that you already have a GitLab project that you would want to open from and work on the remote VS Code. To do this, you will need to clone your GitLab project from the VM. First, you will be using a personal access token (PAT) to clone your project.\n\n1. Head over to your GitLab project (the one that you'd like to open from the remote VS Code).\n2. From your GitLab project, create a [PAT](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html#create-a-personal-access-token), name it `pat-gcpvm` and ensure that it has the following scopes: `read_repository`, `write_repository`, `read_registry`, `write_registry`, and `ai_features`\n3. Save the generated PAT somewhere safe; you will need it later.\n\n### Clone the repository\n\n1. On your local Mac, from the `xterm` where you are logged on to the remote VM, enter the following command:\n\n```\ngit clone https://[your GitLab username]:[personal_access_token]@gitlab.com/[GitLab project name].git \n```\n\nWhere:\n\n- `[your GitLab username]` is your GitLab handle.\n- `[personal_access_token]` is the PAT you created in the previous section.\n- `[GitLab project name]` is the name of the project that contains the GitLab Code Suggestions test cases.\n\n## Launch Visual Studio Code\n\n1. From the `xterm` where you are logged in to the VM, enter the following command:\n\n```\ncode\n```\n\nWait for a few seconds and Visual Studio Code will appear on your Mac screen.\n\n2. From the VS Code menu, select **File > Open Folder...**\n3. In the File chooser, select the top-level directory of the GitLab project you cloned in the previous section\n\nThat's it! You're ready to start working on your cloned GitLab project using the VS Code that you installed on a remote Linux-based VM.\n\n### Troubleshooting\n\nWhile using the remotely installed VS Code from your local Mac, you may encounter a few issues. 
In this section, we provide guidance on how to mitigate them.\n\n#### Keyboard keys not mapped correctly\n\nIf, while running VS Code, you are having issues with your keyboard keys not being mapped correctly, e.g. letter e is backspace, letter r is tab, letter s is clear line, etc., do the following:\n\n1. In VS Code, select **File > Preferences > Settings**.\n1. Search for \"keyboard\". If having issues with the letter e, then search for \"board\". Click on the \"Keyboard\" entry under \"Application.\"\n1. Ensure that the Keyboard Dispatch is set to \"keyCode.\"\n1. Restart VS Code.\n1. If you need further help, this is a good resource for [keyboard problems](https://github.com/microsoft/vscode/wiki/Keybinding-Issues#troubleshoot-linux-keybindings).\n\n#### Error loading webview: Error\n\nIf while running VS Code, you get a message saying:\n\n\"Error loading webview: Error: Could not register service worker: InvalidStateError: Failed to register a ServiceWorker: The document is in an invalid state.\"\n\n1. Exit VS Code and then enter this cmd from the `xterm` window:\n\n`killall code`\n\nYou may need to execute this command two or three times in a row to kill all VS Code processes.\n\n2. Ensure that all VS Code-related processes are gone by entering the following command from the `xterm` window:\n\n`ps -ef | grep code`\n\n3. Once all the VS Code-related processes are gone, restart VS Code by entering the following command from the `xterm` window:\n\n`code`\n\n#### Some useful commands to debug SSH\n\nHere are some useful commands to run on the VM that can help you debug SSH issues:\n\n1. To get the status, location and latest event of sshd:\n\n`sudo systemctl status ssh`\n\n2. To see the log of sshd:\n\n`journalctl -b -a -u ssh`\n\n3. To restart to SSH daemon:\n\n`sudo systemctl restart ssh.service`\n\nOr\n\n`sudo systemctl restart ssh`\n\n4. 
To start a root shell:\n\n`sudo -s`\n\n## Get started\n\nThis article described how to:\n- instantiate a Linux-based VM on GCP\n- install VS Code and dependencies on the remote VM\n- clone an existing GitLab project of yours in the remote VM\n- open your remotely cloned project from the remotely installed VS Code\n\nAs a result, you can basically use your laptop as a thin client that accesses a remote server, where all the work takes place.\n\n> The automation to get all these parts in place was done by GitLab. Sign up for a [free 30-day GitLab Ultimate trial](https://about.gitlab.com/free-trial/) to get started today!",[1865,9,727],{"slug":4260,"featured":90,"template":684},"tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access","content:en-us:blog:tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access.yml","Tutorial Install Vs Code On A Cloud Provider Vm And Set Up Remote Access","en-us/blog/tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access.yml","en-us/blog/tutorial-install-vs-code-on-a-cloud-provider-vm-and-set-up-remote-access",{"_path":4266,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4267,"content":4273,"config":4279,"_id":4281,"_type":13,"title":4282,"_source":15,"_file":4283,"_stem":4284,"_extension":18},"/en-us/blog/tutorial-integrate-gitlab-merge-request-approvals-with-external-systems",{"title":4268,"description":4269,"ogTitle":4268,"ogDescription":4269,"noIndex":6,"ogImage":4270,"ogUrl":4271,"ogSiteName":669,"ogType":670,"canonicalUrls":4271,"schema":4272},"Tutorial: Integrate GitLab Merge Request approvals with external systems","Learn how to improve GitLab extensibility and integration with external applications in this demo. 
The result: a seamless integration that provides more control over merge requests.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749676011/Blog/Hero%20Images/blog-image-template-1800x945.svg","https://about.gitlab.com/blog/tutorial-integrate-gitlab-merge-request-approvals-with-external-systems","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Integrate GitLab Merge Request approvals with external systems\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Samer Akkoub\"}],\n        \"datePublished\": \"2024-10-08\",\n      }",{"title":4268,"description":4269,"authors":4274,"heroImage":4270,"date":4276,"body":4277,"category":678,"tags":4278},[4275],"Samer Akkoub","2024-10-08","GitLab customers often ask how to connect merge requests to external applications, such as ServiceNow or custom-built applications, to control approvals for the merging of code into a target branch from these external systems. To address this need, GitLab offers [External Status Check](https://docs.gitlab.com/ee/user/project/merge_requests/status_checks.html), a powerful feature that allows the sending of API calls to external systems to request the status of an external requirement, providing seamless integration and control over your merge requests.\n\nIn this article, I'll demonstrate this feature by explaining how to deploy an application I developed. The application is designed to receive status check requests from GitLab Merge Requests, list them, and enable external users to approve/reject these requests without logging in to the GitLab console. As a result, GitLab platform architects will better understand GitLab extensibility and integration with external systems.\n\nThe provided sample application can:\n1. Receive API requests from merge requests.\n2. Store the requests in AlchemyDB running on the same instance.\n3. 
Show Approve/Reject buttons for each row to approve or reject the corresponding merge request status check.\n\n## How to deploy the status review demo application\n1. Import this [GitLab repo project](https://gitlab.com/sakkoub-publicgroup/external-approval-app) to your GitLab account.\n2. The project pipeline will deploy the application to a Kubernetes cluster. To achieve this, define a [GitLab Agent](https://docs.gitlab.com/ee/user/clusters/agent/install/index.html) for Kubernetes in a separate project and include a path to the cloned project under the “[user_access](https://docs.gitlab.com/ee/user/clusters/agent/user_access.html)” section in the agent configuration.\n3. Add a new environment variable `KUBE_CONTEXT`, with the value equal to the used agent path:name, similar to the following structure `path/to/agent/project:agent-name`.\n4. The status check application will be deployed to the `approval-app` namespace by default.\n5. Create the `approval-app` namespace in the target Kubernetes cluster.\n6. In the created namespace, add a secret named `gitlab-token` with the value set to the personal access token (PAT) of the user who will be approving the requests. The approval application will use this PAT to communicate back to the GitLab instance.\n7. Run the status check application pipeline on the main branch.\n8. Once deployed, the application will be exposed behind a load balancer. Use this command to grab the public IP address of the load balancer: `kubectl get services -n approval-app`.\n9. The application can then be accessed using this URL: http://EXTERNAL-IP/approval-apps/. Replace the `EXTERNAL-IP` with the value of the external IP address from the previous step. The resulting page should look like below (the table would be empty as we have not added any new merge requests yet).\n\n![Table showing IP address](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752507534/v0pgvobf09eh9yqxqzrk.png)\n\n## Configure status check in GitLab\n\n1. 
In the GitLab project where the external status check needs to be configured, from the left menu, navigate under settings **-\\> Merge Request** and scroll down to **Status checks**.\n2. Click on **Add status check**.\n3. Add a service name.\n4. For the API to check enter: `http://EXTERNAL-IP/approval-apps/status_check`. Replace the `EXTERNAL-IP` with the external IP address found in the previous steps.\n5. Leave the `Target Branch` to the default, or select branch if you want this check to be triggered only for merge requests against certain branches.\n6. Leave `HMAC Shared Secret` as it is and click **Add status check**.\n\n![How to configure status check](https://res.cloudinary.com/about-gitlab-com/image/upload/v1752507426/jal2hw9ef3pydbetbp7p.png)\n\n## Test everything together\n\n1. In the project where you have configured the external check, create a new merge request from any branch targeting the main branch (assuming the main branch was selected when the external check was configured in the previous section).\n2. In the merge request details, look for the **Status checks** section and it should show `1 Pending`.\n3. Now, in a new tab, open the deployed external check application using this URL (replace `EXTERNAL-IP` with the value of the external IP address from the previous steps): `http://EXTERNAL-IP/approval-apps/`.\n4. A new entry should show in the list for the request external check from the merge request just created. Click on **Approve**.\n5. Switch back to the merge request's details screen and notice how the merge request is showing an approved status now.\n\n## Debugging tips\n\nUse the following notes to debug if something does not go as planned:\n\nIt is always helpful to view the logs for the external status check application. To do so: \n   1. Extract the name of the application pod using this command: `kubectl get pods -n approval-app`.\n   2. 
View the pod logs `kubectl logs [THE NAME OF THE POD] -n approval-app`.\n\nYou can SSH into the application pod and view the database (Alchemydb), which is used for the application. \n   1. `kubectl exec -it \\[POD-NAME\\] -n approval-app -- /bin/sh` \n   2. `cd instance`\n   3. `sqlite3 gitlab_status_checks.db` \n   4. To view the database tables, type `.tables`.\n   5. To describe the table structure, type `PRAGMA table_info('status_check');`.\n   6. To view all the records in the `status_check` table, type `select * from status_check`.\n\n> Discover more about [GitLab External Status Check](https://docs.gitlab.com/ee/user/project/merge_requests/status_checks.html) and how to gain more control over merge requests.\n",[9,678,680,108],{"slug":4280,"featured":6,"template":684},"tutorial-integrate-gitlab-merge-request-approvals-with-external-systems","content:en-us:blog:tutorial-integrate-gitlab-merge-request-approvals-with-external-systems.yml","Tutorial Integrate Gitlab Merge Request Approvals With External Systems","en-us/blog/tutorial-integrate-gitlab-merge-request-approvals-with-external-systems.yml","en-us/blog/tutorial-integrate-gitlab-merge-request-approvals-with-external-systems",{"_path":4286,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4287,"content":4293,"config":4298,"_id":4300,"_type":13,"title":4301,"_source":15,"_file":4302,"_stem":4303,"_extension":18},"/en-us/blog/tutorial-migrate-from-google-cloud-source-repositories-to-gitlab",{"title":4288,"description":4289,"ogTitle":4288,"ogDescription":4289,"noIndex":6,"ogImage":4290,"ogUrl":4291,"ogSiteName":669,"ogType":670,"canonicalUrls":4291,"schema":4292},"Tutorial: Migrate from Google Cloud Source Repositories to GitLab","Google Cloud is deprecating Cloud Source Repositories. 
Learn how to migrate a CSR source code repository to GitLab, along with best practices.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097739/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2813%29_1zdtbfPDHZVe6JC2AbdHmb_1750097738370.png","https://about.gitlab.com/blog/tutorial-migrate-from-google-cloud-source-repositories-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Migrate from Google Cloud Source Repositories to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tsukasa Komatsubara\"},{\"@type\":\"Person\",\"name\":\"Regnard Raquedan\"}],\n        \"datePublished\": \"2024-08-28\",\n      }",{"title":4288,"description":4289,"authors":4294,"heroImage":4290,"date":700,"body":4296,"category":678,"tags":4297},[4295,1841],"Tsukasa Komatsubara","Google Cloud’s [deprecation of Cloud Source Repositories](https://cloud.google.com/source-repositories/docs/release-notes) (CSR) has prompted development teams to seek a full-featured alternative for their source code repositories. GitLab, a [Google Cloud Technology Partner](https://cloud.google.com/find-a-partner/partner/gitlab-inc), is a strong choice due to its comprehensive DevSecOps capabilities.\n\nIn this tutorial, you'll learn the steps to ensure a smooth transition from CSR to GitLab, whether you're using GitLab.com or a self-managed instance on Google Cloud.\n\n## Why GitLab?\nTransitioning from Google Cloud Source Repositories to GitLab is a recommended step. As a strategic partner of Google Cloud, GitLab seamlessly integrates with existing infrastructure with ease and brings value to customers in the following ways:\n- **Unified DevSecOps platform**\n    - Consolidate your entire development lifecycle into a single application, from planning to monitoring. 
Eliminate tool sprawl and dramatically boost productivity.\n- **Seamless Google Cloud integration**\n    - Effortlessly connect with GKE, Cloud Build, and Cloud Storage, ensuring a smooth migration and efficient operations within the Google Cloud ecosystem.\n- **Advanced CI/CD capabilities**\n    - Leverage [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) to automate everything from security scanning to deployment, accelerating your development cycles.\n- **Industry-recognized AI coding assistance**\n    - Benefit from built-in AI-assisted development with [GitLab Duo](https://about.gitlab.com/gitlab-duo/), fostering a secure and efficient coding environment.\n\n## Prerequisites\n\nBefore you start the migration, ensure you have:\n- GitLab account: Set up your account on GitLab.com or on a self-hosted instance.\n- GitLab project: Create a blank project in GitLab where the CSR repository will be migrated.\n\n## Migration steps\n\n1. Create a blank GitLab project: This will serve as the destination for your migrated CSR repository. Keep this project empty for now.\n2. Generate a personal access token (PAT): Navigate to GitLab settings and [generate a PAT](https://docs.gitlab.com/ee/user/profile/personal_access_tokens.html) with `read_repository` and `write_repository` scopes enabled. This token will be used to authenticate your Git operations during the migration process.\n3. Edit code in Cloud Shell Editor: From your CSR repository, open the Cloud Shell Editor by clicking the “Edit code” button. You’ll need to authorize the Cloud Shell and select “Trust repo” to proceed.\n\n![Google Cloud Shell Editor](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097750/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097750517.png)\n\n4. 
Inspect Git status: Run `git status` in the Cloud Shell to check the current branch and ensure everything is in order before pushing to GitLab.\n\n![Inspect Git status](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097750/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097750518.png)\n\n5. Set Up the Remote Repository: Add your GitLab project as a remote repository by running:\n\n```\ngit remote add origin [GITLAB_PROJECT_URL]\n\n```\n\n6. Replace `[GITLAB_PROJECT_URL]` with the actual URL of your GitLab project.\nPush to GitLab: Finally, push your local repository to GitLab by running: \n\n```\ngit push -u origin [BRANCH_NAME]\n\n```\n\n7. Replace `[BRANCH_NAME]` with the current branch name you noted earlier.\nWhen prompted, use your GitLab username and the PAT as the password to authenticate and complete the push.\n\n## Best practices\n\n- Back up before you begin: Always back up your CSR repository before starting the migration process.\n- Test after migration: Ensure all aspects of the repository, including branches and CI/CD pipelines, are functioning as expected in GitLab.\n- Leverage GitLab features: Take advantage of GitLab’s advanced DevSecOps features such as [AI](https://about.gitlab.com/gitlab-duo/), [CI/CD](https://docs.gitlab.com/ee/ci/), and [Enterprise Agile planning](https://about.gitlab.com/solutions/agile-delivery/) to enhance your development workflow.\n\nMoving from Google Cloud Source Repositories to GitLab is easy and offers more benefits than just managing source code. 
GitLab, with its integration with Google Cloud, makes it an ideal choice for developers seeking to enhance their workflow post-migration.\n\n> Read more about [GitLab's integration with Google Cloud](https://about.gitlab.com/blog/gitlab-google-cloud-integrations-now-in-public-beta/).",[9,1248,478],{"slug":4299,"featured":6,"template":684},"tutorial-migrate-from-google-cloud-source-repositories-to-gitlab","content:en-us:blog:tutorial-migrate-from-google-cloud-source-repositories-to-gitlab.yml","Tutorial Migrate From Google Cloud Source Repositories To Gitlab","en-us/blog/tutorial-migrate-from-google-cloud-source-repositories-to-gitlab.yml","en-us/blog/tutorial-migrate-from-google-cloud-source-repositories-to-gitlab",{"_path":4305,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4306,"content":4311,"config":4316,"_id":4318,"_type":13,"title":4319,"_source":15,"_file":4320,"_stem":4321,"_extension":18},"/en-us/blog/tutorial-secure-bigquery-data-publishing-with-gitlab",{"title":4307,"description":4308,"ogTitle":4307,"ogDescription":4308,"noIndex":6,"ogImage":667,"ogUrl":4309,"ogSiteName":669,"ogType":670,"canonicalUrls":4309,"schema":4310},"Tutorial: Secure BigQuery data publishing with GitLab ","Learn how to create repeatable, auditable, and efficient processes for automating and securing BigQuery data exports.","https://about.gitlab.com/blog/tutorial-secure-bigquery-data-publishing-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Secure BigQuery data publishing with GitLab \",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Regnard Raquedan\"}],\n        \"datePublished\": \"2025-03-25\",\n      }",{"title":4307,"description":4308,"authors":4312,"heroImage":667,"date":4313,"body":4314,"category":769,"tags":4315},[1841],"2025-03-25","GitLab offers a powerful solution for automating and securing [BigQuery](https://cloud.google.com/bigquery) data 
exports. This integration transforms manual exports into repeatable, auditable processes that can eliminate security vulnerabilities while saving valuable time. This tutorial explains how to implement this solution so you can quickly reduce manual operations, permission issues, and security concerns with just a few lines of GitLab YAML code.\n\nFollow along with this step-by-step video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/gxXX-ItAreo?si=FijY9wMVppCW-18q\" frameborder=\"0\" allowfullscreen=\"true\">\u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## The solution architecture\n\nOur solution leverages GitLab CI/CD pipelines to automate the secure export of data from BigQuery to Google Cloud Storage. Here's the high-level architecture:\n\n1. SQL code is stored and version-controlled in GitLab.  \n2. After code review and approval, GitLab CI/CD pipeline executes the code.  \n3. The pipeline authenticates with Google Cloud.  \n4. SQL queries are executed against BigQuery.  \n5. Results are exported as CSV files to Google Cloud Storage.  \n6. Secure links to these files are provided for authorized consumption.\n\n## Prerequisites\n\nBefore we begin, ensure you have:\n\n* **Google Cloud APIs enabled:** BigQuery API and Cloud Storage API  \n* **Service account** with appropriate permissions:  \n  * BigQuery Job User  \n  * Storage Admin  \n  * **Note:** For this demo, we're using the service account approach for authentication, which is simpler to set up. For production environments, you might consider using GitLab's identity and access management integration with Google Cloud. This integration leverages Workload Identity Federation, which provides enhanced security and is more suitable for enterprise customers and organizations.  \n* **GitLab project** ready to store your SQL code and pipeline configuration\n\n## Step-by-step implementation\n\n**1. 
Configure Google Cloud credentials.**\n\nFirst, set up the necessary environment variables in your GitLab project:\n\n- Go to your **GitLab project > Settings > CI/CD**.  \n- Expand the **Variables** section.  \n- Add the following variables:  \n   * `GCS_BUCKET`: Your Google Cloud Storage bucket name  \n   * `GCP_PROJECT_ID`: Your Google Cloud project ID  \n   * `GCP_SA_KEY`: Base64-encoded service account key (mark as masked)\n\n**2. Create your SQL query.**\n\nCreate a file named `query.sql` in your GitLab repository with your BigQuery SQL query. The query looks like this:\n\n```\n-- This query shows a list of the daily top Google Search terms.\nSELECT\n   refresh_date AS Day,\n   term AS Top_Term,\n       -- These search terms are in the top 25 in the US each day.\n   rank,\nFROM `bigquery-public-data.google_trends.top_terms`\nWHERE\n   rank = 1\n       -- Choose only the top term each day.\n   AND refresh_date >= DATE_SUB(CURRENT_DATE(), INTERVAL 2 WEEK)\n       -- Filter to the last 2 weeks.\nGROUP BY Day, Top_Term, rank\nORDER BY Day DESC\n   -- Show the days in reverse chronological order.\n\n```\n\nThis query gets the top 25 search terms from Google Trends for the current day.\n\n**3. 
Configure the GitLab CI/CD pipeline.**\n\nCreate a `.gitlab-ci.yml` file in your repository root:\n\n```\nimage: google/cloud-sdk:alpine\n\ninclude:\n  - template: Jobs/Secret-Detection.gitlab-ci.yml  # https://gitlab.com/gitlab-org/gitlab/blob/master/lib/gitlab/ci/templates/Jobs/Secret-Detection.gitlab-ci.yml\n\nexecute:\n  stage: deploy\n  script: \n    # Set up Google Cloud authentication and install necessary components\n    - export GOOGLE_CLOUD_CREDENTIALS=$(echo $GCP_SA_KEY | base64 -d)\n    - echo $GOOGLE_CLOUD_CREDENTIALS > service-account-key.json \n    - gcloud auth activate-service-account --key-file service-account-key.json \n    - gcloud components install gsutil\n    # Set the active Google Cloud project\n    - gcloud config set project $GCP_PROJECT_ID\n    # Run the BigQuery query and export the results to a CSV file\n    - bq query --format=csv --use_legacy_sql=false \u003C query.sql > results.csv\n    # Create a Google Cloud Storage bucket if it doesn't exist\n    - gsutil ls gs://${GCS_BUCKET} || gsutil mb gs://${GCS_BUCKET}\n    # Upload the CSV file to the storage bucket\n    - gsutil cp results.csv gs://${GCS_BUCKET}/results.csv\n    # Set the access control list (ACL) to make the CSV file publicly readable\n    - gsutil acl ch -u AllUsers:R gs://${GCS_BUCKET}/results.csv\n    # Define the static URL for the CSV file\n    - export STATIC_URL=\"https://storage.googleapis.com/${GCS_BUCKET}/results.csv\"\n    # Display the static URL for the CSV file\n    - echo \"File URL = $STATIC_URL\"\n\n```\n\n**4. Run the pipeline.**\n\nNow, whenever changes are merged to your main branch, the pipeline will provide a link to the CSV file stored on the Google Cloud Storage bucket. 
This file contains the result of the executed SQL query that GitLab subjects to security checks.\n\n## Benefits of this approach\n\n* **Security:** Authentication is handled automatically via service accounts (or Workload Identity Federation for enhanced security in production environments).  \n* **Auditability:** All data exports are tracked through GitLab commits and pipeline logs.  \n* **Repeatability:** Consistent, predictable export process on every run, and can be scheduled.  \n* **Version control:** SQL queries are properly versioned and reviewed.  \n* **Automation:** Significantly fewer manual exports, reducing human error.\n\n## Try it today\n\nBy combining GitLab's DevSecOps capabilities with Google Cloud's BigQuery and Cloud Storage, you've now automated and secured your data publishing workflow. This approach reduces manual operations, resolves permission headaches, and addresses security concerns – all achieved with just a few lines of GitLab CI code.\n\n> Use this tutorial's [complete code example](https://gitlab.com/gitlab-partners-public/google-cloud/demos/big-query-data-publishing) to get started now.",[835,478,9,940,230,1248],{"slug":4317,"featured":90,"template":684},"tutorial-secure-bigquery-data-publishing-with-gitlab","content:en-us:blog:tutorial-secure-bigquery-data-publishing-with-gitlab.yml","Tutorial Secure Bigquery Data Publishing With Gitlab","en-us/blog/tutorial-secure-bigquery-data-publishing-with-gitlab.yml","en-us/blog/tutorial-secure-bigquery-data-publishing-with-gitlab",{"_path":4323,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4324,"content":4330,"config":4335,"_id":4337,"_type":13,"title":4338,"_source":15,"_file":4339,"_stem":4340,"_extension":18},"/en-us/blog/tutorial-security-scanning-in-air-gapped-environments",{"title":4325,"description":4326,"ogTitle":4325,"ogDescription":4326,"noIndex":6,"ogImage":4327,"ogUrl":4328,"ogSiteName":669,"ogType":670,"canonicalUrls":4328,"schema":4329},"Tutorial: Security scanning in 
air-gapped environments","Security scanning remains crucial even in air-gapped environments to detect internal threats, prevent data exfiltration, and maintain operational integrity. Learn how GitLab can help get air-gapped environments secure.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099301/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_1097303277_6gTk7M1DNx0tFuovupVFB1_1750099300786.jpg","https://about.gitlab.com/blog/tutorial-security-scanning-in-air-gapped-environments","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Tutorial: Security scanning in air-gapped environments\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2025-02-05\"\n      }",{"title":4325,"description":4326,"authors":4331,"heroImage":4327,"date":4332,"body":4333,"category":814,"tags":4334},[1767],"2025-02-05","Air-gapped environments are computer networks or systems that are physically isolated from unsecured networks, such as the public internet or unsecured local area networks. This isolation is implemented as a security measure to protect sensitive data and critical systems from external cyber threats by providing:\n\n* Enhanced security: By physically isolating systems from external networks, air-gapped environments help prevent remote attacks, malware infections, and unauthorized data access. 
This is crucial for highly sensitive data and critical systems.\n* Data protection: Air-gapping provides the strongest protection against data exfiltration since there's no direct connection that attackers could use to steal information.\n* Critical infrastructure protection: For systems that control vital infrastructure (like power plants, water treatment facilities, or military systems), air-gapping helps prevent potentially catastrophic cyber attacks.\n* Compliance requirements: Many regulatory frameworks require air-gapping for certain types of sensitive data or critical systems, particularly in government, healthcare, and financial sectors.\n* Malware protection: Without network connectivity, systems are protected from network-based malware infections and ransomware attacks.\n\nEven though air-gapped systems are isolated, they can still have vulnerabilities. Regular security scanning helps identify these weaknesses before they can be exploited. In this article, you will learn the different security scanners GitLab provides and how they can be added/updated in a limited-connectivity environment.\n\n## GitLab security scanners in air-gapped environments\n\nGitLab provides a variety of different security scanners for the complete application lifecycle. 
The scanners that support air-gapped environments include:\n\n* [Static Application Security Testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/index.html#running-sast-in-an-offline-environment)  \n* [Dynamic Application Security Testing (DAST)](https://docs.gitlab.com/ee/user/application_security/dast/browser/configuration/offline_configuration.html)  \n* [Secret Detection](https://docs.gitlab.com/ee/user/application_security/secret_detection/pipeline/index.html#offline-configuration)  \n* [Container Scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/index.html#running-container-scanning-in-an-offline-environment)  \n* [Dependency Scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/index.html#offline-environment)  \n* [API Fuzzing](https://docs.gitlab.com/ee/user/application_security/api_fuzzing/configuration/offline_configuration.html)  \n* [License Scanning](https://docs.gitlab.com/ee/user/compliance/license_scanning_of_cyclonedx_files/index.html#running-in-an-offline-environment)\n\nBy default, GitLab Self-Managed instances pull security scanner images from the public GitLab container registry (registry.gitlab.com) and store them within the [built-in local GitLab container registry](https://docs.gitlab.com/ee/user/packages/container_registry/). 
I will demonstrate this flow below by running the following pipeline that scans for secrets on a [sample project](https://gitlab.com/gitlab-da/tutorials/security-and-governance/owasp/juice-shop): \n\n```yaml\ninclude:\n  - template: Jobs/Secret-Detection.gitlab-ci.yml\n```\n\nWhen running the job in an internet-connected GitLab instance the job passes:\n\n![GitLab Runner with internet access successfully pulling from external registry\n](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099328/Blog/Content%20Images/Blog/Content%20Images/pass-1_aHR0cHM6_1750099328577.png)\n\n\u003Ccenter>\u003Ci>GitLab Runner with internet access successfully pulling from external registry\u003C/i>\u003C/center>\n\n\u003Cbr>\u003C/br>\nHowever, If I disable internet access to the VM running GitLab, the `secret-detection` job will fail to download the container image, causing the job to fail:\n\n![GitLab Runner without internet access failing to pull from external registry](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099328/Blog/Content%20Images/Blog/Content%20Images/fail-1_aHR0cHM6_1750099328577.png)\n\n\u003Ccenter>\u003Ci>GitLab Runner without internet access failing to pull from external registry\u003C/i>\u003C/center>\n\u003Cbr>\u003C/br>\n\nAlternatively, if I set my GitLab Runners’ pull image policy to `if-not-present` from `always`, I can load the cached version of the scanner if it was run before on the internet by using the image stored in our local docker:\n\n![GitLab Runner without internet access successfully pulling from internal registry cache](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099329/Blog/Content%20Images/Blog/Content%20Images/pass-2_aHR0cHM6_1750099328579.png)\n\n\u003Ccenter>\u003Ci>GitLab Runner without internet access successfully pulling from internal registry cache\u003C/i>\u003C/center>\n\n\u003Cbr>\u003C/br>\n\n### Setting up offline scanning prerequisites\n\nRunning these security scanners in an 
air-gapped environment requires the following:\n\n* [GitLab Ultimate subscription](https://about.gitlab.com/pricing/ultimate/)  \n* [Offline cloud license](https://about.gitlab.com/pricing/licensing-faq/cloud-licensing/#offline-cloud-licensing)  \n* GitLab Self-Managed cluster\n\nYou can follow along with this tutorial in any GitLab Self-Managed EE instance (even those that are not air-gapped) to learn how to transfer and run images in an air-gapped environment. In this tutorial, I will demonstrate how to load scanner images onto a GitLab-EE instance running in a Google Compute VM where I cut off the `EGRESS` to everything by implementing firewall rules:\n\n```bash\n# egress firewall rule to block all outbound traffic to the internet\n$ gcloud compute firewall-rules create deny-internet-egress \\\n    --direction=EGRESS \\\n    --priority=1000 \\\n    --network=default \\\n    --action=DENY \\\n    --rules=all \\\n    --destination-ranges=0.0.0.0/0 \\\n    --target-tags=no-internet\n\n# Create an allow rule for internal traffic with higher priority\n$ gcloud compute firewall-rules create allow-internal-egress \\\n    --direction=EGRESS \\\n    --priority=900 \\\n    --network=default \\\n    --action=ALLOW \\\n    --rules=all \\\n    --destination-ranges=10.0.0.0/8,192.168.0.0/16,172.16.0.0/12 \\\n    --target-tags=no-internet\n\n# Apply tag to VM\n$ gcloud compute instances add-tags YOUR_VM_NAME \\\n    --zone=YOUR_ZONE \\\n    --tags=no-internet\n```\n\nThen, once I SSH into my VM, you can see we cannot connect to registry.gitlab.com:\n\n```bash\n# showing I can’t access the gitlab container registry\n$ ping registry.gitlab.com\nPING registry.gitlab.com (35.227.35.254) 56(84) bytes of data.\n^C\n--- registry.gitlab.com ping statistics ---\n3 packets transmitted, 0 received, 100% packet loss, time 2031ms\n```\n\n**Note:** I am still allowing ingress so I can copy files and SSH into the machine.\n\n## Load security scanners in air-gapped environments\n\nTo use the 
various security scanners on air-gapped environments, the GitLab Runner must be able to fetch the scanner container images from GitLab’s built-in container registry. This means that the container images for the security scanners must be downloaded and packaged in a separate environment with access to the public internet. The process of loading security scanners onto an air-gapped environment includes the following:\n\n1. Download and package container images from the public internet.\n2. Transfer images to offline environment.\n3. Load transferred images into offline container registry.\n\nNow let’s go over how we can implement GitLab Secret Detection in an air-gapped environment.\n\n### Download and package container images from public internet\n\nLet’s download the container image for secret detection and store it within our local container registry. Other scanner images can be found in the [offline deployments documentation](https://docs.gitlab.com/ee/user/application_security/offline_deployments/). I will be using Podman desktop to download these images, but you can use Docker desktop or other alternatives.\n\n1. 
Pull the GitLab Secret Detection image.\n\n```bash\n$ podman pull registry.gitlab.com/security-products/secrets:6\nTrying to pull registry.gitlab.com/security-products/secrets:6...\nGetting image source signatures\nCopying blob sha256:999745130ac045f2b1c29ecce088b43fc4a95bbb82b7960fb7b8abe0e3801bf8\nCopying blob sha256:a4f7c013bb259c146cd8455b7c3943df7ed84b157e42a2348eef16546d8179b1\nCopying blob sha256:1f3e46996e2966e4faa5846e56e76e3748b7315e2ded61476c24403d592134f0\nCopying blob sha256:400a41f248eb3c870bd2b07073632c49f1e164c8efad56ea3b24098a657ec625\nCopying blob sha256:9090f17a5a1bb80bcc6f393b0715210568dd0a7749286e3334a1a08fb32d34e6\nCopying blob sha256:c7569783959081164164780f6c1b0bbe1271ee8d291d3e07b2749ae741621ea3\nCopying blob sha256:20c7ca6108f808ad5905f6db4f7e3c02b21b69abdea8b45abfa34c0a2ba8bdb5\nCopying blob sha256:e8645a00be64d77c6ff301593ce34cd8c17ffb2b36252ca0f2588009a7918d2e\nCopying config sha256:0235ed43fc7fb2852c76e2d6196601968ae0375c72a517bef714cd712600f894\nWriting manifest to image destination\nWARNING: image platform (linux/amd64) does not match the expected platform (linux/arm64)\n0235ed43fc7fb2852c76e2d6196601968ae0375c72a517bef714cd712600f894\n\n$ podman images\nREPOSITORY                                                  TAG         IMAGE ID      CREATED      SIZE\nregistry.gitlab.com/security-products/secrets               6           0235ed43fc7f  4 hours ago  85.3 MB\n```\n\n2. 
Save the image as a tarball.\n\n```bash\n$ podman save -o secret-detection.tar registry.gitlab.com/security-products/secrets:6\n$ chmod +r secret-detection.tar\n$ ls -al secret-detection.tar\n-rw-r--r--@ 1 fern  staff  85324800 Jan 10 10:25 secret-detection.tar\n```\n\nAlternatively, you can use the [official GitLab template](https://docs.gitlab.com/ee/user/application_security/offline_deployments/#using-the-official-gitlab-template) on an environment with internet access to download the container images needed for the security scanners and save them as job artifacts or push them to the container registry of the project where the pipeline is executed. \n\n### Transfer images to offline environment\n\nNext, let's transfer the tarball to our air-gapped environment. This can be done in several ways, depending on your needs, such as:\n\n* Physical media transfer  \n* Data diodes  \n* Guard systems  \n* Cross-domain solutions (CDS) \n\nI will SCP (Secure Copy Protocol) the tarball directly to my VM that does not have egress access, but does allow ingress. 
As this is just for demonstration purposes, make sure to consult your organization's security policies and transfer procedures for air-gapped environments.\n\n#### Verify the image is not cached\n\nBefore transferring the file, I’ll delete the Docker images on my GitLab instance pertaining to secret detection to make sure they aren't cached:\n\n```bash\n$ docker images\nREPOSITORY                                                          TAG              IMAGE ID       CREATED        SIZE\nregistry.gitlab.com/security-products/secrets                       6                0235ed43fc7f   9 hours ago    84.8MB\nregistry.gitlab.com/security-products/secrets                       \u003Cnone>           16d88433af61   17 hours ago   74.9MB\n\n$ docker image rmi 16d88433af61 -f\nUntagged: registry.gitlab.com/security-products/secrets@sha256:f331da6631d791fcd58d3f23d868475a520f50b02d64000e2faf1def66c75d48\nDeleted: sha256:16d88433af618f0b405945031de39fe40b3e8ef1bddb91ca036de0f5b32399d7\nDeleted: sha256:1bb06f72f06810e95a70039e797481736e492201f51a03b02d27db055248ab6f\nDeleted: sha256:a5ef2325ce4be9b39993ce301f8ed7aad1c854d7ee66f26a56a96967c6606510\nDeleted: sha256:f7cdac818a36d6c023763b76a6589c0db7609ca883306af4f38b819e62f29471\nDeleted: sha256:5eabf4d47287dee9887b9692d55c8b5f848b50b3b7248f67913036014e74a0e9\nDeleted: sha256:51b7cb600604c0737356f17bc02c22bac3a63697f0bf95ba7bacb5b421fdb7da\nDeleted: sha256:1546193b011d192aa769a15d3fdd55eb4e187f201f5ff7506243abb02525dc06\nDeleted: sha256:1ea72408d0484c3059cc0008539e6f494dc829caa1a97d156795687d42d9cb57\nDeleted: sha256:1313ee9da7716d85f63cfdd1129f715e9bbb6c9c0306e4708ee73672b3e40f26\nDeleted: sha256:954ebfd83406f0dfed93eb5157ba841af5426aa95d4054174fff45095fd873a1\n\n$ docker image rmi 0235ed43fc7f -f\nUntagged: registry.gitlab.com/security-products/secrets:6\nDeleted: sha256:0235ed43fc7fb2852c76e2d6196601968ae0375c72a517bef714cd712600f894\nDeleted: 
sha256:f05f85850cf4fac79e279d93afb6645c026de0223d07b396fce86c2f76096c1f\nDeleted: sha256:7432b0766b885144990edd3166fbabed081be71d28d186f4d525e52729f06b1f\nDeleted: sha256:2c6e3361c2ee2f43bd75fb9c7c12d981ce06df2d51a134965fa47754760efff0\nDeleted: sha256:7ad7f7245b45fbe758ebd5788e0ba268a56829715527a9a4bc51708c21af1c7f\nDeleted: sha256:3b73a621115a59564979f41552181dce07f3baa17e27428f7fff2155042a1901\nDeleted: sha256:78648c2606a7c4c76885806ed976b13e4d008940bd3d7a18b52948a6be71b60d\nDeleted: sha256:383d4a6dc5be9914878700809b4a3925379c80ab792dfe9e79d14b0c1d6b5fad\n```\n\nThen I'll rerun the job to show the failure:\n\n![GitLab Runner without internet access fails to pull an image from internal registry cache](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099328/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750099328580.png)\n\n\u003Ccenter>\u003Ci>GitLab Runner without internet access fails to pull an image from internal registry cache\u003C/i>\u003C/center>\n\n#### SCP file to GitLab instance\n\nNow, from my local machine, I will SCP the file to my GitLab instance as follows:\n\n```bash\n$ gcloud compute scp secret-detection.tar INSTANCE:~ --zone=ZONE\nsecret-detection.tar                                                          100%   81MB  21.5MB/s   00:03\n```\n\n### Load transferred images into offline container registry\n\nNext, I'll SSH into my VM and load the Docker image:\n\n```bash\n$ gcloud compute ssh INSTANCE --zone=ZONE\n\n$ sudo docker load -i secret-detection.tar\nc3c8e454c212: Loading layer [==================================================>]  2.521MB/2.521MB\n51e93afaeedc: Loading layer [==================================================>]  32.55MB/32.55MB\ne8a25e39bb30: Loading layer [==================================================>]  221.2kB/221.2kB\n390704968493: Loading layer [==================================================>]  225.8kB/225.8kB\n76cf57e75f63: Loading layer 
[==================================================>]  17.64MB/17.64MB\nc4c7a681fd10: Loading layer [==================================================>]  4.608kB/4.608kB\nf0690f406157: Loading layer [==================================================>]  24.01MB/24.01MB\nLoaded image: registry.gitlab.com/security-products/secrets:6\n```\n\n### Run the scanners\n\nI'll [re-run the pipeline manually](https://docs.gitlab.com/ee/ci/pipelines/#run-a-pipeline-manually) and the scanner will be pulled from the cache. Once the pipeline completes, we can see the secret detection job is successful:\n\n![GitLab Runner without internet access successfully pulling from internal registry cache after image loaded](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099328/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750099328581.png)\n\n\u003Ccenter>\u003Ci>GitLab Runner without internet access successfully pulling from internal registry cache after image loaded\u003C/center>\u003C/i>\n\nIf you want to pull the image from a different location or you tag your images in a different way, you can edit the config as follows:\n\n```yaml\ninclude:\n  - template: Jobs/Secret-Detection.gitlab-ci.yml\n\nvariables:\n  SECURE_ANALYZERS_PREFIX: \"localhost:5000/analyzers\"\n```\n\nSee the [offline environments documentation](https://docs.gitlab.com/ee/user/application_security/offline_deployments/) for more information.\n\n### View scanner results\n\nOnce the scanner completes on the default branch, a vulnerability report is populated with all the findings. 
The vulnerability report provides information about vulnerabilities from scans of the default branch.\n\nYou can access the vulnerability report by navigating to the side tab and selecting **Secure > Vulnerability Report**:\n\n![GitLab Vulnerability Report with secret detection findings](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099328/Blog/Content%20Images/Blog/Content%20Images/vulnerability_report_aHR0cHM6_1750099328581.png)\n\n\u003Ccenter>\u003Ci>GitLab Vulnerability Report with secret detection findings\u003C/i>\u003C/center>\n\n\u003Cbr>\u003C/br>\n\nThe project’s vulnerability report provides:\n- totals of vulnerabilities per severity level\n- filters for common vulnerability attributes\n- details of each vulnerability, presented in tabular layout\n- a timestamp showing when it was updated, including a link to the latest pipeline\n\nWe can see that two vulnerabilities were detected by the Secret Detection scanner. If we click on a vulnerability, we will be transported to its vulnerability page:\n\n![GitLab Vulnerability Page showing detailed insights](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750099329/Blog/Content%20Images/Blog/Content%20Images/insights_aHR0cHM6_1750099328582.png)\n\n\u003Ccenter>\u003Ci>GitLab Vulnerability Page showing detailed insights\u003C/center>\u003C/i>\n\n\u003Cbr>\u003C/br>\n\nThe vulnerability page provides details of the vulnerability, which can be used to triage and find a path to remediation. 
These vulnerability details include:\n- description\n- when it was detected\n- current status\n- available actions\n- linked issues\n- actions log\n- filename and line number of the vulnerability (if available)\n- severity\n\n## Read more\n\nTo learn more about GitLab and running security scanners in air-gapped environments, check out the following resources:\n\n* [GitLab Ultimate](https://about.gitlab.com/pricing/ultimate/)  \n* [GitLab Security and Compliance Solutions](https://about.gitlab.com/solutions/security-compliance/)  \n* [GitLab Offline Deployments Documentation](https://docs.gitlab.com/ee/user/application_security/offline_deployments/)  \n* [GitLab Application Security Documentation](https://docs.gitlab.com/ee/user/application_security/)\n",[9,814,183,478,680],{"slug":4336,"featured":90,"template":684},"tutorial-security-scanning-in-air-gapped-environments","content:en-us:blog:tutorial-security-scanning-in-air-gapped-environments.yml","Tutorial Security Scanning In Air Gapped Environments","en-us/blog/tutorial-security-scanning-in-air-gapped-environments.yml","en-us/blog/tutorial-security-scanning-in-air-gapped-environments",{"_path":4342,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4343,"content":4348,"config":4353,"_id":4355,"_type":13,"title":4356,"_source":15,"_file":4357,"_stem":4358,"_extension":18},"/en-us/blog/ultimate-guide-to-ci-cd-fundamentals-to-advanced-implementation",{"title":4344,"description":4345,"ogTitle":4344,"ogDescription":4345,"noIndex":6,"ogImage":1137,"ogUrl":4346,"ogSiteName":669,"ogType":670,"canonicalUrls":4346,"schema":4347},"Ultimate guide to CI/CD: Fundamentals to advanced implementation","Learn how to modernize continuous integration/continuous deployment, including automating the development, delivery, and security of pipelines.","https://about.gitlab.com/blog/ultimate-guide-to-ci-cd-fundamentals-to-advanced-implementation","\n                        {\n        \"@context\": \"https://schema.org\",\n        
\"@type\": \"Article\",\n        \"headline\": \"Ultimate guide to CI/CD: Fundamentals to advanced implementation\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Sandra Gittlen\"}],\n        \"datePublished\": \"2025-01-06\"\n      }",{"title":4344,"description":4345,"authors":4349,"heroImage":1137,"date":4350,"body":4351,"category":1103,"tags":4352},[4123],"2025-01-06","Continuous integration/continuous delivery ([CI/CD](https://about.gitlab.com/topics/ci-cd/)) has revolutionized how software teams create value for their users. Gone are the days of manual deployments and integration headaches — modern development demands automation, reliability, and speed.\n\nAt its core, CI/CD is about creating a seamless pipeline that takes code from a developer's environment all the way to production and incorporates feedback in real time. [CI](https://about.gitlab.com/topics/ci-cd/benefits-continuous-integration/) helps teams catch issues early — before they become costly problems — by ensuring that code changes are frequently merged into a shared repository, automatically tested, and validated. [CD](https://about.gitlab.com/topics/ci-cd/#what-is-continuous-delivery-cd) extends this by automating deployments, making releases predictable and stress-free.\n\nRather than relying on manual processes and complex toolchains for software development, teams can use a robust CI/CD pipeline to build, test, and deploy software. And AI can streamline the process even further, automatically engineering CI/CD pipelines for consistent quality, compliance, and security checks.\n\nThis guide explains modern CI/CD pipelines, from basic principles to best practices to advanced strategies. You'll also discover how leading organizations use CI/CD for impactful results. 
What you learn in this guide will help you scale your DevSecOps environment to develop and deliver software in an [agile](https://about.gitlab.com/topics/ci-cd/continuous-integration-agile/), automated, and efficient manner.\n\nWhat you'll learn:\n- [What is continuous integration?](#what-is-continuous-integration%3F)\n- [What is continuous delivery?](#what-is-continuous-delivery%3F)\n- [How source code management relates to CI/CD](#how-source-code-management-relates-to-cicd)\n- [The benefits of CI/CD in modern software development](#the-benefits-of-cicd-in-modern-software-development)\n  - [Key differences between CI/CD and traditional development](#key-differences-between-cicd-and-traditional-development)\n- [Understanding CI/CD fundamentals](#understanding-cicd-fundamentals)\n  - [What is a CI/CD pipeline?](#what-is-a-cicd-pipeline%3F)\n- [Best practices for CI/CD implementation and management](#best-practices-for-cicd-implementation-and-management)\n  - [CI best practices](#ci-best-practices)\n  - [CD best practices](#cd-best-practices)\n- [How to get started with CI/CD](#how-to-get-started-with-cicd)\n- [Security, compliance, and CI/CD](#security-compliance%2C-and-cicd)\n- [CI/CD and the cloud](#cicd-and-the-cloud)\n- [Advanced CI/CD](#advanced-cicd)\n  - [Reuse and automation in CI/CD](#reuse-and-automation-in-cicd)\n  - [Troubleshooting pipelines with AI](#troubleshooting-pipelines-with-ai)\n- [How to migrate to GitLab CI/CD](#how-to-migrate-to-gitlab-cicd)\n- [Lessons from leading organizations](#lessons-from-leading-organizations)\n- [CI/CD tutorials](#cicd-tutorials)\n\n## What is continuous integration?\n\n[Continuous integration](https://about.gitlab.com/topics/ci-cd/benefits-continuous-integration/) (CI) is the practice of integrating all your code changes into the main branch of a shared source code repository early and often, automatically testing changes when you commit or merge them, and automatically kicking off a build. 
With continuous integration, teams can identify and fix errors and security issues more easily and much earlier in the development process.\n\n## What is continuous delivery?\n[Continuous delivery](https://about.gitlab.com/topics/ci-cd/#what-is-continuous-delivery-cd) (CD) – sometimes called _continuous deployment_ – enables organizations to deploy their applications automatically, allowing more time for developers to focus on monitoring deployment status and assure success. With continuous delivery, DevSecOps teams set the criteria for code releases ahead of time and when those criteria are met and validated, the code is deployed into the production environment. This allows organizations to be more nimble and get new features into the hands of users faster. \n\n## How source code management relates to CI/CD\n\nSource code management ([SCM](https://about.gitlab.com/solutions/source-code-management/)) and CI/CD form the foundation of modern software development practices. SCM systems like [Git](https://about.gitlab.com/blog/what-is-git-the-ultimate-guide-to-gits-role-and-functionality/) provide a centralized way to track changes, manage different versions of code, and facilitate collaboration among team members. When developers work on new features or bug fixes, they create branches from the main codebase, make their changes, and then [merge them through merge requests](https://docs.gitlab.com/ee/user/project/merge_requests/). This branching strategy allows multiple developers to work simultaneously without interfering with each other's code, while maintaining a stable main branch that always contains production-ready code.\n\nCI/CD takes the code managed by SCM systems and automatically builds, tests, and validates it whenever changes are pushed. When a developer submits their code changes, the CI/CD system automatically retrieves the latest code, combines it with the existing codebase, and runs through a series of automated checks. 
These typically include compiling the code, running unit tests, performing static code analysis, and checking code coverage. If any of these steps fail, the team is immediately notified, allowing them to address issues before they impact other developers or make their way to production. This tight integration between source control and continuous integration creates a feedback loop that helps maintain code quality and prevents integration problems from accumulating.\n\n## The benefits of CI/CD in modern software development\n\n[CI/CD brings transformative benefits to modern software development](https://about.gitlab.com/blog/ten-reasons-why-your-business-needs-ci-cd/) by dramatically reducing the time and risk associated with delivering new features and fixes. The continuous feedback loop gives DevSecOps teams confidence their changes are automatically validated against the entire codebase. The result is higher quality software, faster delivery times, and more frequent releases that can quickly respond to user needs and market demands.\n\nPerhaps most importantly, CI/CD fosters a culture of collaboration and transparency within software development teams. When everyone can see the status of builds, tests, and deployments in real time, it becomes easier to identify and resolve bottlenecks in the delivery process. The automation provided by CI/CD also reduces the cognitive load on developers, freeing them to focus on writing code rather than managing manual deployment processes. This leads to improved developer satisfaction and productivity, while also reducing the risk traditionally associated with the entire software release process. Teams can experiment more freely knowing rapid code reviews are part of the process and they can quickly roll back changes if needed, which encourages innovation and continuous improvement.\n\n> Get started with GitLab CI/CD. 
[Sign up for GitLab Ultimate](https://about.gitlab.com/free-trial/devsecops/) and try the AI-powered DevSecOps platform free for 60 days.\n\n### Key differences between CI/CD and traditional development\n\nCI/CD differs from traditional software development in many ways, including:\n\n**Frequent code commits**\n\nDevelopers often work independently and infrequently upload their code to a main codebase, causing merge conflicts and other time-consuming issues. With CI/CD, developers push commits throughout the day, ensuring that conflicts are caught early and the codebase remains up to date.\n\n**Reduced risk**\n\nLengthy testing cycles and extensive pre-release planning are hallmarks of traditional software development. This is done to minimize risk but often hinders the ability to find and fix problems. Risk is managed in CI/CD by applying small, incremental changes that are closely monitored and easily reverted.\n\n**Automated and continuous testing**\n\nIn traditional software development, testing is done once development is complete. However, this causes problems, including delayed delivery and costly bug fixes. CI/CD supports automated testing that occurs continuously throughout development, sparked by each code commit. Developers also receive feedback they can take fast action on.\n\n**Automated, repeatable, and frequent deployments**\n\nWith CI/CD, deployments are automated processes that reduce the typical stress and effort associated with big software rollouts. The same deployment process can be repeated across environments, which saves time and reduces errors and inconsistencies.\n\n## Understanding CI/CD fundamentals\n\nCI/CD serves as a framework for building scalable, maintainable delivery processes, so it's critical for DevSecOps teams to firmly grasp its core concepts. A solid understanding of CI/CD principles enables teams to adapt strategies and practices as technology evolves, rather than being tied to legacy approaches. 
Here are some of the basics.\n\n### What is a CI/CD pipeline?\n\nA [CI/CD pipeline](https://about.gitlab.com/topics/ci-cd/cicd-pipeline/) is a series of steps, such as build, test, and deploy, that automate and streamline the software delivery process. [Each stage serves as a quality gate](https://about.gitlab.com/blog/guide-to-ci-cd-pipelines/), ensuring that only validated code moves forward. Early stages typically handle basic checks like compilation and unit testing, while later stages may include integration testing, performance testing, compliance testing, and staged deployments to various environments.\n\nThe pipeline can be configured to require manual approvals at critical points, such as before deploying to production, while automating routine tasks and providing quick feedback to developers about the health of their changes. This structured approach ensures consistency, reduces human error, and provides a clear audit trail of how code changes move from development to production. Modern pipelines are often implemented as code, allowing them to be version controlled, tested, and maintained just like application code.\n\nThese are other terms associated with CI/CD that are important to know:\n- **Commit:** a code change\n- **Job:** instructions a runner has to execute\n- **Runner:** an agent or server that executes each job individually that can spin up or down as needed\n- **Stages:** a keyword that defines certain job stages, such as \"build\" and \"deploy.\" Jobs of the same stage are executed in parallel. 
Pipelines are configured using a version-controlled YAML file, `.gitlab-ci.yml`, at the root level of a project.\n\n![CI/CD pipeline diagram](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749673928/Blog/Content%20Images/1690824533476.png)\n\n## Best practices for CI/CD implementation and management\n\nHow successful you are with CI/CD depends greatly on the [best practices](https://about.gitlab.com/blog/how-to-keep-up-with-ci-cd-best-practices/) you implement. \n\n#### CI best practices\n\n* Commit early, commit often.\n* Optimize pipeline stages.\n* Make builds fast and simple.\n* Use failures to improve processes.\n* Make sure the test environment mirrors production.\n\n#### CD best practices\n\n* Start where you are – you can always iterate.\n* Understand the best continuous delivery is done with minimal tools.\n* Track what’s happening so issues and merge requests don't get out of hand.\n* Streamline user acceptance testing and staging with automation.\n* Manage the release pipeline through automation.\n* Implement monitoring for visibility and efficiency. \n\n> ### Bookmark this!\n>\n>Watch our [\"Intro to CI/CD\" webinar](https://www.youtube.com/watch?v=sQ7Nw3o0izc)!\n>\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://www.youtube.com/embed/sQ7Nw3o0izc?si=3HpNqIClrc2ncr7Y\" title=\"Intro to CI/CD webinar\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## How to get started with CI/CD\n\nGetting started with CI/CD begins with identifying a simple but representative project to serve as your pilot. Choose a straightforward application with basic testing requirements, as this allows you to focus on learning the pipeline mechanics rather than dealing with complex deployment scenarios. 
Begin by ensuring your code is in [version control](https://about.gitlab.com/topics/version-control/) and has some [basic automated tests](https://about.gitlab.com/blog/develop-c-unit-testing-with-catch2-junit-and-gitlab-ci/) — even a few unit tests will suffice. The goal is to [create a minimal pipeline](https://about.gitlab.com/blog/how-to-learn-ci-cd-fast/) that you can gradually enhance as your understanding grows.\n\nFor GitLab specifically, the process starts with creating a `.gitlab-ci.yml` file in your project's root directory. This YAML file defines your pipeline stages (basic ones like build, test, and deploy) and jobs. A simple pipeline might look like this: The build stage compiles your code and creates artifacts, the test stage runs your unit tests, and the deploy stage pushes your application to a staging environment. GitLab will automatically detect this file and start running your pipeline whenever changes are pushed to your repository. The platform provides [built-in runners](https://docs.gitlab.com/runner/) to execute your pipeline jobs, though you can also set up your own runners for more control.\n\nAs you become comfortable with the basics, gradually add more sophisticated elements to your pipeline. This might include adding code quality checks, [security scanning](https://docs.gitlab.com/ee/user/application_security/#security-scanning), or automated deployment to production. GitLab's DevSecOps platform includes features like [compliance management](https://about.gitlab.com/blog/meet-regulatory-standards-with-gitlab/), [deployment variables](https://about.gitlab.com/blog/demystifying-ci-cd-variables/), and manual approval gates that you can incorporate as your pipeline matures. Pay attention to pipeline execution time and look for opportunities to run jobs in parallel where possible. Remember to add proper error handling and notifications so team members are promptly alerted of any pipeline failures. 
Start documenting common issues and solutions as you encounter them — this will become invaluable as your team grows.\n\n> ### Want to learn more about getting started with CI/CD? Register for a [free CI/CD course on GitLab University](https://university.gitlab.com/courses/continuous-integration-and-delivery-ci-cd-with-gitlab).\n\n## Security, compliance, and CI/CD\n\nOne of the greatest advantages of CI/CD is the ability to embed security and compliance checks early and often in the software development lifecycle. In GitLab, teams can use the `.gitlab-ci.yml` configuration to automatically trigger security scans at multiple stages, from initial code commit to production deployment. The platform's container scanning, dependency scanning, and security scanning capabilities ([Dynamic Application Security Testing](https://docs.gitlab.com/ee/user/application_security/dast/) and [Advanced SAST](https://about.gitlab.com/blog/gitlab-advanced-sast-is-now-generally-available/)) can be configured to run automatically with each code change, checking for vulnerabilities, compliance violations, and security misconfigurations. The platform's API enables integration with [external security tools](https://about.gitlab.com/blog/integrate-external-security-scanners-into-your-devsecops-workflow/), while the test coverage features ensure security tests meet required thresholds.\n\nGitLab's security test reports provide detailed information about findings, enabling quick remediation of security issues before they reach production. The Security Dashboard provides a centralized view of vulnerabilities across projects, while [security policies can be enforced](https://about.gitlab.com/blog/how-gitlab-supports-the-nsa-and-cisa-cicd-security-guidance/) through merge request approvals and pipeline gates. 
In addition, GitLab provides multiple layers of secrets management to protect sensitive information throughout the CI/CD process, audit logs to track access to secrets, and role-based access control (RBAC) to ensure only authorized users can view or modify sensitive configuration data.\n\nGitLab also supports software bill of materials ([SBOM](https://about.gitlab.com/blog/the-ultimate-guide-to-sboms/)) generation, providing a comprehensive inventory of all software components, dependencies, and licenses in an application and enabling teams to quickly identify and respond to vulnerabilities and comply with regulatory mandates.\n\n## CI/CD and the cloud\n\nGitLab's CI/CD platform provides robust integration with major cloud providers including [Amazon Web Services](https://about.gitlab.com/partners/technology-partners/aws/), [Google Cloud Platform](https://about.gitlab.com/blog/provision-group-runners-with-google-cloud-platform-and-gitlab-ci/), and [Microsoft Azure](https://docs.gitlab.com/ee/install/azure/), enabling teams to automate their cloud deployments directly from their pipelines. Through GitLab's cloud integrations, teams can manage cloud resources, deploy applications, and monitor cloud services all within the GitLab interface. The platform's built-in cloud deployment templates and [Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/) features significantly reduce the complexity of cloud deployments, allowing teams to focus on application development rather than infrastructure management. For organizations that want to automate their IT infrastructure using GitOps, GitLab has a [Flux CD integration](https://about.gitlab.com/blog/why-did-we-choose-to-integrate-fluxcd-with-gitlab/).\n\nGitLab's cloud capabilities extend beyond basic deployment automation. 
The platform's [Kubernetes integration](https://about.gitlab.com/blog/kubernetes-overview-operate-cluster-data-on-the-frontend/) enables teams to manage container orchestration across multiple cloud providers, while the [cloud native GitLab installation options](https://about.gitlab.com/topics/ci-cd/cloud-native-continuous-integration/) allow the platform itself to run in cloud environments. Through GitLab's cloud-native features, teams can implement auto-scaling runners that dynamically provision cloud resources for pipeline execution, optimizing costs and performance. The platform's integration with cloud provider security services ensures that security and compliance requirements are met throughout the deployment process.\n\nFor multi-cloud environments, GitLab provides consistent workflows and tooling regardless of the underlying cloud provider. Teams can use GitLab's environment management features to handle different cloud configurations across development, staging, and production environments. The platform's [infrastructure as code](https://docs.gitlab.com/ee/user/infrastructure/iac/) support, particularly its native integration with Terraform, enables teams to version control and automate their cloud infrastructure provisioning. GitLab's monitoring and observability features integrate with cloud provider metrics, providing comprehensive visibility into application and infrastructure health across cloud environments.\n\n## Advanced CI/CD \nCI/CD has evolved far beyond simple build and deploy pipelines. In advanced implementations, CI/CD involves sophisticated orchestration of automated testing, security scanning, infrastructure provisioning, AI, and more. 
Here are a few advanced CI/CD strategies that can help engineering teams scale their pipelines and troubleshoot issues even as architectural complexity grows.\n\n### Reuse and automation in CI/CD\n\nGitLab is transforming how development teams create and manage CI/CD pipelines with two major innovations: the [CI/CD Catalog](https://about.gitlab.com/blog/ci-cd-catalog-goes-ga-no-more-building-pipelines-from-scratch/) and [CI/CD steps](https://about.gitlab.com/blog/introducing-ci-cd-steps-a-programming-language-for-devsecops-automation/), a new programming language for DevSecOps automation currently in experimental phase. The CI/CD Catalog is a centralized platform where developers can discover, reuse, and contribute CI/CD components. Components function as reusable, single-purpose building blocks that simplify pipeline configuration — similar to Lego pieces for CI/CD workflows. Meanwhile, CI/CD steps support complex workflows by allowing developers to compose inputs and outputs for a CI/CD job. With the CI/CD Catalog and CI/CD steps, DevSecOps teams can easily standardize CI/CD and its components, simplifying the process of developing and maintaining CI/CD pipelines.\n\n> Learn more in our [CI/CD Catalog FAQ](https://about.gitlab.com/blog/faq-gitlab-ci-cd-catalog/) and [CI/CD steps documentation](https://docs.gitlab.com/ee/ci/steps/).\n\n### Troubleshooting pipelines with AI\n\nWhile CI/CD pipelines can and do break, troubleshooting the issue quickly can minimize the impact. GitLab Duo Root Cause Analysis, part of a suite of AI-powered features, removes the guesswork by [determining the root cause for a failed CI/CD pipeline](https://about.gitlab.com/blog/quickly-resolve-broken-ci-cd-pipelines-with-ai/). When a pipeline fails, GitLab provides detailed job logs, error messages, and execution traces that show exactly where and why the failure occurred. 
Root Cause Analysis then uses AI to suggest a fix.\nWatch GitLab Duo Root Cause Analysis in action:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n\u003Ciframe src=\"https://www.youtube.com/embed/sTpSLwX5DIs?si=J6-0Bf6PtYjrHX1K\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n## How to migrate to GitLab CI/CD\n\nMigrating to the DevSecOps platform and its built-in CI/CD involves a systematic approach of analyzing your existing pipeline configurations, dependencies, and deployment processes to map them to GitLab's equivalent features and syntax. Use these guides to help make the move.\n\n* [How to migrate from Bamboo to GitLab CI/CD](https://about.gitlab.com/blog/migrating-from-bamboo-to-gitlab-cicd/)\n* [Jenkins to GitLab: The ultimate guide to modernizing your CI/CD environment](https://about.gitlab.com/blog/jenkins-gitlab-ultimate-guide-to-modernizing-cicd-environment/)\n* [GitHub to GitLab migration the easy way](https://about.gitlab.com/blog/github-to-gitlab-migration-made-easy/)\n\n## Lessons from leading organizations\n\nThese leading organizations migrated to GitLab and are enjoying the myriad benefits of CI/CD. 
Read their stories.\n\n- [Lockheed Martin](https://about.gitlab.com/customers/lockheed-martin/)\n- [Indeed](https://about.gitlab.com/blog/how-indeed-transformed-its-ci-platform-with-gitlab/)\n- [CARFAX](https://about.gitlab.com/customers/carfax/)\n- [HackerOne](https://about.gitlab.com/customers/hackerone/)\n- [Betstudios](https://about.gitlab.com/blog/betstudios-cto-on-improving-ci-cd-capabilities-with-gitlab-premium/)\n- [Thales and Carrefour](https://about.gitlab.com/blog/how-carrefour-and-thales-are-evolving-their-ci-cd-platforms/)\n\n## CI/CD tutorials\n\nBecome a CI/CD expert with these easy-to-follow tutorials.\n\n* [Basics of CI: How to run jobs sequentially, in parallel, or out of order](https://about.gitlab.com/blog/basics-of-gitlab-ci-updated/)\n* [How to set up your first GitLab CI/CD component](https://about.gitlab.com/blog/tutorial-how-to-set-up-your-first-gitlab-ci-cd-component/)\n* [Building a GitLab CI/CD pipeline for a monorepo the easy way](https://about.gitlab.com/blog/building-a-gitlab-ci-cd-pipeline-for-a-monorepo-the-easy-way/)\n* [Using child pipelines to continuously deploy to five environments](https://about.gitlab.com/blog/using-child-pipelines-to-continuously-deploy-to-five-environments/)\n* [CI/CD automation: Maximize 'deploy freeze' impact across GitLab groups](https://about.gitlab.com/blog/ci-cd-automation-maximize-deploy-freeze-impact-across-gitlab-groups/)\n* [Refactoring a CI/CD template to a CI/CD component](https://about.gitlab.com/blog/refactoring-a-ci-cd-template-to-a-ci-cd-component/)\n* [Annotate container images with build provenance using Cosign in GitLab CI/CD](https://about.gitlab.com/blog/annotate-container-images-with-build-provenance-using-cosign-in-gitlab-ci-cd)\n\n> #### Get started with GitLab CI/CD. 
[Sign up for GitLab Ultimate](https://about.gitlab.com/free-trial/devsecops/) and try the AI-powered DevSecOps platform free for 60 days.",[108,835,478,9,814,678],{"slug":4354,"featured":90,"template":684},"ultimate-guide-to-ci-cd-fundamentals-to-advanced-implementation","content:en-us:blog:ultimate-guide-to-ci-cd-fundamentals-to-advanced-implementation.yml","Ultimate Guide To Ci Cd Fundamentals To Advanced Implementation","en-us/blog/ultimate-guide-to-ci-cd-fundamentals-to-advanced-implementation.yml","en-us/blog/ultimate-guide-to-ci-cd-fundamentals-to-advanced-implementation",{"_path":4360,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4361,"content":4367,"config":4371,"_id":4373,"_type":13,"title":4374,"_source":15,"_file":4375,"_stem":4376,"_extension":18},"/en-us/blog/ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab",{"title":4362,"description":4363,"ogTitle":4362,"ogDescription":4363,"noIndex":6,"ogImage":4364,"ogUrl":4365,"ogSiteName":669,"ogType":670,"canonicalUrls":4365,"schema":4366},"Ultimate guide to migrating from AWS CodeCommit to GitLab","Learn how to migrate from AWS Services to GitLab and seamlessly integrate with the DevSecOps platform in this comprehensive tutorial.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097810/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%2828%29_4mi0l4wzUa5VI4wtf8gInx_1750097810027.png","https://about.gitlab.com/blog/ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Ultimate guide to migrating from AWS CodeCommit to GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tsukasa Komatsubara\"},{\"@type\":\"Person\",\"name\":\"Darwin Sanoy\"},{\"@type\":\"Person\",\"name\":\"Samer Akkoub\"},{\"@type\":\"Person\",\"name\":\"Bart Zhang\"}],\n        \"datePublished\": \"2024-08-26\",\n      
}",{"title":4362,"description":4363,"authors":4368,"heroImage":4364,"date":2487,"body":4369,"category":678,"tags":4370},[4295,675,4275,3842],"On July 25, 2024, AWS made a significant announcement regarding its CodeCommit service. As detailed in their [official blog post](https://aws.amazon.com/blogs/devops/how-to-migrate-your-aws-codecommit-repository-to-another-git-provider/), AWS has decided to close new customer access to CodeCommit. While existing customers can continue using the service, AWS will not introduce new features, focusing only on security, availability, and performance improvements.\n\nThis announcement has prompted development teams to consider migrating their repositories to alternative Git providers. In light of these changes, we've prepared this comprehensive guide to assist teams in migrating to GitLab and integrating with other AWS services.\n\n**Note:** For more details on AWS's official migration recommendations, please refer to [their blog post](https://aws.amazon.com/blogs/devops/how-to-migrate-your-aws-codecommit-repository-to-another-git-provider/).\n\n## About this guide\n\nThis guide provides comprehensive information for development teams using GitLab who are considering integration with AWS services or planning to migrate from AWS-hosted Git repositories to GitLab.com. 
The guide is structured into three main sections:\n\n- [Parallel migration to GitLab](#section-1-parallel-migration-to-gitlab): Explains how to gradually migrate from existing AWS-hosted repositories to GitLab.com while minimizing risks.\n\n- [Integration with AWS CodeBuild](#section-2-integrating-gitlab-with-aws-codebuild): Provides steps to integrate GitLab repositories with AWS CodeBuild, setting up a powerful continuous integration (CI) environment.\n\n- [Integration with AWS CodePipeline](#section-3-integrating-gitlab-with-aws-codepipeline): Details how to connect GitLab repositories with AWS CodePipeline to build efficient continuous delivery (CD) pipelines.\n\n- [Downstream integrations for CodePipeline and CodeStar Connections](#section-4-migrating-to-gitlab): Explains how to leverage GitLab-AWS connections for widespread service access, unlocking a cascade of integration possibilities across the AWS ecosystem.\n\nThrough this guide, you'll learn how to combine the powerful features of GitLab and AWS to create an efficient and flexible development workflow.\n\n## Section 1: Parallel migration to GitLab \n\nFor those considering migrating Git repositories hosted on AWS to GitLab.com, this section, which is a phased approach, introduces methods to achieve migration while minimizing risks. By leveraging GitLab's mirroring capabilities, you can maintain existing development flows while testing the new environment.\n\n### Why is parallel migration important?\n\nLarge-scale system migrations always involve risks, particularly potential impacts on ongoing development work, existing integrations, and automated processes. Adopting a parallel migration approach offers the following benefits:\n\n1. Risk minimization: Test the new environment while keeping existing systems operational.\n2. Seamless transition: Development teams can gradually acclimate to the new system.\n3. Integration testing: Thoroughly test all integrations and automation in the new environment.\n4. 
Future-proofing: Enable teams to gradually migrate to GitLab CI/CD in parallel to existing CI.\n\nParallel migration is not required if it is already known that you want to cut over directly to GitLab.\n\n### Steps for migrating to GitLab.com\n\n#### Step 1: Get set up on GitLab.com\n\n- Check if your company already has a group in use on GitLab.com and whether they have single sign-on (SSO) set up – if they do, then you will want to use both.\n\n- If your company does not have a presence on GitLab.com, visit [GitLab.com](https://www.gitlab.com) and create a new account or log in to an existing one.\n- Create a new company namespace (a group at the root level of gitlab.com).\n- Pick a name that reflects your entire company (and is not already taken).\n\n#### Step 2: Import repository\nFor parallel migration: Use GitLab's pull mirroring feature to automatically sync changes from AWS-hosted repositories to GitLab.com.\n\n1. Navigate to the target group on GitLab.com.\n2. In the upper right, click \"New project.\"\n3. On the \"Create new project\" page, click \"Import project.\"\n4. On the \"Import project\" page, click \"Repository by URL.\"\n5. Enter the URL of your AWS-hosted repository in the \"Git repository URL\" field.\n6. Underneath the Git repository URL field, check \"Mirror repository.\"\n7. Set up authentication: in the AWS CodeCommit console, select the clone URL for the repository you will migrate. If you plan on importing CodeCommit repositories into GitLab, you can use the HTTPS CodeCommit URL to clone the repository via GitLab Repository Mirroring. You will need to also provide your Git credentials from AWS for your identity and access management (IAM) user within GitLab. 
You can create Git credentials for AWS CodeCommit by following this [AWS guide](https://docs.aws.amazon.com/codecommit/latest/userguide/setting-up-gc.html).\n\n![Clone URL](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/clone-url-screenshot__1__aHR0cHM6_1750097822121.png)\n\nThis setup will automatically pull changes from the AWS-hosted repository to GitLab.com every five minutes by default.\n\nFor more information, read our [repository mirroring documentation](https://docs.gitlab.com/ee/user/project/repository/mirror/).\n\n#### Step 3: Test and validate integrations\n\n1. CI/CD pipelines: Set up the `.gitlab-ci.yml` file in GitLab CI to replicate existing pipelines. You can read more about [planning a migration from other CI tools into GitLab CI/CD](https://docs.gitlab.com/ee/ci/migration/plan_a_migration.html).\n2. Issue tracking: Import project issues and test workflows.\n3. Code review: Set up the merge request process and test review workflows.\n\n#### Step 4: Gradual migration\n\n1. Start with small or non-critical projects to familiarize yourself with working on GitLab.com.\n2. Provide training for team members and allow time to adapt to new workflows.\n3. Gradually migrate more projects while ensuring integrations and workflows are problem-free.\n\nFor more information, see [Automating Migrations from CodeCommit to GitLab](https://gitlab.com/guided-explorations/aws/migrating-from-codecommit-to-gitlab/-/blob/main/migrating_codecommit_to_gitlab.md).\n\n#### Step 5: Complete migration\nOnce all tests and validations are complete and the team is comfortable with the new environment, plan for full migration. For each project:\n\n1. Set a migration date and notify all stakeholders.\n2. Perform final data synchronization.\n3. Remove mirroring settings from the GitLab project.\n4. 
Set AWS-hosted repositories to read-only and transition all development work to GitLab.com.\n\n#### Step 6: Assess adoption of new capabilities\n\nGitLab collaboration and workflow automation for developers is far richer than CodeCommit. It merits some time to learn what these capabilities are. The merge request process is especially rich compared to CodeCommit.\n\nAfter repositories are stable on GitLab, it is very easy to experiment with GitLab CI/CD in parallel to an existing solution. Teams can take time to perfect their GitLab CI/CD automation while production workflows remain unaffected.\n\nGitLab artifact management is also very capable with the Releases feature and many package registries.\n\n### Section 1: Summary\nBy adopting a parallel migration approach to GitLab, you can achieve a smooth transition while minimizing risks. This process allows teams to gradually adapt to the new environment and ensure all integrations and automations function correctly. Cutover migrations only omit a single setting checkbox if it is known that a parallel migration is not necessary.\n\n## Section 2: Integrating GitLab with AWS CodeBuild\n\nFor those wanting to build and test code from GitLab repositories using AWS CodeBuild, this comprehensive guide will help you set up an efficient CI pipeline.\n\n### Prerequisites\n\n- GitLab.com account\n- AWS account\n- AWS CLI (configured)\n\n### Step 1: Create GitLab connection in AWS CodeStar Connections\n\n1. Log in to the AWS Management Console and navigate to the CodeBuild service.\n2. Select \"Settings\" > \"Connections\" from the left navigation panel.\n3. Click the \"Create connection\" button.\n4. Choose \"GitLab\" as the provider.\n5. Enter a connection name and click \"Connect to GitLab.\"\n6. You'll be redirected to the GitLab authentication page.\n7. Approve the necessary permissions.\n8. 
Once successful, the connection status will change to \"Available.\"\n\n![CodeStar Connect setup](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codestar-connections-setup_aHR0cHM6_1750097822122.png)\n\n### Step 2: Create AWS CodeBuild project\n\n1. Click \"Create build project\" on the CodeBuild dashboard.\n2. Enter a project name and description.\n3. For source settings, select \"GitLab\" as the provider.\n4. Choose the connection you just created and specify the GitLab repository and branch.\n\n![Add CodeBuild project](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codepipeline_step_3_add_codebuild_aHR0cHM6_1750097822123.png)\n\n**Note: From Step 3 forward, please configure the settings according to your specific environment and needs.**\n\n### Summary of Section 2\nThis section explained in detail how to integrate GitLab repositories with AWS CodeBuild. This setup enables a continuous integration pipeline where code changes in GitLab are automatically built and tested using AWS CodeBuild.\n\n## Section 3: Integrating GitLab with AWS CodePipeline\n\nFor those looking to implement continuous delivery from GitLab repositories using AWS CodePipeline, this detailed guide will be helpful. The integration has become even easier now that GitLab is available as an AWS CodeStar Connections provider.\n\n### Prerequisites\n\n- GitLab.com account\n- AWS account\n- AWS CLI (configured)\n\n### Step 1: Create GitLab connection in AWS CodeStar Connections\n\n1. Log in to the AWS Management Console and navigate to the CodePipeline service.\n2. Select \"Settings\" > \"Connections\" from the left navigation panel.\n3. Click the \"Create connection\" button.\n4. Choose \"GitLab\" as the provider.\n5. Enter a connection name and click \"Connect to GitLab.\"\n6. You'll be redirected to the GitLab authentication page.\n7. 
Approve the necessary permissions.\n8. Once successful, the connection status will change to \"Available.\"\n\n![CodeStar Connections setup](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codestar-connections-setup_aHR0cHM6_1750097822125.png)\n\n### Step 2: Create AWS CodePipeline\n\n1. Click \"Create pipeline\" on the CodePipeline dashboard.\n2. Enter a pipeline name and click \"Next.\"\n3. Select \"GitLab\" as the source provider.\n4. Choose the connection you just created and specify the GitLab repository and branch.\n5. Select the Trigger type: You can trigger CodePipeline pipeline execution based on either pull or push events against specific branches and file types within your repository.\n\n![Add source provider](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codepipeline_step_2_source_provider_aHR0cHM6_1750097822127.png)\n\n![Add source configuration](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/codepipeline_step_2_source_configured_aHR0cHM6_1750097822129.png)\n\n**Note: From Step 3 forward, please configure the settings according to your specific environment and needs.**\n\n### Summary of Section 3\nThis section detailed how to integrate GitLab repositories with AWS CodePipeline. This setup enables a continuous delivery pipeline where code changes in GitLab are automatically deployed to your AWS environment.\n\n## Section 4: Migrating to GitLab\n\nIntegrating GitLab with AWS unlocks powerful capabilities for streamlining your development and deployment workflows and helps to solve your source code management woes. 
This integration can be achieved in several ways, each offering unique benefits:\n\n- Using AWS CodeStar Connections to link GitLab with AWS services enables a more cohesive workflow by allowing external Git repositories, like GitLab, to connect with various AWS services. This setup supports automated builds, deployments, and other essential actions directly from your GitLab repository, making your development process more integrated and streamlined.\n\n- Connecting GitLab with AWS CodePipeline via AWS CodeStar Connections takes automation to the next level by allowing you to create a full CI/CD pipeline. This approach integrates GitLab with AWS CodePipeline, enabling you to automate the entire process – from source control and builds to testing and deployment – using AWS services like CodeBuild and CodeDeploy. This ensures a robust, scalable, and efficient delivery process.\n\n![Chart of new technology and solutions for using GitLab and AWS together](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097822/Blog/Content%20Images/Blog/Content%20Images/Announcing_New_Technology_and_Solutions_for_using_GitLab_and_AWS_Together_aHR0cHM6_1750097822130.png)\n\n1\\. Connecting GitLab with AWS services using AWS CodeStar Connections\n\nAWS CodeStar Connections is a service that allows you to connect external Git repositories (such as GitHub or Bitbucket) to AWS services. You can also connect GitLab to AWS services via CodeStar Connections. When using GitLab, you may need to set up a custom connection as an HTTP Git server.\nThe following AWS services can be connected to GitLab using this method:\n\n- **AWS Service Catalog**\n\nAWS Service Catalog helps organizations standardize and manage AWS resources. Integrating it with GitLab improves transparency in resource management and simplifies change tracking. 
Specifically, you can automate catalog updates based on GitLab commits, enhancing operational efficiency.\n\n- **AWS CodeBuild**\n\nAWS CodeBuild is a managed build service that compiles source code, runs tests, and produces deployable software packages. Integrating GitLab with CodeBuild allows automated build processes to start whenever code changes are pushed to GitLab. This ensures consistency in builds and facilitates easier collaboration and version control.\n\n- **AWS Glue Notebook Jobs**\n\nAWS Glue Notebook Jobs is a service that allows you to interactively develop and run data preparation and ETL (Extract, Transform, Load) tasks. Integrating GitLab with Glue Notebook Jobs enables version control for notebooks and ETL scripts, promotes collaboration among team members, and improves the quality management of data processing pipelines.\n\n- **AWS Proton**\n\nAWS Proton is a service that automates the development and deployment of microservices and serverless applications. By integrating GitLab with AWS Proton, you can manage infrastructure as code, automate deployments, and ensure consistent environment management, leading to more efficient development processes.\n\nAs AWS CodeStar Connections supports more services, connecting GitLab with additional AWS services will become easier. It's advisable to regularly check for new services that support CodeStar Connections.\n\n2\\. Connecting CodePipeline with GitLab via AWS CodeStar Connections (including CodeDeploy)\n\nAWS CodePipeline is a continuous delivery service that automates the release process for software. To connect GitLab with CodePipeline, you need to use AWS CodeStar Connections. 
This setup allows you to designate a GitLab repository as the source and automate the entire CI/CD pipeline.\nThe primary actions supported by CodePipeline include:\n- **Source control:** AWS CodeCommit, GitHub, Bitbucket, GitLab\n- **Build and test:** AWS CodeBuild, Jenkins\n- **Deploy:** AWS CodeDeploy, Elastic Beanstalk, ECS, S3\n- **Approval:** Manual approval\n- **Infrastructure management:** AWS CloudFormation\n- **Serverless:** AWS Lambda\n- **Testing:** AWS Device Farm\n- **Custom Actions:** AWS Step Functions\n\nBy integrating GitLab with CodePipeline, you can automatically trigger the pipeline whenever code changes are pushed to GitLab, allowing a consistent process from build to deployment. Additionally, combining this with GitLab's version control capabilities makes it easier to track deployment history and states, leading to more flexible and reliable software delivery.\n\n## What you've learned\nThis guide has provided comprehensive information on migrating to and integrating GitLab with AWS. 
Through the four main topics, we've covered:\n- Parallel migration to GitLab: How to gradually migrate from existing AWS-hosted repositories to GitLab.com while minimizing risks.\n- Integration with AWS CodeBuild: Steps to set up a powerful CI environment integrated with GitLab repositories.\n- Integration with AWS CodePipeline: How to build efficient continuous delivery pipelines using GitLab repositories.\n- Downstream integrations for CodePipeline and CodeStar Connections: Leveraging GitLab-AWS connections for widespread service access, unlocking a cascade of integration possibilities across the AWS ecosystem.\n\nAs every organization's code hosting and integration implementation strategy is unique, this tutorial may be used as a starting point for your own GitLab + AWS integration and implementation strategy.\n\n## Additional resources\n\nFor more detailed information and advanced configurations, refer to the following resources:\n\n- [GitLab documentation](https://docs.gitlab.com/)\n- [AWS CodeBuild User Guide](https://docs.aws.amazon.com/codebuild/latest/userguide/welcome.html)\n- [AWS CodePipeline User Guide](https://docs.aws.amazon.com/codepipeline/latest/userguide/welcome.html)\n- [GitLab CI/CD documentation](https://docs.gitlab.com/ee/ci/)\n- [Integrate with AWS](https://docs.gitlab.com/ee/solutions/cloud/aws/gitlab_aws_integration.html)\n\nIf you have questions or need support, please contact [GitLab Support](https://about.gitlab.com/support/) or AWS Support. 
We hope this comprehensive guide helps you in your AWS-GitLab integration journey.",[108,794,478,9,1000,678,230],{"slug":4372,"featured":90,"template":684},"ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab","content:en-us:blog:ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab.yml","Ultimate Guide To Migrating From Aws Codecommit To Gitlab","en-us/blog/ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab.yml","en-us/blog/ultimate-guide-to-migrating-from-aws-codecommit-to-gitlab",{"_path":4378,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4379,"content":4385,"config":4392,"_id":4394,"_type":13,"title":4395,"_source":15,"_file":4396,"_stem":4397,"_extension":18},"/en-us/blog/understanding-and-improving-total-blocking-time",{"title":4380,"description":4381,"ogTitle":4380,"ogDescription":4381,"noIndex":6,"ogImage":4382,"ogUrl":4383,"ogSiteName":669,"ogType":670,"canonicalUrls":4383,"schema":4384},"Total Blocking Time - The metric to know for faster website performance","Learn how to identify and fix some root causes for high Total Blocking Time.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749682637/Blog/Hero%20Images/tbt_cover_image.jpg","https://about.gitlab.com/blog/understanding-and-improving-total-blocking-time","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Total Blocking Time - The metric to know for faster website performance\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Jacques Erasmus\"}],\n        \"datePublished\": \"2023-02-14\",\n      }",{"title":4380,"description":4381,"authors":4386,"heroImage":4382,"date":4388,"body":4389,"category":769,"tags":4390},[4387],"Jacques Erasmus","2023-02-14","\n\nOur world overwhelms us with information that is more accessible than ever. The increasing rates of content production and consumption are gifts that keep on giving. 
We can't seem to keep up with the information thrown at us. We're limited by our cognitive limitations and time constraints, and a [recent study](https://www.nature.com/articles/s41467-019-09311-w) concluded the result is a shortening of attention spans. Websites are no exception.\n\nUsers who interact with your website want feedback, and want it fast. Preferably immediately! Website performance has become an important factor in keeping users engaged. But how do you measure how unresponsive a page is before it becomes fully interactive?\n\nMany [performance metrics](https://web.dev/vitals/) exist, but this blog post focuses on Total Blocking Time (TBT).\n\n## What is Total Blocking Time?\n\nTBT measures the total amount of time tasks were blocking your browser's main thread. This metric represents the total amount of time that a user could not interact with your website. It's measured between [First Contentful Paint (FCP)](https://web.dev/fcp/) and [Time to Interactive (TTI)](https://web.dev/tti/), and represents the combined blocking time for all long tasks.\n\n## What is a long task?\n\nA long task is a process that runs on the main thread for longer than 50 milliseconds (ms). After a task starts, a browser can't interrupt it, and a single long-running task can block the main thread. The result: a website that is unresponsive to user input until the task completes.\n\nAfter the first 50 ms, all time spent on a task is counted as _blocking time_. This diagram shows five tasks, two of which block the main thread for 140 ms:\n\n![A diagram containing five tasks, two of which are blocking the main thread. 
The TBT for these tasks adds up to 140 ms.](https://about.gitlab.com/images/blogimages/tbt/tasks_diagram.png)\n\n## How can we measure TBT?\n\nMany tools measure TBT, but here we’ll use [Chrome DevTools](https://developer.chrome.com/docs/devtools/evaluate-performance/) to analyze runtime performance.\n\nAs an example: We recently improved performance on GitLab's [**View Source** page](https://gitlab.com/gitlab-org/gitlab/-/blob/master/.gitlab-ci.yml). This screenshot, taken before the performance improvement, shows eight long-running tasks containing a TBT of **2388.16 ms**. That's more than **two seconds**:\n\n![A screenshot indicating that there are eight long-running tasks. The TBT of these tasks adds up to 2388.16 ms.](https://about.gitlab.com/images/blogimages/tbt/summary_before.png)\n\n## How can we improve TBT?\n\nAs you might have guessed by now, reducing the time needed to complete long-running tasks reduces TBT.\n\nBy selecting one of the tasks from the previous screenshot, we can get a breakdown of how the browser executed it. This **Bottom-Up** view shows that much time is spent on rendering content in the Document Object Model (DOM):\n\n![A screenshot of the Bottom-Up view of one of tasks from the previous screenshot. It indicates that most of the time is being spent on rendering content in the DOM.](https://about.gitlab.com/images/blogimages/tbt/task_7_before.png)\n\nThis page has a lot of content that is below the fold – not immediately visible. The browser is spending a lot of resources upfront to render content that is not even visible to the user yet!\n\nSo what can we do? 
Some ideas:\n\n- **Change the UX.**\n  - Add a Show More button, paging, or virtual scrolling for long lists.\n- **Lazy-load images.**\n  ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/65745))\n    - Lazy-loading images reduces page weight, allowing the browser to spend resources on more important tasks.\n- **Lazy-load long lists.**\n  ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/71633))\n    - Similar to lazy-loading images, this approach allows the browser to spend resources on more important tasks.\n- **Reduce excessive HTML.**\n  ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/65835))\n    - For example, when loading large pages consider removing unnecessary content. Or, consider rendering some content (like icons) with CSS instead.\n- **Defer rendering when possible.**\n    - The [`content-visibility: auto;`](https://developer.mozilla.org/en-US/docs/Web/CSS/content-visibility) CSS property ensures the rendering of off-screen elements (and thus irrelevant to the user) is skipped without affecting the page layout. ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/67050))\n    - The [Intersection Observer API](https://developer.mozilla.org/en-US/docs/Web/API/Intersection_Observer_API) allows you to observe when elements intersect with the viewport. This information can be used to show or hide certain elements. ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/71633))\n    - The global [`requestIdleCallback` method](https://developer.mozilla.org/en-US/docs/Web/API/Window/requestIdleCallback?qs=requestIdleCallback) can be used to render content after the browser goes into an idle state.\n  ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/101942/diffs#7eed73783787184e5b1c029b9668e48638f3a6e8_64_78))\n\nFrameworks such as VueJS and React are already heavily optimized. 
However, be mindful of how you use these frameworks to avoid expensive tasks.\n\n### Change VueJS usage to improve TBT\n\nThis screenshot shows the **Bottom-Up** view of a task. Much of the task time is spent on activities from third-party code in the VueJS framework:\n\n![A screenshot of the Bottom-Up view of one of tasks. It indicates that a lot of the time is being spent on activities in the third-party VueJS framework.](https://about.gitlab.com/images/blogimages/tbt/task_6_before.png)\n\nWhat improvements can we make?\n\n- **Use [Server-side rendering (SSR)](https://gitlab.com/gitlab-org/gitlab/-/issues/215365) or [streaming](https://gitlab.com/gitlab-org/frontend/rfcs/-/issues/101)** for pages that are sensitive to page load performance.\n- **If you don't _need_ Vue, don't use it.**\n  Component instances are a lot more expensive than using plain DOM nodes. Try to avoid unnecessary component abstractions.\n- **Optimize component [props](https://vuejs.org/guide/components/props.html).**\n  Child components in Vue update when at least one of their received props are being updated. Analyze the data that you pass to components. You may find that you can avoid unnecessary updates by making changes to your props strategy.\n- **Use [v-memo](https://vuejs.org/api/built-in-directives.html#v-memo) to skip updates.**\n    - In Vue versions 3.2 and later, `v-memo` enables you to cache parts of your template. The cached template updates and re-renders only if one of its provided dependencies changes.\n- **Use [v-once](https://vuejs.org/api/built-in-directives.html#v-once) for data** that does not need to be reactive after the initial load.\n  ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/101942))\n    - `v-once` ensures the element and component are only rendered once. Any future updates will be skipped.\n- **Reduce expensive tasks in your Vue components.**\n  Even a small script may take a long time to finish if it’s not optimized enough. 
Some suggestions:\n    - By using [`requestIdleCallback`](https://developer.mozilla.org/en-US/docs/Web/API/Window/requestIdleCallback?qs=requestIdleCallback) you can defer the execution of the non-critical tasks. ([example](https://gitlab.com/gitlab-org/gitlab/-/merge_requests/101942/diffs#7eed73783787184e5b1c029b9668e48638f3a6e8_64_78))\n    - By executing expensive scripts in [WebWorkers](https://developer.mozilla.org/en-US/docs/Web/API/Web_Workers_API/Using_web_workers) you can unblock the main thread.\n\n### Results and methods\n\nBy using three of the methods suggested above, we reduced TBT from about **3 seconds** to approximately **500 ms**:\n\n![A chart indicating a drop in TBT from ~3 seconds to ~500 milliseconds.](https://about.gitlab.com/images/blogimages/tbt/chart_after.png)\n\nWhat did we do?\n\n- Deferred rendering by using the [`content-visibility: auto;`](https://developer.mozilla.org/en-US/docs/Web/CSS/content-visibility) CSS property.\n- Deferred rendering by using the [Intersection Observer API](https://developer.mozilla.org/en-US/docs/Web/API/Intersection_Observer_API).\n- Used [v-once](https://vuejs.org/api/built-in-directives.html#v-once) for content that didn't need to be reactive after rendering.\n\nRemember, the size of the decrease always depends on how optimized your app already is to begin with.\n\nThere is a lot more we can do to improve TBT. While the specific approach depends on the app you're optimizing, the general methods discussed here are very effective at finding improvement opportunities in any app. Like most things in life, a series of the smallest changes often yield the biggest impact. 
So let's [iterate](/blog/dont-confuse-these-twelve-shortcuts-with-iteration/) together, and adapt to this ever-changing world.\n\n> “Adaptability is the simple secret of survival.” – Jessica Hagedorn\n\n_Cover image by [Growtika](https://unsplash.com/@growtika?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText) on [Unsplash](https://unsplash.com/photos/Iqi0Rm6gBkQ?utm_source=unsplash&utm_medium=referral&utm_content=creditCopyText)_\n",[4391,728,9],"frontend",{"slug":4393,"featured":6,"template":684},"understanding-and-improving-total-blocking-time","content:en-us:blog:understanding-and-improving-total-blocking-time.yml","Understanding And Improving Total Blocking Time","en-us/blog/understanding-and-improving-total-blocking-time.yml","en-us/blog/understanding-and-improving-total-blocking-time",{"_path":4399,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4400,"content":4406,"config":4410,"_id":4412,"_type":13,"title":4413,"_source":15,"_file":4414,"_stem":4415,"_extension":18},"/en-us/blog/use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project",{"title":4401,"description":4402,"ogTitle":4401,"ogDescription":4402,"noIndex":6,"ogImage":4403,"ogUrl":4404,"ogSiteName":669,"ogType":670,"canonicalUrls":4404,"schema":4405},"Use GitLab Duo to build and deploy a simple Quarkus-native project","This tutorial shows how a Java application is compiled to machine code and deployed to a Kubernetes cluster using a CI/CD pipeline. 
See how AI makes the process faster and more efficient.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666069/Blog/Hero%20Images/AdobeStock_639935439.jpg","https://about.gitlab.com/blog/use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Use GitLab Duo to build and deploy a simple Quarkus-native project\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2024-10-17\",\n      }",{"title":4401,"description":4402,"authors":4407,"heroImage":4403,"date":3465,"body":4408,"category":702,"tags":4409},[699],"In [“How to automate software delivery using Quarkus and GitLab,”](https://about.gitlab.com/blog/how-to-automate-software-delivery-using-quarkus-and-gitlab/) you learned how to develop and deploy a simple Quarkus-JVM application to a Kubernetes cluster using [GitLab Auto DevOps](https://docs.gitlab.com/ee/topics/autodevops/). Now, you'll learn how to use Quarkus-native to compile a Java application to machine code and deploy it to a Kubernetes cluster using a CI/CD pipeline. Follow our journey from development to deployment leveraging [GitLab Duo](https://about.gitlab.com/gitlab-duo/) as our AI companion, including the specific prompts we used.\n\n## What is Quarkus?\n\n[Quarkus](https://quarkus.io/), also known as the Supersonic Subatomic Java, is an open source, Kubernetes-native Java stack tailored to OpenJDK HotSpot and GraalVM. The Quarkus project recently moved to the [Commonhaus Foundation](https://www.commonhaus.org/), a nonprofit organization dedicated to the sustainability of open source libraries and frameworks that provides a balanced approach to governance and support.\n\n## Prerequisites\n\nThis tutorial assumes:\n\n- You have a running Kubernetes cluster, e.g. 
GKE.\n- You have access to the Kubernetes cluster from your local laptop via the `kubectl` command.\n- The cluster is connected to your GitLab project.\n- You have [Maven (Version 3.9.6 or later)](https://maven.apache.org/) installed on your local laptop.\n- You have Visual Studio Code installed on your local laptop.\n\nIf you’d like to set up a Kubernetes cluster connected to your GitLab project, you can follow the instructions in this [tutorial](https://about.gitlab.com/blog/eliminate-risk-with-feature-flags-tutorial/), up to but not including the “Creating an instance of MySQL database in your cluster via Flux” section (you do not need a database for this tutorial).\n\nYou will also need to install an nginx ingress in your Kubernetes cluster. Here are two ways to do this:\n1. You can follow the instructions in [“Creating and importing projects”](https://about.gitlab.com/blog/eliminate-risk-with-feature-flags-tutorial/#creating-and-importing-projects), up to the creation of the variable `KUBE_INGRESS_BASE_DOMAIN`.\n2. Or, just create an ingress in your Kubernetes cluster by following the instructions in our [Auto DevOps with GKE documentation](https://docs.gitlab.com/ee/topics/autodevops/cloud_deployments/auto_devops_with_gke.html#install-ingress).\n\n**NOTE:** For this article, we used the first method above to install an ingress and cert-manager in the Kubernetes cluster.\n\n## Creating necessary project files using GitLab Duo Chat\n\nWe started our endeavor from VS Code and an empty project called `quarkus-native`, which we had previously created in GitLab and had already cloned to our local laptop.\n\n1. 
We opened GitLab Duo Chat, within VS Code, and entered the following prompt:\n\n**_Create a “Hello World” Quarkus application that can be natively compiled_**\n\n![hello-world-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/hello-world-prompt.png)\n\nChat replied with the prerequisites and process of what to do to create a simple “Hello World” Quarkus application. At this point, we were after the generated body of the class `HelloResource.java`:\n\n```\npackage org.acme;\n\nimport javax.ws.rs.GET;\nimport javax.ws.rs.Path;\nimport javax.ws.rs.Produces;\nimport javax.ws.rs.core.MediaType;\n\n@Path(\"/hello\")\npublic class HelloResource {\n\n    @GET\n    @Produces(MediaType.TEXT_PLAIN)\n    public String hello() {\n        return \"Hello World\";\n    }\n}\n```\n\n2. We knew that for Quarkus we needed a special Dockerfile, named `Dockerfile.native`, as part of the creation of the image for the application executable. We then entered the following prompt in Chat:\n\n**_Create the Dockerfile.native file for the sample application above_**\n\n![create-dockerfile-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675941/Blog/Content%20Images/create-dockerfile-prompt.png)\n\nChat replied with a very thorough answer about this file, its contents, and the containerization process. At this point, we were just after the generated body of the file `Dockerfile.native`:\n\n```\n####\n# This Dockerfile is used in order to build a container that runs the Quarkus application in native (no JVM) mode\n####\nFROM registry.access.redhat.com/ubi8/ubi-minimal:8.6\nWORKDIR /work/\nRUN chown 1001 /work \\\n    && chmod \"g+rwX\" /work \\\n    && chown 1001:root /work\nCOPY --chown=1001:root target/*-runner /work/application\n\nEXPOSE 8080\nUSER 1001\n\nCMD [\"./application\", \"-Dquarkus.http.host=0.0.0.0\"]\n```\n\n3. 
Next, we asked Chat to generate the `pom.xml` for this application by entering the following prompt:\n\n**_Create the pom.xml file for the sample application above_**\n\n![create-pom-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/create-pom-prompt.png)\n\nChat provided the content of the `pom.xml` file, an explanation of its contents, and how to use it. At this point, we were after the generated body of the file `pom.xml`:\n\n```\n\u003C?xml version=\"1.0\"?>\n\u003Cproject xsi:schemaLocation=\"http://maven.apache.org/POM/4.0.0 https://maven.apache.org/xsd/maven-4.0.0.xsd\" xmlns=\"http://maven.apache.org/POM/4.0.0\"\n    xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\">\n  \u003CmodelVersion>4.0.0\u003C/modelVersion>\n  \u003CgroupId>org.acme\u003C/groupId>\n  \u003CartifactId>hello-world-quarkus\u003C/artifactId>\n  \u003Cversion>1.0.0-SNAPSHOT\u003C/version>\n  \u003Cproperties>\n    \u003Ccompiler-plugin.version>3.10.1\u003C/compiler-plugin.version>\n    \u003Cmaven.compiler.release>11\u003C/maven.compiler.release>\n    \u003Cproject.build.sourceEncoding>UTF-8\u003C/project.build.sourceEncoding>\n    \u003Cproject.reporting.outputEncoding>UTF-8\u003C/project.reporting.outputEncoding>\n    \u003Cquarkus.platform.artifact-id>quarkus-bom\u003C/quarkus.platform.artifact-id>\n    \u003Cquarkus.platform.group-id>io.quarkus.platform\u003C/quarkus.platform.group-id>\n    \u003Cquarkus.platform.version>2.16.5.Final\u003C/quarkus.platform.version>\n    \u003CskipITs>true\u003C/skipITs>\n    \u003Csurefire-plugin.version>3.0.0-M7\u003C/surefire-plugin.version>\n  \u003C/properties>\n  \u003CdependencyManagement>\n    \u003Cdependencies>\n      \u003Cdependency>\n        \u003CgroupId>${quarkus.platform.group-id}\u003C/groupId>\n        \u003CartifactId>${quarkus.platform.artifact-id}\u003C/artifactId>\n        \u003Cversion>${quarkus.platform.version}\u003C/version>\n        \u003Ctype>pom\u003C/type>\n        
\u003Cscope>import\u003C/scope>\n      \u003C/dependency>\n    \u003C/dependencies>\n  \u003C/dependencyManagement>\n  \u003Cdependencies>\n    \u003Cdependency>\n      \u003CgroupId>io.quarkus\u003C/groupId>\n      \u003CartifactId>quarkus-arc\u003C/artifactId>\n    \u003C/dependency>\n    \u003Cdependency>\n      \u003CgroupId>io.quarkus\u003C/groupId>\n      \u003CartifactId>quarkus-resteasy\u003C/artifactId>\n    \u003C/dependency>\n    \u003Cdependency>\n      \u003CgroupId>io.quarkus\u003C/groupId>\n      \u003CartifactId>quarkus-junit5\u003C/artifactId>\n      \u003Cscope>test\u003C/scope>\n    \u003C/dependency>\n    \u003Cdependency>\n      \u003CgroupId>io.rest-assured\u003C/groupId>\n      \u003CartifactId>rest-assured\u003C/artifactId>\n      \u003Cscope>test\u003C/scope>\n    \u003C/dependency>\n  \u003C/dependencies>\n  \u003Cbuild>\n    \u003Cplugins>\n      \u003Cplugin>\n        \u003CgroupId>${quarkus.platform.group-id}\u003C/groupId>\n        \u003CartifactId>quarkus-maven-plugin\u003C/artifactId>\n        \u003Cversion>${quarkus.platform.version}\u003C/version>\n        \u003Cextensions>true\u003C/extensions>\n        \u003Cexecutions>\n          \u003Cexecution>\n            \u003Cgoals>\n              \u003Cgoal>build\u003C/goal>\n              \u003Cgoal>generate-code\u003C/goal>\n              \u003Cgoal>generate-code-tests\u003C/goal>\n            \u003C/goals>\n          \u003C/execution>\n        \u003C/executions>\n      \u003C/plugin>\n      \u003Cplugin>\n        \u003CartifactId>maven-compiler-plugin\u003C/artifactId>\n        \u003Cversion>${compiler-plugin.version}\u003C/version>\n        \u003Cconfiguration>\n          \u003CcompilerArgs>\n            \u003Carg>-parameters\u003C/arg>\n          \u003C/compilerArgs>\n        \u003C/configuration>\n      \u003C/plugin>\n      \u003Cplugin>\n        \u003CartifactId>maven-surefire-plugin\u003C/artifactId>\n        \u003Cversion>${surefire-plugin.version}\u003C/version>\n        
\u003Cconfiguration>\n          \u003CsystemPropertyVariables>\n            \u003Cjava.util.logging.manager>org.jboss.logmanager.LogManager\u003C/java.util.logging.manager>\n            \u003Cmaven.home>${maven.home}\u003C/maven.home>\n          \u003C/systemPropertyVariables>\n        \u003C/configuration>\n      \u003C/plugin>\n      \u003Cplugin>\n        \u003CartifactId>maven-failsafe-plugin\u003C/artifactId>\n        \u003Cversion>${surefire-plugin.version}\u003C/version>\n        \u003Cexecutions>\n          \u003Cexecution>\n            \u003Cgoals>\n              \u003Cgoal>integration-test\u003C/goal>\n              \u003Cgoal>verify\u003C/goal>\n            \u003C/goals>\n            \u003Cconfiguration>\n              \u003CsystemPropertyVariables>\n                \u003Cnative.image.path>${project.build.directory}/${project.build.finalName}-runner\u003C/native.image.path>\n                \u003Cjava.util.logging.manager>org.jboss.logmanager.LogManager\u003C/java.util.logging.manager>\n                \u003Cmaven.home>${maven.home}\u003C/maven.home>\n              \u003C/systemPropertyVariables>\n            \u003C/configuration>\n          \u003C/execution>\n        \u003C/executions>\n      \u003C/plugin>\n    \u003C/plugins>\n  \u003C/build>\n  \u003Cprofiles>\n    \u003Cprofile>\n      \u003Cid>native\u003C/id>\n      \u003Cactivation>\n        \u003Cproperty>\n          \u003Cname>native\u003C/name>\n        \u003C/property>\n      \u003C/activation>\n      \u003Cproperties>\n        \u003CskipITs>false\u003C/skipITs>\n        \u003Cquarkus.package.type>native\u003C/quarkus.package.type>\n      \u003C/properties>\n    \u003C/profile>\n  \u003C/profiles>\n\u003C/project>\n```\n\n4. 
We then asked Chat to generate the Quarkus test file for this application by entering the following prompt:\n\n**_Create the content for the Quarkus test for the application above_**\n\n![create-quarkus-test-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/create-quarkus-test-prompt.png)\n\nChat returned the body for the Quarkus test, how to use it, and the body of the Java class for the test needed for its native compilation. At this point, we were after the generated bodies of the file `HelloResourceTest.java` and `HelloResourceIT.java`.\n\nHere is the generated body for `HelloResourceTest.java`:\n\n```\npackage org.acme;\n\nimport io.quarkus.test.junit.QuarkusTest;\nimport org.junit.jupiter.api.Test;\n\nimport static io.restassured.RestAssured.given;\nimport static org.hamcrest.CoreMatchers.is;\n\n@QuarkusTest\npublic class HelloResourceTest {\n\n    @Test\n    public void testHelloEndpoint() {\n        given()\n          .when().get(\"/hello\")\n          .then()\n             .statusCode(200)\n             .body(is(\"Hello World\"));\n    }\n\n}\n```\n\nHere is the generated body for `HelloResourceIT.java`:\n\n```\npackage org.acme;\n\nimport io.quarkus.test.junit.QuarkusIntegrationTest;\n\n@QuarkusIntegrationTest\npublic class HelloResourceIT extends HelloResourceTest {\n    // Execute the same tests but in native mode.\n}\n```\n\n5. We needed to know how to organize these files in the GitLab project, so we asked about the directory structure for all these files by entering the following prompt in Chat:\n\n**_Give me the entire directory structure for this project including the location of each file, e.g. 
pom.xml, Dockerfile.native, application.properties, HelloResource.java, HelloResourceTest.java, and the location of the target directory_**\n\n![create-dir-struct-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/create-dir-struct-prompt.png)\n\nChat replied with a detailed diagram about the entire directory structure for the project and where all these files should be located as well as a description of the purpose of each of them. It even mentioned that the directory `target/` and its contents should not be version controlled since it was generated by the build process. Another interesting aspect of the reply was the existence of a file called `resources/application.properties` in the directory structure.\n\n![dir-struct-chat-response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/dir-struct-chat-response.png)\n\nWith all this information in our hands, we were ready to start creating these files in our GitLab project.\n\n## Populating our project with the generated content for each file\n\nWe created each of the following files in their corresponding location and their generated content as provided by Chat:\n\n- `src/main/java/org/acme/HelloResource.java`\n- `resources/application.properties`\n- `src/test/java/org/acme/HelloResourceTest.java`\n- `src/test/java/org/acme/HelloResourceIT.java`\n- `pom.xml`\n- `Dockerfile.native`\n\n**NOTE:** We considered using GitLab Auto Deploy for this endeavor but later realized that it would not be a supported option. We are mentioning this because in the video at the end of this tutorial, you will see that we asked Chat: `How to set the service internalPort to 8080 for auto deploy`. Then we created a file named `.gitlab/auto-deploy-values.yaml` with the generated content from Chat. 
The creation of this file is not necessary for this tutorial.\n\nBefore we started tackling the pipeline to build, containerize, and deploy the application to our Kubernetes cluster, we decided to generate the executable locally on our Mac and test the application locally.\n\n## Testing the application locally\n\nHere is the process we went through to test the application on our local machine.\n\n1. To build the application on the local Mac laptop, from a Terminal window, we entered the following command:\n\n```\nmvn clean package -Pnative\n```\n\n![first-build](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/first-build.png)\n\nThe native compilation failed with the error message:\n\n`Cannot find the ‘native-image’ in the GRAALVM_HOME, JAVA_HOME and System PATH. Install it using ‘gu install native-image’`\n\n2. So, we used our trusty GitLab Duo Chat again and asked it the following:\n\n**_The command “mvn clean package -Pnative” is failing with error “java.lang.RuntimeException: Cannot find the ‘native-image’ in the GRAALVM_HOME, JAVA_HOME and System PATH. Install it using gu install native-image”. I’m using a MacOS Sonoma. How do I fix this error on my Mac?_**\n\n![how-to-fix-build-failure-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/how-to-fix-build-failure-prompt.png)\n\nChat replied with a detailed set of steps on how to install the necessary software and set the appropriate environment variables.\n\n3. 
We copied and pasted the following commands from the Chat window to a Terminal window:\n\n```\nbrew install –cask graalvm/tap/graalvm-ce-java17\nexport JAVA_HOME=/Library/Java/JavaVIrtualMachines/graalvm-ce-java17-22.3.1\nexport GRAALVM_HOME=${JAVA_HOME}\nexport PATH=${GRAALVM_HOME}/bin:$PATH\nxattr -r -d com.apple.quarantine ${GRAALVM_HOME}/../..\ngu install native-image\n```\n\nThe commands above installed the community edition of GraalVM Version 22.3.1 that supported Java 17. We noticed, during the brew install, that the version of the GraalVM being installed was `java17-22.3.1`, so we had to update the pasted value for `JAVA_HOME` from `graalvm-ce-java17-22.3.0` to `graalvm-ce-java17-22.3.1`.\n\nWe also had to run the `xattr` command to get the GraalVM, which we had downloaded and installed on our Mac, out of quarantine so that it could run locally. Lastly, we installed the GraalVM native-image.\n\n4. At this point, we again, from a Terminal window, entered the following command to build the application on the local Mac laptop:\n\n```\nmvn clean package -Pnative\n```\n\nThis time the compilation was successful and an executable was generated in the `target` directory.\n\n![successful-local-compilation](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/successful-local-compilation.png)\n\n5. We ran the executable by entering the following commands from a Terminal window:\n\n```\ncd target\n./quarkus-native-1.0.0-SNAPSHOT-runner “-Dquarkus.http.host=0.0.0.0”\n```\n\n![executable-local-run](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/executable-local-run.png)\n\n6. 
With the application running, we opened a browser window, and in the URL field, we entered:\n\n```\nhttp://localhost:8080/hello\n```\n\n![app-running-locally](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/app-running-locally.png)\n\nThe application returned the string `Hello World`, which was displayed in the browser window.\n\nAt this point, we committed and pushed all the changes to our GitLab project and started working on creating a CI/CD pipeline that would build and deploy the application to a Kubernetes cluster running on the cloud.\n\nBut before continuing, we remembered to add, commit, and push a `.gitignore` file to our project that included the path `target/`, since this was the directory where the executable would be created and we didn’t need to keep it - or its contents - under version control.\n\n## Creating the pipeline with GitLab Duo Chat\n\nNow that we had already successfully tested the application locally on our Mac, we needed to create the CI/CD pipeline that would compile the application, containerize it, and deploy it to our Kubernetes cluster. We wanted to keep the pipeline simple, brief, and have a single environment in which to deploy it. To this end, the pipeline would not tackle multiple environments or feature branches, for example.\n\n1. To avoid manually creating a pipeline from scratch, we decided to once again leverage Chat. We entered the following prompt\n\n**_Create a .gitlab-ci.yml file with 3 stages: build, containerize, and deploy. Each of these stages should have a single job with the same name. The build job should compile the application natively using the -Pnative maven option and the builder image for mandrel jdk-22 for java17 and store the application executable and its Dockerfile as artifacts. The containerize job should use docker to build and push the image to the built-in container registry. 
The deploy job should rollout the containerized application, named quarkus-app, to the production environment in the Kubernetes cluster by creating a deployment resource, a service resource and an ingress rule with host ${KUBE_INGRESS_BASE_DOMAIN} and /hello prefix. The deploy job should also include the url for the production environment. When setting the Kubernetes context, use ${KUBE_CONTEXT}, and for application namespace, use ${KUBE_NAMESPACE}_**\n\n![create-pipeline-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/create-pipeline-prompt.png)\n\nThe prompt above provided some points that we thought would be useful and necessary for a good outcome. The requirements listed by the prompt were:\n\n- It specified that the pipeline needed to have three stages: build, containerize, and deploy.\n- Each stage would have a single job.\n- The build job should natively compile (not build) the application using the -Pnative option.\n- The build job should use the open source software `mandrel` and use the version that would support jdk-22 for Java 17. Remember that these were the versions that we tested on our local machine.\n- The build job would also need to pass the resulting executable and the project Dockerfile to the next job, i.e. 
containerize, in the pipeline.\n- The containerize job should build a docker image with the executable using the Dockerfile, both of which were passed as artifacts.\n- The containerize job should push the image to the GitLab container registry.\n- The deploy job should use the image from the GitLab container registry, name it `quarkus-app`, and deploy it to the Kubernetes cluster using deployment, service and ingress rule resources.\n- The deploy job should use the environment variables `${KUBE_INGRESS_BASE_DOMAIN}`, `${KUBE_CONTEXT}`, and `${KUBE_NAMESPACE}`, which had been defined as group/project variables, accordingly.\n\nChat responded with the following `.gitlab-ci.yml` example:\n\n```\n# Define the stages for the pipeline\nstages:\n  - build\n  - containerize\n  - deploy\n\n# Variables section to use GitLab CI/CD variables\nvariables:\n  MAVEN_OPTS: \"-Dmaven.repo.local=.m2/repository\"\n  MAVEN_CLI_OPTS: \"--batch-mode --errors --fail-at-end --show-version -DinstallAtEnd=true -DdeployAtEnd=true\"\n  CONTAINER_IMAGE: ${CI_REGISTRY_IMAGE}:${CI_COMMIT_SHA}\n\n# Cache configuration for Maven dependencies\ncache:\n  paths:\n    - .m2/repository\n\n# Build stage: Compile the application natively\nbuild:\n  stage: build\n  image:\n    name: quay.io/quarkus/ubi-quarkus-mandrel-builder-image:22.3-java17\n    entrypoint:\n      - \"\"\n  script:\n    - ./mvnw ${MAVEN_CLI_OPTS} clean package -Pnative\n  artifacts:\n    paths:\n      - target/*-runner\n      - src/main/docker/Dockerfile.native-micro\n  # This job runs only on the main branch\n  only:\n      - main\n\n# Containerize stage: Build and push Docker image\ncontainerize:\n  stage: containerize\n  image: docker:20.10.16\n  services:\n    - docker:20.10.16-dind\n  variables:\n    DOCKER_TLS_CERTDIR: \"/certs\"\n  before_script:\n    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY\n  script:\n    - docker build -f src/main/docker/Dockerfile.native-micro -t ${CONTAINER_IMAGE} .\n    - 
docker push ${CONTAINER_IMAGE}\n  # This job runs only on the main branch\n  only:\n      - main\n\n# Deploy stage: Deploy to Kubernetes\ndeploy:\n  stage: deploy\n  image: \n    name: bitnami/kubectl:latest\n    entrypoint: ['']\n  script:\n    - kubectl config use-context ${KUBE_CONTEXT}\n    - kubectl config set-context --current --namespace=${KUBE_NAMESPACE}\n    - |\n      cat \u003C\u003CEOF | kubectl apply -f -\n      apiVersion: apps/v1\n      kind: Deployment\n      metadata:\n        name: quarkus-app\n      spec:\n        replicas: 1\n        selector:\n          matchLabels:\n            app: quarkus-app\n        template:\n          metadata:\n            labels:\n              app: quarkus-app\n          spec:\n            containers:\n            - name: quarkus-app\n              image: ${CONTAINER_IMAGE}\n              ports:\n              - containerPort: 8080\n      ---\n      apiVersion: v1\n      kind: Service\n      metadata:\n        name: quarkus-app\n      spec:\n        selector:\n          app: quarkus-app\n        ports:\n        - port: 80\n          targetPort: 8080\n      ---\n      apiVersion: networking.k8s.io/v1\n      kind: Ingress\n      metadata:\n        name: quarkus-app\n      spec:\n        rules:\n        - host: ${KUBE_INGRESS_BASE_DOMAIN}\n          http:\n            paths:\n            - path: /hello\n              pathType: Prefix\n              backend:\n                service:\n                  name: quarkus-app\n                  port: \n                    number: 80\n      EOF\n    - echo \"Application deployed to https://${KUBE_INGRESS_BASE_DOMAIN}/hello\"\n  # This job runs only on the main branch\n  only:\n      - main\n```\n\n2. There were some things we needed to adjust in the sample `.gitlab-ci.yml` file above before we could commit it to our `main` branch. 
These are the updates we made to the file:\n\n- We deleted all occurrences of `only: -main` because we wanted to keep our pipeline definition file simple and with no branch-related rules.\n- We fixed the name of the file `Dockerfile.native-micro` to `Dockerfile.native`.\n\n3. At this point, we wanted to ensure that the deployment would be to the `production` environment so we asked Chat the following prompt:\n\n**_What is the syntax to specify an environment with its url in a pipeline?_**\n\n![how-to-add-env-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/how-to-add-env-prompt.png)\n\nThe response from Chat included an example of how to do this so we used this information to add the following environment block to our pipeline:\n\n```\n  environment:\n       name: production\n       url: http://${KUBE_INGRESS_BASE_DOMAIN}/hello\n```\n\n4. The example provided by Chat included a URL that started with `https` and we modified that to `http` since we didn’t really need a secure connection for this simple application.\n\n5. Lastly, we noticed that in the `build` job, there was a script `mvnw` that we didn’t have in our project. So, we asked Chat the following:\n\n**_How can I get the mvnw script for Quarkus?_**\n\n![how-to-add-mvnw-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/how-to-add-mvnw-prompt.png)\n\nChat responded with the command to execute to bootstrap and create this script. We executed this command from a Terminal window:\n\n```\nmvn wrapper:wrapper\n```\n\nWe were now ready to commit all of our changes to the `main` branch and have the pipeline executed. However, on our first attempt, our first pipeline failed at the build job.\n\n## Troubleshooting using GitLab Duo Root Cause Analysis\n\nOur first attempt at running our brand-new pipeline failed. 
So, we took advantage of [GitLab Duo Root Cause Analysis](https://about.gitlab.com/blog/developing-gitlab-duo-blending-ai-and-root-cause-analysis-to-fix-ci-cd/), which looks at the job logs and provides a thorough natural language explanation (with examples) of the root cause of the problem and, most importantly, how to fix it.\n\n![build-job-troubleshooting](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/build-job-troubleshooting.png)\n\nRoot Cause Analysis recommended we look at the compatibility of the command that was trying to be executed with the image of mandrel used in the build job. We were not using any command with the image so we concluded that it must have been the predefined `entrypoint` for the image itself. We needed to override this so we asked Chat the following:\n\n**_How do I override the entrypoint of an image using gitlab keywords?_**\n\n![how-to-override-entrypoint-prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/how-to-override-entrypoint-prompt.png)\n\nChat replied with some use case examples of overriding an image entry point. We used that information to update the build job image definition:\n\n```\nbuild:\n    stage: build\n    image: quay.io/quarkus/ubi-quarkus-mandrel-builder-image:22.3-java17\n    entrypoint:\n        - \"\"\n```\n\nWe committed our changes to the `main` branch, which launched a new instance of the pipeline. This time the build job executed successfully but the pipeline failed at the `containerize` job.\n\n## Running a successful pipeline\n\nBefore drilling down into the log of the failed `containerize` job, we decided to drill into the log of the successfully completed build job first. Everything looked good in the log of the build job with the exception of this warning message at the very end of it:\n\n```\nWARNING: src/main/docker/Dockerfile.native: no matching files. 
Ensure that the artifact path is relative to the working directory …\n``` \n\nWe took notice of this warning and then headed to the log of the failed `containerize` job. In it, we saw that the `docker build` command had failed due to a non-existent Dockerfile. We ran Root Cause Analysis on the job and among its suggested fixes was for us to verify that the project structure matched the path of the specified `Dockerfile.native` file.\n\n![containerize-job-troubleshooting](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/containerize-job-troubleshooting.png)\n\nThis information confirmed our suspicion of the misplaced `Dockerfile.native` file. Instead of being at the directory `src/main/docker` as specified in the pipeline, it was located at the root directory of the project.\n\nSo, we went back to our project and updated every occurrence of the location of this file in our `.gitlab-ci.yml` file. We modified the two locations where this happened, one in the `build` job and one in the `containerize` job, as follows:\n\n```\nsrc/main/docker/Dockerfile.native\n```\n\nto\n\n```\nDockerfile.native\n```\n\nWe committed our updates to the `main` branch and this time our entire pipeline executed successfully!\n\n![pipeline-successful-run](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/pipeline-successful-run.png)\n\nOur last step was to check the running application in the `production` environment in our Kubernetes cluster.\n\n## Accessing the deployed application running in cluster\n\nOnce the pipeline ran successfully to completion, we drilled in the log file for the `deploy` job. Remember, this job printed the URL of the application at the end of its execution. 
We scrolled down to the bottom of the log and clicked on the `https` application link, which opened a browser window warning us that the connection was not private (we disabled `https` for the environment URL but forgot it for this string). We proceeded past the browser warning and then the string \"Hello World\" was displayed in the browser window indicating that the application was up and running in the Kubernetes cluster.\n\nFinally, to double-check our production deployment URL, we headed to the project **Operate > Environments** window, and clicked on the \"Open\" button for it, which immediately opened a browser window with the \"Hello World\" message.\n\n![app-running-on-k8s](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749675940/Blog/Content%20Images/app-running-on-k8s.png)\n\n## Try it \n\nWe created, compiled, built, and deployed a simple Quarkus application to a Kubernetes cluster using [GitLab Duo](https://about.gitlab.com/gitlab-duo/). This approach allowed us to be more efficient and productive in all the tasks that we performed and it helped us streamline our DevSecOps processes. We have shown only a small portion of how GitLab Duo's AI-powered capabilities can help you, namely Chat and Root Cause Analysis. 
There’s so much more you can leverage in GitLab Duo to help you create better software faster and more securely.\n\nWatch this whole use case in action:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/xDpycxz3RPY?si=HHZrFt1O_8XoLATf\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nAll the project assets we used are available [here](https://gitlab.com/gitlab-da/use-cases/ai/ai-applications/quarkusn/quarkus-native).\n\n> [Try GitLab Duo for free for 60 days](https://about.gitlab.com/solutions/gitlab-duo-pro/sales/?type=free-trial&toggle=gitlab-duo-pro) and get started on exciting projects like this.",[704,9,835,1574,680,678,108],{"slug":4411,"featured":90,"template":684},"use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project","content:en-us:blog:use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project.yml","Use Gitlab Duo To Build And Deploy A Simple Quarkus Native Project","en-us/blog/use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project.yml","en-us/blog/use-gitlab-duo-to-build-and-deploy-a-simple-quarkus-native-project",{"_path":4417,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4418,"content":4424,"config":4429,"_id":4431,"_type":13,"title":4432,"_source":15,"_file":4433,"_stem":4434,"_extension":18},"/en-us/blog/use-gitlab-duo-workflow-to-improve-application-quality-assurance",{"title":4419,"description":4420,"ogTitle":4419,"ogDescription":4420,"noIndex":6,"ogImage":4421,"ogUrl":4422,"ogSiteName":669,"ogType":670,"canonicalUrls":4422,"schema":4423},"Use GitLab Duo Workflow to improve application quality assurance","Learn step-by-step how to add unit tests to a Java application using agentic AI (includes a video 
tutorial).","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097617/Blog/Hero%20Images/Blog/Hero%20Images/Workflow%201800x945_2gQoQIbY9NvjLFpXtsxtXy_1750097616649.png","https://about.gitlab.com/blog/use-gitlab-duo-workflow-to-improve-application-quality-assurance","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Use GitLab Duo Workflow to improve application quality assurance\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Cesar Saavedra\"}],\n        \"datePublished\": \"2025-04-10\",\n      }",{"title":4419,"description":4420,"authors":4425,"heroImage":4421,"date":4426,"body":4427,"category":702,"tags":4428},[699],"2025-04-10","Assuring the quality of your applications via test-driven design, good code coverage, and issue detection is critically important to your customers and your reputation, but it can also be a time-consuming endeavor. [GitLab Duo Workflow](https://about.gitlab.com/gitlab-duo/workflow/), agentic AI built on top of the most comprehensive DevSecOps platform, can help you quickly complete development tasks such as adding unit tests to a Java application. This tutorial demonstrates how by using this sample [Java project](https://gitlab.com/gitlab-da/playground/csaavedra/gdw/prodmgr-gdw).\n\n> GitLab Duo Workflow is currently in private beta. Join the [waitlist](https://about.gitlab.com/gitlab-duo/workflow/) to see what’s possible with AI agents that understand your entire SDLC.\n\n## Opening your project in VS Code\n\n1. Open the Java project in Visual Studio Code (after cloning it to your local machine). Ensure that you’re in a feature branch (not the main or default branch) before you start. If you’re already working on a merge request, it will have its own associated feature branch.\n\n2. (This step is optional.) Navigate to the file that defines the Java class for which you’d like to have GitLab Duo Workflow create unit tests. 
Inspect it so that you can later confirm that the generated unit tests do cover its class members. This is what you would see:\n\n![File that defines the Java class for which you’d like to have GitLab Duo Workflow create unit tests](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097627/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097627482.png)\n\n**Note:** We are assuming that you already enabled the GitLab Duo Workflow extension in your VS Code. If not, please refer to the [setup documentation](https://docs.gitlab.com/user/duo_workflow/#use-workflow-in-vs-code).\n\n3. Launch GitLab Duo Workflow by opening the VS Code command palette [Ctrl + Shift + P] and entering \"GitLab Duo Workflow\" in it and selecting **GitLab: Show Duo Workflow**. A tab will appear that looks like this:\n\n![Launching GitLab Duo Workflow with VS Code](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097628/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097627483.png)\n\n4. The next step is to add tests for the default constructor, the verification of the object creation, and the initial state of the properties of the Product class. To accomplish this, enter the following prompt in the text area in GitLab Duo Workflow:\n\n```unset\nCreate unit tests for class defined in the Product.java file and store the unit tests in its own file titled ProductTest.java\n```\n\n![Prompt area in GitLab Duo Workflow](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097628/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097627484.png)\n\n5. Click the **Start** button in the GitLab Duo Workflow window. Two new windows will appear: one in the center of the screen and one to the right. The one on the right displays the analysis that GitLab Duo Workflow is performing to come up with a plan that will achieve the goal as specified in your prompt. The plan is displayed in the center window. 
After the analysis and the plan are finished, you should see an output like this:\n\n![Analysis and plan generated by GitLab Duo Workflow](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097627/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750097627486.png)\n\n6. Review the analysis and plan and, if you are satisfied with them, click **Approve plan** at the bottom of the window.\n\n7. GitLab Duo Workflow will start executing the approved plan and making modifications to your project accordingly.\n\n8. Once the execution of the plan is finished, you will see a new directory `src/test/java/csaa/jspring/ProductManager` in the project with a new file in it named `ProductTest.java`, which contains all the unit tests for the `Product.java` class.\n\n![New directory in the project with a new file named `ProductTest.java`](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097628/Blog/Content%20Images/Blog/Content%20Images/image7_aHR0cHM6_1750097627488.png)\n\n9. Navigate to the newly created file `ProductTest.java` and you will see that it has some import statements underlined in red indicating some import errors:\n\n![`ProductTest.java` include imports statement and error indicators in red](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097628/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097627489.png)\n\nLet’s have GitLab Duo Workflow fix these for us.\n\n**Note:** We could have also asked GitLab Duo Workflow in our first prompt to update the `pom.xml` file accordingly. But since we didn’t, let’s fix these errors in a new workflow.\n\n## Launching a GitLab Duo Workflow to fix errors in generated code\n\n10. 
Start a new workflow by clicking on the **New workflow** button at the bottom of the analysis window on the right side of your screen.\n\n![New workflow button](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097628/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097627491.png)\n\n11. In the prompt text area, enter the following:\n\n```unset\nThe file ProductTest.java has an error “The import org.junit cannot be resolved”. Please fix it\n```\n\n12. After you approve the proposed plan, GitLab Duo Workflow starts its analysis by reading the current `pom.xml` file. It then edits it and removes the outdated JUnit dependency, and follows that with the addition of the correct dependency and version for JUnit. Lastly, it reads the `ProductTest.java` file to clear all the dependency errors.\n\n![GitLab Duo Workflow carrying out analysis by reading pom.xml](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097627/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097627492.png)\n\n## Watch the tutorial\n\nThrough the execution of this plan, GitLab Duo Workflow is effectively making updates to the project to achieve what was requested in the prompt, saving time and effort, and increasing productivity so that developers can spend more time innovating and creating value for their organization.\n\nIf you’d like to see what you read above in action, watch the following video:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/Tuj7TgqY81Q?si=RReuL1pUsLafvAzs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n> Sign up for the [GitLab Duo Workflow private beta waitlist](https://about.gitlab.com/gitlab-duo/workflow/) to see what’s possible with AI agents that understand your entire SDLC.\n\n## Read more about GitLab Duo Workflow and agentic AI\n\n- [GitLab Duo Workflow: Enterprise visibility and control 
for agentic AI](https://about.gitlab.com/blog/gitlab-duo-workflow-enterprise-visibility-and-control-for-agentic-ai/)\n- [GitLab Duo Workflow documentation](https://docs.gitlab.com/user/duo_workflow/)\n- [GitLab Duo](https://about.gitlab.com/gitlab-duo/)\n- [Agentic AI: Unlocking developer potential at scale (The Source)](https://about.gitlab.com/the-source/ai/agentic-ai-unlocking-developer-potential-at-scale/)\n",[704,478,9,678,680],{"slug":4430,"featured":6,"template":684},"use-gitlab-duo-workflow-to-improve-application-quality-assurance","content:en-us:blog:use-gitlab-duo-workflow-to-improve-application-quality-assurance.yml","Use Gitlab Duo Workflow To Improve Application Quality Assurance","en-us/blog/use-gitlab-duo-workflow-to-improve-application-quality-assurance.yml","en-us/blog/use-gitlab-duo-workflow-to-improve-application-quality-assurance",{"_path":4436,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4437,"content":4443,"config":4447,"_id":4449,"_type":13,"title":4450,"_source":15,"_file":4451,"_stem":4452,"_extension":18},"/en-us/blog/use-gitlab-to-detect-vulnerabilities",{"title":4438,"description":4439,"ogTitle":4438,"ogDescription":4439,"noIndex":6,"ogImage":4440,"ogUrl":4441,"ogSiteName":669,"ogType":670,"canonicalUrls":4441,"schema":4442},"How to use GitLab security features to detect log4j vulnerabilities","Detailed guidance to help customers detect vulnerabilities.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749666816/Blog/Hero%20Images/security-cover.png","https://about.gitlab.com/blog/use-gitlab-to-detect-vulnerabilities","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use GitLab security features to detect log4j vulnerabilities\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"GitLab\"}],\n        \"datePublished\": \"2021-12-15\",\n      
}",{"title":4438,"description":4439,"authors":4444,"heroImage":4440,"date":1653,"body":4445,"category":298,"tags":4446},[852],"\n_Note: Out of an abundance of caution, we encourage users who are using older versions of GitLab SAST and Dependency Scanning to update to the latest versions. You can find more information and recommended actions in [this blog post](/blog/updates-and-actions-to-address-logj-in-gitlab/)._\n\n_Any customer leveraging the [recommended includes](https://docs.gitlab.com/ee/user/application_security/sast/#configure-sast-in-your-cicd-yaml) for GitLab SAST has automatically received the new patched versions released Dec 13, 2021._\n\nIn light of the recently discovered log4j vulnerabilities, we would like to demonstrate how GitLab can be used to assess and remediate the log4j vulnerability as well as other security vulnerabilities that may exist in your projects.\n\nThe solutions shared here are: \n* [Dependency Scanning (Ultimate)](#use-gitlab-dependency-scanning-to-detect-and-mitigate-log4j-vulnerabilities)\n* [Container Scanning (Ultimate)](#detect-log4j-vulnerabilities-with-container-scanning)\n* [Cluster image scanning (Ultimate)](#detect-vulnerable-containers-in-your-kubernetes-cluster)\n* [Advanced Search (Premium)](#search-gitlab-projects-which-use-the-log4j-java-library)\n\nFree users wishing to access Premium and Ultimate features can do so by signing up for a [free trial](https://about.gitlab.com/free-trial/) of GitLab. \n\n### Use GitLab dependency scanning to detect and mitigate log4j vulnerabilities \n\n[Dependency scanning](https://docs.gitlab.com/ee/user/application_security/dependency_scanning) uses Gemnasium, which has been [updated](https://gitlab.com/gitlab-org/security-products/gemnasium-db/-/merge_requests/11381) to detect the log4j vulnerability, to automatically find security vulnerabilities in your software dependencies.\n\nLet’s try dependency scanning with a vulnerable project. 
Navigate to `Create new project > Import project > from URL` and use `https://github.com/christophetd/log4shell-vulnerable-app.git`. \n\nNext, navigate to `Security & Compliance > Security dashboard` and select to configure `Dependency Scanning`. This will create a new merge request enabling the dependency scanner, and you can immediately see the first [scanning results](https://gitlab.com/gitlab-de/playground/log4shell-vulnerable-app/-/pipelines/427550530/security) in the [merge request](https://gitlab.com/gitlab-de/playground/log4shell-vulnerable-app/-/merge_requests/1). \n\nAlternatively, you can edit the `.gitlab-ci.yml` configuration file and include the Dependency Scanning CI/CD template.\n\n```yaml\ninclude:\n- template: Security/Dependency-Scanning.gitlab-ci.yml\n```\n\nCreate a new merge request and wait for the pipeline to finish. Inspect the security reports. \n\n![GitLab security report](https://about.gitlab.com/images/blogimages/2021-12-15-use-gitlab-to-detect-log4j/image2.png){: .shadow}\n\nTake action on the critical vulnerability, open the details and create a new confidential security issue to follow-up. \n\n![Details of security vulnerability](https://about.gitlab.com/images/blogimages/2021-12-15-use-gitlab-to-detect-log4j/image9.png){: .shadow}\n\nAfter merging the MR to add dependency scanning, future MRs and code changes will detect the log4j vulnerabilities. This helps to avoid accidentally introducing older versions again. Open the `Security report` in `Security & Compliance` to get an overview of the vulnerabilities. \n\n![Panel showing security vulnerabilities](https://about.gitlab.com/images/blogimages/2021-12-15-use-gitlab-to-detect-log4j/image4.png){: .shadow}\n\nYou can customize the default settings using [CI/CD variables](https://docs.gitlab.com/ee/user/application_security/dependency_scanning/#customizing-the-dependency-scanning-settings), for example increasing the log level to debug with `SECURE_LOG_LEVEL: 'debug'`. 
\n\nThe project created in the examples above is located [here](https://gitlab.com/gitlab-de/playground/log4shell-vulnerable-app). \n\n### Detect log4j vulnerabilities with Container Scanning\n\nVulnerabilities in container images can come not only from the source code for the application, but also from packages and libraries that are installed on the base image. Images can inherit packages and vulnerabilities from other container images using the `FROM` keyword in a `Dockerfile`. [Container Scanning](https://docs.gitlab.com/ee/user/application_security/container_scanning/) helps detect these vulnerabilities for the Operating System including packages. The latest release adds language vulnerability scans as a new optional feature to help detect the log4j library vulnerability using the underlying scanners (Trivy as default, Grype optional). You can also use this capability to scan remote images using the `DOCKER_IMAGE` variable.\n\nYou can enable the `CS_DISABLE_LANGUAGE_VULNERABILITY_SCAN` variable to [scan for language specific packages](https://docs.gitlab.com/ee/user/application_security/container_scanning/#report-language-specific-findings). Please note that the additionally detected language dependencies can cause duplicates when you enable Dependency Scanning too. 
\n\nTo try it, navigate to `CI/CD > Pipeline Editor` and add the following configuration for Container Scanning:\n\n```yaml\ninclude:\n    - template: Security/Container-Scanning.gitlab-ci.yml\n\nvariables:\n    # Use Trivy or Grype as security scanners (Trivy is the default in the included template)\n    # CS_ANALYZER_IMAGE: \"registry.gitlab.com/security-products/container-scanning/trivy:4\"\n    # CS_ANALYZER_IMAGE: \"registry.gitlab.com/security-products/container-scanning/grype:4\"\n    # Detect language libraries as dependencies\n    CS_DISABLE_LANGUAGE_VULNERABILITY_SCAN: \"false\"\n    # Test the vulnerable log4j image \n    DOCKER_IMAGE: registry.gitlab.com/gitlab-de/playground/log4shell-vulnerable-app:latest \n```\n\nCreate a new branch, commit the changes and create a new MR. Once the pipeline has completed, inspect the security report in the MR. \n\n![List of vulnerabilities detected by container scanning](https://about.gitlab.com/images/blogimages/2021-12-15-use-gitlab-to-detect-log4j/image6.png){: .shadow}\n\nAfter merging the MR, you can view the vulnerabilities that exist in your default branch by navigating to `Security & Compliance > Vulnerability Report`. \n\n![Panel showing security vulnerabilities](https://about.gitlab.com/images/blogimages/2021-12-15-use-gitlab-to-detect-log4j/image7.png){: .shadow}\n\nInspect the vulnerability details to take action.\n\n![Detail on vulnerability](https://about.gitlab.com/images/blogimages/2021-12-15-use-gitlab-to-detect-log4j/image8.png){: .shadow}\n\nThis feature is available for customers using the default CI/CD templates, or the tagged `:4` scanner images from  GitLab's Container Registry (registry.gitlab.com). 
If you are using custom images, please rebuild them based on the latest release.\n\n### Detect vulnerable containers in your Kubernetes cluster\n\nYou can use [cluster image scanning in Kubernetes](https://docs.gitlab.com/ee/user/clusters/agent/vulnerabilities.html) which uses Starboard and [uses Trivy as a security scanner](https://aquasecurity.github.io/starboard/v0.13.1/integrations/vulnerability-scanners/trivy/) under the hood. Trivy’s vulnerability DB is able to detect CVE-2021-44228.\n\nLet’s try it! A quick way to bring up a Kubernetes cluster is in Civo Cloud. Create an account, and follow the documentation on [how to set up the CLI](https://www.civo.com/learn/kubernetes-cluster-administration-using-civo-cli) with an API token. Next, create a k3s cluster. \n\n```shell\n$ civo kubernetes create log4j\n$ civo kubernetes config log4j --save\n$ kubectl config use-context log4j\n$ kubectl get node\n```\n\n`registry.gitlab.com/gitlab-de/playground/log4shell-vulnerable-app:latest` provides a vulnerable container image we can deploy and then scan. \n\n```shell\n$ vim deployment.yaml\n\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: log4j\nspec:\n  replicas: 2\n  selector:\n    matchLabels:\n      app: log4j\n  template:\n    metadata:\n      labels:\n        app: log4j\n    spec:\n      containers:\n        - image: registry.gitlab.com/gitlab-de/playground/log4shell-vulnerable-app:latest\n          name: log4j\n\n$ kubectl apply -f deployment.yaml\n```\n\n```shell\n$ vim service.yaml\n\napiVersion: v1\nkind: Service\nmetadata:\n  name: log4j\n  labels:\n    app: log4j\nspec:\n  ports:\n    - name: \"log4j\"\n      port: 8080\n  selector:\n    app: log4j\n\n$ kubectl apply -f service.yaml\n```\n\nTest the application container with port forwarding, and open your browser at http://localhost:8080. You can close the connection with `ctrl+c`. 
\n\n```\n$ kubectl port-forward service/log4j 8080:8080\n```\n\nAfter the deployment is finished, let’s add the cluster image scanning integration. Follow the [Starboard Operator](https://aquasecurity.github.io/starboard/v0.13.1/operator/installation/kubectl/) installation documentation. Next, configure the [Kubernetes Cluster Image Scanning](https://docs.gitlab.com/ee/user/clusters/agent/vulnerabilities.html) with GitLab. \n\nThe final step is to integrate the CI/CD template and run the pipelines. \n\n```yaml\ninclude:\n  - template: Security/Cluster-Image-Scanning.gitlab-ci.yml\n```\n\nNavigate into `Security & Compliance > Vulnerability report` and select the `Operational vulnerabilities` tab to inspect the vulnerabilities. There you can see that `log4j` was detected in the deployed application running in our Kubernetes cluster 💜. \n\n![Panel showing security vulnerabilities](https://about.gitlab.com/images/blogimages/2021-12-15-use-gitlab-to-detect-log4j/image5.png){: .shadow}\n\nInspect the `log4j` vulnerability to see more details. \n\n![Detail on vulnerability](https://about.gitlab.com/images/blogimages/2021-12-15-use-gitlab-to-detect-log4j/image3.png){: .shadow}\n\nThe full project is located [here](https://gitlab.com/gitlab-de/playground/log4j-kubernetes-container-scanning).\n\n### Search GitLab projects which use the log4j Java library\n\nYou can use the [advanced search with scope blobs](https://docs.gitlab.com/ee/api/search.html#scope-blobs). Let’s try it! Navigate to your profile and add a new personal access token (PAT). Export it into the environment to access it in the next step:\n\n```shell\n$ export GITLAB_TOKEN=xxxxxxxxx\n\n$ curl --header \"PRIVATE-TOKEN: $GITLAB_TOKEN\" \"https://gitlab.com/api/v4/search?scope=blobs&search=log4j\" \n```\n\nTip: Install jq to format the JSON body. More insights in [this blog post](/blog/devops-workflows-json-format-jq-ci-cd-lint/). 
\n\n```shell\n$ curl --header \"PRIVATE-TOKEN: $GITLAB_TOKEN\" \"https://gitlab.com/api/v4/search?scope=blobs&search=log4j\" | jq\n\n  {\n    \"basename\": \"src/main/resources/log4j\",\n    \"data\": \"log4j.rootLogger=ERROR, stdout\\n \\n# Direct log messages to stdout\\n\",\n    \"path\": \"src/main/resources/log4j.properties\",\n    \"filename\": \"src/main/resources/log4j.properties\",\n    \"id\": null,\n    \"ref\": \"9a1df407e1a5365950a77f715163f6dba915fdf4\",\n    \"startline\": 2,\n    \"project_id\": 12345678\n  },\n\n```\n\nYou can use `jq` to further transform and filter the result set, for example only listing the paths where `log4j` as a string exists.  \n\n```\ncurl --header \"PRIVATE-TOKEN: $GITLAB_TOKEN\" \"https://gitlab.com/api/v4/search?scope=blobs&search=log4j\" | jq -c '.[] | select (.path | contains (\"log4j\"))' | jq\n```\n\n### Next steps \n\nThe GitLab security team is continuing to proactively monitor the situation and ensure our product and customers are secure. We will continue to communicate should we identify additional opportunities to help our customers and community navigate through this situation. 
Please [subscribe to our security alerts mailing list](https://about.gitlab.com/company/preference-center/).\n\nPlease visit the public [log4j-resources project](https://gitlab.com/gitlab-de/log4j-resources) and visit our [forum](https://forum.gitlab.com/c/devsecops-security/) for additional information.\n",[814,1225,9],{"slug":4448,"featured":6,"template":684},"use-gitlab-to-detect-vulnerabilities","content:en-us:blog:use-gitlab-to-detect-vulnerabilities.yml","Use Gitlab To Detect Vulnerabilities","en-us/blog/use-gitlab-to-detect-vulnerabilities.yml","en-us/blog/use-gitlab-to-detect-vulnerabilities",{"_path":4454,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4455,"content":4460,"config":4465,"_id":4467,"_type":13,"title":4468,"_source":15,"_file":4469,"_stem":4470,"_extension":18},"/en-us/blog/use-inputs-in-includable-files",{"title":4456,"description":4457,"ogTitle":4456,"ogDescription":4457,"noIndex":6,"ogImage":2124,"ogUrl":4458,"ogSiteName":669,"ogType":670,"canonicalUrls":4458,"schema":4459},"Define input parameters to includable CI/CD configuration files","This is the first milestone of the long-term roadmap of the CI/CD Components Catalog roadmap.","https://about.gitlab.com/blog/use-inputs-in-includable-files","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Define input parameters to includable CI/CD configuration files\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Itzik Gan Baruch\"}],\n        \"datePublished\": \"2023-05-08\",\n      }",{"title":4456,"description":4457,"authors":4461,"heroImage":2124,"date":4462,"body":4463,"category":769,"tags":4464},[1080],"2023-05-08","\nIn GitLab 15.11, we introduced an exciting new feature that allows users to [define input parameters for includable configuration files](/releases/2023/04/22/gitlab-15-11-released/#define-inputs-for-included-cicd-configuration). 
With the ability to use input parameters in [CI templates](https://docs.gitlab.com/ee/development/cicd/templates.html), you can replace any keyword in the template with a parameter, including stage, script, or job name. For example, you can add a prefix to all of the jobs to better isolate them from the pipeline into which you are including the configuration.\n\nThese input parameters can be declared as mandatory or optional for each configuration file, reducing the need for global variables and making your CI/CD templates more robust and isolated. The input parameters are scoped to the included configuration only, which means they have no impact on the rest of the pipeline. This allows you to declare and enforce constraints, for example by enforcing mandatory inputs for templates.\n\nThis development is the first milestone of the long-term roadmap of the [CI/CD Components Catalog](https://gitlab.com/groups/gitlab-org/-/epics/7462), a new feature that will allow users to search and reuse single-purpose CI/CD configuration units with specific parameters for their use case. If you want to learn more about this exciting new development, you can read our [blog post about our CI templates feature](/blog/how-to-build-reusable-ci-templates/).\n\nIn this technical blog post, we will provide step-by-step instructions on how to define CI/CD templates with input parameters and how to use them when including templates.\n\n## Step 1: Create a template YAML document\nThe first step is to create a template YAML document that describes what input arguments can be used with the template. The second part of the template is the definition of the jobs that may include references to values using the interpolation format `$[[ inputs.input-name ]]`. 
You should use three dash lines between the two parts.\n\nHere is an example of a deploy-template.yml:\n\n```yaml\nspec:\n  inputs:\n    website:\n    environment:\n      default: test\n---\ndeploy:\n  stage: deploy\n  script: echo \"deploy $[[ inputs.website ]] to $[[ inputs.environment ]]\"\n```\n\nIn this template, we have defined two input parameters: website and environment. The environment parameter has a default value. In the content section, we define a job that interpolates the input arguments.\n\n## Step 2: Include the template in the CI configuration\nIn your main CI configuration file `.gitlab-ci.yml`, include the template and add input parameters using the `inputs` keyword.\n\nHere is an example of including the `deploy-template.yml` with input parameters:\n\n```yaml\ninclude:\n  - local: deploy-template.yml\n    inputs:\n      website: my-website.example.com\n```\n\nIn this example, we included a local template in our project. Note: You can use `inputs` with the other [include types](https://docs.gitlab.com/ee/ci/yaml/index.html#include) such as `include:project`, `include:template`, `include:remote`.\n\nIn the below example, we use inputs to add a prefix to jobs name, and make the stage dynamic as well.\n\n```yaml\nspec:\n  inputs:\n    website:\n    environment:\n      default: staging\n    stage:\n      default: test\n    job_prefix:\n      default: \"\"\n---\n\"$[[ inputs.job_prefix ]]deploy\":\n  stage: $[[ inputs.stage ]]\n  script: echo \"deploy $[[ inputs.website ]] to $[[ inputs.environment ]]\"\n```\n\nThen we can include it from the `.gitlab-ci.yml` with the input parameters:\n\n```\ninclude:\n  - local: deploy-template.yml\n    inputs:\n      stage: deploy\n      website: http://example.com\n      environment: production\n      job_prefix: \"my-app-\"\n```\n\nYou can [fork](https://gitlab.com/tech-marketing/ci-interpolation-example) this project, which uses the above examples:\n\n- [Dynamic 
job](https://gitlab.com/tech-marketing/ci-interpolation-example/-/blob/main/dynamic-job.yml)\n- [Dynamic script](https://gitlab.com/tech-marketing/ci-interpolation-example/-/blob/main/deploy-template.yml)\n- [Main CI configuration](https://gitlab.com/tech-marketing/ci-interpolation-example/-/blob/main/.gitlab-ci.yml)\n\nFor more information, please use our [online documentation](https://docs.gitlab.com/ee/ci/yaml/includes.html#define-input-parameters-with-specinputs).\n\nThat's it! You have successfully created CI templates that accept inputs and used them in a pipeline configuration. By using templates with inputs, you can simplify pipeline configuration and make templates more modular and reusable.\n\nThank you to [Fabio Pitino](https://gitlab.com/fabiopitino) and [Grzegorz Bizon](https://gitlab.com/grzesiek) for their content reviews.",[9,773,108],{"slug":4466,"featured":6,"template":684},"use-inputs-in-includable-files","content:en-us:blog:use-inputs-in-includable-files.yml","Use Inputs In Includable Files","en-us/blog/use-inputs-in-includable-files.yml","en-us/blog/use-inputs-in-includable-files",{"_path":4472,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4473,"content":4478,"config":4484,"_id":4486,"_type":13,"title":4487,"_source":15,"_file":4488,"_stem":4489,"_extension":18},"/en-us/blog/using-bazel-to-speed-up-gitlab-ci-builds",{"title":4474,"description":4475,"ogTitle":4474,"ogDescription":4475,"noIndex":6,"ogImage":1053,"ogUrl":4476,"ogSiteName":669,"ogType":670,"canonicalUrls":4476,"schema":4477},"How to use Bazel with GitLab to speed up your builds","We explain why Bazel and GitLab CI are a great match to speed up your build times.","https://about.gitlab.com/blog/using-bazel-to-speed-up-gitlab-ci-builds","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to use Bazel with GitLab to speed up your builds\",\n        \"author\": 
[{\"@type\":\"Person\",\"name\":\"Jason Yavorska\"}],\n        \"datePublished\": \"2020-09-01\",\n      }",{"title":4474,"description":4475,"authors":4479,"heroImage":1053,"date":4481,"body":4482,"category":769,"tags":4483},[4480],"Jason Yavorska","2020-09-01","\n[Bazel](https://bazel.build/) is a useful tool that can be used with GitLab CI to push your build pipelines into overdrive.\n\nFor maximum correctness, [CI/CD](/topics/ci-cd/) systems will usually rebuild all of the artifacts from scratch on every run. This method is considered safer since artifacts from one pipeline won't negatively impact subsequent pipelines, and is a lesson learned from older CI tools where the agent state was persistent over time – so you never really knew if you could do a build from scratch. The problem with redoing everything every time though, is that it's slow. GitLab improves upon this by using caches and shared artifacts, but there's only so far that approach can take you.\n\nBazel is a good example of tackling things in a different way – it speeds up builds by only rebuilding what is necessary. On the surface, this might sound a lot like just having a cache and doing an incremental build. But the main difference is that Bazel is really good at not only being fast, but also [correct](https://docs.bazel.build/versions/3.4.0/guide.html#correct-incremental-rebuilds). Bazel is much more reliable than traditional `Makefiles` or build scripts, which are notorious for occasionally forcing you to `make clean` because they get into some inconsistent state they can't recover from.\n\nAs of now, Bazel supports building Java, C, C++, Python, and Objective-C, and can also produce packages for deployment on Android or iOS. 
More capabilities are being added all the time, as well as open source rule sets for other languages like Go, Scala and many more, so be sure to check their latest [product overview](https://docs.bazel.build/versions/3.4.0/bazel-overview.html) for updates.\n\n## Setting up Bazel builds in GitLab CI\n\nSetting up Bazel for builds is very straightforward. A job like the following does everything you need:\n\n```yaml\nvariables:\n  BAZEL_DIGEST_VERSION: \"f670e9aec235aa23a5f068566352c5850a67eb93de8d7a2350240c68fcec3b25\" # Bazel 3.4.1\n\nbuild:\n  image:\n    name: gcr.io/cloud-marketplace-containers/google/bazel@sha256:$BAZEL_DIGEST_VERSION\n    entrypoint: [\"\"]\n  stage: build\n  script:\n    - bazel --output_base output build //main/...\n  artifacts:\n    paths:\n      - bazel-bin/main/hello-world\n  cache:\n    key: $BAZEL_DIGEST_VERSION\n    paths:\n      - output\n```\n\nWhat this script does is define a job called `build` which uses the official Google Bazel image. We track the digest version for two reasons: First, to ensure immutability (tags can be updated), and second to use it as a cache key so that the cache is invalidated whenever we upgrade the Bazel version. We also override the entry point because we want to pass our own parameters to our `bazel` invocation. The second parameter is the [label](https://docs.bazel.build/versions/master/glossary.html#label) of the [target](https://docs.bazel.build/versions/master/glossary.html#target) we want to build. A [target pattern](https://docs.bazel.build/versions/master/glossary.html#target-pattern) can also be used here to tell Bazel to build multiple things (and what they depend on), rather than one thing (and what it depends on).\n\nThe first parameter (`--output_base output`) is to help Bazel work with a security feature of the GitLab runner. 
By default, the runner will [not access files outside of the build dir](https://docs.gitlab.com/ee/ci/yaml/#artifactspaths), but Bazel places its own cache outside by default. This parameter tells Bazel to place it inside, where the runner can access it. The next two sections (`artifacts` and `cache`), tell the runner where the output file you want to keep is, and importantly for Bazel, where the cache is that you want to persist. Note that until [this issue to allow for traversing symlinks](https://gitlab.com/gitlab-org/gitlab/-/issues/19746) is resolved, you must give the full path to the specific outputs you want to keep within the `bazel-bin` folder.\n\nWhen this job runs, it places the current cache (if it exists, and only for the current `BAZEL_DIGEST_VERSION`) in the `output` folder, and then runs `bazel` to build the `main:hello-world` target. It saves the artifact from `bazel-bin/main/hello-world`, and then caches everything in `output` for the next run.\n\n### Bazel: notes on caching\n\nIn this example we've set up Bazel to work with GitLab caching, and this is how we currently use it internally. If you already have Bazel remote cache (or even better, Bazel remote execution), there is no need to set up GitLab CI cache: It actually would likely make things slower since in that case there is no need to download and unpack the cache at all. Setting up remote caching or remote execution are more advanced and outside of the scope of this article, but are even better ways to speed up the build. Until then, using a GitLab cache can be a good interim step. If you're interested in learning more about remote cache/remote execution, this [BazelCon video](https://www.youtube.com/watch?v=MyuJRUwT5LI&t=1017s) or Bazel's official [documentation on remote caching](https://docs.bazel.build/versions/master/remote-caching.html) may be helpful.\n\n## Building and testing with Bazel\n\nUsing Bazel to run your tests is just as easy, and there are nice benefits to doing so. 
If you can rely on accurately knowing what has changed, you can be more selective in doing incremental tests and have the confidence that tests that were skipped were truly unnecessary. This is also quite easy to set up using Bazel, but one thing to consider is that running builds and tests all at once (rather than splitting build and test into different jobs) is going to be more efficient. You can do that by using a build job that looks like this:\n\n```yaml\nvariables:\n  BAZEL_DIGEST_VERSION: \"f670e9aec235aa23a5f068566352c5850a67eb93de8d7a2350240c68fcec3b25\" # 3.4.1\n\nbuild:\n  image:\n    name: gcr.io/cloud-marketplace-containers/google/bazel@sha256:$BAZEL_DIGEST_VERSION\n    entrypoint: [\"\"]\n  stage: build\n  script:\n    - bazel --output_base output test //main/...\n  artifacts:\n    paths:\n      - bazel-bin/main/hello-world\n  cache:\n    key: $BAZEL_DIGEST_VERSION\n    paths:\n      - output\n```\n\nIn a build that includes all tests, you typically want to run everything that changed. That's usually done using an invocation like `bazel test //main/...` which:\n\n1. Finds all targets (referred to as `...`) in the workspace location (`//` denotes the root of the [workspace](https://docs.bazel.build/versions/master/glossary.html#workspace)), so we are referring to `main` relative to the root.) Note that you probably don't want to include a bare `//` (without `main`), since that will include the custom `output` folder and that is probably not what you intended.\n1. Builds usual targets.\n1. Builds test targets.\n1. Runs test targets.\n\nOnly using the `test` parameter works because `bazel test` not only runs tests, but also builds everything that matched the target pattern by default. Individual targets can be excluded from being matched by `...` by applying a `manual` tag to them ([see `tags` in the Bazel glossary table](https://docs.bazel.build/versions/master/be/common-definitions.html#common-attributes)). 
One callout - in the example project we're building ([details below](#examples)), there actually aren't any tests, so this fails because we requested a test pass and there weren't any. If your project has tests in it, it will work fine.\n\n## Examples using Bazel\n\nWe're actually using Bazel here at GitLab to build our [GitLab Agent for Kubernetes](https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent). If you're interested in seeing a more complex, complete implementation using Bazel then that's a great one to explore. The simple example from this blog can be found live in [my own personal project](https://gitlab.com/jyavorska/testbazel), and it is based on the [stage three build tutorial](https://docs.bazel.build/versions/3.4.0/tutorial/cpp.html) from Bazel's own documentation.\n\nBazel itself is also highly configurable through its own `.bazelrc`, `BUILD` files, and more. The [user documentation for Bazel](https://docs.bazel.build/versions/master/guide.html) contains several examples along with an exhaustive configuration reference.\n\n## What's next with Bazel?\n\nWe are considering using Bazel in a few more areas within GitLab:\n\n- In an ideal world, after a minor change, the build and test should only take a few seconds to complete. When the jobs are fast enough, it could even be triggered via an editor on every change before being committed to git at all. This kind of capability could be integrated with the Web IDE, giving you immediate insight into the results of your change. We have an issue related to [making it easier to run pipelines from the Web IDE](https://gitlab.com/gitlab-org/gitlab/-/issues/213604) that could take advantage of this.\n- By default, GitLab uses [a gem we created](https://gitlab.com/gitlab-org/ci-cd/test_file_finder/) (which is available in this [template](https://docs.gitlab.com/ee/ci/testing/fail_fast_testing.html) for test execution optimization), but all we're doing so far is running the riskiest tests first. 
As Bazel grows and adds support for more languages, it could potentially become a standard for this purpose, allowing you to run even fewer tests (and among those, the riskiest ones first). We have an [epic](https://gitlab.com/groups/gitlab-org/-/epics/4121) where you can track progress toward this idea.\n- Finally, Bazel also supports distributed builds and caching, opening the door to autoscaling compilation and test capacity alongside runner capacity, or even sharing the same capacity for whatever jobs are needed at a given moment. This function would require managing your own capacity for this purpose, but in the future we could imagine this being added to GitLab. We have an [issue for exploring different ways Bazel could support distributed jobs](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26663) using the GitLab Runner.\n\n## Tell us your Bazel success stories\n\nAre you using Bazel with GitLab CI? We'd love your feedback on what features we could add to make things work better and hear about the performance gains you've found from the combo. 
Please let us know in the Meta issue below, or contact [Jason Yavorska](https://twitter.com/j4yav) on Twitter.\n\n## Related content\n\n- [Bazel website](https://bazel.build/)\n- [Meta issue for deeper integration in GitLab](https://gitlab.com/gitlab-org/gitlab/-/issues/201484)\n- [Bazel blog on integrating it with CI systems](https://blog.bazel.build/2016/01/27/continuous-integration.html)\n- [GitLab CI quick start](https://docs.gitlab.com/ee/ci/quick_start/)\n\nCover image by [Lucas van Oort](https://unsplash.com/@switch_dtp_fotografie) on [Unsplash](https://unsplash.com)\n{: .note}\n",[108,230,9],{"slug":4485,"featured":6,"template":684},"using-bazel-to-speed-up-gitlab-ci-builds","content:en-us:blog:using-bazel-to-speed-up-gitlab-ci-builds.yml","Using Bazel To Speed Up Gitlab Ci Builds","en-us/blog/using-bazel-to-speed-up-gitlab-ci-builds.yml","en-us/blog/using-bazel-to-speed-up-gitlab-ci-builds",{"_path":4491,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4492,"content":4498,"config":4504,"_id":4506,"_type":13,"title":4507,"_source":15,"_file":4508,"_stem":4509,"_extension":18},"/en-us/blog/using-child-pipelines-to-continuously-deploy-to-five-environments",{"title":4493,"description":4494,"ogTitle":4493,"ogDescription":4494,"noIndex":6,"ogImage":4495,"ogUrl":4496,"ogSiteName":669,"ogType":670,"canonicalUrls":4496,"schema":4497},"Using child pipelines to continuously deploy to five environments","Learn how to manage continuous deployment to multiple environments, including temporary, on-the-fly sandboxes, with a minimalist GitLab workflow.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097012/Blog/Hero%20Images/Blog/Hero%20Images/AdobeStock_397632156_3Ldy1urjMStQCl4qnOBvE0_1750097011626.jpg","https://about.gitlab.com/blog/using-child-pipelines-to-continuously-deploy-to-five-environments","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Using child 
pipelines to continuously deploy to five environments\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Olivier Dupré\"}],\n        \"datePublished\": \"2024-09-26\",\n      }",{"title":4493,"description":4494,"authors":4499,"heroImage":4495,"date":4501,"body":4502,"category":769,"tags":4503},[4500],"Olivier Dupré","2024-09-26","DevSecOps teams sometimes require the ability to manage continuous deployment across multiple environments — and they need to do so without changing their workflows. The [GitLab DevSecOps platform](https://about.gitlab.com/) supports this need, including temporary, on-the-fly sandboxes, with a minimalist approach. In this article, you'll learn how to run continuous deployment of infrastructure using Terraform, over multiple environments.\n\nThis strategy can easily be applied to any project, whether it is infrastructure as code (IaC) relying on another technology, such as [Pulumi](https://www.pulumi.com/) or [Ansible](https://www.ansible.com/), source code in any language, or a monorepo that mixes many languages.\n\nThe final pipeline that you will have at the end of this tutorial will deploy:\n\n* A temporary **review** environment for each feature branch.\n* An **integration** environment, easy to wipe out and deployed from the main branch.\n* A **QA** environment, also deployed from the main branch, to run quality assurance steps.\n* A **staging** environment, deployed for every tag. This is the last round before production.\n* A **production** environment, just after the staging environment. 
This one is triggered manually for demonstration, but can also be continuously deployed.\n\n>Here is the legend for the flow charts in this article:\n> * Round boxes are the GitLab branches.\n> * Square boxes are the environments.\n> * Text on the arrows are the actions to flow from one box to the next.\n> * Angled squares are decision steps.\n\n\u003Cpre class=\"mermaid\">\nflowchart LR\n    A(main) -->|new feature| B(feature_X)\n\n    B -->|auto deploy| C[review/feature_X]\n    B -->|merge| D(main)\n    C -->|destroy| D\n\n    D -->|auto deploy| E[integration]\n    E -->|manual| F[qa]\n\n    D -->|tag| G(X.Y.Z)\n    F -->|validate| G\n\n    G -->|auto deploy| H[staging]\n    H -->|manual| I{plan}\n    I -->|manual| J[production]\n\u003C/pre>\n\nOn each step, you'll learn the [why](#why) and the [what](#what) before moving to the [how](#how). This will help you fully understand and replicate this tutorial.\n\n## Why\n\n* [Continuous integration](https://about.gitlab.com/topics/ci-cd/#what-is-continuous-integration-ci) is almost a de facto standard. Most companies have implemented CI pipelines or are willing to standardize their practice.\n\n* [Continuous delivery](https://about.gitlab.com/topics/ci-cd/#what-is-continuous-delivery-cd), which pushes artifacts to a repository or registry at the end of the CI pipeline, is also popular.\n\n* Continuous deployment, which goes further and deploys these artifacts automatically, is less widespread. When it has been implemented, we see it essentially in the application field. When discussing continuously deploying  infrastructure, the picture seems less obvious, and is more about managing several environments. In contrast, testing, securing, and verifying the infrastructure's code seems more challenging. And this is one of the fields where DevOps has not yet reached its maturity. 
One of the other fields is to shift security left, integrating security teams and, more importantly, security concerns, earlier in the delivery lifecycle, to upgrade from DevOps to ***DevSecOps***.\n\nGiven this high-level picture, in this tutorial, you will work toward a simple, yet efficient way to implement DevSecOps for your infrastructure through the example of deploying resources to five environments, gradually progressing from development to production.\n\n__Note:__ Even if I advocate embracing a FinOps approach and reducing the number of environments, sometimes there are excellent reasons to maintain more than just dev, staging, and production. So, please, adapt the examples below to match your needs.\n\n## What\n\nThe rise of cloud technology has driven the usage of IaC. Ansible and Terraform were among the first to pave the road here. OpenTofu, Pulumi, AWS CDK, Google Deploy Manager, and many others joined the party.\n\nDefining IaC is a perfect solution to feel safe when deploying infrastructure. You can test it, deploy it, and replay it again and again until you reach your goal.\n\nUnfortunately, we often see companies maintain several branches, or even repositories, for each of their target environments. And this is where the problems start. They are no longer enforcing a process. They are no longer ensuring that any change in the production code base has been accurately tested in previous environments. And they start seeing drifts from one environment to the other.\n\nI realized this tutorial was necessary when, at a conference I attended, every participant said they do not have a workflow that enforces the infrastructure to be tested thoroughly before being deployed to production. And they all agreed that sometimes they patch the code directly in production. Sure, this is fast, but is it safe? How do you report back to previous environments? How do you ensure there are no side effects? 
How do you control whether you are putting your company at risk with new vulnerabilities being pushed too quickly in production?\n\nThe question of *why* DevOps teams deploy directly to production is critical here. Is it because the pipeline could be more efficient or faster? Is there no automation? Or, even worse, because there is *no way to test accurately outside of production*?\n\nIn the next section, you will learn how to implement automation for your infrastructure and ensure that your DevOps team can effectively test what you are doing before pushing to any environment impacting others. You will see how your code is secured and its deployment is controlled, end-to-end.\n\n## How\n\nAs mentioned earlier, there are many IaC languages out there nowadays and we objectively cannot cover *all* of them in a single article. So, I will rely on a basic Terraform code running on Version 1.4. Please do not focus on the IaC language itself but instead on the process that you could apply to your own ecosystem.\n\n### The Terraform code\n\nLet's start with a fundamental Terraform code.\n\nWe are going to deploy to AWS, a virtual private cloud (VPC), which is a virtual network. In that VPC, we will deploy a public and a private subnet. As their name implies, they are subnets of the main VPC. Finally, we will add an Elastic Cloud Compute (EC2) instance (a virtual machine) in the public subnet.\n\nThis demonstrates the deployment of four resources without adding too much complexity. 
The idea is to focus on the pipeline, not the code.\n\nHere is the target we want to reach for your repository.\n\n![target for repository](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097033/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097033415.png)\n\nLet’s do it step by step.\n\nFirst, we declare all resources in a `terraform/main.tf` file:\n\n```terraform\nprovider \"aws\" {\n  region = var.aws_default_region\n}\n\nresource \"aws_vpc\" \"main\" {\n  cidr_block = var.aws_vpc_cidr\n\n  tags = {\n    Name     = var.aws_resources_name\n  }\n}\n\nresource \"aws_subnet\" \"public_subnet\" {\n  vpc_id     = aws_vpc.main.id\n  cidr_block = var.aws_public_subnet_cidr\n\n  tags = {\n    Name = \"Public Subnet\"\n  }\n}\nresource \"aws_subnet\" \"private_subnet\" {\n  vpc_id     = aws_vpc.main.id\n  cidr_block = var.aws_private_subnet_cidr\n\n  tags = {\n    Name = \"Private Subnet\"\n  }\n}\n\nresource \"aws_instance\" \"sandbox\" {\n  ami           = var.aws_ami_id\n  instance_type = var.aws_instance_type\n\n  subnet_id = aws_subnet.public_subnet.id\n\n  tags = {\n    Name     = var.aws_resources_name\n  }\n}\n```\n\nAs you can see, there are a couple of variables that are needed for this code, so let's declare them in a `terraform/variables.tf` file:\n\n```terraform\nvariable \"aws_ami_id\" {\n  description = \"The AMI ID of the image being deployed.\"\n  type        = string\n}\n\nvariable \"aws_instance_type\" {\n  description = \"The instance type of the VM being deployed.\"\n  type        = string\n  default     = \"t2.micro\"\n}\n\nvariable \"aws_vpc_cidr\" {\n  description = \"The CIDR of the VPC.\"\n  type        = string\n  default     = \"10.0.0.0/16\"\n}\n\nvariable \"aws_public_subnet_cidr\" {\n  description = \"The CIDR of the public subnet.\"\n  type        = string\n  default     = \"10.0.1.0/24\"\n}\n\nvariable \"aws_private_subnet_cidr\" {\n  description = \"The CIDR of the private subnet.\"\n  type        = string\n 
 default     = \"10.0.2.0/24\"\n}\n\nvariable \"aws_default_region\" {\n  description = \"Default region where resources are deployed.\"\n  type        = string\n  default     = \"eu-west-3\"\n}\n\nvariable \"aws_resources_name\" {\n  description = \"Default name for the resources.\"\n  type        = string\n  default     = \"demo\"\n}\n```\n\nAlready, we are almost good to go on the IaC side. What's missing is a way to share the Terraform states. For those who don't know, Terraform works schematically doing the following:\n\n* `plan` checks the differences between the current state of the infrastructure and what is defined in the code. Then, it outputs the differences.\n* `apply` applies the differences in the `plan` and updates the state.\n\nFirst round, the state is empty, then it is filled with the details (ID, etc.) of the resources applied by Terraform.\n\nThe problem is: Where is that state stored? How do we share it so several developers can collaborate on code?\n\nThe solution is fairly simple: Leverage GitLab to store and share the state for you through a [Terraform HTTP backend](https://docs.gitlab.com/ee/user/infrastructure/iac/terraform_state.html).\n\nThe first step in using this backend is to create the most simple `terraform/backend.tf` file. The second step will be handled in the pipeline.\n\n```terraform\nterraform {\n  backend \"http\" {\n  }\n}\n```\n\nEt voilà! We have a bare minimum Terraform code to deploy these four resources. 
We will provide the variable values at the runtime, so let's do that later.\n\n### The workflow\n\nThe workflow that we are going to implement now is the following:\n\n\u003Cpre class=\"mermaid\">\nflowchart LR\n    A(main) -->|new feature| B(feature_X)\n\n    B -->|auto deploy| C[review/feature_X]\n    B -->|merge| D(main)\n    C -->|destroy| D\n\n    D -->|auto deploy| E[integration]\n    E -->|manual| F[qa]\n\n    D -->|tag| G(X.Y.Z)\n    F -->|validate| G\n\n    G -->|auto deploy| H[staging]\n    H -->|manual| I{plan}\n    I -->|manual| J[production]\n\u003C/pre>\n\n1. Create a **feature** branch. This will continuously run all scanners on the code to ensure that it is still compliant and secured. This code will be continuously deployed to a temporary environment `review/feature_branch` with the name of the current branch. This is a safe environment where the developers and operations teams can test their code without impacting anybody. This is also where we will enforce the process, like enforcing code reviews and running scanners, to ensure that the quality and security of the code are acceptable and do not put your assets at risk. The infrastructure deployed by this branch is automatically destroyed when the branch is closed. This helps you keep your budget under control.\n\n\u003Cpre class=\"mermaid\">\nflowchart LR\n    A(main) -->|new feature| B(feature_X)\n\n    B -->|auto deploy| C[review/feature_X]\n    B -->|merge| D(main)\n    C -->|destroy| D\n\u003C/pre>\n\n2. Once approved, the feature branch will be **merged** into the main branch. This is a [protected branch](https://docs.gitlab.com/ee/user/project/protected_branches.html) where no one can push. This is mandatory to ensure that every change request to production is thoroughly tested. That branch is also continuously deployed. The target here is the `integration` environment. 
To keep this environment slightly more stable, its deletion is not automated but can be triggered manually.\n\n\u003Cpre class=\"mermaid\">\nflowchart LR\n    D(main) -->|auto deploy| E[integration]\n\u003C/pre>\n\n3. From there, manual approval is required to trigger the next deployment. This will deploy the main branch to the `qa` environment. Here, I have set a rule to prevent deletion from the pipeline. The idea is that this environment should be quite stable (after all, it's already the third environment), and I would like to prevent deletion by mistake. Feel free to adapt the rules to match your processes.\n\n\u003Cpre class=\"mermaid\">\nflowchart LR\n    D(main)-->|auto deploy| E[integration]\n    E -->|manual| F[qa]\n\u003C/pre>\n\n4. To proceed, we will need to **tag** the code. We are relying on [protected tags](https://docs.gitlab.com/ee/user/project/protected_tags.html) here to ensure that only a specific set of users are allowed to deploy to these last two environments. This will immediately trigger a deployment to the `staging` environment.\n\n\u003Cpre class=\"mermaid\">\nflowchart LR\n    D(main) -->|tag| G(X.Y.Z)\n    F[qa] -->|validate| G\n\n    G -->|auto deploy| H[staging]\n\u003C/pre>\n\n5. Finally, we are landing to `production`. When discussing infrastructure, it is often challenging to deploy progressively (10%, 25%, etc.), so we will deploy the whole infrastructure. Still, we control that deployment with a manual trigger of this last step. 
And to enforce maximum control on this highly critical environment, we will control it as a [protected environment](https://docs.gitlab.com/ee/ci/environments/protected_environments.html).\n\n\u003Cpre class=\"mermaid\">\nflowchart LR\n    H[staging] -->|manual| I{plan}\n    I -->|manual| J[production]\n\u003C/pre>\n\n### The pipeline\n\nTo implement the above [workflow](#the-workflow), we are now going to implement a pipeline with two [downstream pipelines](https://docs.gitlab.com/ee/ci/pipelines/downstream_pipelines.html).\n\n#### The main pipeline\n\nLet's start with the main pipeline. This is the one that will be triggered automatically on any **push to a feature branch**, any **merge to the default branch**, or any **tag**. *The one* that will do true **continuous deployment** to the following environments: `dev`, `integration`, and `staging`. And it is declared in the `.gitlab-ci.yml` file at the root of your project.\n\n![the repository target](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097033/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097033417.png)\n\n```yml\nstages:\n  - test\n  - environments\n\n.environment:\n  stage: environments\n  variables:\n    TF_ROOT: terraform\n    TF_CLI_ARGS_plan: \"-var-file=../vars/$variables_file.tfvars\"\n  trigger:\n    include: .gitlab-ci/.first-layer.gitlab-ci.yml\n    strategy: depend            # Wait for the triggered pipeline to successfully complete\n    forward:\n      yaml_variables: true      # Forward variables defined in the trigger job\n      pipeline_variables: true  # Forward manual pipeline variables and scheduled pipeline variables\n\nreview:\n  extends: .environment\n  variables:\n    environment: review/$CI_COMMIT_REF_SLUG\n    TF_STATE_NAME: $CI_COMMIT_REF_SLUG\n    variables_file: review\n    TF_VAR_aws_resources_name: $CI_COMMIT_REF_SLUG  # Used in the tag Name of the resources deployed, to easily differentiate them\n  rules:\n    - if: $CI_COMMIT_BRANCH && 
$CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n\nintegration:\n  extends: .environment\n  variables:\n    environment: integration\n    TF_STATE_NAME: $environment\n    variables_file: $environment\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n\nstaging:\n  extends: .environment\n  variables:\n    environment: staging\n    TF_STATE_NAME: $environment\n    variables_file: $environment\n  rules:\n    - if: $CI_COMMIT_TAG\n\n#### TWEAK\n# This tweak is needed to display vulnerability results in the merge widgets.\n# As soon as this issue https://gitlab.com/gitlab-org/gitlab/-/issues/439700 is resolved, the `include` instruction below can be removed.\n# Until then, the SAST IaC scanners will run in the downstream pipelines, but their results will not be available directly in the merge request widget, making it harder to track them.\n# Note: This workaround is perfectly safe and will not slow down your pipeline.\ninclude:\n  - template: Security/SAST-IaC.gitlab-ci.yml\n#### END TWEAK\n\n```\n\nThis pipeline runs only two stages: `test` and `environments`. The former is needed for the *TWEAK* to run scanners. The latter triggers a child pipeline with a different set of variables for each case defined above (push to the branch, merge to the default branch, or tag).\n\nWe are adding here a dependency with the keyword [strategy:depend](https://docs.gitlab.com/ee/ci/yaml/index.html#triggerstrategy) on our child pipeline so the pipeline view in GitLab will be updated only once the deployment is finished.\n\nAs you can see here, we are defining a base job, [hidden](https://docs.gitlab.com/ee/ci/jobs/#hide-jobs), and we are extending it with specific variables and rules to trigger only one deployment for each target environment.\n\nBesides the [predefined variables](https://docs.gitlab.com/ee/ci/variables/predefined_variables.html), we are using two new entries that we need to define:\n1. 
[The variables specific](#the-variable-definitions) to each environment: `../vars/$variables_file.tfvars`\n2. [The child pipeline](#the-child-pipeline), defined in `.gitlab-ci/.first-layer.gitlab-ci.yml`\n\nLet's start with the smallest part, the variable definitions.\n\n### The variable definitions\n\nWe are going here to mix two solutions to provide variables to Terraform:\n\n* The first one using [.tfvars files](https://developer.hashicorp.com/terraform/language/values/variables#variable-definitions-tfvars-files) for all non-sensitive input, which should be stored within GitLab.\n\n![solution one to provide variables to Terraform](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097034/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097033419.png)\n\n* The second using [environment variables](https://developer.hashicorp.com/terraform/language/values/variables#environment-variables) with the prefix `TF_VAR`. That second way to inject variables, associated with the GitLab capacity to [mask variables](https://docs.gitlab.com/ee/ci/variables/#mask-a-cicd-variable), [protect them](https://docs.gitlab.com/ee/ci/variables/#protect-a-cicd-variable), and [scope them to environments](https://docs.gitlab.com/ee/ci/environments/index.html#limit-the-environment-scope-of-a-cicd-variable) is a powerful solution to **prevent sensitive information leakages**. 
(If you consider your production’s private CIDR very sensitive, you could protect it like this, ensuring it is only available for the `production` environment, for pipelines running against protected branches and tags, and that its value is masked in the job’s logs.)\n\n![solution two to provide variables to Terraform](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097034/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097033422.png)\n\nAdditionally, each variable file should be controlled through a [`CODEOWNERS` file](https://docs.gitlab.com/ee/user/project/codeowners/) to set who can modify each of them.\n\n```\n[Production owners] \nvars/production.tfvars @operations-group\n\n[Staging owners]\nvars/staging.tfvars @odupre @operations-group\n\n[CodeOwners owners]\nCODEOWNERS @odupre\n```\n\nThis article is not a Terraform training, so we will go very fast and simply show here the `vars/review.tfvars` file. Subsequent environment files are, of course, very similar. Just set the non-sensitive variables and their values here.\n\n```shell\naws_vpc_cidr = \"10.1.0.0/16\"\naws_public_subnet_cidr = \"10.1.1.0/24\"\naws_private_subnet_cidr = \"10.1.2.0/24\"\n```\n\n#### The child pipeline\n\nThis one is where the actual work is done. So, it is slightly more complex than the first one. But there is no difficulty here that we cannot overcome together!\n\nAs we have seen in the definition of the [main pipeline](#the-main-pipeline), that downstream pipeline is declared in the file `.gitlab-ci/.first-layer.gitlab-ci.yml`.\n\n![Downstream pipeline declared in file](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097033/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097033424.png)\n\nLet's break it down into small chunks. We'll see the big picture at the end.\n\n##### Run Terraform commands and secure the code\n\nFirst, we want to run a pipeline for Terraform. We, at GitLab, are open source. 
So, our Terraform template is open source. And you simply need to include it. This can be achieved with the following snippet:\n\n```yml\ninclude:\n  - template: Terraform.gitlab-ci.yml\n```\n\nThis template runs for you the Terraform checks on the formatting and validates your code, before planning and applying it. It also allows you to destroy what you have deployed.\n\nAnd, because GitLab is a single, unified DevSecOps platform, we are also automatically including two security scanners within that template to find potential threats in your code and warn you before you deploy it to the next environments.\n\nNow that we have checked, secured, built, and deployed our code, let's do some tricks.\n\n##### Share cache between jobs\n\nWe will cache the job results to reuse them in subsequent pipeline jobs. This is as simple as adding the following piece of code:\n\n```yml\ndefault:\n  cache:  # Use a shared cache or tagged runners to ensure terraform can run on apply and destroy\n    - key: cache-$CI_COMMIT_REF_SLUG\n      fallback_keys:\n        - cache-$CI_DEFAULT_BRANCH\n      paths:\n        - .\n```\n\nHere, we are defining a different cache for each commit, falling back to the main branch name if needed.\n\nIf we look carefully at the template that we are using, we can see that it has some rules to control when jobs are run. We want to run all controls (both QA and security) on all branches. So, we are going to override these settings.\n\n##### Run controls on all branches\n\nGitLab templates are a powerful feature where one can override only a piece of the template. Here, we are interested only in overwriting the rules of some jobs to always run quality and security checks. 
Everything else defined for these jobs will stay as defined in the template.\n\n```yml\nfmt:\n  rules:\n    - when: always\n\nvalidate:\n  rules:\n    - when: always\n\nkics-iac-sast:\n  rules:\n    - when: always\n\niac-sast:\n  rules:\n    - when: always\n```\n\nNow that we have enforced the quality and security controls, we want to differentiate how the main environments (integration and staging) in the [workflow](#the-workflow) and review environments behave. Let's start by defining the main environment’s behavior, and we will tweak this configuration for the review environments.\n\n##### CD to integration and staging\n\nAs defined earlier, we want to deploy the main branch and the tags to these two environments. We are adding rules to control that on both the `build` and `deploy` jobs. Then, we want to enable `destroy` only for the `integration` as we have defined `staging` to be too critical to be deleted with a single click. This is error-prone and we don't want to do that.\n\nFinally, we are linking the `deploy` job to the `destroy` one, so we can `stop` the environment directly from GitLab GUI.\n\nThe `GIT_STRATEGY` is here to prevent retrieving the code from the source branch in the runner when destroying. This would fail if the branch has been deleted manually, so we are relying on the cache to get everything we need to run the Terraform instructions.\n\n```yml\nbuild:  # terraform plan\n  environment:\n    name: $TF_STATE_NAME\n    action: prepare\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    - if: $CI_COMMIT_TAG\n\ndeploy: # terraform apply --> automatically deploy on corresponding env (integration or staging) when merging to default branch or tagging. 
Second layer environments (qa and production) will be controlled manually\n  environment: \n    name: $TF_STATE_NAME\n    action: start\n    on_stop: destroy\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    - if: $CI_COMMIT_TAG\n\ndestroy:\n  extends: .terraform:destroy\n  variables:\n    GIT_STRATEGY: none\n  dependencies:\n    - build\n  environment:\n    name: $TF_STATE_NAME\n    action: stop\n  rules:\n    - if: $CI_COMMIT_TAG  # Do not destroy production\n      when: never\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $TF_DESTROY == \"true\" # Manually destroy integration env.\n      when: manual\n```\n\nAs said, this matches the need to deploy to `integration` and `staging`. But we are still missing a temporary environment where the developers can experience and validate their code without impacts on others. This is where the deployment to the `review` environment takes place.\n\n##### CD to review environments\n\nDeploying to review environment is not too different than deploying to `integration` and `staging`. So we will once again leverage GitLab's capacity to overwrite only pieces of job definition here.\n\nFirst, we set rules to run these jobs only on feature branches.\n\nThen, we link the `deploy_review` job to `destroy_review`. This will allow us to stop the environment **manually** from the GitLab user interface, but more importantly, it will **automatically trigger the environment destruction** when the feature branch is closed. This is a good FinOps practice to help you control your operational expenditures.\n\nSince Terraform needs a plan file to destroy an infrastructure, exactly like it needs one to build an infrastructure, then we are adding a dependency from `destroy_review` to `build_review`, to retrieve its artifacts.\n\nFinally, we see here that the environment's name is set to `$environment`. 
It has been set in the [main pipeline](#the-main-pipeline) to `review/$CI_COMMIT_REF_SLUG`, and forwarded to this child pipeline with the instruction `trigger:forward:yaml_variables:true`.\n\n```yml\nbuild_review:\n  extends: build\n  rules:\n    - if: $CI_COMMIT_TAG\n      when: never\n    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n      when: on_success\n\ndeploy_review:\n  extends: deploy\n  dependencies:\n    - build_review\n  environment:\n    name: $environment\n    action: start\n    on_stop: destroy_review\n    # url: https://$CI_ENVIRONMENT_SLUG.example.com\n  rules:\n    - if: $CI_COMMIT_TAG\n      when: never\n    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n      when: on_success\n\ndestroy_review:\n  extends: destroy\n  dependencies:\n    - build_review\n  environment:\n    name: $environment\n    action: stop\n  rules:\n    - if: $CI_COMMIT_TAG  # Do not destroy production\n      when: never\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH   # Do not destroy staging\n      when: never\n    - when: manual\n```\n\nSo, to recap, we now have a pipeline that can:\n\n* Deploy temporary review environments, which are automatically cleaned up when the feature branch is closed\n* Continuously deploy the **default branch** to `integration`\n* Continuously deploy the **tags** to `staging`\n\nLet's now add an extra layer, where we will deploy, based on a manual trigger this time, to `qa` and `production` environments.\n\n##### Continuously deploy to QA and production\n\nBecause not everybody is willing to deploy continuously to production, we will add a manual validation to the next two deployments. 
From a purely **CD** perspective, we would not add this trigger, but take this as an opportunity to learn how to run jobs from other triggers.\n\nSo far, we have started a [child pipeline](#the-child-pipeline) from the [main pipeline](#the-main-pipeline) to run all deployments.\n\nSince we want to run other deployments from the default branch and the tags, we will add another layer dedicated to these additional steps. Nothing new here. We will just repeat exactly the same process as the one we only did for the [main pipeline](#the-main-pipeline). Going this way allows you to manipulate as many layers as you need. I have already seen up to nine environments in some places.\n\nWithout arguing once again on the benefits to have fewer environments, the process that we are using here makes it very easy to implement the same pipeline all the way from early stages to final delivery, while keeping your pipeline definition simple and split in small chunks that you can maintain at no cost.\n\nTo prevent variable conflicts here, we are just using new var names to identify the Terraform state and input file.\n\n```yml\n.2nd_layer:\n  stage: 2nd_layer\n  variables:\n    TF_ROOT: terraform\n  trigger:\n    include: .gitlab-ci/.second-layer.gitlab-ci.yml\n    # strategy: depend            # Do NOT wait for the downstream pipeline to finish to mark upstream pipeline as successful. 
Otherwise, all pipelines will fail when reaching the pipeline timeout before deployment to 2nd layer.\n    forward:\n      yaml_variables: true      # Forward variables defined in the trigger job\n      pipeline_variables: true  # Forward manual pipeline variables and scheduled pipeline variables\n\nqa:\n  extends: .2nd_layer\n  variables:\n    TF_STATE_NAME_2: qa\n    environment: $TF_STATE_NAME_2\n    TF_CLI_ARGS_plan_2: \"-var-file=../vars/$TF_STATE_NAME_2.tfvars\"\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n\nproduction:\n  extends: .2nd_layer\n  variables:\n    TF_STATE_NAME_2: production\n    environment: $TF_STATE_NAME_2\n    TF_CLI_ARGS_plan_2: \"-var-file=../vars/$TF_STATE_NAME_2.tfvars\"\n  rules:\n    - if: $CI_COMMIT_TAG\n```\n\n**One important trick here is the strategy used for the new downstream pipeline.** We leave that `trigger:strategy` to its default value; otherwise, the [main pipeline](#the-main-pipeline) would wait for your [grand-child pipeline](#the-grand-child-pipeline) to finish. With a manual trigger, this could last for a very long time and make your pipeline dashboard harder to read and understand.\n\nYou have probably already wondered what is the content of that `.gitlab-ci/.second-layer.gitlab-ci.yml` file we are including here.  
We will cover that in the next section.\n\n##### The first layer complete pipeline definition\n\nIf you are looking for a complete view of this first layer (stored in `.gitlab-ci/.first-layer.gitlab-ci.yml`), just expand the section below.\n\n```yml\nvariables:\n  TF_VAR_aws_ami_id: $AWS_AMI_ID\n  TF_VAR_aws_instance_type: $AWS_INSTANCE_TYPE\n  TF_VAR_aws_default_region: $AWS_DEFAULT_REGION\n\ninclude:\n  - template: Terraform.gitlab-ci.yml\n\ndefault:\n  cache:  # Use a shared cache or tagged runners to ensure terraform can run on apply and destroy\n    - key: cache-$CI_COMMIT_REF_SLUG\n      fallback_keys:\n        - cache-$CI_DEFAULT_BRANCH\n      paths:\n        - .\n\nstages:\n  - validate\n  - test\n  - build\n  - deploy\n  - cleanup\n  - 2nd_layer       # Use to deploy a 2nd environment on both the main branch and on the tags\n\nfmt:\n  rules:\n    - when: always\n\nvalidate:\n  rules:\n    - when: always\n\nkics-iac-sast:\n  rules:\n    - if: $SAST_DISABLED == 'true' || $SAST_DISABLED == '1'\n      when: never\n    - if: $SAST_EXCLUDED_ANALYZERS =~ /kics/\n      when: never\n    - when: on_success\n\niac-sast:\n  rules:\n    - if: $SAST_DISABLED == 'true' || $SAST_DISABLED == '1'\n      when: never\n    - if: $SAST_EXCLUDED_ANALYZERS =~ /kics/\n      when: never\n    - when: on_success\n\n###########################################################################################################\n## Integration env. and Staging. env\n##  * Auto-deploy to Integration on merge to main.\n##  * Auto-deploy to Staging on tag.\n##  * Integration can be manually destroyed if TF_DESTROY is set to true.\n##  * Destroy of next env. 
is not automated to prevent errors.\n###########################################################################################################\nbuild:  # terraform plan\n  environment:\n    name: $TF_STATE_NAME\n    action: prepare\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    - if: $CI_COMMIT_TAG\n\ndeploy: # terraform apply --> automatically deploy on corresponding env (integration or staging) when merging to default branch or tagging. Second layer environments (qa and production) will be controlled manually\n  environment: \n    name: $TF_STATE_NAME\n    action: start\n    on_stop: destroy\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    - if: $CI_COMMIT_TAG\n\ndestroy:\n  extends: .terraform:destroy\n  variables:\n    GIT_STRATEGY: none\n  dependencies:\n    - build\n  environment:\n    name: $TF_STATE_NAME\n    action: stop\n  rules:\n    - if: $CI_COMMIT_TAG  # Do not destroy production\n      when: never\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $TF_DESTROY == \"true\" # Manually destroy integration env.\n      when: manual\n###########################################################################################################\n\n###########################################################################################################\n## Dev env.\n##  * Temporary environment. 
Lives and dies with the Merge Request.\n##  * Auto-deploy on push to feature branch.\n##  * Auto-destroy on when Merge Request is closed.\n###########################################################################################################\nbuild_review:\n  extends: build\n  rules:\n    - if: $CI_COMMIT_TAG\n      when: never\n    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n      when: on_success\n\ndeploy_review:\n  extends: deploy\n  dependencies:\n    - build_review\n  environment:\n    name: $environment\n    action: start\n    on_stop: destroy_review\n    # url: https://$CI_ENVIRONMENT_SLUG.example.com\n  rules:\n    - if: $CI_COMMIT_TAG\n      when: never\n    - if: $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH\n      when: on_success\n\ndestroy_review:\n  extends: destroy\n  dependencies:\n    - build_review\n  environment:\n    name: $environment\n    action: stop\n  rules:\n    - if: $CI_COMMIT_TAG  # Do not destroy production\n      when: never\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH   # Do not destroy staging\n      when: never\n    - when: manual\n###########################################################################################################\n\n###########################################################################################################\n## Second layer\n##  * Deploys from main branch to qa env.\n##  * Deploys from tag to production.\n###########################################################################################################\n.2nd_layer:\n  stage: 2nd_layer\n  variables:\n    TF_ROOT: terraform\n  trigger:\n    include: .gitlab-ci/.second-layer.gitlab-ci.yml\n    # strategy: depend            # Do NOT wait for the downstream pipeline to finish to mark upstream pipeline as successful. 
Otherwise, all pipelines will fail when reaching the pipeline timeout before deployment to 2nd layer.\n    forward:\n      yaml_variables: true      # Forward variables defined in the trigger job\n      pipeline_variables: true  # Forward manual pipeline variables and scheduled pipeline variables\n\nqa:\n  extends: .2nd_layer\n  variables:\n    TF_STATE_NAME_2: qa\n    environment: $TF_STATE_NAME_2\n    TF_CLI_ARGS_plan_2: \"-var-file=../vars/$TF_STATE_NAME_2.tfvars\"\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n\nproduction:\n  extends: .2nd_layer\n  variables:\n    TF_STATE_NAME_2: production\n    environment: $TF_STATE_NAME_2\n    TF_CLI_ARGS_plan_2: \"-var-file=../vars/$TF_STATE_NAME_2.tfvars\"\n  rules:\n    - if: $CI_COMMIT_TAG\n###########################################################################################################\n```\n\nAt this stage, we are already deploying safely to three environments. That is my personal ideal recommendation. However, if you need more environments, add them to your CD pipeline.\n\nYou have certainly already noted that we include a downstream pipeline with the keyword `trigger:include`. This includes the file `.gitlab-ci/.second-layer.gitlab-ci.yml`. We want to run almost the same pipeline so obviously, its content is very similar to the one we have detailed above. The main advantage here to define this [grand-child pipeline](#the-grand-child-pipeline) is that it lives on its own, making both variables and rules way easier to define.\n\n### The grand-child pipeline\n\nThis second layer pipeline is a brand new pipeline. Hence, it needs to mimic the first layer definition with:\n\n* [Inclusion of the Terraform template](#run-terraform-commands-and-secure-the-code).\n* [Enforcement of security checks](#run-controls-on-all-branches). 
Terraform validation would be duplicates of the first layer, but security scanners may find threats that did not yet exist when scanners previously ran (for example, if you deploy to production a couple of days after your deployment to staging).\n* [Overwrite build and deploy jobs to set specific rules](#cd-to-review-environments). Note that the `destroy` stage is no longer automated to prevent too fast deletions.\n\nAs explained above, the `TF_STATE_NAME` and `TF_CLI_ARGS_plan` have been provided from the [main pipeline](#the-main-pipeline) to the [child pipeline](#the-child-pipeline). We needed another variable name to pass these values from the [child pipeline](#the-child-pipeline) to here, the [grand-child pipeline](#the-grand-child-pipeline). This is why they are postfixed with `_2` in the child pipeline and the value is copied back to the appropriate variable during the `before_script` here.\n\nSince we have already broken down each step above, we can zoom out here directly to the broad view of the global second layer definition (stored in `.gitlab-ci/.second-layer.gitlab-ci.yml`).\n\n```yml\n# Use to deploy a second environment on both the default branch and the tags.\n\ninclude:\n  template: Terraform.gitlab-ci.yml\n\nstages:\n  - validate\n  - test\n  - build\n  - deploy\n\nfmt:\n  rules:\n    - when: never\n\nvalidate:\n  rules:\n    - when: never\n\nkics-iac-sast:\n  rules:\n    - if: $SAST_DISABLED == 'true' || $SAST_DISABLED == '1'\n      when: never\n    - if: $SAST_EXCLUDED_ANALYZERS =~ /kics/\n      when: never\n    - when: always\n\n###########################################################################################################\n## QA env. and Prod. env\n##  * Manually trigger build and auto-deploy in QA\n##  * Manually trigger both build and deploy in Production\n##  * Destroy of these env. 
is not automated to prevent errors.\n###########################################################################################################\nbuild:  # terraform plan\n  cache:  # Use a shared cache or tagged runners to ensure terraform can run on apply and destroy\n    - key: $TF_STATE_NAME_2\n      fallback_keys:\n        - cache-$CI_DEFAULT_BRANCH\n      paths:\n        - .\n  environment:\n    name: $TF_STATE_NAME_2\n    action: prepare\n  before_script:  # Hack to set new variable values on the second layer, while still using the same variable names. Otherwise, due to variable precedence order, setting new value in the trigger job, does not cascade these new values to the downstream pipeline\n    - TF_STATE_NAME=$TF_STATE_NAME_2\n    - TF_CLI_ARGS_plan=$TF_CLI_ARGS_plan_2\n  rules:\n    - when: manual\n\ndeploy: # terraform apply\n  cache:  # Use a shared cache or tagged runners to ensure terraform can run on apply and destroy\n    - key: $TF_STATE_NAME_2\n      fallback_keys:\n        - cache-$CI_DEFAULT_BRANCH\n      paths:\n        - .\n  environment: \n    name: $TF_STATE_NAME_2\n    action: start\n  before_script:  # Hack to set new variable values on the second layer, while still using the same variable names. Otherwise, due to variable precedence order, setting new value in the trigger job, does not cascade these new values to the downstream pipeline\n    - TF_STATE_NAME=$TF_STATE_NAME_2\n    - TF_CLI_ARGS_plan=$TF_CLI_ARGS_plan_2\n  rules:\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n    - if: $CI_COMMIT_TAG && $TF_AUTO_DEPLOY == \"true\"\n    - if: $CI_COMMIT_TAG\n      when: manual\n###########################################################################################################\n```\n\nEt voilà. 
**We are ready to go.** Feel free to change the way you control your job executions, leveraging for example GitLab's capacity to [delay a job](https://docs.gitlab.com/ee/ci/jobs/job_control.html#run-a-job-after-a-delay) before deploying to production.\n\n## Try it yourself\n\nWe finally reached our destination. We are now able to control **deployments to five different environments**, with only the **feature branches**, the **main branch**, and **tags**.\n* We are intensively reusing GitLab open source templates to ensure efficiency and security in our pipelines.\n* We are leveraging GitLab template capacities to overwrite only the blocks that need custom control.\n* We have split the pipeline in small chunks, controlling the downstream pipelines to match exactly what we need.\n\nFrom there, the floor is yours. You could, for example, easily update the main pipeline to trigger downstream pipelines for your software source code, with the [trigger:rules:changes](https://docs.gitlab.com/ee/ci/yaml/#ruleschanges) keyword. And use another [template](https://gitlab.com/gitlab-org/gitlab/-/tree/master/lib/gitlab/ci/templates/) depending on the changes that happened. 
But that is another story.",[108,771,772,478,9],{"slug":4505,"featured":6,"template":684},"using-child-pipelines-to-continuously-deploy-to-five-environments","content:en-us:blog:using-child-pipelines-to-continuously-deploy-to-five-environments.yml","Using Child Pipelines To Continuously Deploy To Five Environments","en-us/blog/using-child-pipelines-to-continuously-deploy-to-five-environments.yml","en-us/blog/using-child-pipelines-to-continuously-deploy-to-five-environments",{"_path":4511,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4512,"content":4518,"config":4523,"_id":4525,"_type":13,"title":4526,"_source":15,"_file":4527,"_stem":4528,"_extension":18},"/en-us/blog/using-gitlab-web-ide-gitlab-ci-cd",{"title":4513,"description":4514,"ogTitle":4513,"ogDescription":4514,"noIndex":6,"ogImage":4515,"ogUrl":4516,"ogSiteName":669,"ogType":670,"canonicalUrls":4516,"schema":4517},"How to make small changes using GitLab’s Web IDE","A quick three minute demo shows how teams can deliver better apps faster using GitLab CI/CD.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749678812/Blog/Hero%20Images/web-ide-cover.jpg","https://about.gitlab.com/blog/using-gitlab-web-ide-gitlab-ci-cd","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"How to make small changes using GitLab’s Web IDE\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Chrissie Buchanan\"}],\n        \"datePublished\": \"2020-05-28\",\n      }",{"title":4513,"description":4514,"authors":4519,"heroImage":4515,"date":4520,"body":4521,"category":3709,"tags":4522},[3173],"2020-05-28","\n\nIt’s not enough to say something is quick and easy. 
To have a better understanding of some of the benefits of using [GitLab CI/CD](/topics/ci-cd/), it’s much better to _show_ you.\n\nIn a [short video](https://www.youtube.com/watch?v=6207TKNGgJs&feature=emb_logo), [Itzik Gan-Baruch](/company/team/#iganbaruch) technical marketing manager, demonstrates how to submit a code change using GitLab Web IDE. In three minutes, teams can submit a code change and commit it, trigger a CI pipeline to scan for any errors, and ship the updated application to users.\n\n## Getting started with GitLab Web IDE\n\nAll code that gets automatically tested and deployed to production has a human at its source. In GitLab 10.7, we released the [first iteration of our Web Integrated Development Environment (IDE)](/blog/introducing-gitlab-s-integrated-development-environment/) after observing how non-developers struggled with editing multiple files and committing those changes. Since we believe that [everyone can contribute](/company/mission/#mission), building an editor that was integrated with GitLab that made it easier for anyone to contribute seemed like a natural fit. To access the Web IDE, just click the button from any GitLab project.\n\n![Web IDE](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_1.png){: .shadow.medium.center}\n\nThe Web IDE button\n{: .note.text-center}\n\nIn this simple project with a job application, you can use the Web IDE to make a code change and push it to a feature branch. Select the file you would like to change from the menu on the left.\n\n![Selecting a file](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_2.png){: .shadow.medium.center}\n\nSelecting a file from the Wed IDE\n{: .note.text-center}\n\nOnce you’ve modified the text in that file, add a commit message and create a new branch. 
Click `Commit` to create a merge request.\n\n![Commit](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_3.png){: .shadow.medium.center}\n\nCommit to create a merge request\n{: .note.text-center}\n\nYour commit generates a merge request, and from here you can add an assignee, tie this code change to a specific milestone, add labels, or add any additional information regarding the change.\n\n![Modify merge request](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_4.png){: .shadow.medium.center}\n\nSubmit merge request\n{: .note.text-center}\n\nA new [continuous integration pipeline](/solutions/continuous-integration/) is triggered automatically. Click on the pipeline to see the stages.\n\n![Pipeline](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_5.png){: .shadow.medium.center}\n\nClick on the pipeline from the merge request\n{: .note.text-center}\n\nIn this project, the pipeline needed zero-configuration because it was generated through GitLab's [Auto DevOps](/direction/delivery/auto_devops/) capability. The pipeline has stages and a few jobs within each stage.\n\n![Auto DevOps pipeline](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_6.png){: .shadow.medium.center}\n\nA CI pipeline automatically configured with GitLab Auto DevOps\n{: .note.text-center}\n\nFirst, it builds a Docker image for the code and pushes it to the container registry. 
From there, it begins tests and scans jobs that run in parallel to help speed up the pipeline.\n\n![Pipeline jobs](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_7.png){: .shadow.medium.center}\n\nClick on a job within the pipeline stage to get more information\n{: .note.text-center}\n\nBy clicking on a job within the stage, you can see what happens.\n\n![dependency scan](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_8.png){: .shadow.medium.center}\n\nDependency scanning details\n{: .note.text-center}\n\nOnce all tests are completed, all test results will be added to the merge request that was created. The merge request is really the key to using GitLab as a code collaboration and [version control platform](/topics/version-control/). It’s simply a request to merge one branch into another.\n\n![merge requests](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_9.png){: .shadow.medium.center}\n\nMerge requests for this project\n{: .note.text-center}\n\n[Review Apps](https://docs.gitlab.com/ee/ci/review_apps/) are a way to visualize the changes that were made. Click `View app` once the pipeline has completed to access the staging environment.\n\n![Review apps](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_10.png){: .shadow.medium.center}\n\nSelect `View app` to access a staging environment once a pipeline completes.\n{: .note.text-center}\n\nIn this environment, only changes that were made in the merge request will be displayed. This link can be sent to others so they can view the changes from a web browser.\n\n![staging environment](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_12.png){: .shadow.medium.center}\n\nThe Review App for this project\n{: .note.text-center}\n\nFrom the merge request, you can see the test results, including changes to code quality and the security scans. This scan detected 20 new vulnerabilities. 
If you’d like more information, just click `Expand` on the right.\n\n![pipeline test results](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_13.png){: .shadow.medium.center}\n\nPipeline test results\n{: .note.text-center}\n\nOnce the results have been expanded, you can click on each one to get more details.\n\n![SAST scan](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_14.png){: .shadow.medium.center}\n\nSAST vulnerabilities detected\n{: .note.text-center}\n\nBy clicking on one of these results, you can see the file that caused the vulnerability as well as the problematic lines of code.\n\n![security report](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_15.png){: .shadow.medium.center}\n\nSecurity report\n{: .note.text-center}\n\nFrom this menu, you can choose to dismiss the vulnerability or create an issue so that someone can fix it. Details from the test will be added to the issue automatically.\n\n![new issue](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_16.png){: .shadow.medium.center}\n\nA new issue created to investigate a vulnerability\n{: .note.text-center}\n\nFrom your original merge request, you can collaborate with others and have them take a look at the proposed changes.\n\n![collaborate on merge request](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_17.png){: .shadow.medium.center}\n\nTag someone in a merge request to have them see your changes\n{: .note.text-center}\n\nOnce you’ve gathered feedback and all pipelines have passed, click the `merge` button to trigger a new pipeline to deploy your application to production\n\n![Web IDE](https://about.gitlab.com/images/blogimages/CI_demo_blog_May_28/CI_demo_18.png){: .shadow.medium.center}\n\nClick `merge` to trigger a deployment pipeline\n{: .note.text-center}\n\nThis workflow shows how anyone can contribute code without using a command line. 
The Web IDE makes it easy for anyone to make changes without introducing additional risks or quality issues, all from the GitLab interface.\n\nTo see this three-minute demo in real-time, just watch the video below.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube-nocookie.com/embed/6207TKNGgJs\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n",[108,940,9],{"slug":4524,"featured":6,"template":684},"using-gitlab-web-ide-gitlab-ci-cd","content:en-us:blog:using-gitlab-web-ide-gitlab-ci-cd.yml","Using Gitlab Web Ide Gitlab Ci Cd","en-us/blog/using-gitlab-web-ide-gitlab-ci-cd.yml","en-us/blog/using-gitlab-web-ide-gitlab-ci-cd",{"_path":4530,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4531,"content":4536,"config":4541,"_id":4543,"_type":13,"title":4544,"_source":15,"_file":4545,"_stem":4546,"_extension":18},"/en-us/blog/vscode-extension-development-with-gitlab",{"title":4532,"description":4533,"ogTitle":4532,"ogDescription":4533,"noIndex":6,"ogImage":3376,"ogUrl":4534,"ogSiteName":669,"ogType":670,"canonicalUrls":4534,"schema":4535},"VS Code extension development with GitLab","As VS Code editor increases in popularity, find out how GitLab + VS Code can be used for extension development and how we develop the official GitLab VS Code extension.","https://about.gitlab.com/blog/vscode-extension-development-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"VS Code extension development with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tomas Vik\"}],\n        \"datePublished\": \"2020-11-30\",\n      }",{"title":4532,"description":4533,"authors":4537,"heroImage":3376,"date":4538,"body":4539,"category":769,"tags":4540},[3381],"2020-11-30","\n## What is Visual Studio Code (VSC)?\n\nMicrosoft Visual Studio Code (VS Code) is an extensible text 
editor. It's implemented in TypeScript and runs on Node 12 and Electron. It was [first released in 2015](https://github.com/microsoft/vscode/releases/tag/0.10.1), and since then, become widely popular[^2]. This post explains the basics about the development of VS Code extensions, shows how you can use GitLab for extension development, and shares how we build the official [GitLab VS Code extension](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow).\n\n## VS Code editor key features\n\nFor me, the key feature of the VS Code editor is that it created a platform for extensions. That means not just providing an API for extensions (which editors have done since the '90s [^3]) but also providing a marketplace and seamless way of publishing and updating extensions.\n\nThere is also a fully open source version of the VS Code called [VSCodium](https://vscodium.com/). This version removes some proprietary Microsoft code from the distribution and is analogous to the Google Chrome and Chromium projects.\n\n## VS Code extension\n\nVS Code extension is a JavaScript or TypeScript app that runs in node and has access to the [VS Code Extension API](https://code.visualstudio.com/api). The convenient thing about this architecture is that the extension is like any other node app and has full access to the host machine and network. It can choose its own library for network connection, manipulating file systems, and also for rendering web UI.\n\n## Extension API\n\nThe extension API is implemented in TypeScript; it allows users to manipulate almost every aspect of the editor. After months of using it, I find the design elegant (with the exception of testing, which seems to be an afterthought in many areas of the API).\n\nThe main features of the API are manipulating and searching the files, editing text, creating custom left panels and status bars, debuggers, custom webview tabs, (Jupyter) notebook providers, and more. 
The API also provides a simple way to communicate with the user via input fields and quick-pick panels, as well as showing output with info, warning, or error messages.\n\n## Extension Marketplace\n\nIf you are familiar with either AppStore or PlayStore, you'll find VS Code has an equivalent store called [Visual Studio Marketplace](https://marketplace.visualstudio.com/search?target=VSCode), and unlike on its older siblings, everything[^4] is for free. Both the easy browsing experience for the user and the ease of use for a developer are differentiators for VS Code.\n\nAs a developer, you set up your [Azure Cloud token](https://code.visualstudio.com/api/working-with-extensions/publishing-extension#get-a-personal-access-token) and then run `vsce publish` in your extension folder. That's it, within a few minutes, most of your users[^5] are running the latest and greatest version of your extension. This process greatly reduces the pressure on developers to get everything right before releasing, enabling faster iteration.\n\nThere is also an independent marketplace called [open-vsx](https://open-vsx.org/) used mainly by VSCodium but also by [GitPod](https://docs.gitlab.com/ee/integration/gitpod.html) and others.\n\n## Developing extensions in GitLab\n\nIf you'd like to try and develop your own extension, you can fork the [`gitlab-example-extension`](https://gitlab.com/viktomas/gitlab-example-extension) project. It contains a complete setup for linting, unit and integration testing, and publishing the extension to both [Visual Studio Marketplace](https://marketplace.visualstudio.com/search?target=VSCode) and [open-vsx](https://open-vsx.org/). Thanks to GitLab being a single platform for the whole [DevOps lifecycle](/topics/devops/), you can just push your code changes to GitLab, and CI/CD takes care of everything else. 
As always, if you find any useful tweaks, please submit an MR because [everyone can contribute](/company/mission/#mission).\n\nYou can see what the VS Code extension API offers in the [official documentation](https://code.visualstudio.com/api). You can then have a look at [extension examples](https://code.visualstudio.com/api/extension-guides/overview) and extend them to make the VS Code editor do almost anything you want.\n\n## Our extension: GitLab Workflow\n\nIn June the [GitLab Workflow extension became officially supported by GitLab](/blog/use-gitlab-with-vscode/). Since then we've done a lot of cleanup work and bug fixes. Recently, we released our first larger feature: [Inserting GitLab project snippets](https://about.gitlab.com/releases/2020/11/22/gitlab-13-6-released/#insert-gitlab-snippets-directly-in-vs-code).\n\nThe primary purpose of the extension is to integrate GitLab features into the editor, so users don't have to leave the editor to perform basic tasks such as read an issue description or create a snippet from the code. The extension is trying to plug in the GitLab features into an existing VS Code Extension API to both minimise the need for custom code and to make the experience as VS Code-like as possible.\n\nThere are several main areas of the VS Code Extension API that we take advantage of:\n\n### Commands\n\n[Commands](https://code.visualstudio.com/api/extension-guides/command) are a versatile concept for triggering actions. The most common way to trigger commands is to use the \u003Ckbd>Cmd\u003C/kbd>+\u003Ckbd>Shift\u003C/kbd>+\u003Ckbd>P\u003C/kbd> Command Palette. But commands can also be triggered from context menus, clicks on buttons, or even programmatically by other code in the extension. The most common example of triggering commands programatically is to call the `vscode.open` command with a URL as a parameter. 
GitLab workflow does that every time we open the GitLab web page[^6].\n\n![Command Palette](https://about.gitlab.com/images/blogimages/vscode-extension-development-with-gitlab/commands.png){: .shadow.medium.center}\nCommand Palette in GitLab Workflow\n{: .note .text-center}\n\n### Tree View\n\nVS Code uses the [Tree View](https://code.visualstudio.com/api/extension-guides/tree-view) for displaying the left panel. The panel shows the file tree for the project, changed Git files, an outline of the open file, full-text search results, and more. We use this Tree View panel to show lists of issues and merge requests.\n\n![Tree View](https://about.gitlab.com/images/blogimages/vscode-extension-development-with-gitlab/tree-view.png){: .shadow.medium.center}\nTree View in GitLab Workflow\n{: .note .text-center}\n\n### Status bar\n\n[Status bar](https://code.visualstudio.com/api/extension-capabilities/extending-workbench#status-bar-item) is the slim panel at the bottom of the editor. Any extension can add items to it. Extensions such as Git, spell checks, linters, and formatters all add items to the status bar to provide the user with quick feedback.\n\nThe GitLab Workflow extension shows the MR, issue, and pipeline for the current branch. It, for example, allows you to see if your pipeline failed after the last push.\n\n![Status bar](https://about.gitlab.com/images/blogimages/vscode-extension-development-with-gitlab/status-bar.png){: .shadow.medium.center}\nStatus bar in GitLab Workflow\n{: .note .text-center}\n\nAltogether the VS Code API provides a great foundation for bringing GitLab features closer to the editor. The GitLab VS Code extension is an exciting project that **you too can contribute to**. 
The best place to start is the [GitLab project page](https://gitlab.com/gitlab-org/gitlab-vscode-extension).\n\n[^2]: [17th most popular project on GitHub](https://github.com/search?p=2&q=stars%3A%3E100&s=stars&type=Repositories) at the time of writing (2020-11-20)\n[^3]: GNU Emacs supported Lisp extensions in [1985](https://en.wikipedia.org/wiki/Emacs#GNU_Emacs)\n[^4]: I haven't been able to find a paid extension in the store.\n[^5]: The auto-update feature is on by default in VS Code, but it can be turned off in which case your users are not going to auto-update.\n[^6]: [Using `vscode.open` in the GitLab Workflow](https://gitlab.com/search?utf8=%E2%9C%93&search=vscode.open&group_id=9970&project_id=5261717&scope=&search_code=true&snippets=false&repository_ref=main&nav_source=navbar)\n\n[Cover image](https://art.ljubicapetkovic.com/cc-licensed/) by [Ljubica Petkovic](https://art.ljubicapetkovic.com), licensed under [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/)\n{: .note}\n",[230,9,108],{"slug":4542,"featured":6,"template":684},"vscode-extension-development-with-gitlab","content:en-us:blog:vscode-extension-development-with-gitlab.yml","Vscode Extension Development With Gitlab","en-us/blog/vscode-extension-development-with-gitlab.yml","en-us/blog/vscode-extension-development-with-gitlab",{"_path":4548,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4549,"content":4554,"config":4559,"_id":4561,"_type":13,"title":4562,"_source":15,"_file":4563,"_stem":4564,"_extension":18},"/en-us/blog/vscode-workflow-new-features",{"title":4550,"description":4551,"ogTitle":4550,"ogDescription":4551,"noIndex":6,"ogImage":3376,"ogUrl":4552,"ogSiteName":669,"ogType":670,"canonicalUrls":4552,"schema":4553},"Four new tools for your Visual Studio Code and GitLab tool belt","Learn about new features that can help you review MRs and interact with GitLab","https://about.gitlab.com/blog/vscode-workflow-new-features","\n                        {\n        \"@context\": 
\"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Four new tools for your Visual Studio Code and GitLab tool belt\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tomas Vik\"}],\n        \"datePublished\": \"2021-11-17\",\n      }",{"title":4550,"description":4551,"authors":4555,"heroImage":3376,"date":4556,"body":4557,"category":769,"tags":4558},[3381],"2021-11-17","\n\nIn our [previous post](/blog/mr-reviews-with-vs-code/), we talked about merge request (MR) Reviews. We explained how the GitLab Workflow extension helps you review MRs without leaving VS Code. Since releasing and polishing the MR reviews, we've been working on improvements to the extension. In this post, we will show you how the latest features fit into your workflow.\n\n### Do you have a lot to say? Use a snippet patch!\n\nOn GitLab's web UI there's the \"suggestions\" feature. It's handy for suggesting small changes in the MR review. The VS Code platform doesn't let us recreate the same experience, but the extension offers an alternative: Snippet patches.\n\nSnippet patches are code changes (git patches) of arbitrary size shared as GitLab snippets. Because they don't have a size limit, they are perfect for suggesting changes to multiple files during the MR review.\n\nThe extension has two commands, `Create snippet patch` and `Apply snippet patch`. These commands use `git diff` and `git apply`, respectively, which means people can still apply the snippet patch even if they don't use the GitLab Workflow extension.\n\nIf a suggestion in the comment is a hammer, then a snippet patch is a pneumatic tamping machine. 
Next time you'll review an MR, and you see a lot of space for improvement, remember the adage: \"A patch is worth a thousand words\".\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube-nocookie.com/embed/QQxpLoKJULQ\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n### What's going on with my pipeline? - Improved CI status display\n\nThe extension always showed the latest CI pipeline status in both the status bar and the sidebar. However, if you tried to gauge your pipeline status, you probably run into one or more surprises. The status was hard to understand. Sometimes it related to a different branch, or it was out of date.\n\nWe've made the pipeline status much more reliable and readable. For starters, you can now see individual jobs and their status in the sidebar. Click on any job, and the extension opens a browser window with the GitLab job page.\n\nWe also improved the consistency of showing the pipeline status. The status bar and sidebar are now in sync and always showing pipeline for the current branch.\n\nWe are excited about the cleaner code. It makes it easier for anyone to contribute functionality. If you'd be interested in giving it a shot, we recommend starting with the [Download artifacts from the latest pipeline](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/106) feature request. \n\n\n![VS Code status bar](https://about.gitlab.com/images/blogimages/2021-11-05-vscode-workflow-new-features/ci-pipeline-panel.png){: .shadow.medium.center}\nVS Code CI Pipeline status overview from GitLab extension.\n{: .note .text-center}\n\n### Make the MR your own - Working with checked out code\n\nTwo recent improvements play well together to make your review more interactive. They help you spend less time on actions that don't directly relate to reviewing code. 
These improvements let you check out the MR branch and open a local file during a review.\n\n#### Check out the MR branch\n\nYou can checkout any MR locally, as long as it is not coming from a forked project. Right-click the MR in the side tree and select \"Checkout MR Branch\". After the command finishes, you'll have the MR branch checked out in your project. Now you can review and run the code.\n\n\u003Cfigure class=\"video_container\">\n  \u003Cvideo src=\"https://gitlab.com/gitlab-org/gitlab-vscode-extension/uploads/db804234ed4d338dea31a27778dba72e/checkout-mr-branch.mp4\" controls=\"true\" data-setup=\"{}\" data-title=\"checkout-mr-branch\" preload=\"metadata\" width=\"560\">\u003C/video>\n\u003C/figure>\n{: .shadow.medium.center}\n\n#### Open a local file during a review\n\nWhen you look at a changed file in an MR, you can click on a small \"file\" icon in the top-right corner. The extension will open the same file in your local repository.\n\nIf your local branch is different from the MR branch, the local file might not be the same as the MR file.\n\nOpening the local file is useful when you want to explore the surroundings of the file quickly. The VS Code automatically focuses the file in the file tree, which lets you see all the neighbouring files.\n\n\u003Cfigure class=\"video_container\">\n  \u003Cvideo src=\"https://gitlab.com/gitlab-org/gitlab-vscode-extension/uploads/de2839b1ceb1be6c33cd80d7fe72bc6d/open-mr-file.mp4\" controls=\"true\" data-setup=\"{}\" data-title=\"open-mr-file\" preload=\"metadata\" width=\"560\">\u003C/video>\n\u003C/figure>\n{: .shadow.medium.center}\n\n### Commitment problems? Browse repositories without checking them out\n\nAt GitLab, we've got some large repositories. The largest, which all GitLabbers use daily, is [www-gitlab-com](https://gitlab.com/gitlab-com/www-gitlab-com), the website you see when you visit `about.gitlab.com`. 
This 6 GB colossus takes several minutes to check out.\n\nExploring this repository is a perfect use case for our latest feature: Remote Repositories, [contributed by Ethan Reesor](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/merge_requests/321), a community member.\n\nRun the `GitLab: Open Remote Repository` command, pick which project and branch you want to use, and _voilà_.  The extension opens the repository in your local workspace, but it doesn't store data on your local machine.\n\nRemote repositories are useful when you want to browse a repository for a reference but don't plan to change the code.\n\nThis is the first iteration, and it's got some limitations - you can't use full-text search, fuzzy file navigation, and the files are read-only. It's useful nonetheless.\n\n\u003Ciframe width=\"560\" height=\"315\" src=\"https://www.youtube-nocookie.com/embed/p4GTVx_Nd2s\" title=\"YouTube video player\" frameborder=\"0\" allow=\"accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture\" allowfullscreen>\u003C/iframe>\n\n### Thank you community!\n\nMost of the features introduced in this post are either implemented or suggested by a community member. Ahmed Mohamadeen [suggested](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/390) opening local file during MR review, Musisimaru [created initial implementation](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/merge_requests/203) of checking out MR branch, and Ethan Reesor [implemented](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/merge_requests/321) remote repositories.\n\n\nIf you'd like to shape the future of the GitLab Workflow VS Code extension, you can create issues in [our issue tracker](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues), or look for [issues where we accept MRs](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues?label_name%5B%5D=Accepting+merge+requests). 
Our [CONTRIBUTING](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/blob/main/CONTRIBUTING.md) guide is an excellent place to start.\n\nCover image by [Ljubica Petkovic](https://ljubicapetkovic.com), licensed under [CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/)\n{: .note}\n",[230,9,773],{"slug":4560,"featured":6,"template":684},"vscode-workflow-new-features","content:en-us:blog:vscode-workflow-new-features.yml","Vscode Workflow New Features","en-us/blog/vscode-workflow-new-features.yml","en-us/blog/vscode-workflow-new-features",{"_path":4566,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4567,"content":4572,"config":4577,"_id":4579,"_type":13,"title":4580,"_source":15,"_file":4581,"_stem":4582,"_extension":18},"/en-us/blog/vscode-workflows-for-working-with-gitlab",{"title":4568,"description":4569,"ogTitle":4568,"ogDescription":4569,"noIndex":6,"ogImage":3376,"ogUrl":4570,"ogSiteName":669,"ogType":670,"canonicalUrls":4570,"schema":4571},"Visual Studio code editor: Eight tips for using GitLab VS Code","Learn how to use the Visual Studio code editor more efficiently and meet some of the GitLab contributors that made these new features happen.","https://about.gitlab.com/blog/vscode-workflows-for-working-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Visual Studio code editor: Eight tips for using GitLab VS Code\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Tomas Vik\"}],\n        \"datePublished\": \"2021-05-20\",\n      }",{"title":4568,"description":4569,"authors":4573,"heroImage":3376,"date":4574,"body":4575,"category":769,"tags":4576},[3381],"2021-05-20","\n\nAs a software engineer, I spend a significant portion of my day in the Visual Studio code editor. 
Since I started maintaining the officially supported [GitLab VS Code extension](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow), I've developed a few tricks that make me a productive GitLab user. Below, I share eight tips that make my work more efficient and productive, while also introducing you to some of the GitLab contributors who made this tooling happen.\n\n## What is Visual Studio Code?\n[Visual Studio Code](https://en.wikipedia.org/wiki/Visual_Studio_Code), developed by Microsoft, lets a user debug source code in various languages from the editor. It is also used for syntax highlighting, intelligent code completion, code refactoring, embedded Git and autocomplete. VS Code, as it is commonly known, can be launched or attached to running apps.\n\nIt is designed for Windows, Linux, and MacOS. VS Code can be used with several programming languages such as Java, JavaScript, Node.js, Python, C++ and Fortran. Support for additional languages is provided by freely available extensions on the [VS Code Marketplace](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow).\n\nBelow are eight tips for using GitLab VS Code.\n\n### How to clone any GitLab project\n\nGitLab contributor [Felix Haase](https://gitlab.com/haasef) recently [implemented a feature that lets you clone any GitLab project where you are a member](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/merge_requests/172). To clone the project, use the official `Git: Clone` command and select your GitLab instance. 
Use the `Git: Clone` command by selecting the command from the [Command Palette](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette).\n\nThis feature can save you time if you already know the name of the project you want to clone.\n\n![VS Code clone dialogue](https://about.gitlab.com/images/blogimages/vscode-workflows-for-working-with-gitlab/clone.png){: .shadow.medium.center}\nVS Code lets you filter which project to clone.\n{: .note .text-center}\n\n### How to view MRs and issues\n\nIt is easy to look through issues and MRs that you created, are assigned to, or are reviewing using GitLab. The lesser-known feature of the GitLab Workflow extension is [custom queries](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/blob/main/docs/user/custom-queries.md). Custom search queries allow you to refine the search expressions for issues and MRs that appear in the VS Code side panel. You can apply all the advanced search terms you are used to from the GitLab web search: Labels, full-text search expression, milestones, authors, assignees, and more.\n\n![GitLab extension sidebar](https://about.gitlab.com/images/blogimages/vscode-workflows-for-working-with-gitlab/issues-and-mrs.png){: .shadow.medium.center}\nSee your issues and MRs in the VS Code sidebar.\n{: .note .text-center}\n\nAnother option is [reviewing the MRs in VS Code](/blog/mr-reviews-with-vs-code/). 
The final functionality that is missing in MR review is [creating new comments on the MR diff](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues/342), which we plan to ship by July 2021.\n\n### How to create an MR with two clicks\n\nIf you use the `git` command in a terminal, you might have noticed that pushing your branch to GitLab produces the following output:\n\n```txt\nremote: To create a merge request for my-new-branch, visit:\nremote: https://gitlab-instance.com/my-group/my-project/merge_requests/new?merge_request%5Bsource_branch%5D=my-new-branch\n```\n\nAfter clicking the link, the terminal will open your browser on a new MR page where you can create an MR from the branch you just pushed.\n\nWhen I started pushing my branches through VS Code, I missed this feature. To the point that I searched through the VS Code Git Extension logs to find the create MR link (command `Git: Show Git Output`).\n\nLuckily, GitLab contributor [Jonas Tobias Hopusch](https://gitlab.com/jotoho) implemented a [status bar button](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/merge_requests/155) that lets you create MRs just as easily.\n\nTo create an MR from your changes, push them to your remote repository (the cloud icon next to the branch name) and then click on the `GitLab: Create MR.` button.\n\n![VS Code status bar](https://about.gitlab.com/images/blogimages/vscode-workflows-for-working-with-gitlab/status-bar-create-mr.png){: .shadow.medium.center}\nVS Code status bar with buttons from GitLab extension.\n{: .note .text-center}\n\n### How to configure your GitLab CI\n\nThe GitLab extension helps you edit your `.gitlab-ci.yml` configuration file in two ways: Autocompleting environment variables and validating the configuration.\n\nThanks to [Kev's](https://gitlab.com/KevSlashNull) fantastic [contribution](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/merge_requests/140), you can [use CI](/solutions/continuous-integration/) variable autocompletion 
anywhere in your `.gitlab-ci.yml`. The hints even include variable descriptions and explain supported GitLab versions.\n\n![CI variables autocomplete dialogue](https://about.gitlab.com/images/blogimages/vscode-workflows-for-working-with-gitlab/ci-autocomplete.png){: .shadow.medium.center}\nCI variables autocomplete dialogue.\n{: .note .text-center}\n\nWhen you finish writing your `.gitlab-ci.yml` CI configuration, you can use the `GitLab: Validate GitLab CI config` command to surface any problems before committing the CI config to your repository.\n\n### How to create and paste project snippets\n\nIs there a piece of text that you and your teammates often use? Maybe it is a license header for a file or a test scenario template. You can use GitLab snippets in combination with the Visual Studio Code editor to save you a few keystrokes.\n\nFor example, you can create a [test file snippet](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/snippets/2110322) with the `GitLab: Create snippet` command and then paste it into every new test file you create with the `GitLab: Insert snippet` command.\n\n![Paste Snippet dialogue](https://about.gitlab.com/images/blogimages/vscode-workflows-for-working-with-gitlab/paste-snippet.png){: .shadow.medium.center}\nPaste Snippet dialogue.\n{: .note .text-center}\n\nI mostly use snippets when I want to share a big blob of text. I select the text and then create the snippet with the `GitLab: Create snippet` command.\n\n### How to copy web URL for a project file\n\nMost of the [communication at GitLab happens asynchronously](https://handbook.gitlab.com/handbook/values/#bias-towards-asynchronous-communication). So instead of being able to show your colleague an open file in your editor, you'll need to be able to create a textual pointer to the file.\n\nA straightforward way to do that is to use the `GitLab: Copy link to active file on GitLab` command, which will copy the web URL of the open file into your clipboard. 
It even includes the line number or a range of lines based on your cursor or selection in the Visual Studio Code editor.\n\nYou might also consider using the `GitLens: Copy Remote File URL`, which even includes the commit SHA in the URL, making it a permalink. The permalink will always point to the same version of the file regardless of further commits to your branch. We'll look at the GitLens extension in tip number 7 a bit later on.\n\nFor the GitLab Enterprise/Community Edition you can use:\n\"gitweblinks.gitLabEnterprise\": [\n    {\n        \"http\": \"https://local-gitlab\",\n        \"ssh\": \"git@local-gitlab\"\n    }\n]\n\n#### What to do if VS Code source control is not working\n\nA `SourceControl` is the entity responsible for populating the [Source Control model](https://code.visualstudio.com/api/extension-guides/scm-provider) with resource states, instances of `SourceControlResourceState`. Resource states are organized in groups, instances of `SourceControlResourceGroup`.\n\nLinking to issues in source code is a normal part of the VS Code workflow, especially when there's some logic that's difficult to understand or when there's a //TODO comment that needs action. [Users report experiencing issues](https://stackoverflow.com/questions/60232215/visual-studio-code-source-control-not-showing-changes) with changes in the file not appearing to the source code, unless inputted manually.\n\nOne user offered a 7-step solution that worked for them. Another said that all they had to do was disable and then reenable the built-in Git extension, which fixed it. Yet another said they went to their “code” folder where they keep all their repos, right-clicked on the folder containing the repo they wanted and opened that folder with VS Code.\n\nAn often-used approach to look at issues is to pick one to work on, create a branch to work in, make some commits, then merge your changes back into the main or default branch with a pull request. 
You can do that from the new Issues view.\n\n#### GitLab Workflow extensions for VS Code\n\nThe [GitLab Workflow extension](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow) integrates GitLab with VS Code. You can decrease context switching and do more day-to-day tasks in VS Code, such as:\n\n- [View issues](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow#browse-issues-review-mrs).\n- Run [common commands](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow#commands) from the Visual Studio Code [command palette](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette).\n- Create and [review](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow#merge-request-reviews) merge requests directly from Visual Studio Code.\n- [Validate](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow#validate-gitlab-ci-configuration) your GitLab CI configuration.\n- [View the status](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow#information-about-your-branch-pipelines-mr-closing-issue) of your current pipeline.\n- [Create](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow#create-snippet) and paste snippets to, and from, your editor.\n- [Browse repositories](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow#browse-a-repository-without-cloning) without cloning them\n\nDownload the extension from the [Visual Studio Code Marketplace](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow). 
Then you can configure:\n\n- [Features to display or hide](https://gitlab.com/gitlab-org/gitlab-vscode-extension#extension-settings).\n- [Self-signed certificate](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow#self-signed-certificates) information\n\nReport any issues, bugs, or feature requests in the [gitlab-vscode-extension issue queue](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues).\n\nThis extension supports GitLab Version 13.0 and later. To find your GitLab version, visit [help](https://gitlab.com/help).\n\nYou can also see pipeline status, open MR and closing issue links in the status bar. The pipeline status is updated automatically so you don’t need to open GitLab to see your pipeline status.\n\nWithin the marketplace you can also use the command palette to run the commands and create a GitLab personal access token (required) and assign it to the extension.\n\nYou can also set the token in an environment variable and learn how to change the VS Code settings. There are instructions for several other in-depth features as well.\n\n## How GitLens simplifies working with VS Code editor\n\nUp until now, the tips were centered around the [GitLab Workflow extension](https://marketplace.visualstudio.com/items?itemName=GitLab.gitlab-workflow), but there is a fantastic extension that's improving VS Code git integration regardless of where you host your repository: [GitLens](https://marketplace.visualstudio.com/items?itemName=eamodio.gitlens).\n\n### Walking file history\n\nGitLens makes it easy to browse the history of changes to the current file. Each versioned file will have three new editor icons, which provides quick access to all previous revisions of the file. 
The middle button seen in the image below provides a series of actions on the current version (e.g., opening the commit in GitLab web).\n\n![GitLens history browsing buttons](https://about.gitlab.com/images/blogimages/vscode-workflows-for-working-with-gitlab/gitlens-history.png){: .shadow.medium.center}\nGitLens history browsing buttons\n{: .note .text-center}\n\n### How to compare current HEAD against branch or tag\n\nOne of my habits was inspecting `git diff` between my feature branch and the main branch before creating an MR. More often than not, I forgot to write a test or remove some pesky `console.log()`.\n\nGitLens adds multiple sections to your [\"Source Control\" tab](https://code.visualstudio.com/docs/editor/versioncontrol#_scm-providers). For each branch, tag, and commit, click a \"Compare\" icon which will show you changes between your current HEAD and the reference. Seeing the local diff is great for previewing changes before pushing the new branch to the remote.\n\n![GitLens - compare with branch](https://about.gitlab.com/images/blogimages/vscode-workflows-for-working-with-gitlab/gitlens-compare.png){: .shadow.medium.center}\nHow to compare with a branch using GitLens.\n{: .note .text-center}\n\n## Everyone can contribute\n\nNew features and fixes to the GitLab Visual Studio Code editor extension are added every month. If you find any issues or have a feature request, please go to our [GitLab VSCode issues tracker](https://gitlab.com/gitlab-org/gitlab-vscode-extension/-/issues) and if your request isn't already present in the tracker, create one. 
Everyone can contribute to GitLab, and we welcome your ideas on how to improve our Visual Studio Code editor.\n\n## Read more on Visual Studio and GitLab:\n\n- [Four new tools for your Visual Studio Code and GitLab tool belt](/blog/vscode-workflow-new-features/)\n\n- [VS Code extension development with GitLab](/blog/vscode-extension-development-with-gitlab/)\n\n- [How to do GitLab merge request reviews in VS Code](/blog/mr-reviews-with-vs-code/)\n\n- [How we created a GitLab Workflow Extension for VS Code](/blog/use-gitlab-with-vscode/)\n\n",[230,9],{"slug":4578,"featured":6,"template":684},"vscode-workflows-for-working-with-gitlab","content:en-us:blog:vscode-workflows-for-working-with-gitlab.yml","Vscode Workflows For Working With Gitlab","en-us/blog/vscode-workflows-for-working-with-gitlab.yml","en-us/blog/vscode-workflows-for-working-with-gitlab",{"_path":4584,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4585,"content":4591,"config":4596,"_id":4598,"_type":13,"title":4599,"_source":15,"_file":4600,"_stem":4601,"_extension":18},"/en-us/blog/vulnerability-risk-prioritization-made-simple-with-gitlab",{"title":4586,"description":4587,"ogTitle":4586,"ogDescription":4587,"noIndex":6,"ogImage":4588,"ogUrl":4589,"ogSiteName":669,"ogType":670,"canonicalUrls":4589,"schema":4590},"Vulnerability risk prioritization made simple with GitLab","GitLab provides detailed vulnerability risk data to assess the potential impact of detected vulnerabilities. 
Learn how this enables teams to effectively prioritize remediation efforts.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674528/Blog/Hero%20Images/blog-image-template-1800x945__5_.png","https://about.gitlab.com/blog/vulnerability-risk-prioritization-made-simple-with-gitlab","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Vulnerability risk prioritization made simple with GitLab\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Fernando Diaz\"}],\n        \"datePublished\": \"2025-03-12\",\n      }",{"title":4586,"description":4587,"authors":4592,"heroImage":4588,"date":4593,"body":4594,"category":814,"tags":4595},[1767],"2025-03-12","Development and security teams are often overwhelmed by the number of vulnerabilities they need to remediate. Many organizations remediate [less than 16%](https://arxiv.org/pdf/2302.14172) of their known vulnerabilities monthly. Vulnerability management teams face a constant challenge: which security flaws deserve immediate attention? Three key frameworks help answer this question: Common Vulnerability Scoring System ([CVSS](https://nvd.nist.gov/vuln-metrics/cvss)), Known Exploited Vulnerabilities ([KEV](https://www.cisa.gov/known-exploited-vulnerabilities-catalog)), and Exploit Prediction Scoring System ([EPSS](https://www.first.org/epss/)). The [GitLab 17.9 release](https://about.gitlab.com/releases/2025/02/20/gitlab-17-9-released/) adds support for these frameworks. In this article, you'll learn how to use these frameworks within GitLab to efficiently prioritize risk across your dependency and container image vulnerabilities using this data. 
\n\n![Vulnerability risk assessment data](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674763/Blog/Content%20Images/vulnerability_data.png)\n\n\u003Ccenter>\u003Ci>Vulnerability risk assessment data\u003C/i>\u003C/center>\n\n## CVSS\n\nCVSS provides a standardized method for rating the severity of security vulnerabilities. Scores range from 0 to 10, with higher values indicating greater severity.\n\nCVSS evaluates vulnerabilities across three dimension groups:\n\n* Base metrics: intrinsic qualities that don't change over time (attack complexity, privileges   required)  \n* Temporal metrics: factors that evolve (exploit maturity, remediation level)  \n* Environmental metrics: organization-specific impact considerations\n\nCVSS offers a consistent severity baseline and common language for security teams. Its comprehensive scoring methodology considers multiple aspects of a vulnerability's technical impact.\n\n## KEV\n\nThe Cybersecurity and Infrastructure Security Agency (CISA) maintains the KEV catalog, which identifies vulnerabilities actively exploited in the wild.\n\nUnlike academic severity scores, KEV focuses on real-world threat intelligence. Each entry includes:\n\n* CVE identifier  \n* Vulnerability name  \n* Action required  \n* Due date for remediation (for federal agencies)\n\nKEV provides actionable intelligence based on observed threat actor behavior. It cuts through scoring complexity with a binary signal: \"This vulnerability is being actively exploited right now.\"\n\n## EPSS\n\nThe EPSS uses machine learning to predict the likelihood a vulnerability will be exploited in the next 30 days. Scores range from 0 to 1 (or 0%-100%), representing probability.\n\nEPSS analyzes hundreds of factors, including:\n* Technical characteristics  \n* Social media mentions  \n* Exploit availability  \n* Vulnerability age\n\nEPSS brings risk-based prioritization to vulnerability management. 
Rather than focusing solely on technical severity, it helps teams understand which vulnerabilities attackers are most likely to target.\n\n## Combining the frameworks for effective prioritization\n\nEach framework serves a unique purpose:\n\n* CVSS indicates how severe a vulnerability is technically.  \n* KEV indicates which vulnerabilities are actively being exploited. \n* EPSS indicates which vulnerabilities are likely to be exploited soon.\n\nAn effective prioritization strategy leverages all three:\n\n1. Start with KEV-listed vulnerabilities as immediate priorities.  \n2. Use EPSS to identify high-probability threats not yet on KEV.  \n3. Consider CVSS for understanding technical impact.\n\nBy combining these complementary frameworks, security teams can focus limited resources on the vulnerabilities that pose the greatest actual risk to their organizations. You can get started with prioritizing vulnerabilities with GitLab by:\n\n1. Adding security scanners to your pipeline  \n2. Viewing vulnerability insights  \n3. Setting the vulnerability status based metrics\n\nWatch this video to learn more:\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/7-dWwoKfCHw?si=iC73JCRsxPUEWKf-\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\n### Adding security scanners to your pipeline\n\nGitLab provides built-in security scanning tools through its templates that can be integrated directly into your CI/CD pipeline. 
GitLab offers several security scanners that address different aspects of your application security:\n\n* **[Static Application Security Testing (SAST)](https://docs.gitlab.com/user/application_security/sast/):** Analyzes your source code for known vulnerabilities  \n* **[Dynamic Application Security Testing (DAST)](https://docs.gitlab.com/user/application_security/dast/):** Tests your running application for vulnerabilities  \n* **[Dependency Scanning](https://docs.gitlab.com/user/application_security/dependency_scanning/):** Checks your dependencies for known vulnerabilities  \n* **[Container Scanning](https://docs.gitlab.com/user/application_security/container_scanning/):** Identifies vulnerabilities in container images  \n* **[Secret Detection](https://docs.gitlab.com/user/application_security/secret_detection/):** Finds secrets and credentials accidentally committed to your repository  \n* **[Infrastructure as Code Scanning](https://docs.gitlab.com/user/application_security/iac_scanning/):** Detects security issues in IaC files  \n* **[Coverage-guided Fuzzing](https://docs.gitlab.com/user/application_security/coverage_fuzzing/):** Sends random inputs to an instrumented version of your application in an effort to detect bugs  \n* **[Web API Fuzzing](https://docs.gitlab.com/user/application_security/api_fuzzing/):** Sets operation parameters to unexpected values in an effort to cause unexpected behavior and errors in the API backend\n\nTo add them to your pipeline, simply add the appropriate templates to `.gitlab-ci.yml` file. For example, adding SAST and Dependency Scanning to your pipeline is as simple as:\n\n```yaml  \ninclude:  \n  - template: Security/SAST.gitlab-ci.yml  \n  - template: Security/Dependency-Scanning.gitlab-ci.yml\n\nstages:  \n  - test  \n```\n\nOnce you commit the above changes, security scanners will begin to run. These scanners can be further configured to meet the needs of your organization. 
To learn more about our various scanners, see the [GitLab application security documentation](https://docs.gitlab.com/user/application_security/).\n\n**Note:** EPSS and KEV metrics are only provided for [dependency](https://docs.gitlab.com/user/application_security/dependency_scanning/) and [container image](https://docs.gitlab.com/user/application_security/container_scanning/) vulnerabilities.\n\n### Viewing vulnerability insights\n\nOnce a pipeline with your security scanners is run on the default branch, you can access the vulnerability report. The vulnerability report provides a consolidated view of all security vulnerabilities detected across your project by GitLab's security scanners. You can access it from your project by going to the side-tab and selecting **Secure > Vulnerability Report**.\n\n![Vulnerability report grouped by tool](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674763/Blog/Content%20Images/vulnerability_report__1_.png)\n\n\u003Ccenter>\u003Ci>Vulnerability report grouped by tool\u003C/i>\u003C/center>\n\u003Cbr>\u003C/br>\n\nFrom the vulnerability report, select a vulnerability to see its insights page, which includes the severity, EPSS, KEV, and CVSS along with the following:\n\n* description  \n* when it was detected  \n* current status  \n* available actions  \n* linked issues  \n* actions log  \n* filename and line number of the vulnerability (if available)\n\nThis data can be used to effectively triage, remediate, or mitigate the vulnerability.\n\n__Note:__ From the insights page, you can also leverage GitLab Duo’s AI capabilities to [explain](https://docs.gitlab.com/user/application_security/vulnerabilities/#vulnerability-explanation) and [auto-resolve](https://docs.gitlab.com/user/application_security/vulnerabilities/#vulnerability-resolution) a vulnerability.\n\n### Setting the vulnerability status-based metrics\n\nAfter examining the provided data, we can go ahead and change the status of our vulnerability by 
clicking the **Change status** button:  \n\n![Change vulnerability status from insights page](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674764/Blog/Content%20Images/change_status.png)\n\n\u003Ccenter>\u003Ci>Change vulnerability status from insights page\u003C/i>\u003C/center>\n\u003Cbr>\u003C/br>\n\nThen we'll see a popup that will allow you to change the status of a vulnerability:\n\n![Change vulnerability status option](https://res.cloudinary.com/about-gitlab-com/image/upload/v1749674763/Blog/Content%20Images/change_status_2.png)\n\n\u003Ccenter>\u003Ci>Change vulnerability status option\u003C/i>\u003C/center>\n\n\u003Cbr>\u003C/br>\n\nWhen you dismiss a vulnerability you can choose one of the following reasons and optionally provide a comment:\n\n* **Acceptable risk:** The vulnerability is known, and has not been remediated or mitigated, but is considered to be an acceptable business risk.  \n* **False positive:** An error in reporting in which a test result incorrectly indicates the presence of a vulnerability in a system when the vulnerability is not present.  \n* **Mitigating control:** The vulnerability’s risk is mitigated by a management, operational, or technical control (that is, safeguard or countermeasure) employed by an organization that provides equivalent or comparable protection for an information system.  \n* **Used in tests:** The finding is not a vulnerability because it is part of a test or is test data.  
\n* **Not applicable:** The vulnerability is known, and has not been remediated or mitigated, but is considered to be in a part of the application that will not be updated.\n\nAnd there you have it, quick and easy vulnerability risk prioritization with GitLab!\n\n> Get started today with [a free, 60-day trial of GitLab Ultimate](https://about.gitlab.com/pricing/ultimate/)!\n\n## Learn more\n\nTo learn more about GitLab security and governance features and how we can help enhance your security posture, check out the following resources:\n\n* [GitLab Risk Assessment Data](https://docs.gitlab.com/user/application_security/vulnerabilities/risk_assessment_data/)   \n* [GitLab Security and Compliance Solutions](https://about.gitlab.com/solutions/security-compliance/)  \n* [GitLab Application Security documentation](https://docs.gitlab.com/ee/user/application_security/)  \n* [GitLab Risk Assessment Data epic](https://gitlab.com/groups/gitlab-org/-/epics/11544)",[9,814,835],{"slug":4597,"featured":90,"template":684},"vulnerability-risk-prioritization-made-simple-with-gitlab","content:en-us:blog:vulnerability-risk-prioritization-made-simple-with-gitlab.yml","Vulnerability Risk Prioritization Made Simple With Gitlab","en-us/blog/vulnerability-risk-prioritization-made-simple-with-gitlab.yml","en-us/blog/vulnerability-risk-prioritization-made-simple-with-gitlab",{"_path":4603,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4604,"content":4610,"config":4617,"_id":4619,"_type":13,"title":4620,"_source":15,"_file":4621,"_stem":4622,"_extension":18},"/en-us/blog/why-gitlab-is-deprecating-compliance-pipelines-in-favor-of-security-policies",{"title":4605,"description":4606,"ogTitle":4605,"ogDescription":4606,"noIndex":6,"ogImage":4607,"ogUrl":4608,"ogSiteName":669,"ogType":670,"canonicalUrls":4608,"schema":4609},"GitLab moves from compliance pipelines to security policies","Learn about our decision to deprecate compliance pipelines and how to migrate to pipeline execution 
policies. The process is detailed in this tutorial.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098128/Blog/Hero%20Images/Blog/Hero%20Images/security-checklist_security-checklist.png_1750098128272.png","https://about.gitlab.com/blog/why-gitlab-is-deprecating-compliance-pipelines-in-favor-of-security-policies","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Why GitLab is deprecating compliance pipelines in favor of security policies\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Ian Khor\"}],\n        \"datePublished\": \"2024-10-01\",\n      }",{"title":4611,"description":4606,"authors":4612,"heroImage":4607,"date":4614,"body":4615,"category":814,"tags":4616},"Why GitLab is deprecating compliance pipelines in favor of security policies",[4613],"Ian Khor","2024-10-01","GitLab compliance pipelines ensure security- and compliance-related jobs in applicable projects are run in accordance with compliance frameworks. Similarly, scan execution policies assure GitLab security scans are run in pipelines in a compliant manner.\n\nWhat we’ve learned from users is that they’d like to capture benefits offered by each feature through a single, simpler solution. Users would like to combine the flexibility of [compliance pipelines](https://docs.gitlab.com/ee/user/project/settings/index.html#compliance-pipeline-configuration) with the simplicity and versatility of [security policies](https://docs.gitlab.com/ee/user/application_security/policies/).\n\nTo meet this request, we developed a new feature, [pipeline execution policies](https://docs.gitlab.com/ee/user/application_security/policies/pipeline_execution_policies.html), to help users enforce customized CI/CD jobs for all applicable projects. 
Pipeline execution policies perform a similar function to compliance pipelines, but with increased focus on compliance enforcement, flexibility, and a foundation to build and solve for more use cases in the future.\n\nTo reduce confusion, compliance pipelines have been [deprecated](https://docs.gitlab.com/ee/update/deprecations.html#compliance-pipelines) in 17.3 now that pipeline execution policies are available and, as part of the deprecation, we are providing a step-by-step workflow for migrating from compliance pipelines to pipeline execution policy type in 17.5.\n\nYou can follow along with the work we’re doing with the deprecation through this [epic](https://gitlab.com/groups/gitlab-org/-/epics/11275).\n\n![compliance pipelines - image 1](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098139/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750098139599.png)\n\n## Why are we deprecating compliance pipelines?\n\nTo understand the reason behind this change, we first need to understand the difference between the [compliance management](https://about.gitlab.com/direction/govern/compliance/compliance-management/) and [policy management](https://about.gitlab.com/direction/govern/security_policies/security_policy_management/) features in GitLab. With compliance management, we are focused on helping you understand your compliance posture, providing tools to report to auditors, and surfacing compliance risks in a way that helps you take action.\n\nWe are also focused on increasing compliance visibility of framework requirements, violations, and audit events throughout the entire DevSecOps lifecycle. Our compliance management offering also establishes a direct association between controls and automations configured through policies back into compliance requirements established through compliance frameworks.\n\nPolicy management works hand in hand with compliance programs, as well as supporting scalable security initiatives. 
Policies give organizations a central location to globally enforce security controls, compliance controls, and automate security and compliance workflows. Security policies will continue to address core use cases across the lifecycle, such as defining enforcement around CI/CD component usage, blocking risks related to dependency and package management, and automating vulnerability management workflows to address security and compliance controls.\n\nTherefore, to ensure we provide the greatest value for our security and compliance users, we are deprecating compliance pipelines and providing a migration path for users to security policies. Not only does this make it clear and simple to the user how and when to enforce jobs as part of a project pipeline, but it also makes the distinction between compliance management and policy management in GitLab clearer. Compliance management is focused on compliance visibility, and policy management is focused on compliance and security enforcement across your entire GitLab instance.\n\n## What is the timeline for the deprecation and removal of compliance pipelines?\n\nThe iteration plan below can be found in the [issue that details the work we are doing](https://gitlab.com/groups/gitlab-org/-/epics/11275) to deprecate and remove compliance pipelines:\n\n**[Deprecation announcement](https://docs.gitlab.com/ee/update/deprecations.html#compliance-pipelines)**\n* Compliance pipeline deprecation and removal was announced in 17.3\n\n**[Compliance pipelines maintenance mode](https://gitlab.com/groups/gitlab-org/-/epics/12324)**\n* Adding banners and migration workflow, and docs\n* Released in 17.5\n\n**[Deter new compliance pipelines](https://gitlab.com/groups/gitlab-org/-/epics/14150)**\n* Adding warning banners for new pipelines\n* Encourage users to try the pipeline execution policy instead\n* Scheduled to start work on this 17.6\n* Scheduled to be released 17.8\n\n**[Compliance pipelines 
removal](https://gitlab.com/groups/gitlab-org/-/epics/12325) (Remove compliance pipelines)**\n* Provide tools to trial the removal and validate any errors\n* Scheduled to start work on this 17.8\n* Scheduled to be released 19.0\n\nAs you can see, we will start with the deprecation of compliance pipelines and the introduction of pipeline execution policy in the 17.3 release. \n\nLeading up to the removal of compliance pipelines in the 19.0 release, we are including new ways to inform and warn users about the upcoming removal. We are providing warning banners on new pipelines, as well as a workflow that can be used to migrate compliance pipelines to pipeline execution policy.\n\nWe ‘ll remove compliance pipelines in the 19.0 release, but provide a reverse feature flag in the milestones leading up that will help users test the removal and understand any impact prior to the removal date.\n\n## How to migrate your compliance pipelines to pipeline execution policy?\n\nThere are two ways users can access the workflow for migrating compliance pipelines to pipeline execution policy.\n\n1. When creating a new compliance framework, there will now be a warning banner that allows users to start using pipeline execution policy type instead of compliance frameworks:\n\n![compliance pipelines - image 3](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098140/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750098139599.png)\n\n2. 
When editing an existing compliance framework, there will now be a warning banner that enables users to migrate their compliance pipelines to pipeline execution policy type – if they have a compliance pipeline configured.\n\n![compliance pipelines - image 2](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098140/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750098139601.png)\n\nSelecting either \"Create policy\" or \"Migrate pipeline to a policy\" in either workflow will bring users to the \"New policy\" creation page in the \"Security Policies\" section. This will allow users to create a new security policy instead of a compliance pipeline. Or, if you migrate an existing compliance pipeline, the new policy will be populated with the compliance pipeline YAML as the remote source for the policy. Also, the policy scope will be populated with the framework from which you are migrating. \n\nThe policy will target all projects with that label for enforcement and apply enforcement of jobs defined in your remote file, now the pipeline execution YAML. By default, the new policy will be configured with the “override” mode, which will override downstream projects' `.gitlab-ci.yml` with the configuration you have defined (similar to compliance pipelines).\n\n![compliance pipelines - image 4](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750098140/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750098139604.png)\n\nAlternatively, you may use the “Inject” mode, which introduces a new set of reserved stages to run security and compliance jobs in isolation in a tamper-proof manner, without disrupting the project pipeline, and without coordinating with project teams to define stage names in their pipeline config. \n\nWith this approach, be sure to remove the `include:project`, which is no longer needed for this mode. And, depending on your version, ensure job names are unique (required in GitLab 17.2 and 17.3). 
In GitLab 17.4, we introduced additional enhancements for [managing conflicts](https://docs.gitlab.com/ee/user/application_security/policies/pipeline_execution_policies.html#job-naming-best-practice) for additional flexibility.\n\n## Start your migration today\n\nWe want to ensure that all GitLab users who are using compliance pipelines are fully aware of the deprecation of compliance pipelines in 17.3 and its eventual removal by the 19.0 release as a breaking change. \n\nWe are asking users to start migrating their compliance pipelines to the pipeline execution policy type as soon as possible, before the removal of compliance pipelines in GitLab 19.0.\n\nIf there are any questions, please contact your customer service representative or GitLab support for any help.\n\n> Follow along with the compliance pipeline deprecation progress in [this epic](https://gitlab.com/groups/gitlab-org/-/epics/11275).\n\n> Share feedback in [this issue](https://gitlab.com/gitlab-org/gitlab/-/issues/491924) regarding any gaps are blockers for adopting pipeline execution policies.",[814,9,835,108],{"slug":4618,"featured":90,"template":684},"why-gitlab-is-deprecating-compliance-pipelines-in-favor-of-security-policies","content:en-us:blog:why-gitlab-is-deprecating-compliance-pipelines-in-favor-of-security-policies.yml","Why Gitlab Is Deprecating Compliance Pipelines In Favor Of Security 
Policies","en-us/blog/why-gitlab-is-deprecating-compliance-pipelines-in-favor-of-security-policies.yml","en-us/blog/why-gitlab-is-deprecating-compliance-pipelines-in-favor-of-security-policies",{"_path":4624,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4625,"content":4631,"config":4636,"_id":4638,"_type":13,"title":4639,"_source":15,"_file":4640,"_stem":4641,"_extension":18},"/en-us/blog/write-terraform-plans-faster-with-gitlab-duo-code-suggestions",{"title":4626,"description":4627,"ogTitle":4626,"ogDescription":4627,"noIndex":6,"ogImage":4628,"ogUrl":4629,"ogSiteName":669,"ogType":670,"canonicalUrls":4629,"schema":4630},"Write Terraform plans faster with GitLab Duo Code Suggestions","Follow this tutorial to learn how to use AI-powered code creation to manage your infrastructure with Terraform.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1749679194/Blog/Hero%20Images/duo-blog-post.png","https://about.gitlab.com/blog/write-terraform-plans-faster-with-gitlab-duo-code-suggestions","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"Write Terraform plans faster with GitLab Duo Code Suggestions\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Abubakar Siddiq Ango\"}],\n        \"datePublished\": \"2024-01-24\",\n      }",{"title":4626,"description":4627,"authors":4632,"heroImage":4628,"date":4633,"body":4634,"category":702,"tags":4635},[1570],"2024-01-24","[Terraform](https://www.terraform.io/) is an industry-standard for infrastructure orchestration. It can, however, be daunting and time-consuming to learn how to create infrastructure resources, especially when using Terraform providers you are unfamiliar with. 
That is where GitLab Duo Code Suggestions and AI-powered code creation comes in.\n\n[GitLab Duo Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/) accelerates your coding in up to [15 supported programming languages](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/index.html#supported-languages-in-ides), including infrastructure as code (IaC) using Terraform.\n\nBy combining IaC with Terraform, infrastructure teams can adopt new Terraform providers/modules quickly and move changes to production faster. The combination also reduces the onboarding time for new users of Terraform or developers who are new to a code base by maintaining the context of the code base and dependencies in its suggestions. Instead of spending hours reading through documentation, you only need to review the given suggestions and update as necessary.\n\nIn this post, you will learn how to set up GitLab Duo Code Suggestions for Terraform.\n\n## How to set up GitLab Duo Code Suggestions for Terraform\n\nFollow these 6 steps to get started:\n\n1. Install the GitLab extension for your IDE of choice (read about [supported IDE extensions](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/index.html#supported-editor-extensions).\n2. Authenticate the GitLab extension with GitLab.com or your GitLab self-hosted instance.\n3. Enable Code Suggestions in your Profile settings under the Preferences > Preferences menu. Search for Code Suggestions, toggle the checkbox, and click Save.\n4. Confirm that Code Suggestions is also enabled in your GitLab extensions settings in the IDE.\n5. If you are using Visual Studio Code and Neovim, third-party extension support is needed to use Terraform with Code Suggestions. For VS Code, you can install the [official Terraform extension](https://marketplace.visualstudio.com/items?itemName=HashiCorp.terraform) from HashiCorp.\n6. 
To test and apply your Terraform plans, [install Terraform](https://developer.hashicorp.com/terraform/install?product_intent=terraform) in your terminal and any necessary provider SDKs.\n\n## Creating your plans\n\nOnce you have all the prerequisites in place, all you need to do next is create a Terraform file ending with `.tf` and prompt GitLab Duo with comments describing what you want. Suggestions will be displayed while maintaining context. Even when no prompt is provided, GitLab Duo will suggest other Terraform resources you might be interested in based on the current context in your plans.\n\nLet's see this in action with GitLab Duo helping with a Terraform plan for provisioning a load balancer with four instances and other necessary resources on the Google Cloud Platform.\n\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/saa2JJ57UaQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\n## Get started with Code Suggestions and Terraform today\n\n[Try Code Suggestions](https://about.gitlab.com/solutions/code-suggestions/) today and accelerate your software development. Code Suggestions is also available for [self-managed GitLab](https://docs.gitlab.com/ee/user/project/repository/code_suggestions/self_managed.html) (needs to be enabled by an administrator). 
Code Suggestions uses best-in-class large language models, and non-public customer data is never used as training data.",[478,704,9],{"slug":4637,"featured":90,"template":684},"write-terraform-plans-faster-with-gitlab-duo-code-suggestions","content:en-us:blog:write-terraform-plans-faster-with-gitlab-duo-code-suggestions.yml","Write Terraform Plans Faster With Gitlab Duo Code Suggestions","en-us/blog/write-terraform-plans-faster-with-gitlab-duo-code-suggestions.yml","en-us/blog/write-terraform-plans-faster-with-gitlab-duo-code-suggestions",{"_path":4643,"_dir":243,"_draft":6,"_partial":6,"_locale":7,"seo":4644,"content":4650,"config":4655,"_id":4657,"_type":13,"title":4658,"_source":15,"_file":4659,"_stem":4660,"_extension":18},"/en-us/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat",{"title":4645,"description":4646,"ogTitle":4645,"ogDescription":4646,"noIndex":6,"ogImage":4647,"ogUrl":4648,"ogSiteName":669,"ogType":670,"canonicalUrls":4648,"schema":4649},"10 best practices for using AI-powered GitLab Duo Chat","Explore tips and tricks for integrating GitLab Duo Chat into your AI-powered DevSecOps workflows. 
Plus, expert advice on how to refine chat prompts for the best results.","https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097639/Blog/Hero%20Images/Blog/Hero%20Images/blog-image-template-1800x945%20%281%29_77JeTV9gAmbXM0224acirV_1750097638765.png","https://about.gitlab.com/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat","\n                        {\n        \"@context\": \"https://schema.org\",\n        \"@type\": \"Article\",\n        \"headline\": \"10 best practices for using AI-powered GitLab Duo Chat\",\n        \"author\": [{\"@type\":\"Person\",\"name\":\"Michael Friedrich\"}],\n        \"datePublished\": \"2024-04-02\",\n      }",{"title":4645,"description":4646,"authors":4651,"heroImage":4647,"date":4652,"body":4653,"category":702,"tags":4654},[1612],"2024-04-02","Getting into a conversation with AI can be challenging. What question do you start with? How do you frame the question? How much context is needed? Will the conversation provide the best and most efficient results?\n\nIn this tutorial, we explore 10 tips and best practices to integrate GitLab Duo Chat into your AI-powered DevSecOps workflows and refine your prompts for the best results.\n\n[Get started: Keep GitLab Duo Chat open and in sight](#get-started-keep-gitlab-duo-chat-open-and-in-sight)\n\n[10 best practices for using GitLab Duo Chat](#10-best-practices-for-using-gitlab-duo-chat)\n\n1. [Have a conversation](#1.-have-a-conversation)\n2. [Refine the prompt for more efficiency](#2.-refine-the-prompt-for-more-efficiency)\n3. [Follow prompt patterns](#3.-follow-prompt-patterns)\n4. [Use low-context communication](#4.-use-low-context-communication)\n5. [Repeat yourself](#5.-repeat-yourself)\n6. [Be patient](#6.-be-patient)\n7. [Reset and start anew](#7.-reset-and-start-anew)\n8. [Gain efficiency with slash commands in the IDE](#8.-gain-efficiency-with-slash-commands-in-the-ide)\n9. [Refine the prompt for slash commands](#9.-refine-the-prompt-for-slash-commands)\n10. 
[Get creative with slash commands](#10.-get-creative-with-slash-commands)\n\nBonus content:\n- [Shortcuts](#shortcuts)\n- [Fun exercises](#fun-exercises)\n- [Learn more](#learn-more)\n\n> Live demo! Discover the future of AI-driven software development with our GitLab 17 virtual launch event. [Register today!](https://about.gitlab.com/seventeen/)\n\n## Get started: Keep GitLab Duo Chat open and in sight\n\n[GitLab Duo Chat](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html) is available in the GitLab UI, Web IDE, and supported programming IDEs, for example, VS Code. \n\nIn VS Code, you can open GitLab Duo Chat in the default left pane. You can also drag and drop the icon into the right pane. This allows you to keep Chat open while you write code and navigate the file tree, perform Git actions, etc. To reset the Chat location, open the command palette (by pressing the `Command+Shift+P` (on macOS) or `Ctrl+Shift+P` (on Windows/Linux) keyboard shortcut and then type `View: Reset View Locations`. The following short video shows you how to do it.\n\n\u003C!-- blank line -->\n\u003Cfigure class=\"video_container\">\n  \u003Ciframe src=\"https://www.youtube.com/embed/foZpUvWPRJQ\" frameborder=\"0\" allowfullscreen=\"true\"> \u003C/iframe>\n\u003C/figure>\n\u003C!-- blank line -->\n\nThe Web IDE and VS Code share the same framework – the same method works in the Web IDE for more efficient workflows.\n\n![Chat in Web IDE](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image1_aHR0cHM6_1750097645344.png)\n\n## 10 best practices for using GitLab Duo Chat\n\n### 1. Have a conversation\n\nChats are conversations, not search forms.\n\nFor the first conversation icebreaker, you can start with the same search terms similar to a browser search and experiment with the response and output. In this example, let's start with a C# project and best practices. 
\n\n> c# start project best practices\n\n![Chat prompt for C# start project best practices and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097646/Blog/Content%20Images/Blog/Content%20Images/image11_aHR0cHM6_1750097645345.png)\n\nThe response is helpful to understand a broad scope of C#, but does not kickstart immediate best practices. Let's follow up with a more focused question in the same context. \n\n> Please show the project structure for the C# project.\n\n![Chat prompt for project structure for the C# project and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image9_aHR0cHM6_1750097645346.png)\n\nThis answer is helpful. Next, let's follow up with a Git question, and use the same question structure: Direct request to show something.\n\n> Show an example for a .gitignore for C#\n\n![Chat prompt for a .gitignore for C# and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image25_aHR0cHM6_1750097645347.png)\n\nContinue with CI/CD and ask how to build the C# project.\n\n> Show a GitLab CI/CD configuration for building the C# project\n\n![Chat prompt for GitLab CI/CD configuration for building C# project and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image16_aHR0cHM6_1750097645349.png)\n\nIn this example, Chat encouraged us to request specific changes. Let's ask to use the .NET SDK 8.0 instead of 6.0. \n\n> In the above example, please use the .NET SDK 8.0 image\n\n![Chat prompt to use .NET SDK 8.0 image and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image32_aHR0cHM6_1750097645350.png)\n\nThe CI/CD configuration uses the .NET command line interface (CLI). 
Maybe we can use that for more efficient commands to create the projects and tests structure, too? \n\n> Explain how to create projects and test structure on the CLI \n\n![Chat prompt to explain how to create projects and test structure on the CLI and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image14_aHR0cHM6_1750097645351.png)\n\nOf course, we could execute these commands in the terminal, but what if we wanted to stay in VS Code? Let's ask Chat.\n\n> Explain how to open a new terminal in VS Code\n\n![Chat prompt to explain how to open a new terminal in VS Code and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image5_aHR0cHM6_1750097645351.png)\n\n### 2. Refine the prompt for more efficiency\n\nThink of GitLab Duo Chat as a human, and engage with full sentences that provide as much context into your thoughts and questions. \n\nExperienced browser search users might know this approach to queries: Build up the question, add more terms to refine the scope, and restart the search after opening plenty of tabs. \n\nIn a browser search, this probably would result in four to five different search windows. \n\n```markdown\nc# start project best practices\nc# .gitignore\nc# gitlab cicd \nc# gitlab security scanning \nc# solutions and projects, application and tests\n``` \n\nYou can follow this strategy in a chat conversation, too. It requires adding more context, making it a conversational approach. GitLab Duo Chat enables you to ask multiple questions in one conversation request. Example: You need to start with a new C# project, apply best practices, add a `.gitignore` file, and configure CI/CD and security scanning, just like in the above search. In Chat, you can combine the questions into one request.\n\n> How can I get started creating an empty C# console application in VS Code? 
Please show a .gitignore and .gitlab-ci.yml configuration with steps for C#, and add security scanning for GitLab. Explain how solutions and projects in C# work, and how to add a test project on the CLI.\n\n![Chat prompt adding more context and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image37_aHR0cHM6_1750097645352.png)\n\nIn this response, Chat suggests to ask for specific configuration examples in follow-up questions in the conversation. Async practice: Create follow-up questions. You can omit `C#` as context in the same chat session.\n\n> Please show an example for a .gitignore. Please show a CI/CD configuration. Include the SAST template.\n\n### 3. Follow prompt patterns \n\nFollow the pattern: `Problem statement, ask for help, provide additional requests`. Not everything comes to mind when asking the first question – don't feel blocked, and instead start with `Problem statement, ask for help` in the first iteration. \n\n> I need to fulfill compliance requirements. How can I get started with Codeowners and approval rules?\n\n![Chat prompt to get started with Codeowners and approval rules and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image19_aHR0cHM6_1750097645352.png)\n\nThe answer is helpful but obviously generic. Now, you may want to get specific help for your team setup. \n\n> Please show an example for Codeowners with different teams: backend, frontend, release managers.\n\n![Chat prompt to show an example for Codeowners with different teams: backend, frontend, release managers and reponse ](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image31_aHR0cHM6_1750097645353.png)\n\nAn alternative is to describe the situation you are in and to ask for input. 
It can feel a bit like a conversation to follow the STAR model (Situation, Task, Action, Results). \n\n> I have a Kubernetes cluster integrated in GitLab. Please generate a Yaml configuration for a Kubernetes service deployment. Explain how GitOps works as a second step. How to verify the results?\n\n![Chat prompt with multiple questions and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image27_aHR0cHM6_1750097645354.png)\n\n### 4. Use low-context communication \n\nProvide as much context as needed to provide an answer. Sometimes, the previous history or opened source code does not provide that helpful context. To make questions more efficient, apply a pattern of [low-context communication](https://handbook.gitlab.com/handbook/company/culture/all-remote/effective-communication/#understanding-low-context-communication), which is used in all-remote communication at GitLab.\n\nThe following question did not provide enough context in a C++ project.\n\n> Should I use virtual override instead of just override?\n\n![Chat prompt asking if the users should use virtual override instead of just override and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image34_aHR0cHM6_1750097645354.png)\n\nInstead, try to add more context:\n\n> When implementing a pure virtual function in an inherited class, should I use virtual function override, or just function override? Context is C++. \n\n![Chat prompt with more detail and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image36_aHR0cHM6_1750097645355.png)\n\nThe example is also shown in the [GitLab Duo Coffee Chat: Refactor C++ functions into OOP classes for abstract database handling](https://youtu.be/Z9EJh0J9358?t=2190). \n\n### 5. Repeat yourself\n\nAI is not predictable. 
Sometimes, it may not answer with the expected results, or does not produce source code examples or configuration snippets because it lacked context. It is recommended to repeat the question and refine the requirements.\n\nIn the following example, we want to create a C# application. In the first attempt, we did not specify the application type – C# can be used to create console/terminal but also UI applications. The result also does not provide an empty example source code. The second, repeated prompt adds two more words - `console` and `empty`. \n\n> How can I get started creating an C# application in VSCode?\n> \n> How can I get started creating an empty C# console application in VSCode?\n\nThe results in the prompt differ. The first response is helpful to get started by following the instructions in the VS Code window, but it does not tell us where the source code is located and how to modify it. The repeated prompt with refinements modifies the response and provides instructions how to override the default template with some “hello world” code.\n\n![Chat prompt with repeated prompt with modifications and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image28_aHR0cHM6_1750097645355.png)\n\nYou can also combine repeat and refine strategies, and ask Chat to show an example for application code and tests.\n\n> How can I get started creating an empty C# console application in VSCode? Please show an example for application and tests.\n\n![Chat prompt that asks for example for application and tests and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image3_aHR0cHM6_1750097645356.png)\n\n#### Repeat yourself after generic questions \n\nWhen asking generic technology questions, GitLab Duo Chat might not be able to help. 
In the following scenario, I wanted to get a suggestion for Java build tools and framework, and it did not work. There could be many answers: Maven, Gradle, etc., as build tools, and [100+ Java frameworks](https://en.wikipedia.org/wiki/List_of_Java_frameworks), depending on the technology stack and requirements.\n\n![Chat prompt for Java build tools and framework and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image2_aHR0cHM6_1750097645356.png)\n\nLet's assume that we want to focus on a customer environment with [Java Spring Boot](https://spring.io/projects/spring-boot). \n\n> I want to create a Java Spring Boot application. Please explain the project structure and show a hello world example.\n\n![Chat prompt that asks for more, including a hello world example and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image26_aHR0cHM6_1750097645357.png)\n\nThis provides great results already. As an async exercise, repeat the prompt, and ask how to deploy the application, adding more refinements in each step. Alternatively, you can make it a follow-up conversation.\n\n> I want to create a Java Spring Boot application. Please explain the project structure and show a hello world example. Show how to build and deploy the application in CI/CD.\n> \n> I want to create a Java Spring Boot application. Please explain the project structure and show a hello world example. Show how to build and deploy the application in CI/CD, using container images.\n> \n> I want to create a Java Spring Boot application. Please explain the project structure and show a hello world example. Show how to build and deploy the application in CI/CD, using container images. Use Kubernetes and GitOps in GitLab.\n\n### 6. 
Be patient\n\nSingle words or short sentences might not generate the desired results, [as shown in this video example](https://youtu.be/JketELxLNEw?t=1220). Sometimes, GitLab Duo Chat is able to guess from available data, but sometimes also might insist on providing more context.\n\nExample: `labels` matches the GitLab documentation content.\n\n![Chat prompt about labels and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image12_aHR0cHM6_1750097645357.png)\n\nRefine the question to problem statements and more refinements for issue board usage.\n\n> Explain labels in GitLab. Provide an example for efficient usage with issue boards.\n\n![Chat prompt that includes asking for an example and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image21_aHR0cHM6_1750097645358.png)\n\nOr use a problem statement, followed by a question and the ask for additional examples.\n\n> I don't know how to use labels in GitLab. Please provide examples, and how to use them for filters in different views. Explain these views with examples.\n\n![Chat prompt with problem statement and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image10_aHR0cHM6_1750097645358.png)\n\nAlso, avoid `yes/no` questions and instead add specific context.\n\n> Can you help me fix performance regressions?\n\n![Chat promptt that asks for help with fixing performance regressions and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image18_aHR0cHM6_1750097645359.png)\n\nInstead, provide the context of the performance regression, including the programming languages, frameworks, technology stack, and environments. 
The following example uses an environment from some years ago, which can still be accurate today.\n\n> My PHP application encounters performance regressions using PHP 5.6 and MySQL 5.5. Please explain potential root causes, and how to address them. The app is deployed on Linux VMs.\n\n![Chat prompt that includes more detail and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image24_aHR0cHM6_1750097645360.png)\n\n### 7. Reset and start anew\n\nSometimes, the chat history shows a different learning curve and provides the wrong context for follow-up questions. Or, you asked specific questions where GitLab Duo Chat cannot provide answers. Since generative AI is not predictable, it might also lack the ability to provide certain examples, but think it gave them in a future response (observed in Chat Beta). The underlying large language models, or LLMs, sometimes might insist on giving a specific response, in an endless loop.\n\n> How can I get started creating an empty C# console application in VSCode? Please show a .gitignore and .gitlab-ci.yml configuration with steps for C#, and add security scanning for GitLab. Explain how solutions and projects in C# work, and how to add a test project on the CLI.\n\nAfter asking the question above with an example configuration, I wanted to reduce the scope of the question to get a more tailored response. It did not work as expected, since Chat knows about the chat history in context, and refers to previous answers.\n\n> How can I get started creating an empty C# console application in VSCode? 
Please show a .gitignore and .gitlab-ci.yml configuration with steps for C#.\n\n![Chat prompt that asks for configuration examples and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image23_aHR0cHM6_1750097645360.png)\n\nTo force Chat into a new context, use `/reset` as slash command to reset the session, and repeat the question to get better results. You can also use `/clean` or `/clear` to delete all messages in the conversation.\n\n### 8. Gain efficiency with slash commands in the IDE \n\n#### Explain code\n\n- Q: Generated code? Existing code? Legacy code?\n- A: Use the [`/explain` slash command in the IDE](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html#explain-code-in-the-ide).\n- A2: Refine the prompt with more focused responses, for example: `/explain focus on potential shortcomings or bugs`. \n\n![Chat prompt with /explain slash command](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/gitlab_duo_chat_slash_commands_explain_01_aHR0cHM6_1750097645361.png)\n\n![Chat prompt with refined prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image6_aHR0cHM6_1750097645361.png)\n\n#### Refactor code \n\n- Q: Unreadable code? Long spaghetti code? Zero test coverage?\n- A: Use the [`/refactor` slash command in the IDE](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html#refactor-code-in-the-ide). \n- A2: Refine the prompt for more targeted actions, for example object-oriented patterns: `/refactor into object-oriented classes with methods and attributes`. 
\n\n![Chat prompt with /refactor slash command](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image35_aHR0cHM6_1750097645362.png)\n\n![Chat prompt with refined prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image30_aHR0cHM6_1750097645362.png)\n\n#### Generate tests\n\n- Q: Testable code but writing tests takes too much time?\n- A: Use the [`/tests` slash command in the IDE](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html#write-tests-in-the-ide).\n- A2: Refine the prompt for specific test frameworks, or test targets. You can also instruct the prompt to focus on refactoring, and then generate tests: `/tests focus on refactoring the code into functions, and generate tests`.\n\n![Chat prompt with /tests slash command](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image29_aHR0cHM6_1750097645363.png)\n\n![Chat prompt with refined prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image4_aHR0cHM6_1750097645363.png)\n\nMore practical examples in complete development workflows are available in the [GitLab Duo examples](https://docs.gitlab.com/ee/user/gitlab_duo_examples.html) documentation.\n\n### 9. Refine the prompt for slash commands \n\nYou will see refined prompts tips in this blog post a lot. It is one of the ingredients for better AI-powered workflow efficiency. Slash commands are no different, and allow for better results in GitLab Duo Chat.\n\nA customer recently asked: \"Can code explanations using `/explain` create comments in code?\" The answer is: no. But you can use the Chat prompt to ask follow-up questions, and ask for a summary in a code comment format. It requires the context of the language. 
\n\nThe following example with a [C++ HTTP client code using the curl library](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts/-/blob/5cc9bdd65ee8ee16c548bea0402c18f8209d4d06/chat/slash-commands/c++/cli.cpp) needs more documentation. You can refine the `/explain` prompt by giving more refined instructions to explain the code by adding code comments, and then copy-paste that into the editor.\n\n> /explain add documentation, rewrite the code snippet\n\n![Chat prompt to add documentation and rewrite code snippet and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image13_aHR0cHM6_1750097645363.png)\n\nAlternatively, you can ask Chat to `/refactor` the source code, and generate missing code comments through a refined prompt.\n\n> /refactor add code comments and documentation\n\n![Chat prompt to refactor source code and generate code comments](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image15_aHR0cHM6_1750097645364.png)\n\n### 10. Get creative with slash commands\n\nWhen the Chat prompt does not know an answer to a question about the source code or programming language, look into the slash commands `/explain`, `/refactor`, and `/tests` and how much they can help in the context.\n\nIn the following example, an SQL query string in C++ is created in a single line. To increase readability, and also add more database columns in the future, it can be helpful to change the formatting into a multi-line string.\n\n> std::string sql = \"CREATE TABLE IF NOT EXISTS users (id INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT NOT NULL, email TEXT NOT NULL)\";\n\nYou can ask GitLab Duo Chat about it, for example, with the following question:\n\n> How to create a string in C++ using multiple lines?\n\nChat may answer with an explanation and, optionally, a source code example. 
In this context, it can interpret the question to create a C++ string value with multiple lines, for example, using the `\\n` character, assigned to a variable. \n\nThe requirement instead is to only format the written code, and variable value assignment in multiple lines. The string value itself does not need to contain a multi-line string representation. \n\nThere is an alternative for additional context in VS Code and the Web IDE: Select the source code in question, right-click, and navigate into `GitLab Duo Chat > Refactor`. This opens the Chat prompt and fires the `/refactor` code task immediately.\n\nHowever, the code task might not bring the expected results. Refactoring a single-line SQL string can mean a lot of things: Use multiple lines for readability, create constants, etc.\n\nCode tasks provide an option to refine the prompt. You can add more text after the `/refactor` command, and instruct GitLab Duo Chat to use a specific code type, algorithm, or design pattern. \n\nLet's try it again: Select the source code, change focus into Chat, and type the following prompt, followed by `Enter`. \n\n> /refactor into a multi-line written string. Show different approaches for all C++ standards.\n\n![Chat prompt to refactor into a multi-line written string and response](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image17_aHR0cHM6_1750097645364.png)\n\n**Tip:** You can use GitLab Duo Code Suggestions to refine the source code even more after refactoring, or use alternative `/refactor` prompt refinements.\n\n>/refactor into a multi-line written string, show different approaches\n>\n> /refactor into multi-line string, not using raw string literals\n>\n> /refactor into a multi-line written string. 
Make the table name parametrizable\n\nAn alternative approach with the `stringstream` type is shown in the [GitLab Duo Coffee Chat: Refactor C++ functions into OOP classes for abstract database handling](https://www.youtube.com/watch?v=Z9EJh0J9358), [MR diff](https://gitlab.com/gitlab-da/use-cases/ai/gitlab-duo-coffee-chat/gitlab-duo-coffee-chat-2024-01-23/-/commit/7ea233138aed46d77e6ce0d930dd8e10560134eb#4ce01e4c84d4b62df8eed159c2db3768ad4ef8bf_33_35). \n\n#### Explain vulnerabilities\n\nIt might not always work, but the `/explain` slash command can be asked about security vulnerability explanations, too. In this example, the [C code](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts/-/blob/5a5f293dfbfac7222ca4013d8f9ce9b462e4cd3a/chat/slash-commands/c/vuln.c) contains multiple vulnerabilities for strcpy() buffer overflows, world writable file permissions, race condition attacks, and more.\n\n> /explain why this code has multiple vulnerabilities\n\n![Chat prompt about the code's multiple vulnerabilities](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image20_aHR0cHM6_1750097645365.png)\n\n#### Refactor C code into Rust\n\nRust provides memory safety. You can ask Duo Chat to refactor the vulnerable [C code](https://gitlab.com/gitlab-da/use-cases/ai/ai-workflows/gitlab-duo-prompts/-/blob/5a5f293dfbfac7222ca4013d8f9ce9b462e4cd3a/chat/slash-commands/c/vuln.c) into Rust, using `/refactor into Rust`. Practice with more refined prompts to get better results.\n\n> /refactor into Rust and use high level libraries\n\n![Chat prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image8_aHR0cHM6_1750097645366.png)\n\n### Shortcuts \n\nGive these shortcuts a try in your environment, and practice async using GitLab Duo Chat.\n\n1. 
Inspect vulnerable code from CVEs, and ask what it does, and how to fix it, using `/explain why is this code vulnerable`. \n**Tip:** Import open-source projects in GitLab to take advantage of GitLab Duo Chat code explanations.\n1. Try to refactor code into new programming languages to help legacy code migration plans.\n1. You can also try to refactor Jenkins configuration into GitLab CI/CD, using `/refactor into GitLab CI/CD configuration`. \n\n### Fun exercises \n\nTry to convince Chat to behave like Clippy.\n![Chat prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image22_aHR0cHM6_1750097645366.png)\n\nAsk about GitLab's mission: \"Everyone can contribute.\"\n\n![Chat prompt](https://res.cloudinary.com/about-gitlab-com/image/upload/v1750097645/Blog/Content%20Images/Blog/Content%20Images/image33_aHR0cHM6_1750097645367.png)\n\n### Learn more\n\nThere are many different environments and challenges out there. We have updated the [GitLab Duo Chat documentation](https://docs.gitlab.com/ee/user/gitlab_duo_chat.html) with more practical examples, and added a new [GitLab Duo examples](https://docs.gitlab.com/ee/user/gitlab_duo_examples.html) section with deep dives into AI-powered DevSecOps workflows, including Chat.\n\n> Want to get going with GitLab Duo Chat? [Start your free trial today](https://about.gitlab.com/solutions/gitlab-duo-pro/self-managed-and-gitlab-dedicated-trial/).\n",[704,9,478,680],{"slug":4656,"featured":90,"template":684},"10-best-practices-for-using-ai-powered-gitlab-duo-chat","content:en-us:blog:10-best-practices-for-using-ai-powered-gitlab-duo-chat.yml","10 Best Practices For Using Ai Powered Gitlab Duo Chat","en-us/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat.yml","en-us/blog/10-best-practices-for-using-ai-powered-gitlab-duo-chat",23,[662,689,711,735,756,780,801,820,842],1753207332229]