App

879 Topics
"}},"componentScriptGroups({\"componentId\":\"custom.widget.MicrosoftFooter\"})":{"__typename":"ComponentScriptGroups","scriptGroups":{"__typename":"ComponentScriptGroupsDefinition","afterInteractive":{"__typename":"PageScriptGroupDefinition","group":"AFTER_INTERACTIVE","scriptIds":[]},"lazyOnLoad":{"__typename":"PageScriptGroupDefinition","group":"LAZY_ON_LOAD","scriptIds":[]}},"componentScripts":[]},"cachedText({\"lastModified\":\"1745505309793\",\"locale\":\"en-US\",\"namespaces\":[\"components/community/NavbarDropdownToggle\"]})":[{"__ref":"CachedAsset:text:en_US-components/community/NavbarDropdownToggle-1745505309793"}],"cachedText({\"lastModified\":\"1745505309793\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/common/OverflowNav\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/common/OverflowNav-1745505309793"}],"cachedText({\"lastModified\":\"1745505309793\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageView/MessageViewInline\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageView/MessageViewInline-1745505309793"}],"cachedText({\"lastModified\":\"1745505309793\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/common/Pager/PagerLoadMore\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/common/Pager/PagerLoadMore-1745505309793"}],"cachedText({\"lastModified\":\"1745505309793\",\"locale\":\"en-US\",\"namespaces\":[\"components/users/UserLink\"]})":[{"__ref":"CachedAsset:text:en_US-components/users/UserLink-1745505309793"}],"cachedText({\"lastModified\":\"1745505309793\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageSubject\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageSubject-1745505309793"}],"cachedText({\"lastModified\":\"1745505309793\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageBody\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageBody-1745505309793"}],"cachedText({\"lastModified\":\"1745505309793\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageSolvedBadge\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageSolvedBadge-1745505309793"}],"cachedText({\"lastModified\":\"1745505309793\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageTime\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageTime-1745505309793"}],"cachedText({\"lastModified\":\"1745505309793\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/nodes/NodeIcon\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/nodes/NodeIcon-1745505309793"}],"cachedText({\"lastModified\":\"1745505309793\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageUnreadCount\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageUnreadCount-1745505309793"}],"cachedText({\"lastModified\":\"1745505309793\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageViewCount\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageViewCount-1745505309793"}],"cachedText({\"lastModified\":\"1745505309793\",\"locale\":\"en-US\",\"namespaces\":[\"components/kudos/KudosCount\"]})":[{"__ref":"CachedAsset:text:en_US-components/kudos/KudosCount-1745505309793"}],"cachedText({\"lastModified\":\"1745505309793\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageRepliesCount\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageRepliesCount-1745505309793"}],"cachedText({\"lastModified\":\"1745505309793\",\"locale\":\"e
n-US\",\"namespaces\":[\"shared/client/components/users/UserAvatar\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/users/UserAvatar-1745505309793"}]},"CachedAsset:pages-1745487429135":{"__typename":"CachedAsset","id":"pages-1745487429135","value":[{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"BlogViewAllPostsPage","type":"BLOG","urlPath":"/category/:categoryId/blog/:boardId/all-posts/(/:after|/:before)?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"CasePortalPage","type":"CASE_PORTAL","urlPath":"/caseportal","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"CreateGroupHubPage","type":"GROUP_HUB","urlPath":"/groups/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"CaseViewPage","type":"CASE_DETAILS","urlPath":"/case/:caseId/:caseNumber","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"InboxPage","type":"COMMUNITY","urlPath":"/inbox","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"HelpFAQPage","type":"COMMUNITY","urlPath":"/help","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"IdeaMessagePage","type":"IDEA_POST","urlPath":"/idea/:boardId/:messageSubject/:messageId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"IdeaViewAllIdeasPage","type":"IDEA","urlPath":"/category/:categoryId/ideas/:boardId/all-ideas/(/:after|/:before)?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"LoginPage","type":"USER","urlPath":"/signin","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"BlogPostPage","type":"BLOG","urlPath":"/category/:categoryId/blogs/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"UserBlogPermissions.Page","type":"COMMUNITY","urlPath":"/c/user-blog-permissions/page","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"ThemeEditorPage","type":"COMMUNITY","urlPath":"/designer/themes","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"TkbViewAllArticlesPage","type":"TKB","urlPath":"/category/:categoryId/kb/:boardId/all-articles/(/:after|/:before)?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1730142000000,"localOverride":null,"page":{"id":"AllEvents","type":"CUSTOM","urlPath":"/Events","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"OccasionEditPage","type":"EVENT","urlPath":"/event/:boardId/:messageSubject/:messageId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"OAuthAuthorizationAllowPage","type":"USER","urlPath":"/auth/authorize/allow","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpd
atedTime":1745487429135,"localOverride":null,"page":{"id":"PageEditorPage","type":"COMMUNITY","urlPath":"/designer/pages","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"PostPage","type":"COMMUNITY","urlPath":"/category/:categoryId/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"ForumBoardPage","type":"FORUM","urlPath":"/category/:categoryId/discussions/:boardId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"TkbBoardPage","type":"TKB","urlPath":"/category/:categoryId/kb/:boardId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"EventPostPage","type":"EVENT","urlPath":"/category/:categoryId/events/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"UserBadgesPage","type":"COMMUNITY","urlPath":"/users/:login/:userId/badges","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"GroupHubMembershipAction","type":"GROUP_HUB","urlPath":"/membership/join/:nodeId/:membershipType","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"MaintenancePage","type":"COMMUNITY","urlPath":"/maintenance","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"IdeaReplyPage","type":"IDEA_REPLY","urlPath":"/idea/:boardId/:messageSubject/:messageId/comments/:replyId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"UserSettingsPage","type":"USER","urlPath":"/mysettings/:userSettingsTab","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"GroupHubsPage","type":"GROUP_HUB","urlPath":"/groups","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"ForumPostPage","type":"FORUM","urlPath":"/category/:categoryId/discussions/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"OccasionRsvpActionPage","type":"OCCASION","urlPath":"/event/:boardId/:messageSubject/:messageId/rsvp/:responseType","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"VerifyUserEmailPage","type":"USER","urlPath":"/verifyemail/:userId/:verifyEmailToken","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"AllOccasionsPage","type":"OCCASION","urlPath":"/category/:categoryId/events/:boardId/all-events/(/:after|/:before)?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"EventBoardPage","type":"EVENT","urlPath":"/category/:categoryId/events/:boardId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"TkbReplyPage","type":"TKB_REPLY","urlPath":"/kb/:boardId/:messageSubject/:messageId/comments/:reply
Id","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"IdeaBoardPage","type":"IDEA","urlPath":"/category/:categoryId/ideas/:boardId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"CommunityGuideLinesPage","type":"COMMUNITY","urlPath":"/communityguidelines","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"CaseCreatePage","type":"SALESFORCE_CASE_CREATION","urlPath":"/caseportal/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"TkbEditPage","type":"TKB","urlPath":"/kb/:boardId/:messageSubject/:messageId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"ForgotPasswordPage","type":"USER","urlPath":"/forgotpassword","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"IdeaEditPage","type":"IDEA","urlPath":"/idea/:boardId/:messageSubject/:messageId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"TagPage","type":"COMMUNITY","urlPath":"/tag/:tagName","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"BlogBoardPage","type":"BLOG","urlPath":"/category/:categoryId/blog/:boardId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"OccasionMessagePage","type":"OCCASION_TOPIC","urlPath":"/event/:boardId/:messageSubject/:messageId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"ManageContentPage","type":"COMMUNITY","urlPath":"/managecontent","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"ClosedMembershipNodeNonMembersPage","type":"GROUP_HUB","urlPath":"/closedgroup/:groupHubId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"CommunityPage","type":"COMMUNITY","urlPath":"/","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"ForumMessagePage","type":"FORUM_TOPIC","urlPath":"/discussions/:boardId/:messageSubject/:messageId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"IdeaPostPage","type":"IDEA","urlPath":"/category/:categoryId/ideas/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1730142000000,"localOverride":null,"page":{"id":"CommunityHub.Page","type":"CUSTOM","urlPath":"/Directory","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"BlogMessagePage","type":"BLOG_ARTICLE","urlPath":"/blog/:boardId/:messageSubject/:messageId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"RegistrationPage","type":"USER","urlPath":"/register","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":174
5487429135,"localOverride":null,"page":{"id":"EditGroupHubPage","type":"GROUP_HUB","urlPath":"/group/:groupHubId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"ForumEditPage","type":"FORUM","urlPath":"/discussions/:boardId/:messageSubject/:messageId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"ResetPasswordPage","type":"USER","urlPath":"/resetpassword/:userId/:resetPasswordToken","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1730142000000,"localOverride":null,"page":{"id":"AllBlogs.Page","type":"CUSTOM","urlPath":"/blogs","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"TkbMessagePage","type":"TKB_ARTICLE","urlPath":"/kb/:boardId/:messageSubject/:messageId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"BlogEditPage","type":"BLOG","urlPath":"/blog/:boardId/:messageSubject/:messageId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"ManageUsersPage","type":"USER","urlPath":"/users/manage/:tab?/:manageUsersTab?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"ForumReplyPage","type":"FORUM_REPLY","urlPath":"/discussions/:boardId/:messageSubject/:messageId/replies/:replyId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"PrivacyPolicyPage","type":"COMMUNITY","urlPath":"/privacypolicy","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"NotificationPage","type":"COMMUNITY","urlPath":"/notifications","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"UserPage","type":"USER","urlPath":"/users/:login/:userId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"OccasionReplyPage","type":"OCCASION_REPLY","urlPath":"/event/:boardId/:messageSubject/:messageId/comments/:replyId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"ManageMembersPage","type":"GROUP_HUB","urlPath":"/group/:groupHubId/manage/:tab?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"SearchResultsPage","type":"COMMUNITY","urlPath":"/search","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"BlogReplyPage","type":"BLOG_REPLY","urlPath":"/blog/:boardId/:messageSubject/:messageId/replies/:replyId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"GroupHubPage","type":"GROUP_HUB","urlPath":"/group/:groupHubId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"TermsOfServicePage","type":"COMMUNITY","urlPath":"/termsofservice","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOver
ride":null,"page":{"id":"CategoryPage","type":"CATEGORY","urlPath":"/category/:categoryId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"ForumViewAllTopicsPage","type":"FORUM","urlPath":"/category/:categoryId/discussions/:boardId/all-topics/(/:after|/:before)?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"TkbPostPage","type":"TKB","urlPath":"/category/:categoryId/kbs/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1745487429135,"localOverride":null,"page":{"id":"GroupHubPostPage","type":"GROUP_HUB","urlPath":"/group/:groupHubId/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"}],"localOverride":false},"CachedAsset:text:en_US-components/context/AppContext/AppContextProvider-0":{"__typename":"CachedAsset","id":"text:en_US-components/context/AppContext/AppContextProvider-0","value":{"noCommunity":"Cannot find community","noUser":"Cannot find current user","noNode":"Cannot find node with id {nodeId}","noMessage":"Cannot find message with id {messageId}"},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/common/Loading/LoadingDot-0":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/common/Loading/LoadingDot-0","value":{"title":"Loading..."},"localOverride":false},"User:user:-1":{"__typename":"User","id":"user:-1","uid":-1,"login":"Deleted","email":"","avatar":null,"rank":null,"kudosWeight":1,"registrationData":{"__typename":"RegistrationData","status":"ANONYMOUS","registrationTime":null,"confirmEmailStatus":false,"registrationAccessLevel":"VIEW","ssoRegistrationFields":[]},"ssoId":null,"profileSettings":{"__typename":"ProfileSettings","dateDisplayStyle":{"__typename":"InheritableStringSettingWithPossibleValues","key":"layout.friendly_dates_enabled","value":"false","localValue":"true","possibleValues":["true","false"]},"dateDisplayFormat":{"__typename":"InheritableStringSetting","key":"layout.format_pattern_date","value":"MMM dd yyyy","localValue":"MM-dd-yyyy"},"language":{"__typename":"InheritableStringSettingWithPossibleValues","key":"profile.language","value":"en-US","localValue":"en","possibleValues":["en-US"]}},"deleted":false},"Theme:customTheme1":{"__typename":"Theme","id":"customTheme1"},"CachedAsset:theme:customTheme1-1744326567472":{"__typename":"CachedAsset","id":"theme:customTheme1-1744326567472","value":{"id":"customTheme1","animation":{"fast":"150ms","normal":"250ms","slow":"500ms","slowest":"750ms","function":"cubic-bezier(0.07, 0.91, 0.51, 
1)","__typename":"AnimationThemeSettings"},"avatar":{"borderRadius":"50%","collections":["default"],"__typename":"AvatarThemeSettings"},"basics":{"browserIcon":{"imageAssetName":"favicon-1730836283320.png","imageLastModified":"1730836286415","__typename":"ThemeAsset"},"customerLogo":{"imageAssetName":"favicon-1730836271365.png","imageLastModified":"1730836274203","__typename":"ThemeAsset"},"maximumWidthOfPageContent":"1300px","oneColumnNarrowWidth":"800px","gridGutterWidthMd":"30px","gridGutterWidthXs":"10px","pageWidthStyle":"WIDTH_OF_BROWSER","__typename":"BasicsThemeSettings"},"buttons":{"borderRadiusSm":"3px","borderRadius":"3px","borderRadiusLg":"5px","paddingY":"5px","paddingYLg":"7px","paddingYHero":"var(--lia-bs-btn-padding-y-lg)","paddingX":"12px","paddingXLg":"16px","paddingXHero":"60px","fontStyle":"NORMAL","fontWeight":"700","textTransform":"NONE","disabledOpacity":0.5,"primaryTextColor":"var(--lia-bs-white)","primaryTextHoverColor":"var(--lia-bs-white)","primaryTextActiveColor":"var(--lia-bs-white)","primaryBgColor":"var(--lia-bs-primary)","primaryBgHoverColor":"hsl(var(--lia-bs-primary-h), var(--lia-bs-primary-s), calc(var(--lia-bs-primary-l) * 0.85))","primaryBgActiveColor":"hsl(var(--lia-bs-primary-h), var(--lia-bs-primary-s), calc(var(--lia-bs-primary-l) * 0.7))","primaryBorder":"1px solid transparent","primaryBorderHover":"1px solid transparent","primaryBorderActive":"1px solid transparent","primaryBorderFocus":"1px solid var(--lia-bs-white)","primaryBoxShadowFocus":"0 0 0 1px var(--lia-bs-primary), 0 0 0 4px hsla(var(--lia-bs-primary-h), var(--lia-bs-primary-s), var(--lia-bs-primary-l), 0.2)","secondaryTextColor":"var(--lia-bs-gray-900)","secondaryTextHoverColor":"hsl(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), calc(var(--lia-bs-gray-900-l) * 0.95))","secondaryTextActiveColor":"hsl(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), calc(var(--lia-bs-gray-900-l) * 0.9))","secondaryBgColor":"var(--lia-bs-gray-200)","secondaryBgHoverColor":"hsl(var(--lia-bs-gray-200-h), var(--lia-bs-gray-200-s), calc(var(--lia-bs-gray-200-l) * 0.96))","secondaryBgActiveColor":"hsl(var(--lia-bs-gray-200-h), var(--lia-bs-gray-200-s), calc(var(--lia-bs-gray-200-l) * 0.92))","secondaryBorder":"1px solid transparent","secondaryBorderHover":"1px solid transparent","secondaryBorderActive":"1px solid transparent","secondaryBorderFocus":"1px solid transparent","secondaryBoxShadowFocus":"0 0 0 1px var(--lia-bs-primary), 0 0 0 4px hsla(var(--lia-bs-primary-h), var(--lia-bs-primary-s), var(--lia-bs-primary-l), 0.2)","tertiaryTextColor":"var(--lia-bs-gray-900)","tertiaryTextHoverColor":"hsl(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), calc(var(--lia-bs-gray-900-l) * 0.95))","tertiaryTextActiveColor":"hsl(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), calc(var(--lia-bs-gray-900-l) * 0.9))","tertiaryBgColor":"transparent","tertiaryBgHoverColor":"transparent","tertiaryBgActiveColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.04)","tertiaryBorder":"1px solid transparent","tertiaryBorderHover":"1px solid hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.08)","tertiaryBorderActive":"1px solid transparent","tertiaryBorderFocus":"1px solid transparent","tertiaryBoxShadowFocus":"0 0 0 1px var(--lia-bs-primary), 0 0 0 4px hsla(var(--lia-bs-primary-h), var(--lia-bs-primary-s), var(--lia-bs-primary-l), 0.2)","destructiveTextColor":"var(--lia-bs-danger)","destructiveTextHoverColor":"hsl(var(--lia-bs-danger-h), var(--lia-bs-danger-s), 
calc(var(--lia-bs-danger-l) * 0.95))","destructiveTextActiveColor":"hsl(var(--lia-bs-danger-h), var(--lia-bs-danger-s), calc(var(--lia-bs-danger-l) * 0.9))","destructiveBgColor":"var(--lia-bs-gray-200)","destructiveBgHoverColor":"hsl(var(--lia-bs-gray-200-h), var(--lia-bs-gray-200-s), calc(var(--lia-bs-gray-200-l) * 0.96))","destructiveBgActiveColor":"hsl(var(--lia-bs-gray-200-h), var(--lia-bs-gray-200-s), calc(var(--lia-bs-gray-200-l) * 0.92))","destructiveBorder":"1px solid transparent","destructiveBorderHover":"1px solid transparent","destructiveBorderActive":"1px solid transparent","destructiveBorderFocus":"1px solid transparent","destructiveBoxShadowFocus":"0 0 0 1px var(--lia-bs-primary), 0 0 0 4px hsla(var(--lia-bs-primary-h), var(--lia-bs-primary-s), var(--lia-bs-primary-l), 0.2)","__typename":"ButtonsThemeSettings"},"border":{"color":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.08)","mainContent":"NONE","sideContent":"LIGHT","radiusSm":"3px","radius":"5px","radiusLg":"9px","radius50":"100vw","__typename":"BorderThemeSettings"},"boxShadow":{"xs":"0 0 0 1px hsla(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), var(--lia-bs-gray-900-l), 0.08), 0 3px 0 -1px hsla(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), var(--lia-bs-gray-900-l), 0.16)","sm":"0 2px 4px hsla(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), var(--lia-bs-gray-900-l), 0.12)","md":"0 5px 15px hsla(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), var(--lia-bs-gray-900-l), 0.3)","lg":"0 10px 30px hsla(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), var(--lia-bs-gray-900-l), 0.3)","__typename":"BoxShadowThemeSettings"},"cards":{"bgColor":"var(--lia-panel-bg-color)","borderRadius":"var(--lia-panel-border-radius)","boxShadow":"var(--lia-box-shadow-xs)","__typename":"CardsThemeSettings"},"chip":{"maxWidth":"300px","height":"30px","__typename":"ChipThemeSettings"},"coreTypes":{"defaultMessageLinkColor":"var(--lia-bs-link-color)","defaultMessageLinkDecoration":"none","defaultMessageLinkFontStyle":"NORMAL","defaultMessageLinkFontWeight":"400","defaultMessageFontStyle":"NORMAL","defaultMessageFontWeight":"400","forumColor":"#4099E2","forumFontFamily":"var(--lia-bs-font-family-base)","forumFontWeight":"var(--lia-default-message-font-weight)","forumLineHeight":"var(--lia-bs-line-height-base)","forumFontStyle":"var(--lia-default-message-font-style)","forumMessageLinkColor":"var(--lia-default-message-link-color)","forumMessageLinkDecoration":"var(--lia-default-message-link-decoration)","forumMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","forumMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","forumSolvedColor":"#148563","blogColor":"#1CBAA0","blogFontFamily":"var(--lia-bs-font-family-base)","blogFontWeight":"var(--lia-default-message-font-weight)","blogLineHeight":"1.75","blogFontStyle":"var(--lia-default-message-font-style)","blogMessageLinkColor":"var(--lia-default-message-link-color)","blogMessageLinkDecoration":"var(--lia-default-message-link-decoration)","blogMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","blogMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","tkbColor":"#4C6B90","tkbFontFamily":"var(--lia-bs-font-family-base)","tkbFontWeight":"var(--lia-default-message-font-weight)","tkbLineHeight":"1.75","tkbFontStyle":"var(--lia-default-message-font-style)","tkbMessageLinkColor":"var(--lia-default-message-link-color)","tkbMessageLinkDecoration":"var(--lia-default-message-link-decoration)","tkbMessag
eLinkFontStyle":"var(--lia-default-message-link-font-style)","tkbMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","qandaColor":"#4099E2","qandaFontFamily":"var(--lia-bs-font-family-base)","qandaFontWeight":"var(--lia-default-message-font-weight)","qandaLineHeight":"var(--lia-bs-line-height-base)","qandaFontStyle":"var(--lia-default-message-link-font-style)","qandaMessageLinkColor":"var(--lia-default-message-link-color)","qandaMessageLinkDecoration":"var(--lia-default-message-link-decoration)","qandaMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","qandaMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","qandaSolvedColor":"#3FA023","ideaColor":"#FF8000","ideaFontFamily":"var(--lia-bs-font-family-base)","ideaFontWeight":"var(--lia-default-message-font-weight)","ideaLineHeight":"var(--lia-bs-line-height-base)","ideaFontStyle":"var(--lia-default-message-font-style)","ideaMessageLinkColor":"var(--lia-default-message-link-color)","ideaMessageLinkDecoration":"var(--lia-default-message-link-decoration)","ideaMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","ideaMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","contestColor":"#FCC845","contestFontFamily":"var(--lia-bs-font-family-base)","contestFontWeight":"var(--lia-default-message-font-weight)","contestLineHeight":"var(--lia-bs-line-height-base)","contestFontStyle":"var(--lia-default-message-link-font-style)","contestMessageLinkColor":"var(--lia-default-message-link-color)","contestMessageLinkDecoration":"var(--lia-default-message-link-decoration)","contestMessageLinkFontStyle":"ITALIC","contestMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","occasionColor":"#D13A1F","occasionFontFamily":"var(--lia-bs-font-family-base)","occasionFontWeight":"var(--lia-default-message-font-weight)","occasionLineHeight":"var(--lia-bs-line-height-base)","occasionFontStyle":"var(--lia-default-message-font-style)","occasionMessageLinkColor":"var(--lia-default-message-link-color)","occasionMessageLinkDecoration":"var(--lia-default-message-link-decoration)","occasionMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","occasionMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","grouphubColor":"#333333","categoryColor":"#949494","communityColor":"#FFFFFF","productColor":"#949494","__typename":"CoreTypesThemeSettings"},"colors":{"black":"#000000","white":"#FFFFFF","gray100":"#F7F7F7","gray200":"#F7F7F7","gray300":"#E8E8E8","gray400":"#D9D9D9","gray500":"#CCCCCC","gray600":"#717171","gray700":"#707070","gray800":"#545454","gray900":"#333333","dark":"#545454","light":"#F7F7F7","primary":"#0069D4","secondary":"#333333","bodyText":"#1E1E1E","bodyBg":"#FFFFFF","info":"#409AE2","success":"#41C5AE","warning":"#FCC844","danger":"#BC341B","alertSystem":"#FF6600","textMuted":"#707070","highlight":"#FFFCAD","outline":"var(--lia-bs-primary)","custom":["#D3F5A4","#243A5E"],"__typename":"ColorsThemeSettings"},"divider":{"size":"3px","marginLeft":"4px","marginRight":"4px","borderRadius":"50%","bgColor":"var(--lia-bs-gray-600)","bgColorActive":"var(--lia-bs-gray-600)","__typename":"DividerThemeSettings"},"dropdown":{"fontSize":"var(--lia-bs-font-size-sm)","borderColor":"var(--lia-bs-border-color)","borderRadius":"var(--lia-bs-border-radius-sm)","dividerBg":"var(--lia-bs-gray-300)","itemPaddingY":"5px","itemPaddingX":"20px","headerColor":"var(--lia-bs-gray-700)","__typename":"DropdownThemeSettings"},"email":{"link":{"color":"#0069D4","hoverColor
":"#0061c2","decoration":"none","hoverDecoration":"underline","__typename":"EmailLinkSettings"},"border":{"color":"#e4e4e4","__typename":"EmailBorderSettings"},"buttons":{"borderRadiusLg":"5px","paddingXLg":"16px","paddingYLg":"7px","fontWeight":"700","primaryTextColor":"#ffffff","primaryTextHoverColor":"#ffffff","primaryBgColor":"#0069D4","primaryBgHoverColor":"#005cb8","primaryBorder":"1px solid transparent","primaryBorderHover":"1px solid transparent","__typename":"EmailButtonsSettings"},"panel":{"borderRadius":"5px","borderColor":"#e4e4e4","__typename":"EmailPanelSettings"},"__typename":"EmailThemeSettings"},"emoji":{"skinToneDefault":"#ffcd43","skinToneLight":"#fae3c5","skinToneMediumLight":"#e2cfa5","skinToneMedium":"#daa478","skinToneMediumDark":"#a78058","skinToneDark":"#5e4d43","__typename":"EmojiThemeSettings"},"heading":{"color":"var(--lia-bs-body-color)","fontFamily":"Segoe UI","fontStyle":"NORMAL","fontWeight":"400","h1FontSize":"34px","h2FontSize":"32px","h3FontSize":"28px","h4FontSize":"24px","h5FontSize":"20px","h6FontSize":"16px","lineHeight":"1.3","subHeaderFontSize":"11px","subHeaderFontWeight":"500","h1LetterSpacing":"normal","h2LetterSpacing":"normal","h3LetterSpacing":"normal","h4LetterSpacing":"normal","h5LetterSpacing":"normal","h6LetterSpacing":"normal","subHeaderLetterSpacing":"2px","h1FontWeight":"var(--lia-bs-headings-font-weight)","h2FontWeight":"var(--lia-bs-headings-font-weight)","h3FontWeight":"var(--lia-bs-headings-font-weight)","h4FontWeight":"var(--lia-bs-headings-font-weight)","h5FontWeight":"var(--lia-bs-headings-font-weight)","h6FontWeight":"var(--lia-bs-headings-font-weight)","__typename":"HeadingThemeSettings"},"icons":{"size10":"10px","size12":"12px","size14":"14px","size16":"16px","size20":"20px","size24":"24px","size30":"30px","size40":"40px","size50":"50px","size60":"60px","size80":"80px","size120":"120px","size160":"160px","__typename":"IconsThemeSettings"},"imagePreview":{"bgColor":"var(--lia-bs-gray-900)","titleColor":"var(--lia-bs-white)","controlColor":"var(--lia-bs-white)","controlBgColor":"var(--lia-bs-gray-800)","__typename":"ImagePreviewThemeSettings"},"input":{"borderColor":"var(--lia-bs-gray-600)","disabledColor":"var(--lia-bs-gray-600)","focusBorderColor":"var(--lia-bs-primary)","labelMarginBottom":"10px","btnFontSize":"var(--lia-bs-font-size-sm)","focusBoxShadow":"0 0 0 3px hsla(var(--lia-bs-primary-h), var(--lia-bs-primary-s), var(--lia-bs-primary-l), 0.2)","checkLabelMarginBottom":"2px","checkboxBorderRadius":"3px","borderRadiusSm":"var(--lia-bs-border-radius-sm)","borderRadius":"var(--lia-bs-border-radius)","borderRadiusLg":"var(--lia-bs-border-radius-lg)","formTextMarginTop":"4px","textAreaBorderRadius":"var(--lia-bs-border-radius)","activeFillColor":"var(--lia-bs-primary)","__typename":"InputThemeSettings"},"loading":{"dotDarkColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.2)","dotLightColor":"hsla(var(--lia-bs-white-h), var(--lia-bs-white-s), var(--lia-bs-white-l), 0.5)","barDarkColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.06)","barLightColor":"hsla(var(--lia-bs-white-h), var(--lia-bs-white-s), var(--lia-bs-white-l), 0.4)","__typename":"LoadingThemeSettings"},"link":{"color":"var(--lia-bs-primary)","hoverColor":"hsl(var(--lia-bs-primary-h), var(--lia-bs-primary-s), calc(var(--lia-bs-primary-l) - 
10%))","decoration":"none","hoverDecoration":"underline","__typename":"LinkThemeSettings"},"listGroup":{"itemPaddingY":"15px","itemPaddingX":"15px","borderColor":"var(--lia-bs-gray-300)","__typename":"ListGroupThemeSettings"},"modal":{"contentTextColor":"var(--lia-bs-body-color)","contentBg":"var(--lia-bs-white)","backgroundBg":"var(--lia-bs-black)","smSize":"440px","mdSize":"760px","lgSize":"1080px","backdropOpacity":0.3,"contentBoxShadowXs":"var(--lia-bs-box-shadow-sm)","contentBoxShadow":"var(--lia-bs-box-shadow)","headerFontWeight":"700","__typename":"ModalThemeSettings"},"navbar":{"position":"FIXED","background":{"attachment":null,"clip":null,"color":"var(--lia-bs-white)","imageAssetName":"","imageLastModified":"0","origin":null,"position":"CENTER_CENTER","repeat":"NO_REPEAT","size":"COVER","__typename":"BackgroundProps"},"backgroundOpacity":0.8,"paddingTop":"15px","paddingBottom":"15px","borderBottom":"1px solid var(--lia-bs-border-color)","boxShadow":"var(--lia-bs-box-shadow-sm)","brandMarginRight":"30px","brandMarginRightSm":"10px","brandLogoHeight":"30px","linkGap":"10px","linkJustifyContent":"flex-start","linkPaddingY":"5px","linkPaddingX":"10px","linkDropdownPaddingY":"9px","linkDropdownPaddingX":"var(--lia-nav-link-px)","linkColor":"var(--lia-bs-body-color)","linkHoverColor":"var(--lia-bs-primary)","linkFontSize":"var(--lia-bs-font-size-sm)","linkFontStyle":"NORMAL","linkFontWeight":"400","linkTextTransform":"NONE","linkLetterSpacing":"normal","linkBorderRadius":"var(--lia-bs-border-radius-sm)","linkBgColor":"transparent","linkBgHoverColor":"transparent","linkBorder":"none","linkBorderHover":"none","linkBoxShadow":"none","linkBoxShadowHover":"none","linkTextBorderBottom":"none","linkTextBorderBottomHover":"none","dropdownPaddingTop":"10px","dropdownPaddingBottom":"15px","dropdownPaddingX":"10px","dropdownMenuOffset":"2px","dropdownDividerMarginTop":"10px","dropdownDividerMarginBottom":"10px","dropdownBorderColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.08)","controllerBgHoverColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.1)","controllerIconColor":"var(--lia-bs-body-color)","controllerIconHoverColor":"var(--lia-bs-body-color)","controllerTextColor":"var(--lia-nav-controller-icon-color)","controllerTextHoverColor":"var(--lia-nav-controller-icon-hover-color)","controllerHighlightColor":"hsla(30, 100%, 50%)","controllerHighlightTextColor":"var(--lia-yiq-light)","controllerBorderRadius":"var(--lia-border-radius-50)","hamburgerColor":"var(--lia-nav-controller-icon-color)","hamburgerHoverColor":"var(--lia-nav-controller-icon-color)","hamburgerBgColor":"transparent","hamburgerBgHoverColor":"transparent","hamburgerBorder":"none","hamburgerBorderHover":"none","collapseMenuMarginLeft":"20px","collapseMenuDividerBg":"var(--lia-nav-link-color)","collapseMenuDividerOpacity":0.16,"__typename":"NavbarThemeSettings"},"pager":{"textColor":"var(--lia-bs-link-color)","textFontWeight":"var(--lia-font-weight-md)","textFontSize":"var(--lia-bs-font-size-sm)","__typename":"PagerThemeSettings"},"panel":{"bgColor":"var(--lia-bs-white)","borderRadius":"var(--lia-bs-border-radius)","borderColor":"var(--lia-bs-border-color)","boxShadow":"none","__typename":"PanelThemeSettings"},"popover":{"arrowHeight":"8px","arrowWidth":"16px","maxWidth":"300px","minWidth":"100px","headerBg":"var(--lia-bs-white)","borderColor":"var(--lia-bs-border-color)","borderRadius":"var(--lia-bs-border-radius)","boxShadow":"0 0.5rem 1rem 
hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.15)","__typename":"PopoverThemeSettings"},"prism":{"color":"#000000","bgColor":"#f5f2f0","fontFamily":"var(--font-family-monospace)","fontSize":"var(--lia-bs-font-size-base)","fontWeightBold":"var(--lia-bs-font-weight-bold)","fontStyleItalic":"italic","tabSize":2,"highlightColor":"#b3d4fc","commentColor":"#62707e","punctuationColor":"#6f6f6f","namespaceOpacity":"0.7","propColor":"#990055","selectorColor":"#517a00","operatorColor":"#906736","operatorBgColor":"hsla(0, 0%, 100%, 0.5)","keywordColor":"#0076a9","functionColor":"#d3284b","variableColor":"#c14700","__typename":"PrismThemeSettings"},"rte":{"bgColor":"var(--lia-bs-white)","borderRadius":"var(--lia-panel-border-radius)","boxShadow":" var(--lia-panel-box-shadow)","customColor1":"#bfedd2","customColor2":"#fbeeb8","customColor3":"#f8cac6","customColor4":"#eccafa","customColor5":"#c2e0f4","customColor6":"#2dc26b","customColor7":"#f1c40f","customColor8":"#e03e2d","customColor9":"#b96ad9","customColor10":"#3598db","customColor11":"#169179","customColor12":"#e67e23","customColor13":"#ba372a","customColor14":"#843fa1","customColor15":"#236fa1","customColor16":"#ecf0f1","customColor17":"#ced4d9","customColor18":"#95a5a6","customColor19":"#7e8c8d","customColor20":"#34495e","customColor21":"#000000","customColor22":"#ffffff","defaultMessageHeaderMarginTop":"40px","defaultMessageHeaderMarginBottom":"20px","defaultMessageItemMarginTop":"0","defaultMessageItemMarginBottom":"10px","diffAddedColor":"hsla(170, 53%, 51%, 0.4)","diffChangedColor":"hsla(43, 97%, 63%, 0.4)","diffNoneColor":"hsla(0, 0%, 80%, 0.4)","diffRemovedColor":"hsla(9, 74%, 47%, 0.4)","specialMessageHeaderMarginTop":"40px","specialMessageHeaderMarginBottom":"20px","specialMessageItemMarginTop":"0","specialMessageItemMarginBottom":"10px","__typename":"RteThemeSettings"},"tags":{"bgColor":"var(--lia-bs-gray-200)","bgHoverColor":"var(--lia-bs-gray-400)","borderRadius":"var(--lia-bs-border-radius-sm)","color":"var(--lia-bs-body-color)","hoverColor":"var(--lia-bs-body-color)","fontWeight":"var(--lia-font-weight-md)","fontSize":"var(--lia-font-size-xxs)","textTransform":"UPPERCASE","letterSpacing":"0.5px","__typename":"TagsThemeSettings"},"toasts":{"borderRadius":"var(--lia-bs-border-radius)","paddingX":"12px","__typename":"ToastsThemeSettings"},"typography":{"fontFamilyBase":"Segoe UI","fontStyleBase":"NORMAL","fontWeightBase":"400","fontWeightLight":"300","fontWeightNormal":"400","fontWeightMd":"500","fontWeightBold":"700","letterSpacingSm":"normal","letterSpacingXs":"normal","lineHeightBase":"1.5","fontSizeBase":"16px","fontSizeXxs":"11px","fontSizeXs":"12px","fontSizeSm":"14px","fontSizeLg":"20px","fontSizeXl":"24px","smallFontSize":"14px","customFonts":[{"source":"SERVER","name":"Segoe UI","styles":[{"style":"NORMAL","weight":"400","__typename":"FontStyleData"},{"style":"NORMAL","weight":"300","__typename":"FontStyleData"},{"style":"NORMAL","weight":"600","__typename":"FontStyleData"},{"style":"NORMAL","weight":"700","__typename":"FontStyleData"},{"style":"ITALIC","weight":"400","__typename":"FontStyleData"}],"assetNames":["SegoeUI-normal-400.woff2","SegoeUI-normal-300.woff2","SegoeUI-normal-600.woff2","SegoeUI-normal-700.woff2","SegoeUI-italic-400.woff2"],"__typename":"CustomFont"},{"source":"SERVER","name":"MWF Fluent 
Icons","styles":[{"style":"NORMAL","weight":"400","__typename":"FontStyleData"}],"assetNames":["MWFFluentIcons-normal-400.woff2"],"__typename":"CustomFont"}],"__typename":"TypographyThemeSettings"},"unstyledListItem":{"marginBottomSm":"5px","marginBottomMd":"10px","marginBottomLg":"15px","marginBottomXl":"20px","marginBottomXxl":"25px","__typename":"UnstyledListItemThemeSettings"},"yiq":{"light":"#ffffff","dark":"#000000","__typename":"YiqThemeSettings"},"colorLightness":{"primaryDark":0.36,"primaryLight":0.74,"primaryLighter":0.89,"primaryLightest":0.95,"infoDark":0.39,"infoLight":0.72,"infoLighter":0.85,"infoLightest":0.93,"successDark":0.24,"successLight":0.62,"successLighter":0.8,"successLightest":0.91,"warningDark":0.39,"warningLight":0.68,"warningLighter":0.84,"warningLightest":0.93,"dangerDark":0.41,"dangerLight":0.72,"dangerLighter":0.89,"dangerLightest":0.95,"__typename":"ColorLightnessThemeSettings"},"localOverride":false,"__typename":"Theme"},"localOverride":false},"CachedAsset:text:en_US-components/common/EmailVerification-1745505309793":{"__typename":"CachedAsset","id":"text:en_US-components/common/EmailVerification-1745505309793","value":{"email.verification.title":"Email Verification Required","email.verification.message.update.email":"To participate in the community, you must first verify your email address. The verification email was sent to {email}. To change your email, visit My Settings.","email.verification.message.resend.email":"To participate in the community, you must first verify your email address. The verification email was sent to {email}. Resend email."},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/common/Loading/LoadingDot-1745505309793":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/common/Loading/LoadingDot-1745505309793","value":{"title":"Loading..."},"localOverride":false},"CachedAsset:text:en_US-pages/tags/TagPage-1745505309793":{"__typename":"CachedAsset","id":"text:en_US-pages/tags/TagPage-1745505309793","value":{"tagPageTitle":"Tag:\"{tagName}\" | {communityTitle}","tagPageForNodeTitle":"Tag:\"{tagName}\" in \"{title}\" | {communityTitle}","name":"Tags Page","tag":"Tag: 
{tagName}"},"localOverride":false},"CachedAsset:quilt:o365.prod:pages/tags/TagPage:community:gxcuf89792-1745502712743":{"__typename":"CachedAsset","id":"quilt:o365.prod:pages/tags/TagPage:community:gxcuf89792-1745502712743","value":{"id":"TagPage","container":{"id":"Common","headerProps":{"removeComponents":["community.widget.bannerWidget"],"__typename":"QuiltContainerSectionProps"},"items":[{"id":"tag-header-widget","layout":"ONE_COLUMN","bgColor":"var(--lia-bs-white)","showBorder":"BOTTOM","sectionEditLevel":"LOCKED","columnMap":{"main":[{"id":"tags.widget.TagsHeaderWidget","__typename":"QuiltComponent"}],"__typename":"OneSectionColumns"},"__typename":"OneColumnQuiltSection"},{"id":"messages-list-for-tag-widget","layout":"ONE_COLUMN","columnMap":{"main":[{"id":"messages.widget.messageListForNodeByRecentActivityWidget","props":{"viewVariant":{"type":"inline","props":{"useUnreadCount":true,"useViewCount":true,"useAuthorLogin":true,"clampBodyLines":3,"useAvatar":true,"useBoardIcon":false,"useKudosCount":true,"usePreviewMedia":true,"useTags":false,"useNode":true,"useNodeLink":true,"useTextBody":true,"truncateBodyLength":-1,"useBody":true,"useRepliesCount":true,"useSolvedBadge":true,"timeStampType":"conversation.lastPostingActivityTime","useMessageTimeLink":true,"clampSubjectLines":2}},"panelType":"divider","useTitle":false,"hideIfEmpty":false,"pagerVariant":{"type":"loadMore"},"style":"list","showTabs":true,"tabItemMap":{"default":{"mostRecent":true,"mostRecentUserContent":false,"newest":false},"additional":{"mostKudoed":true,"mostViewed":true,"mostReplies":false,"noReplies":false,"noSolutions":false,"solutions":false}}},"__typename":"QuiltComponent"}],"__typename":"OneSectionColumns"},"__typename":"OneColumnQuiltSection"}],"__typename":"QuiltContainer"},"__typename":"Quilt"},"localOverride":false},"CachedAsset:quiltWrapper:o365.prod:Common:1745505311080":{"__typename":"CachedAsset","id":"quiltWrapper:o365.prod:Common:1745505311080","value":{"id":"Common","header":{"backgroundImageProps":{"assetName":null,"backgroundSize":"COVER","backgroundRepeat":"NO_REPEAT","backgroundPosition":"CENTER_CENTER","lastModified":null,"__typename":"BackgroundImageProps"},"backgroundColor":"transparent","items":[{"id":"community.widget.navbarWidget","props":{"showUserName":true,"showRegisterLink":true,"useIconLanguagePicker":true,"useLabelLanguagePicker":true,"className":"QuiltComponent_lia-component-edit-mode__0nCcm","links":{"sideLinks":[],"mainLinks":[{"children":[],"linkType":"INTERNAL","id":"gxcuf89792","params":{},"routeName":"CommunityPage"},{"children":[],"linkType":"EXTERNAL","id":"external-link","url":"/Directory","target":"SELF"},{"children":[{"linkType":"INTERNAL","id":"microsoft365","params":{"categoryId":"microsoft365"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"windows","params":{"categoryId":"Windows"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"Common-microsoft365-copilot-link","params":{"categoryId":"Microsoft365Copilot"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"microsoft-teams","params":{"categoryId":"MicrosoftTeams"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"microsoft-securityand-compliance","params":{"categoryId":"microsoft-security"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"azure","params":{"categoryId":"Azure"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"Common-content_management-link","params":{"categoryId":"Content_Management"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"exchange","params"
:{"categoryId":"Exchange"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"windows-server","params":{"categoryId":"Windows-Server"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"outlook","params":{"categoryId":"Outlook"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"microsoft-endpoint-manager","params":{"categoryId":"microsoftintune"},"routeName":"CategoryPage"},{"linkType":"EXTERNAL","id":"external-link-2","url":"/Directory","target":"SELF"}],"linkType":"EXTERNAL","id":"communities","url":"/","target":"BLANK"},{"children":[{"linkType":"INTERNAL","id":"a-i","params":{"categoryId":"AI"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"education-sector","params":{"categoryId":"EducationSector"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"partner-community","params":{"categoryId":"PartnerCommunity"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"i-t-ops-talk","params":{"categoryId":"ITOpsTalk"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"healthcare-and-life-sciences","params":{"categoryId":"HealthcareAndLifeSciences"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"microsoft-mechanics","params":{"categoryId":"MicrosoftMechanics"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"public-sector","params":{"categoryId":"PublicSector"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"s-m-b","params":{"categoryId":"MicrosoftforNonprofits"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"io-t","params":{"categoryId":"IoT"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"startupsat-microsoft","params":{"categoryId":"StartupsatMicrosoft"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"driving-adoption","params":{"categoryId":"DrivingAdoption"},"routeName":"CategoryPage"},{"linkType":"EXTERNAL","id":"external-link-1","url":"/Directory","target":"SELF"}],"linkType":"EXTERNAL","id":"communities-1","url":"/","target":"SELF"},{"children":[],"linkType":"EXTERNAL","id":"external","url":"/Blogs","target":"SELF"},{"children":[],"linkType":"EXTERNAL","id":"external-1","url":"/Events","target":"SELF"},{"children":[{"linkType":"INTERNAL","id":"microsoft-learn-1","params":{"categoryId":"MicrosoftLearn"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"microsoft-learn-blog","params":{"boardId":"MicrosoftLearnBlog","categoryId":"MicrosoftLearn"},"routeName":"BlogBoardPage"},{"linkType":"EXTERNAL","id":"external-10","url":"https://learningroomdirectory.microsoft.com/","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-3","url":"https://docs.microsoft.com/learn/dynamics365/?WT.mc_id=techcom_header-webpage-m365","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-4","url":"https://docs.microsoft.com/learn/m365/?wt.mc_id=techcom_header-webpage-m365","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-5","url":"https://docs.microsoft.com/learn/topics/sci/?wt.mc_id=techcom_header-webpage-m365","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-6","url":"https://docs.microsoft.com/learn/powerplatform/?wt.mc_id=techcom_header-webpage-powerplatform","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-7","url":"https://docs.microsoft.com/learn/github/?wt.mc_id=techcom_header-webpage-github","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-8","url":"https://docs.microsoft.com/learn/teams/?wt.mc_id=techcom_header-webpage-teams","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-9","url":"https://docs.microsoft.com/learn/dotnet/?wt.mc_id=techcom_header-webpage-do
tnet","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-2","url":"https://docs.microsoft.com/learn/azure/?WT.mc_id=techcom_header-webpage-m365","target":"BLANK"}],"linkType":"INTERNAL","id":"microsoft-learn","params":{"categoryId":"MicrosoftLearn"},"routeName":"CategoryPage"},{"children":[],"linkType":"INTERNAL","id":"community-info-center","params":{"categoryId":"Community-Info-Center"},"routeName":"CategoryPage"}]},"style":{"boxShadow":"var(--lia-bs-box-shadow-sm)","controllerHighlightColor":"hsla(30, 100%, 50%)","linkFontWeight":"400","dropdownDividerMarginBottom":"10px","hamburgerBorderHover":"none","linkBoxShadowHover":"none","linkFontSize":"14px","backgroundOpacity":0.8,"controllerBorderRadius":"var(--lia-border-radius-50)","hamburgerBgColor":"transparent","hamburgerColor":"var(--lia-nav-controller-icon-color)","linkTextBorderBottom":"none","brandLogoHeight":"30px","linkBgHoverColor":"transparent","linkLetterSpacing":"normal","collapseMenuDividerOpacity":0.16,"dropdownPaddingBottom":"15px","paddingBottom":"15px","dropdownMenuOffset":"2px","hamburgerBgHoverColor":"transparent","borderBottom":"1px solid var(--lia-bs-border-color)","hamburgerBorder":"none","dropdownPaddingX":"10px","brandMarginRightSm":"10px","linkBoxShadow":"none","collapseMenuDividerBg":"var(--lia-nav-link-color)","linkColor":"var(--lia-bs-body-color)","linkJustifyContent":"flex-start","dropdownPaddingTop":"10px","controllerHighlightTextColor":"var(--lia-yiq-dark)","controllerTextColor":"var(--lia-nav-controller-icon-color)","background":{"imageAssetName":"","color":"var(--lia-bs-white)","size":"COVER","repeat":"NO_REPEAT","position":"CENTER_CENTER","imageLastModified":""},"linkBorderRadius":"var(--lia-bs-border-radius-sm)","linkHoverColor":"var(--lia-bs-body-color)","position":"FIXED","linkBorder":"none","linkTextBorderBottomHover":"2px solid var(--lia-bs-body-color)","brandMarginRight":"30px","hamburgerHoverColor":"var(--lia-nav-controller-icon-color)","linkBorderHover":"none","collapseMenuMarginLeft":"20px","linkFontStyle":"NORMAL","controllerTextHoverColor":"var(--lia-nav-controller-icon-hover-color)","linkPaddingX":"10px","linkPaddingY":"5px","paddingTop":"15px","linkTextTransform":"NONE","dropdownBorderColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.08)","controllerBgHoverColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 
0.1)","linkBgColor":"transparent","linkDropdownPaddingX":"var(--lia-nav-link-px)","linkDropdownPaddingY":"9px","controllerIconColor":"var(--lia-bs-body-color)","dropdownDividerMarginTop":"10px","linkGap":"10px","controllerIconHoverColor":"var(--lia-bs-body-color)"},"showSearchIcon":false,"languagePickerStyle":"iconAndLabel"},"__typename":"QuiltComponent"},{"id":"community.widget.breadcrumbWidget","props":{"backgroundColor":"transparent","linkHighlightColor":"var(--lia-bs-primary)","visualEffects":{"showBottomBorder":true},"linkTextColor":"var(--lia-bs-gray-700)"},"__typename":"QuiltComponent"},{"id":"custom.widget.community_banner","props":{"widgetVisibility":"signedInOrAnonymous","useTitle":true,"usePageWidth":false,"useBackground":false,"title":"","lazyLoad":false},"__typename":"QuiltComponent"},{"id":"custom.widget.HeroBanner","props":{"widgetVisibility":"signedInOrAnonymous","usePageWidth":false,"useTitle":true,"cMax_items":3,"useBackground":false,"title":"","lazyLoad":false,"widgetChooser":"custom.widget.HeroBanner"},"__typename":"QuiltComponent"}],"__typename":"QuiltWrapperSection"},"footer":{"backgroundImageProps":{"assetName":null,"backgroundSize":"COVER","backgroundRepeat":"NO_REPEAT","backgroundPosition":"CENTER_CENTER","lastModified":null,"__typename":"BackgroundImageProps"},"backgroundColor":"transparent","items":[{"id":"custom.widget.MicrosoftFooter","props":{"widgetVisibility":"signedInOrAnonymous","useTitle":true,"useBackground":false,"title":"","lazyLoad":false},"__typename":"QuiltComponent"}],"__typename":"QuiltWrapperSection"},"__typename":"QuiltWrapper","localOverride":false},"localOverride":false},"CachedAsset:text:en_US-components/common/ActionFeedback-1745505309793":{"__typename":"CachedAsset","id":"text:en_US-components/common/ActionFeedback-1745505309793","value":{"joinedGroupHub.title":"Welcome","joinedGroupHub.message":"You are now a member of this group and are subscribed to updates.","groupHubInviteNotFound.title":"Invitation Not Found","groupHubInviteNotFound.message":"Sorry, we could not find your invitation to the group. The owner may have canceled the invite.","groupHubNotFound.title":"Group Not Found","groupHubNotFound.message":"The grouphub you tried to join does not exist. It may have been deleted.","existingGroupHubMember.title":"Already Joined","existingGroupHubMember.message":"You are already a member of this group.","accountLocked.title":"Account Locked","accountLocked.message":"Your account has been locked due to multiple failed attempts. Try again in {lockoutTime} minutes.","editedGroupHub.title":"Changes Saved","editedGroupHub.message":"Your group has been updated.","leftGroupHub.title":"Goodbye","leftGroupHub.message":"You are no longer a member of this group and will not receive future updates.","deletedGroupHub.title":"Deleted","deletedGroupHub.message":"The group has been deleted.","groupHubCreated.title":"Group Created","groupHubCreated.message":"{groupHubName} is ready to use","accountClosed.title":"Account Closed","accountClosed.message":"The account has been closed and you will now be redirected to the homepage","resetTokenExpired.title":"Reset Password Link has Expired","resetTokenExpired.message":"Try resetting your password again","invalidUrl.title":"Invalid URL","invalidUrl.message":"The URL you're using is not recognized. 
Verify your URL and try again.","accountClosedForUser.title":"Account Closed","accountClosedForUser.message":"{userName}'s account is closed","inviteTokenInvalid.title":"Invitation Invalid","inviteTokenInvalid.message":"Your invitation to the community has been canceled or expired.","inviteTokenError.title":"Invitation Verification Failed","inviteTokenError.message":"The url you are utilizing is not recognized. Verify your URL and try again","pageNotFound.title":"Access Denied","pageNotFound.message":"You do not have access to this area of the community or it doesn't exist","eventAttending.title":"Responded as Attending","eventAttending.message":"You'll be notified when there's new activity and reminded as the event approaches","eventInterested.title":"Responded as Interested","eventInterested.message":"You'll be notified when there's new activity and reminded as the event approaches","eventNotFound.title":"Event Not Found","eventNotFound.message":"The event you tried to respond to does not exist.","redirectToRelatedPage.title":"Showing Related Content","redirectToRelatedPageForBaseUsers.title":"Showing Related Content","redirectToRelatedPageForBaseUsers.message":"The content you are trying to access is archived","redirectToRelatedPage.message":"The content you are trying to access is archived","relatedUrl.archivalLink.flyoutMessage":"The content you are trying to access is archived View Archived Content"},"localOverride":false},"CachedAsset:component:custom.widget.community_banner-en-1744400828360":{"__typename":"CachedAsset","id":"component:custom.widget.community_banner-en-1744400828360","value":{"component":{"id":"custom.widget.community_banner","template":{"id":"community_banner","markupLanguage":"HANDLEBARS","style":".community-banner {\n a.top-bar.btn {\n top: 0px;\n width: 100%;\n z-index: 999;\n text-align: center;\n left: 0px;\n background: #0068b8;\n color: white;\n padding: 10px 0px;\n display: block;\n box-shadow: none !important;\n border: none !important;\n border-radius: none !important;\n margin: 0px !important;\n font-size: 14px;\n }\n}\n","texts":null,"defaults":{"config":{"applicablePages":[],"description":"community announcement text","fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[],"__typename":"ComponentProperties"},"components":[{"id":"custom.widget.community_banner","form":null,"config":null,"props":[],"__typename":"Component"}],"grouping":"CUSTOM","__typename":"ComponentTemplate"},"properties":{"config":{"applicablePages":[],"description":"community announcement text","fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[],"__typename":"ComponentProperties"},"form":null,"__typename":"Component","localOverride":false},"globalCss":{"css":".custom_widget_community_banner_community-banner_1x9u2_1 {\n a.custom_widget_community_banner_top-bar_1x9u2_2.custom_widget_community_banner_btn_1x9u2_2 {\n top: 0;\n width: 100%;\n z-index: 999;\n text-align: center;\n left: 0;\n background: #0068b8;\n color: white;\n padding: 0.625rem 0;\n display: block;\n box-shadow: none !important;\n border: none !important;\n border-radius: none !important;\n margin: 0 !important;\n font-size: 0.875rem;\n 
}\n}\n","tokens":{"community-banner":"custom_widget_community_banner_community-banner_1x9u2_1","top-bar":"custom_widget_community_banner_top-bar_1x9u2_2","btn":"custom_widget_community_banner_btn_1x9u2_2"}},"form":null},"localOverride":false},"CachedAsset:component:custom.widget.HeroBanner-en-1744400828360":{"__typename":"CachedAsset","id":"component:custom.widget.HeroBanner-en-1744400828360","value":{"component":{"id":"custom.widget.HeroBanner","template":{"id":"HeroBanner","markupLanguage":"REACT","style":null,"texts":{"searchPlaceholderText":"Search this community","followActionText":"Follow","unfollowActionText":"Following","searchOnHoverText":"Please enter your search term(s) and then press return key to complete a search.","blogs.sidebar.pagetitle":"Latest Blogs | Microsoft Tech Community","followThisNode":"Follow this node","unfollowThisNode":"Unfollow this node"},"defaults":{"config":{"applicablePages":[],"description":null,"fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[{"id":"max_items","dataType":"NUMBER","list":false,"defaultValue":"3","label":"Max Items","description":"The maximum number of items to display in the carousel","possibleValues":null,"control":"INPUT","__typename":"PropDefinition"}],"__typename":"ComponentProperties"},"components":[{"id":"custom.widget.HeroBanner","form":{"fields":[{"id":"widgetChooser","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"title","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useTitle","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useBackground","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"widgetVisibility","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"moreOptions","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"cMax_items","validation":null,"noValidation":null,"dataType":"NUMBER","list":false,"control":"INPUT","defaultValue":"3","label":"Max Items","description":"The maximum number of items to display in the 
carousel","possibleValues":null,"__typename":"FormField"}],"layout":{"rows":[{"id":"widgetChooserGroup","type":"fieldset","as":null,"items":[{"id":"widgetChooser","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"titleGroup","type":"fieldset","as":null,"items":[{"id":"title","className":null,"__typename":"FormFieldRef"},{"id":"useTitle","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"useBackground","type":"fieldset","as":null,"items":[{"id":"useBackground","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"widgetVisibility","type":"fieldset","as":null,"items":[{"id":"widgetVisibility","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"moreOptionsGroup","type":"fieldset","as":null,"items":[{"id":"moreOptions","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"componentPropsGroup","type":"fieldset","as":null,"items":[{"id":"cMax_items","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"}],"actionButtons":null,"className":"custom_widget_HeroBanner_form","formGroupFieldSeparator":"divider","__typename":"FormLayout"},"__typename":"Form"},"config":null,"props":[],"__typename":"Component"}],"grouping":"CUSTOM","__typename":"ComponentTemplate"},"properties":{"config":{"applicablePages":[],"description":null,"fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[{"id":"max_items","dataType":"NUMBER","list":false,"defaultValue":"3","label":"Max Items","description":"The maximum number of items to display in the 
carousel","possibleValues":null,"control":"INPUT","__typename":"PropDefinition"}],"__typename":"ComponentProperties"},"form":{"fields":[{"id":"widgetChooser","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"title","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useTitle","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useBackground","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"widgetVisibility","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"moreOptions","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"cMax_items","validation":null,"noValidation":null,"dataType":"NUMBER","list":false,"control":"INPUT","defaultValue":"3","label":"Max Items","description":"The maximum number of items to display in the carousel","possibleValues":null,"__typename":"FormField"}],"layout":{"rows":[{"id":"widgetChooserGroup","type":"fieldset","as":null,"items":[{"id":"widgetChooser","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"titleGroup","type":"fieldset","as":null,"items":[{"id":"title","className":null,"__typename":"FormFieldRef"},{"id":"useTitle","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"useBackground","type":"fieldset","as":null,"items":[{"id":"useBackground","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"widgetVisibility","type":"fieldset","as":null,"items":[{"id":"widgetVisibility","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"moreOptionsGroup","type":"fieldset","as":null,"items":[{"id":"moreOptions","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"componentPropsGroup","type":"fieldset","as":null,"items":[{"id":"cMax_items","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"}],"actionButtons":null,"className":"custom_widget_HeroBanner_form","formGroupFieldSeparator":"divider","__typename":"FormLayout"},"__typename":"Form"},"__typename":"Component","localOverride":false},"globalCss":null,"form":{"fields":[{"id":"widgetChooser","validation":null,"noValidation"
:null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"title","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useTitle","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useBackground","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"widgetVisibility","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"moreOptions","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"cMax_items","validation":null,"noValidation":null,"dataType":"NUMBER","list":false,"control":"INPUT","defaultValue":"3","label":"Max Items","description":"The maximum number of items to display in the carousel","possibleValues":null,"__typename":"FormField"}],"layout":{"rows":[{"id":"widgetChooserGroup","type":"fieldset","as":null,"items":[{"id":"widgetChooser","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"titleGroup","type":"fieldset","as":null,"items":[{"id":"title","className":null,"__typename":"FormFieldRef"},{"id":"useTitle","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"useBackground","type":"fieldset","as":null,"items":[{"id":"useBackground","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"widgetVisibility","type":"fieldset","as":null,"items":[{"id":"widgetVisibility","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"moreOptionsGroup","type":"fieldset","as":null,"items":[{"id":"moreOptions","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"componentPropsGroup","type":"fieldset","as":null,"items":[{"id":"cMax_items","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"}],"actionButtons":null,"className":"custom_widget_HeroBanner_form","formGroupFieldSeparator":"divider","__typename":"FormLayout"},"__typename":"Form"}},"localOverride":false},"CachedAsset:component:custom.widget.MicrosoftFooter-en-1744400828360":{"__typename":"CachedAsset","id":"component:custom.widget.MicrosoftFooter-en-1744400828360","value":{"component":{"id":"custom.widget.MicrosoftFooter","template":{"id":"MicrosoftFooter","markupLanguage":"HANDLEBARS","style":".contex
t-uhf {\n min-width: 280px;\n font-size: 15px;\n box-sizing: border-box;\n -ms-text-size-adjust: 100%;\n -webkit-text-size-adjust: 100%;\n & *,\n & *:before,\n & *:after {\n box-sizing: inherit;\n }\n a.c-uhff-link {\n color: #616161;\n word-break: break-word;\n text-decoration: none;\n }\n &a:link,\n &a:focus,\n &a:hover,\n &a:active,\n &a:visited {\n text-decoration: none;\n color: inherit;\n }\n & div {\n font-family: 'Segoe UI', SegoeUI, 'Helvetica Neue', Helvetica, Arial, sans-serif;\n }\n}\n.c-uhff {\n background: #f2f2f2;\n margin: -1.5625;\n width: auto;\n height: auto;\n}\n.c-uhff-nav {\n margin: 0 auto;\n max-width: calc(1600px + 10%);\n padding: 0 5%;\n box-sizing: inherit;\n &:before,\n &:after {\n content: ' ';\n display: table;\n clear: left;\n }\n @media only screen and (max-width: 1083px) {\n padding-left: 12px;\n }\n .c-heading-4 {\n color: #616161;\n word-break: break-word;\n font-size: 15px;\n line-height: 20px;\n padding: 36px 0 4px;\n font-weight: 600;\n }\n .c-uhff-nav-row {\n .c-uhff-nav-group {\n display: block;\n float: left;\n min-height: 1px;\n vertical-align: text-top;\n padding: 0 12px;\n width: 100%;\n zoom: 1;\n &:first-child {\n padding-left: 0;\n @media only screen and (max-width: 1083px) {\n padding-left: 12px;\n }\n }\n @media only screen and (min-width: 540px) and (max-width: 1082px) {\n width: 33.33333%;\n }\n @media only screen and (min-width: 1083px) {\n width: 16.6666666667%;\n }\n ul.c-list.f-bare {\n font-size: 11px;\n line-height: 16px;\n margin-top: 0;\n margin-bottom: 0;\n padding-left: 0;\n list-style-type: none;\n li {\n word-break: break-word;\n padding: 8px 0;\n margin: 0;\n }\n }\n }\n }\n}\n.c-uhff-base {\n background: #f2f2f2;\n margin: 0 auto;\n max-width: calc(1600px + 10%);\n padding: 30px 5% 16px;\n &:before,\n &:after {\n content: ' ';\n display: table;\n }\n &:after {\n clear: both;\n }\n a.c-uhff-ccpa {\n font-size: 11px;\n line-height: 16px;\n float: left;\n margin: 3px 0;\n }\n a.c-uhff-ccpa:hover {\n text-decoration: underline;\n }\n ul.c-list {\n font-size: 11px;\n line-height: 16px;\n float: right;\n margin: 3px 0;\n color: #616161;\n li {\n padding: 0 24px 4px 0;\n display: inline-block;\n }\n }\n .c-list.f-bare {\n padding-left: 0;\n list-style-type: none;\n }\n @media only screen and (max-width: 1083px) {\n display: flex;\n flex-wrap: wrap;\n padding: 30px 24px 16px;\n }\n}\n\n.social-share {\n position: fixed;\n top: 60%;\n transform: translateY(-50%);\n left: 0;\n z-index: 1000;\n}\n\n.sharing-options {\n list-style: none;\n padding: 0;\n margin: 0;\n display: block;\n flex-direction: column;\n background-color: white;\n width: 43px;\n border-radius: 0px 7px 7px 0px;\n}\n.linkedin-icon {\n border-top-right-radius: 7px;\n}\n.linkedin-icon:hover {\n border-radius: 0;\n}\n.social-share-rss-image {\n border-bottom-right-radius: 7px;\n}\n.social-share-rss-image:hover {\n border-radius: 0;\n}\n\n.social-link-footer {\n position: relative;\n display: block;\n margin: -2px 0;\n transition: all 0.2s ease;\n}\n.social-link-footer:hover .linkedin-icon {\n border-radius: 0;\n}\n.social-link-footer:hover .social-share-rss-image {\n border-radius: 0;\n}\n\n.social-link-footer img {\n width: 40px;\n height: auto;\n transition: filter 0.3s ease;\n}\n\n.social-share-list {\n width: 40px;\n}\n.social-share-rss-image {\n width: 40px;\n}\n\n.share-icon {\n border: 2px solid transparent;\n display: inline-block;\n position: relative;\n}\n\n.share-icon:hover {\n opacity: 1;\n border: 2px solid white;\n box-sizing: 
border-box;\n}\n\n.share-icon:hover .label {\n opacity: 1;\n visibility: visible;\n border: 2px solid white;\n box-sizing: border-box;\n border-left: none;\n}\n\n.label {\n position: absolute;\n left: 100%;\n white-space: nowrap;\n opacity: 0;\n visibility: hidden;\n transition: all 0.2s ease;\n color: white;\n border-radius: 0 10 0 10px;\n top: 50%;\n transform: translateY(-50%);\n height: 40px;\n border-radius: 0 6px 6px 0;\n display: flex;\n align-items: center;\n justify-content: center;\n padding: 20px 5px 20px 8px;\n margin-left: -1px;\n}\n.linkedin {\n background-color: #0474b4;\n}\n.facebook {\n background-color: #3c5c9c;\n}\n.twitter {\n background-color: white;\n color: black;\n}\n.reddit {\n background-color: #fc4404;\n}\n.mail {\n background-color: #848484;\n}\n.bluesky {\n background-color: white;\n color: black;\n}\n.rss {\n background-color: #ec7b1c;\n}\n#RSS {\n width: 40px;\n height: 40px;\n}\n\n@media (max-width: 991px) {\n .social-share {\n display: none;\n }\n}\n","texts":{"New tab":"What's New","New 1":"Surface Laptop Studio 2","New 2":"Surface Laptop Go 3","New 3":"Surface Pro 9","New 4":"Surface Laptop 5","New 5":"Surface Studio 2+","New 6":"Copilot in Windows","New 7":"Microsoft 365","New 8":"Windows 11 apps","Store tab":"Microsoft Store","Store 1":"Account Profile","Store 2":"Download Center","Store 3":"Microsoft Store Support","Store 4":"Returns","Store 5":"Order tracking","Store 6":"Certified Refurbished","Store 7":"Microsoft Store Promise","Store 8":"Flexible Payments","Education tab":"Education","Edu 1":"Microsoft in education","Edu 2":"Devices for education","Edu 3":"Microsoft Teams for Education","Edu 4":"Microsoft 365 Education","Edu 5":"How to buy for your school","Edu 6":"Educator Training and development","Edu 7":"Deals for students and parents","Edu 8":"Azure for students","Business tab":"Business","Bus 1":"Microsoft Cloud","Bus 2":"Microsoft Security","Bus 3":"Dynamics 365","Bus 4":"Microsoft 365","Bus 5":"Microsoft Power Platform","Bus 6":"Microsoft Teams","Bus 7":"Microsoft Industry","Bus 8":"Small Business","Developer tab":"Developer & IT","Dev 1":"Azure","Dev 2":"Developer Center","Dev 3":"Documentation","Dev 4":"Microsoft Learn","Dev 5":"Microsoft Tech Community","Dev 6":"Azure Marketplace","Dev 7":"AppSource","Dev 8":"Visual Studio","Company tab":"Company","Com 1":"Careers","Com 2":"About Microsoft","Com 3":"Company News","Com 4":"Privacy at Microsoft","Com 5":"Investors","Com 6":"Diversity and inclusion","Com 7":"Accessiblity","Com 8":"Sustainibility"},"defaults":{"config":{"applicablePages":[],"description":"The Microsoft Footer","fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[],"__typename":"ComponentProperties"},"components":[{"id":"custom.widget.MicrosoftFooter","form":null,"config":null,"props":[],"__typename":"Component"}],"grouping":"CUSTOM","__typename":"ComponentTemplate"},"properties":{"config":{"applicablePages":[],"description":"The Microsoft Footer","fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[],"__typename":"ComponentProperties"},"form":null,"__typename":"Component","localOverride":false},"globalCss":{"css":".custom_widget_MicrosoftFooter_context-uhf_105bp_1 {\n min-width: 17.5rem;\n font-size: 0.9375rem;\n box-sizing: border-box;\n -ms-text-size-adjust: 100%;\n -webkit-text-size-adjust: 100%;\n & *,\n & *:before,\n & *:after {\n box-sizing: inherit;\n }\n a.custom_widget_MicrosoftFooter_c-uhff-link_105bp_12 {\n color: #616161;\n word-break: break-word;\n text-decoration: none;\n }\n 
&a:link,\n &a:focus,\n &a:hover,\n &a:active,\n &a:visited {\n text-decoration: none;\n color: inherit;\n }\n & div {\n font-family: 'Segoe UI', SegoeUI, 'Helvetica Neue', Helvetica, Arial, sans-serif;\n }\n}\n.custom_widget_MicrosoftFooter_c-uhff_105bp_12 {\n background: #f2f2f2;\n margin: -1.5625;\n width: auto;\n height: auto;\n}\n.custom_widget_MicrosoftFooter_c-uhff-nav_105bp_35 {\n margin: 0 auto;\n max-width: calc(100rem + 10%);\n padding: 0 5%;\n box-sizing: inherit;\n &:before,\n &:after {\n content: ' ';\n display: table;\n clear: left;\n }\n @media only screen and (max-width: 1083px) {\n padding-left: 0.75rem;\n }\n .custom_widget_MicrosoftFooter_c-heading-4_105bp_49 {\n color: #616161;\n word-break: break-word;\n font-size: 0.9375rem;\n line-height: 1.25rem;\n padding: 2.25rem 0 0.25rem;\n font-weight: 600;\n }\n .custom_widget_MicrosoftFooter_c-uhff-nav-row_105bp_57 {\n .custom_widget_MicrosoftFooter_c-uhff-nav-group_105bp_58 {\n display: block;\n float: left;\n min-height: 0.0625rem;\n vertical-align: text-top;\n padding: 0 0.75rem;\n width: 100%;\n zoom: 1;\n &:first-child {\n padding-left: 0;\n @media only screen and (max-width: 1083px) {\n padding-left: 0.75rem;\n }\n }\n @media only screen and (min-width: 540px) and (max-width: 1082px) {\n width: 33.33333%;\n }\n @media only screen and (min-width: 1083px) {\n width: 16.6666666667%;\n }\n ul.custom_widget_MicrosoftFooter_c-list_105bp_78.custom_widget_MicrosoftFooter_f-bare_105bp_78 {\n font-size: 0.6875rem;\n line-height: 1rem;\n margin-top: 0;\n margin-bottom: 0;\n padding-left: 0;\n list-style-type: none;\n li {\n word-break: break-word;\n padding: 0.5rem 0;\n margin: 0;\n }\n }\n }\n }\n}\n.custom_widget_MicrosoftFooter_c-uhff-base_105bp_94 {\n background: #f2f2f2;\n margin: 0 auto;\n max-width: calc(100rem + 10%);\n padding: 1.875rem 5% 1rem;\n &:before,\n &:after {\n content: ' ';\n display: table;\n }\n &:after {\n clear: both;\n }\n a.custom_widget_MicrosoftFooter_c-uhff-ccpa_105bp_107 {\n font-size: 0.6875rem;\n line-height: 1rem;\n float: left;\n margin: 0.1875rem 0;\n }\n a.custom_widget_MicrosoftFooter_c-uhff-ccpa_105bp_107:hover {\n text-decoration: underline;\n }\n ul.custom_widget_MicrosoftFooter_c-list_105bp_78 {\n font-size: 0.6875rem;\n line-height: 1rem;\n float: right;\n margin: 0.1875rem 0;\n color: #616161;\n li {\n padding: 0 1.5rem 0.25rem 0;\n display: inline-block;\n }\n }\n .custom_widget_MicrosoftFooter_c-list_105bp_78.custom_widget_MicrosoftFooter_f-bare_105bp_78 {\n padding-left: 0;\n list-style-type: none;\n }\n @media only screen and (max-width: 1083px) {\n display: flex;\n flex-wrap: wrap;\n padding: 1.875rem 1.5rem 1rem;\n }\n}\n.custom_widget_MicrosoftFooter_social-share_105bp_138 {\n position: fixed;\n top: 60%;\n transform: translateY(-50%);\n left: 0;\n z-index: 1000;\n}\n.custom_widget_MicrosoftFooter_sharing-options_105bp_146 {\n list-style: none;\n padding: 0;\n margin: 0;\n display: block;\n flex-direction: column;\n background-color: white;\n width: 2.6875rem;\n border-radius: 0 0.4375rem 0.4375rem 0;\n}\n.custom_widget_MicrosoftFooter_linkedin-icon_105bp_156 {\n border-top-right-radius: 7px;\n}\n.custom_widget_MicrosoftFooter_linkedin-icon_105bp_156:hover {\n border-radius: 0;\n}\n.custom_widget_MicrosoftFooter_social-share-rss-image_105bp_162 {\n border-bottom-right-radius: 7px;\n}\n.custom_widget_MicrosoftFooter_social-share-rss-image_105bp_162:hover {\n border-radius: 0;\n}\n.custom_widget_MicrosoftFooter_social-link-footer_105bp_169 {\n position: relative;\n display: block;\n 
margin: -0.125rem 0;\n transition: all 0.2s ease;\n}\n.custom_widget_MicrosoftFooter_social-link-footer_105bp_169:hover .custom_widget_MicrosoftFooter_linkedin-icon_105bp_156 {\n border-radius: 0;\n}\n.custom_widget_MicrosoftFooter_social-link-footer_105bp_169:hover .custom_widget_MicrosoftFooter_social-share-rss-image_105bp_162 {\n border-radius: 0;\n}\n.custom_widget_MicrosoftFooter_social-link-footer_105bp_169 img {\n width: 2.5rem;\n height: auto;\n transition: filter 0.3s ease;\n}\n.custom_widget_MicrosoftFooter_social-share-list_105bp_188 {\n width: 2.5rem;\n}\n.custom_widget_MicrosoftFooter_social-share-rss-image_105bp_162 {\n width: 2.5rem;\n}\n.custom_widget_MicrosoftFooter_share-icon_105bp_195 {\n border: 2px solid transparent;\n display: inline-block;\n position: relative;\n}\n.custom_widget_MicrosoftFooter_share-icon_105bp_195:hover {\n opacity: 1;\n border: 2px solid white;\n box-sizing: border-box;\n}\n.custom_widget_MicrosoftFooter_share-icon_105bp_195:hover .custom_widget_MicrosoftFooter_label_105bp_207 {\n opacity: 1;\n visibility: visible;\n border: 2px solid white;\n box-sizing: border-box;\n border-left: none;\n}\n.custom_widget_MicrosoftFooter_label_105bp_207 {\n position: absolute;\n left: 100%;\n white-space: nowrap;\n opacity: 0;\n visibility: hidden;\n transition: all 0.2s ease;\n color: white;\n border-radius: 0 10 0 0.625rem;\n top: 50%;\n transform: translateY(-50%);\n height: 2.5rem;\n border-radius: 0 0.375rem 0.375rem 0;\n display: flex;\n align-items: center;\n justify-content: center;\n padding: 1.25rem 0.3125rem 1.25rem 0.5rem;\n margin-left: -0.0625rem;\n}\n.custom_widget_MicrosoftFooter_linkedin_105bp_156 {\n background-color: #0474b4;\n}\n.custom_widget_MicrosoftFooter_facebook_105bp_237 {\n background-color: #3c5c9c;\n}\n.custom_widget_MicrosoftFooter_twitter_105bp_240 {\n background-color: white;\n color: black;\n}\n.custom_widget_MicrosoftFooter_reddit_105bp_244 {\n background-color: #fc4404;\n}\n.custom_widget_MicrosoftFooter_mail_105bp_247 {\n background-color: #848484;\n}\n.custom_widget_MicrosoftFooter_bluesky_105bp_250 {\n background-color: white;\n color: black;\n}\n.custom_widget_MicrosoftFooter_rss_105bp_254 {\n background-color: #ec7b1c;\n}\n#custom_widget_MicrosoftFooter_RSS_105bp_1 {\n width: 2.5rem;\n height: 2.5rem;\n}\n@media (max-width: 991px) {\n .custom_widget_MicrosoftFooter_social-share_105bp_138 {\n display: none;\n 
}\n}\n","tokens":{"context-uhf":"custom_widget_MicrosoftFooter_context-uhf_105bp_1","c-uhff-link":"custom_widget_MicrosoftFooter_c-uhff-link_105bp_12","c-uhff":"custom_widget_MicrosoftFooter_c-uhff_105bp_12","c-uhff-nav":"custom_widget_MicrosoftFooter_c-uhff-nav_105bp_35","c-heading-4":"custom_widget_MicrosoftFooter_c-heading-4_105bp_49","c-uhff-nav-row":"custom_widget_MicrosoftFooter_c-uhff-nav-row_105bp_57","c-uhff-nav-group":"custom_widget_MicrosoftFooter_c-uhff-nav-group_105bp_58","c-list":"custom_widget_MicrosoftFooter_c-list_105bp_78","f-bare":"custom_widget_MicrosoftFooter_f-bare_105bp_78","c-uhff-base":"custom_widget_MicrosoftFooter_c-uhff-base_105bp_94","c-uhff-ccpa":"custom_widget_MicrosoftFooter_c-uhff-ccpa_105bp_107","social-share":"custom_widget_MicrosoftFooter_social-share_105bp_138","sharing-options":"custom_widget_MicrosoftFooter_sharing-options_105bp_146","linkedin-icon":"custom_widget_MicrosoftFooter_linkedin-icon_105bp_156","social-share-rss-image":"custom_widget_MicrosoftFooter_social-share-rss-image_105bp_162","social-link-footer":"custom_widget_MicrosoftFooter_social-link-footer_105bp_169","social-share-list":"custom_widget_MicrosoftFooter_social-share-list_105bp_188","share-icon":"custom_widget_MicrosoftFooter_share-icon_105bp_195","label":"custom_widget_MicrosoftFooter_label_105bp_207","linkedin":"custom_widget_MicrosoftFooter_linkedin_105bp_156","facebook":"custom_widget_MicrosoftFooter_facebook_105bp_237","twitter":"custom_widget_MicrosoftFooter_twitter_105bp_240","reddit":"custom_widget_MicrosoftFooter_reddit_105bp_244","mail":"custom_widget_MicrosoftFooter_mail_105bp_247","bluesky":"custom_widget_MicrosoftFooter_bluesky_105bp_250","rss":"custom_widget_MicrosoftFooter_rss_105bp_254","RSS":"custom_widget_MicrosoftFooter_RSS_105bp_1"}},"form":null},"localOverride":false},"CachedAsset:text:en_US-components/community/Breadcrumb-1745505309793":{"__typename":"CachedAsset","id":"text:en_US-components/community/Breadcrumb-1745505309793","value":{"navLabel":"Breadcrumbs","dropdown":"Additional parent page navigation"},"localOverride":false},"CachedAsset:text:en_US-components/tags/TagsHeaderWidget-1745505309793":{"__typename":"CachedAsset","id":"text:en_US-components/tags/TagsHeaderWidget-1745505309793","value":{"tag":"{tagName}","topicsCount":"{count} {count, plural, one {Topic} other {Topics}}"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageListForNodeByRecentActivityWidget-1745505309793":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageListForNodeByRecentActivityWidget-1745505309793","value":{"title@userScope:other":"Recent Content","title@userScope:self":"Contributions","title@board:FORUM@userScope:other":"Recent Discussions","title@board:BLOG@userScope:other":"Recent Blogs","emptyDescription":"No content to show","MessageListForNodeByRecentActivityWidgetEditor.nodeScope.label":"Scope","title@instance:1722894000155":"Recent Discussions","title@instance:1727367112619":"Recent Blog Articles","title@instance:1727367069748":"Recent Discussions","title@instance:1727366213114":"Latest Discussions","title@instance:1727899609720":"","title@instance:1727363308925":"Latest Discussions","title@instance:1737115580352":"Latest Articles","title@instance:1720453418992":"Recent Discssions","title@instance:1727365950181":"Latest Blog Articles","title@instance:bmDPnI":"Latest Blog Articles","title@instance:IiDDJZ":"Latest Blog Articles","title@instance:1721244347979":"Latest blog posts","title@instance:1728383752171":"Related 
Content","title@instance:1722893956545":"Latest Skilling Resources","title@instance:dhcgCU":"Latest Discussions"},"localOverride":false},"Category:category:Exchange":{"__typename":"Category","id":"category:Exchange","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Outlook":{"__typename":"Category","id":"category:Outlook","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Community-Info-Center":{"__typename":"Category","id":"category:Community-Info-Center","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:EducationSector":{"__typename":"Category","id":"category:EducationSector","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:DrivingAdoption":{"__typename":"Category","id":"category:DrivingAdoption","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Azure":{"__typename":"Category","id":"category:Azure","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Windows-Server":{"__typename":"Category","id":"category:Windows-Server","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:MicrosoftTeams":{"__typename":"Category","id":"category:MicrosoftTeams","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}},"displayId":"MicrosoftTeams"},"Category:category:PublicSector":{"__typename":"Category","id":"category:PublicSector","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:microsoft365":{"__typename":"Category","id":"category:microsoft365","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:IoT":{"__typename":"Category","id":"category:IoT","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:HealthcareAndLifeSciences":{"__typename":"Category","id":"category:HealthcareAndLifeSciences","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:ITOpsTalk":{"__typename":"Category","id":"category:ITOpsTalk","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:MicrosoftLearn":{"__typename":"Category","id":"category:MicrosoftLearn","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Blog:board:MicrosoftLearnBlog":{"__typename":"Blog","id":"board:MicrosoftLearnBlog","blogPolicies":{"__typename":"BlogPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}},"boardPolicies":{"__typename":"BoardPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:AI":{"__typename":"Category","id":"category:AI","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}}
,"Category:category:MicrosoftMechanics":{"__typename":"Category","id":"category:MicrosoftMechanics","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:MicrosoftforNonprofits":{"__typename":"Category","id":"category:MicrosoftforNonprofits","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:StartupsatMicrosoft":{"__typename":"Category","id":"category:StartupsatMicrosoft","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:PartnerCommunity":{"__typename":"Category","id":"category:PartnerCommunity","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Microsoft365Copilot":{"__typename":"Category","id":"category:Microsoft365Copilot","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Windows":{"__typename":"Category","id":"category:Windows","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Content_Management":{"__typename":"Category","id":"category:Content_Management","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}},"displayId":"Content_Management"},"Category:category:microsoft-security":{"__typename":"Category","id":"category:microsoft-security","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:microsoftintune":{"__typename":"Category","id":"category:microsoftintune","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Conversation:conversation:2965954":{"__typename":"Conversation","id":"conversation:2965954","topic":{"__typename":"ForumTopicMessage","uid":2965954},"lastPostingActivityTime":"2024-10-23T04:46:32.036-07:00","solved":true},"Category:category:Project":{"__typename":"Category","id":"category:Project","displayId":"Project"},"Forum:board:Project":{"__typename":"Forum","id":"board:Project","displayId":"Project","nodeType":"board","conversationStyle":"FORUM","title":"Project","shortTitle":"General Discussion","parent":{"__ref":"Category:category:Project"}},"User:user:1219049":{"__typename":"User","uid":1219049,"login":"MZ_Rasheed","registrationData":{"__typename":"RegistrationData","status":null},"deleted":false,"avatar":{"__typename":"UserAvatar","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/m_assets/avatars/default/avatar-4.svg?time=0"},"id":"user:1219049"},"ForumTopicMessage:message:2965954":{"__typename":"ForumTopicMessage","subject":"Opening and Viewing Option for Two MS Project Files Simultaneously on Two Screens?","conversation":{"__ref":"Conversation:conversation:2965954"},"id":"message:2965954","revisionNum":3,"uid":2965954,"depth":0,"board":{"__ref":"Forum:board:Project"},"author":{"__ref":"User:user:1219049"},"metrics":{"__typename":"MessageMetrics","views":149702},"postTime":"2021-11-15T07:22:39.031-08:00","lastPublishTime":"2023-11-09T11:10:06.165-08:00","body@stripHtml({\"removeProcessingText\":true,\"removeSpoilerMarkup\":true,\"removeTocMarkup\":true,\"truncateLength\":-1})":" Hi Can two MS Project files be opened on two 
different screens at the same time? This option is should be there as it is for other usual MS software like Word, Excel etc.   Hope this can be fixed as will ease out while working from two parallel files simultaneously. Appreciate if this can be looked at, please asap. Looking forward ","body@stripHtml({\"removeProcessingText\":true,\"removeSpoilerMarkup\":true,\"removeTocMarkup\":true,\"truncateLength\":-1})@stringLength":"361","kudosSumWeight":1,"repliesCount":40,"readOnly":false,"images":{"__typename":"AssociatedImageConnection","edges":[],"totalCount":0,"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}},"videos":{"__typename":"VideoConnection","edges":[],"totalCount":0,"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}}},"Conversation:conversation:3885602":{"__typename":"Conversation","id":"conversation:3885602","topic":{"__typename":"BlogTopicMessage","uid":3885602},"lastPostingActivityTime":"2024-05-07T05:35:33.028-07:00","solved":false},"Category:category:FastTrack":{"__typename":"Category","id":"category:FastTrack","displayId":"FastTrack"},"Blog:board:FastTrackforAzureBlog":{"__typename":"Blog","id":"board:FastTrackforAzureBlog","displayId":"FastTrackforAzureBlog","nodeType":"board","conversationStyle":"BLOG","title":"FastTrack for Azure","shortTitle":"FastTrack for Azure","parent":{"__ref":"Category:category:FastTrack"}},"User:user:988334":{"__typename":"User","uid":988334,"login":"paolosalvatori","registrationData":{"__typename":"RegistrationData","status":null},"deleted":false,"avatar":{"__typename":"UserAvatar","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/dS05ODgzMzQtMzg1MjYyaTE4QTU5MkIyQUVCMkM0MDE"},"id":"user:988334"},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTQ5MjkyOGlEQTI4NEI0NjcwRTNENjQx?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTQ5MjkyOGlEQTI4NEI0NjcwRTNENjQx?revision=8","title":"architecture.png","associationType":"TEASER","width":900,"height":516,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTQ5MjkyOWk4RTkzMjIxOUMzMEYyREIz?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTQ5MjkyOWk4RTkzMjIxOUMzMEYyREIz?revision=8","title":"architecture.png","associationType":"BODY","width":900,"height":516,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDExOWk0NzlFNThDNjI3QUNENzM2?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDExOWk0NzlFNThDNjI3QUNENzM2?revision=8","title":"chainlit-welcome-screen.png","associationType":"BODY","width":1399,"height":1072,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE4OGk1ODE3ODQxQTMzQzY0REMz?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE4OGk1ODE3ODQxQTMzQzY0REMz?revision=8","title":"chainlit-simple-chat.png","associationType":"BODY","width":1358,"height":1132,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE5MWkxQ0I1QzI0Q0VFNTVBRjNC?re
vision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE5MWkxQ0I1QzI0Q0VFNTVBRjNC?revision=8","title":"chainlit-format-result.png","associationType":"BODY","width":1399,"height":1154,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEyM2k3NzU5RjAxNkVEODBEQjU4?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEyM2k3NzU5RjAxNkVEODBEQjU4?revision=8","title":"chainlit-dark-mode.png","associationType":"BODY","width":1399,"height":1124,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEyNmkzM0FBNzE1RTQ3RDkwNUI4?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEyNmkzM0FBNzE1RTQ3RDkwNUI4?revision=8","title":"chainlit-before-upload.png","associationType":"BODY","width":1399,"height":1124,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEyOGk1QUFBNDZBMjFCODc5RkIz?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEyOGk1QUFBNDZBMjFCODc5RkIz?revision=8","title":"chainlit-processing-documents.png","associationType":"BODY","width":1399,"height":1124,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE5Mmk3RDJFMjQ5RTM0QzJGMjgz?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE5Mmk3RDJFMjQ5RTM0QzJGMjgz?revision=8","title":"chainlit-document-reply.png","associationType":"BODY","width":1358,"height":1132,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE5M2k5OUZFOUQ1RjM1N0MyOTAz?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE5M2k5OUZFOUQ1RjM1N0MyOTAz?revision=8","title":"chainlit-chain-of-thought.png","associationType":"BODY","width":1358,"height":3039,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE5NGkzOTUxODk3QThGQjc5N0E1?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE5NGkzOTUxODk3QThGQjc5N0E1?revision=8","title":"chainlit-source.png","associationType":"BODY","width":1358,"height":1132,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEzM2kyRTg1QzY5REM0MEM2NzBE?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEzM2kyRTg1QzY5REM0MEM2NzBE?revision=8","title":"chainlit-prompt-playground.png","associationType":"BODY","width":1399,"height":1124,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEzNGlFREU5MDY4REM0MjUzNzdG?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEzNGlFREU5MDY4REM0MjUzNzdG?revision=8","title":"chainlit-prompt-playground-variable.png","associationType":"BODY","width":1399,"height":1124,"altText":null},"Associat
Create an Azure OpenAI, LangChain, ChromaDB, and Chainlit Chat App in Container Apps using Terraform

This article shows how to quickly build chat applications using Python and powerful technologies such as OpenAI ChatGPT models, embedding models, the LangChain framework, the ChromaDB vector database, and Chainlit, an open-source Python package specifically designed to create user interfaces (UIs) for AI applications. These applications are hosted on Azure Container Apps, a fully managed environment that enables you to run microservices and containerized applications on a serverless platform. The sample includes two applications:

- Simple Chat: this application uses OpenAI's language models to generate real-time completion responses.
- Documents QA Chat: this application goes beyond simple conversations. Users can upload up to 10 .pdf and .docx documents, which are then processed to create vector embeddings. These embeddings are stored in ChromaDB for efficient retrieval. Users can pose questions about the uploaded documents and view the Chain of Thought, enabling easy exploration of the reasoning process. The completion message contains links to the text chunks in the documents that were used as a source for the response.

Both applications use a user-defined managed identity to authenticate and authorize against Azure OpenAI Service (AOAI) and Azure Container Registry (ACR), and use Azure Private Endpoints to connect privately and securely to these services. The chat UIs are built using Chainlit, an open-source Python package designed explicitly for creating AI applications. Chainlit seamlessly integrates with LangChain, LlamaIndex, and LangFlow, making it a powerful tool for easily developing ChatGPT-like applications. By following this example, you can quickly create sophisticated chat applications that use cutting-edge technologies, empowering users with intelligent conversational capabilities.
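As a rough illustration of how these pieces fit together (a sketch, not the repository's actual code), the following Chainlit handler authenticates to Azure OpenAI with a managed identity via azure-identity and returns chat completions. The environment variable names and the deployment name are illustrative assumptions.

```python
# Minimal sketch of a Chainlit chat handler backed by an Azure OpenAI chat deployment,
# authenticating with Azure AD / managed identity instead of an API key.
# Not the sample's actual source; names and environment variables are placeholders.
import os

import chainlit as cl
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
from openai import AsyncAzureOpenAI

# In Azure Container Apps, DefaultAzureCredential resolves to the user-assigned
# managed identity when AZURE_CLIENT_ID is set to that identity's client ID.
token_provider = get_bearer_token_provider(
    DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
)

client = AsyncAzureOpenAI(
    azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],  # e.g. https://<aoai-name>.openai.azure.com
    azure_ad_token_provider=token_provider,
    api_version="2024-02-01",
)
DEPLOYMENT = os.environ.get("AZURE_OPENAI_DEPLOYMENT", "gpt-35-turbo-16k")


@cl.on_message
async def on_message(message: cl.Message) -> None:
    # Forward the user's message to the chat deployment and echo the completion back.
    response = await client.chat.completions.create(
        model=DEPLOYMENT,
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": message.content},
        ],
    )
    await cl.Message(content=response.choices[0].message.content).send()
```

Locally you would run such an app with `chainlit run app.py` after signing in with the Azure CLI, so that DefaultAzureCredential can fall back to your developer credentials.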
You can find the code and Visio diagrams in the companion GitHub repository. Also, check the following articles:

- Deploy and run an Azure OpenAI ChatGPT application on AKS via Bicep
- Deploy and run an Azure OpenAI ChatGPT application on AKS via Terraform

Prerequisites

- An active Azure subscription. If you don't have one, create a free Azure account before you begin.
- Visual Studio Code installed on one of the supported platforms, along with the HashiCorp Terraform extension.
- Azure CLI version 2.49.0 or later. To install or upgrade, see Install Azure CLI.
- The aks-preview Azure CLI extension, version 0.5.140 or later.
- Terraform v1.5.2 or later.

Architecture

The following diagram shows the architecture and network topology of the sample (see architecture.png in the companion repository). This sample provides two sets of Terraform modules to deploy the infrastructure and the chat applications.

Infrastructure Terraform Modules

You can use the Terraform modules in the terraform/infra folder to deploy the infrastructure used by the sample, including the Azure Container Apps Environment, Azure OpenAI Service (AOAI), and Azure Container Registry (ACR), but not the Azure Container Apps (ACA). The modules in the terraform/infra folder deploy the following resources:

- azurerm_virtual_network: an Azure Virtual Network with two subnets:
  - ContainerApps: this subnet hosts the Azure Container Apps Environment.
  - PrivateEndpoints: this subnet contains the Azure Private Endpoints to the Azure OpenAI Service (AOAI) and Azure Container Registry (ACR) resources.
- azurerm_container_app_environment: the Azure Container Apps Environment hosting the Azure Container Apps.
- azurerm_cognitive_account: an Azure OpenAI Service (AOAI) resource with a GPT-3.5 model used by the chatbot applications. Azure OpenAI Service gives customers advanced language AI with OpenAI GPT-4, GPT-3, Codex, and DALL-E models with Azure's security and enterprise promise. Azure OpenAI co-develops the APIs with OpenAI, ensuring compatibility and a smooth transition from one to the other. The Terraform modules create the following model deployments:
  - GPT-35: a gpt-35-turbo-16k model used to generate human-like and engaging conversational responses.
  - Embeddings model: the text-embedding-ada-002 model, used to transform input documents into meaningful and compact numerical representations called embeddings (a minimal usage sketch follows this list). Embeddings capture the semantic or contextual information of the input data in a lower-dimensional space, making it easier for machine learning algorithms to process and analyze the data effectively. Embeddings can be stored in a vector database, such as ChromaDB or Facebook AI Similarity Search, explicitly designed for efficient storage, indexing, and retrieval of vector embeddings.
- azurerm_user_assigned_identity: a user-defined managed identity used by the chatbot applications to acquire a security token to call the Chat Completion API of the ChatGPT model provided by the Azure OpenAI Service and to call the Embedding model.
- azurerm_container_registry: an Azure Container Registry (ACR) to build, store, and manage container images and artifacts in a private registry for all container deployments. In this sample, the registry stores the container images of the two chat applications.
- azurerm_private_endpoint: an Azure Private Endpoint is created for each of the following resources:
  - Azure OpenAI Service (AOAI)
  - Azure Container Registry (ACR)
- azurerm_private_dns_zone: an Azure Private DNS Zone is created for each of the following resources:
  - Azure OpenAI Service (AOAI)
  - Azure Container Registry (ACR)
- azurerm_log_analytics_workspace: a centralized Azure Log Analytics workspace used to collect diagnostics logs and metrics from all the Azure resources:
  - Azure OpenAI Service (AOAI)
  - Azure Container Registry (ACR)
  - Azure Container Apps (ACA)
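As referenced in the embeddings item above, here is a minimal sketch of calling a text-embedding-ada-002 deployment through the Azure OpenAI Python SDK; it is not the sample's actual code, and the endpoint and deployment names are placeholders.

```python
# Minimal sketch: turn a piece of text into an embedding vector with an
# Azure OpenAI text-embedding-ada-002 deployment (names are illustrative).
import os

from azure.identity import DefaultAzureCredential, get_bearer_token_provider
from openai import AzureOpenAI

client = AzureOpenAI(
    azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
    azure_ad_token_provider=get_bearer_token_provider(
        DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
    ),
    api_version="2024-02-01",
)

result = client.embeddings.create(
    model=os.environ.get("AZURE_OPENAI_ADA_DEPLOYMENT", "text-embedding-ada-002"),
    input=["Azure Container Apps is a serverless container platform."],
)

vector = result.data[0].embedding  # a list of floats (1536 dimensions for ada-002)
print(len(vector), vector[:5])
```

Vectors produced this way are what get written to a vector store such as ChromaDB for later similarity search.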
Application Terraform Modules

You can use the Terraform modules in the terraform/apps folder to deploy the Azure Container Apps (ACA) using the Docker container images stored in the Azure Container Registry you deployed in the previous step.

- azurerm_container_app: this sample deploys the following applications:
  - chatapp: this simple chat application utilizes OpenAI's language models to generate real-time completion responses.
  - docapp: this chat application goes beyond conversations. Users can upload up to 10 .pdf and .docx documents, which are then processed to create vector embeddings. These embeddings are stored in ChromaDB for efficient retrieval (a retrieval sketch follows this list). Users can pose questions about the uploaded documents and view the Chain of Thought, enabling easy exploration of the reasoning process. The completion message contains links to the text chunks in the files that were used as a source for the response.
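As referenced in the docapp item above, the following is a small illustrative sketch of the core Documents QA idea: store text chunks in a ChromaDB collection and retrieve the chunks most relevant to a question. The real application builds this flow with LangChain and Chainlit; the chunk texts, file names, and collection name below are made up.

```python
# Minimal Documents QA sketch: add document chunks to a ChromaDB collection and
# query it for the chunks most relevant to a question. Illustrative only.
import chromadb

chroma = chromadb.Client()                      # in-memory instance, fine for a sketch
collection = chroma.create_collection("docs")   # hypothetical collection name

# In the real app the chunks come from uploaded .pdf/.docx files and the embeddings
# from the text-embedding-ada-002 deployment; here Chroma's default embedder runs.
collection.add(
    ids=["chunk-1", "chunk-2"],
    documents=[
        "Azure Container Apps runs containerized apps on a serverless platform.",
        "ChromaDB stores vector embeddings for efficient similarity search.",
    ],
    metadatas=[{"source": "doc1.docx"}, {"source": "doc2.pdf"}],
)

hits = collection.query(query_texts=["Where are the embeddings stored?"], n_results=1)
print(hits["documents"][0][0])   # best-matching chunk
print(hits["metadatas"][0][0])   # its source file, used to link back to the document
```

The retrieved chunks (and their sources, which power the links shown in the completion message) are then passed to the chat model as context for the answer.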
Azure Container Apps

Azure Container Apps (ACA) is a serverless compute service provided by Microsoft Azure that allows developers to easily deploy and manage containerized applications without the need to manage the underlying infrastructure. It provides a simplified and scalable solution for running applications in containers, leveraging the power and flexibility of the Azure ecosystem.

With Azure Container Apps, developers can package their applications into containers using popular containerization technologies such as Docker. These containers encapsulate the application and its dependencies, ensuring consistent execution across different environments.

Powered by Kubernetes and open-source technologies like Dapr, KEDA, and Envoy, the service abstracts away the complexities of managing the infrastructure, including provisioning, scaling, and monitoring, allowing developers to focus solely on building and deploying their applications. Azure Container Apps handles automatic scaling and load balancing, and natively integrates with other Azure services, such as Azure Monitor and Azure Container Registry (ACR), to provide a comprehensive and secure application deployment experience.

Azure Container Apps offers benefits such as rapid deployment, easy scalability, cost-efficiency, and seamless integration with other Azure services, making it an attractive choice for modern application development and deployment scenarios.

Azure OpenAI Service

The Azure OpenAI Service is a platform offered by Microsoft Azure that provides cognitive services powered by OpenAI models. One of the models available through this service is the ChatGPT model, which is designed for interactive conversational tasks. It allows developers to integrate natural language understanding and generation capabilities into their applications.

Azure OpenAI Service provides REST API access to OpenAI's powerful language models, including the GPT-3, Codex, and Embeddings model series. In addition, the new GPT-4 and ChatGPT model series have now reached general availability. These models can be easily adapted to your specific task, including but not limited to content generation, summarization, semantic search, and natural-language-to-code translation. Users can access the service through REST APIs, the Python SDK, or the web-based interface in the Azure OpenAI Studio.

You can use the Embeddings model to transform raw data or inputs into meaningful and compact numerical representations called embeddings. Embeddings capture the semantic or contextual information of the input data in a lower-dimensional space, making it easier for machine learning algorithms to process and analyze the data effectively. Embeddings can be stored in a vector database, such as ChromaDB or Facebook AI Similarity Search (FAISS), explicitly designed for efficient storage, indexing, and retrieval of vector embeddings.

The Chat Completion API, which is part of the Azure OpenAI Service, provides a dedicated interface for interacting with the ChatGPT and GPT-4 models. This API is currently in preview and is the preferred method for accessing these models; the GPT-4 models can only be accessed through it.

GPT-3, GPT-3.5, and GPT-4 models from OpenAI are prompt-based. With prompt-based models, the user interacts with the model by entering a text prompt, to which the model responds with a text completion. This completion is the model's continuation of the input text. While these models are compelling, their behavior is also very sensitive to the prompt, which makes prompt construction a critical skill to develop. For more information, see Introduction to prompt engineering.

Prompt construction can be complex. In practice, the prompt acts to configure the model weights to complete the desired task, but it's more of an art than a science, often requiring experience and intuition to craft a successful prompt. The goal of this article is to help get you started with this learning process. It attempts to capture general concepts and patterns that apply to all GPT models. However, it's essential to understand that each model behaves differently, so the learnings may not apply equally to all models.
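To make the Chat Completion API and prompt-sensitivity discussion concrete, here is a minimal sketch of a call with an explicit system prompt. The deployment name, API version, and environment variables are illustrative assumptions, and it uses an API key for brevity; the sample itself authenticates with a managed identity as shown earlier.

```python
# Minimal sketch of the Chat Completion API with an explicit system prompt,
# illustrating how the prompt shapes the model's behaviour (placeholders only).
import os

from openai import AzureOpenAI

client = AzureOpenAI(
    azure_endpoint=os.environ["AZURE_OPENAI_ENDPOINT"],
    api_key=os.environ["AZURE_OPENAI_API_KEY"],  # or an Azure AD token provider, as earlier
    api_version="2024-02-01",
)

response = client.chat.completions.create(
    model=os.environ.get("AZURE_OPENAI_DEPLOYMENT", "gpt-35-turbo-16k"),
    temperature=0.2,
    messages=[
        # Much of the "prompt engineering" happens in the system message:
        {"role": "system", "content": "You are a concise assistant. Answer in at most two sentences."},
        {"role": "user", "content": "Explain what a vector embedding is."},
    ],
)
print(response.choices[0].message.content)
```

Small changes to the system message (tone, length limits, allowed sources) can noticeably change the completion, which is why prompt construction is treated as a skill in its own right.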
\n Prompt engineering refers to the process of creating instructions called prompts for Large Language Models (LLMs), such as OpenAI's ChatGPT. With the immense potential of LLMs to solve a wide range of tasks, leveraging prompt engineering can empower us to save significant time and facilitate the development of impressive applications. It holds the key to unleashing the full capabilities of these huge models, transforming how we interact with and benefit from them. For more information, see Prompt engineering techniques. \n   \n Vector Databases \n A vector database is a specialized database that goes beyond traditional storage by organizing information to simplify the search for similar items. Instead of merely storing words or numbers, it leverages vector embeddings - unique numerical representations of data. These embeddings capture meaning, context, and relationships. For instance, words are represented as vectors, and similar words have similar vector values. \n The applications of vector databases are numerous and powerful. In language processing, they facilitate the discovery of related documents or sentences. By comparing the vector embeddings of different texts, finding similar or related information becomes faster and more efficient. This capability benefits search engines and recommendation systems, which can suggest relevant articles or products based on user interests. \n In the realm of image analysis, vector databases excel at finding visually similar images. By representing images as vectors, a simple comparison of vector values can identify visually similar images. This capability is particularly valuable for tasks like reverse image search or content-based image retrieval. \n Additionally, vector databases find applications in fraud detection, anomaly detection, and clustering. By comparing vector embeddings of data points, unusual patterns can be detected, and similar items can be grouped together, aiding in effective data analysis and decision-making. The following is a list of Azure services that are suitable for use as a vector database in a retrieval-augmented generation (RAG) solution: \n   \n \n \n Azure Cosmos DB for MongoDB vCore: vCore-based Azure Cosmos DB for MongoDB provides developers with a fully managed MongoDB-compatible database service for building modern applications with a familiar architecture. Developers can enjoy the benefits of native Azure integrations, low total cost of ownership (TCO), and the familiar vCore architecture when migrating existing applications or building new ones. Azure Cosmos DB for MongoDB features built-in vector database capabilities, enabling your data and vectors to be stored together for efficient and accurate vector searches. \n \n \n Azure Cosmos DB for NoSQL: Azure Cosmos DB for NoSQL is a globally distributed database service designed for scalable, high-performance applications. It offers an industry-leading 99.999% Service Level Agreement (SLA), ensuring high availability for your mission-critical applications. With sub-10ms point reads and instant autoscale, it provides lightning-fast data access and seamless scalability. Its flexible, schemaless data model allows for agile and adaptable application development. Moreover, Azure Cosmos DB's built-in vector index using DiskANN enables fast, accurate, and cost-effective vector search at any scale, enhancing the efficiency and effectiveness of your data-driven applications. 
\n \n \n Azure Cosmos DB for PostgreSQL: You can use the natively integrated vector database in Azure Cosmos DB for PostgreSQL, which offers an efficient way to store, index, and search high-dimensional vector data directly alongside other application data. This approach removes the necessity of migrating your data to costlier alternative vector databases and provides a seamless integration of your AI-driven applications. \n \n \n Azure Cache for Redis: Azure Cache for Redis can be used as a vector database by combining it with models like Azure OpenAI in retrieval-augmented generation (RAG) and analysis scenarios. \n \n \n   \n Here is a list of the most popular vector databases: \n   \n \n ChromaDB is a powerful database solution that stores and retrieves vector embeddings efficiently. It is commonly used in AI applications, including chatbots and document analysis systems. By storing embeddings in ChromaDB, users can easily search and retrieve similar vectors, enabling faster and more accurate matching or recommendation processes. ChromaDB offers excellent scalability and high performance, and supports various indexing techniques to optimize search operations. It is a versatile tool that enhances the functionality and efficiency of AI applications that rely on vector embeddings. \n Facebook AI Similarity Search (FAISS) is another widely used vector search library. Developed by Facebook AI Research, it offers highly optimized algorithms for similarity search and clustering of vector embeddings. FAISS is known for its speed and scalability, making it suitable for large-scale applications. It offers different indexing methods, such as flat, IVF (inverted file index), and HNSW (Hierarchical Navigable Small World), to organize and search vector data efficiently. \n SingleStore: SingleStore aims to deliver the world's fastest distributed SQL database for data-intensive applications: SingleStoreDB, which combines transactional and analytical workloads in a single platform. \n Astra DB: DataStax Astra DB is a cloud-native, multi-cloud, fully managed database-as-a-service based on Apache Cassandra, which aims to accelerate application development and reduce deployment time for applications from weeks to minutes. \n Milvus: Milvus is an open-source vector database built to power embedding similarity search and AI applications. Milvus makes unstructured data search more accessible and provides a consistent user experience regardless of the deployment environment. Milvus 2.0 is a cloud-native vector database with storage and computation separated by design. All components in this refactored version of Milvus are stateless to enhance elasticity and flexibility. \n Qdrant: Qdrant is a vector similarity search engine and database for AI applications. In addition to the open-source version, Qdrant is also available as a managed cloud service. It provides a production-ready service with an API to store, search, and manage points - vectors with an additional payload. Qdrant offers extended filtering support, which makes it useful for all sorts of neural-network or semantic-based matching, faceted search, and other applications. \n Pinecone: Pinecone is a fully managed vector database that makes it easy to add vector search to production applications. It combines state-of-the-art vector search libraries, advanced features such as filtering, and distributed infrastructure to provide high performance and reliability at any scale. \n Vespa: Vespa is a platform for applications that combine data and AI online. 
Building such applications on Vespa helps users avoid integration work to get features, and it can scale to support any amount of traffic and data. To deliver that, Vespa provides a broad range of query capabilities, a computation engine with support for modern machine-learned models, hands-off operability, data management, and application development support. It is free and open source to use under the Apache 2.0 license. \n Zilliz: Milvus is an open-source vector database with over 18,409 stars on GitHub and 3.4 million+ downloads. Milvus supports billion-scale vector search and has over 1,000 enterprise users. Zilliz Cloud is a fully managed Milvus service made by the creators of Milvus. As a DBaaS, it simplifies the process of deploying and scaling vector search applications by eliminating the need to create and maintain complex data infrastructure. \n Weaviate: Weaviate is an open-source vector database from the company of the same name, based in Amsterdam. It is used to store data objects and vector embeddings from ML models and can scale to billions of data objects. Users can index billions of data objects to search through and combine multiple search techniques, such as keyword-based and vector search, to provide rich search experiences. \n \n   \n This sample makes use of the ChromaDB vector database, but you can easily modify the code to use another vector database. You can even use Azure Cache for Redis Enterprise to store the vector embeddings and compute vector similarity with high performance and low latency. For more information, see Vector Similarity Search with Azure Cache for Redis Enterprise. \n   \n LangChain \n LangChain is a software framework designed to streamline the development of applications using large language models (LLMs). It serves as a language model integration framework, facilitating various applications like document analysis and summarization, chatbots, and code analysis. \n LangChain's integrations cover an extensive range of systems, tools, and services, making it a comprehensive solution for language model-based applications. LangChain integrates with the major cloud platforms, such as Microsoft Azure, Amazon Web Services (AWS), and Google Cloud, and provides API wrappers for various purposes like news, movie information, and weather, as well as support for Bash, web scraping, and more. It also supports multiple language models, including those from OpenAI, Anthropic, and Hugging Face. Moreover, LangChain offers various functionalities for document handling, code generation, analysis, debugging, and interaction with databases and other data sources. \n   \n Chainlit \n Chainlit is an open-source Python package that is specifically designed to create user interfaces (UIs) for AI applications. It simplifies the process of building interactive chats and interfaces, making developing AI-powered applications faster and more efficient. While Streamlit is a general-purpose UI library, Chainlit is purpose-built for AI applications and seamlessly integrates with other AI technologies such as LangChain, LlamaIndex, and LangFlow. \n With Chainlit, developers can easily create intuitive UIs for their AI models, including ChatGPT-like applications. It provides a user-friendly interface for users to interact with AI models, enabling conversational experiences and information retrieval. 
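To illustrate the programming model that Chainlit exposes, here is a minimal, self-contained sketch of a Chainlit app that simply echoes the user's message. It is not part of the sample; the complete chat applications described later in this article follow the same decorator-based pattern but call the Azure OpenAI Service instead of echoing.

```python
# echo_app.py - minimal Chainlit sketch (illustrative only): echoes the user's message.
import chainlit as cl


@cl.on_chat_start
async def start_chat():
    # Runs once when a new chat session starts.
    await cl.Message(content="Hi! Send me a message and I will echo it back.").send()


@cl.on_message
async def on_message(message: cl.Message):
    # Runs every time the user submits a message in the chat.
    await cl.Message(content=f"You said: {message.content}").send()
```

You could run such a sketch locally with `chainlit run echo_app.py -w`, the same command used later for the sample applications.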
Chainlit also offers unique features, such as displaying the Chain of Thought, which allows users to explore the reasoning process directly within the UI. This feature enhances transparency and enables users to understand how the AI arrives at its responses or recommendations. \n For more information, see the following resources: \n \n Documentation \n Examples \n API Reference \n Cookbook \n \n   \n Deploy the Infrastructure \n Before deploying the Terraform modules in the  terraform/infra  folder, specify a value for the following variables in the terraform.tfvars variable definitions file. \n   \n   \n name_prefix = \"Blue\"\nlocation = \"EastUS\" \n   \n   \n This is the definition of each variable: \n \n name_prefix : specifies a prefix for all the Azure resources. \n location : specifies the region (e.g., EastUS) where the Azure resources will be deployed. \n \n NOTE: Make sure to select a region where Azure OpenAI Service (AOAI) supports both GPT-3.5/GPT-4 models like  gpt-35-turbo-16k  and Embeddings models like  text-embedding-ada-002 . \n   \n OpenAI Module \n The following snippet contains the code from the  terraform/infra/modules/openai/main.tf  Terraform module used to deploy the Azure OpenAI Service. \n   \n   \n resource \"azurerm_cognitive_account\" \"openai\" {\n name = var.name\n location = var.location\n resource_group_name = var.resource_group_name\n kind = \"OpenAI\"\n custom_subdomain_name = var.custom_subdomain_name\n sku_name = var.sku_name\n public_network_access_enabled = var.public_network_access_enabled\n tags = var.tags\n\n identity {\n type = \"SystemAssigned\"\n }\n\n lifecycle {\n ignore_changes = [\n tags\n ]\n }\n}\n\nresource \"azurerm_cognitive_deployment\" \"deployment\" {\n for_each = {for deployment in var.deployments: deployment.name => deployment}\n\n name = each.key\n cognitive_account_id = azurerm_cognitive_account.openai.id\n\n model {\n format = \"OpenAI\"\n name = each.value.model.name\n version = each.value.model.version\n }\n\n scale {\n type = \"Standard\"\n }\n}\n\nresource \"azurerm_monitor_diagnostic_setting\" \"settings\" {\n name = \"DiagnosticsSettings\"\n target_resource_id = azurerm_cognitive_account.openai.id\n log_analytics_workspace_id = var.log_analytics_workspace_id\n\n enabled_log {\n category = \"Audit\"\n\n retention_policy {\n enabled = true\n days = var.log_analytics_retention_days\n }\n }\n\n enabled_log {\n category = \"RequestResponse\"\n\n retention_policy {\n enabled = true\n days = var.log_analytics_retention_days\n }\n }\n\n enabled_log {\n category = \"Trace\"\n\n retention_policy {\n enabled = true\n days = var.log_analytics_retention_days\n }\n }\n\n metric {\n category = \"AllMetrics\"\n\n retention_policy {\n enabled = true\n days = var.log_analytics_retention_days\n }\n }\n} \n   \n   \n Azure Cognitive Services uses custom subdomain names for each resource created through the Azure portal, Azure Cloud Shell, Azure CLI, Bicep, Azure Resource Manager (ARM), or Terraform. Unlike regional endpoints, which were common for all customers in a specific Azure region, custom subdomain names are unique to the resource. Custom subdomain names are required to enable authentication features like Azure Active Directory (Azure AD). We need to specify a custom subdomain for our Azure OpenAI Service, as our chatbot applications will use an Azure AD security token to access it. 
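To see why the custom subdomain matters, consider how a client authenticates with Azure AD instead of an API key: it requests a token for the Cognitive Services scope and sends it to the resource-specific endpoint built from the custom subdomain. The snippet below is a minimal sketch of that flow; the resource name is a placeholder, and the same pattern (via `get_bearer_token_provider`) is used by the chat applications shown later in this article.

```python
# Minimal sketch: call Azure OpenAI through its custom subdomain using an Azure AD token
# instead of an API key. "<custom-subdomain>" is a placeholder for your resource name.
from azure.identity import DefaultAzureCredential, get_bearer_token_provider
from openai import AzureOpenAI

token_provider = get_bearer_token_provider(
    DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
)

client = AzureOpenAI(
    azure_endpoint="https://<custom-subdomain>.openai.azure.com/",  # resource-specific endpoint
    azure_ad_token_provider=token_provider,
    api_version="2023-12-01-preview",
)

response = client.chat.completions.create(
    model="gpt-35-turbo-16k",  # deployment name
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)
```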
By default, the  terraform/infra/modules/openai/main.tf  module sets the value of the  custom_subdomain_name  parameter to the lowercase name of the Azure OpenAI resource. For more information on custom subdomains, see Custom subdomain names for Cognitive Services. \n This Terraform module allows you to pass an array containing the definition of one or more model deployments in the  deployments  variable. For more information on model deployments, see Create a resource and deploy a model using Azure OpenAI. The  openai_deployments  variable in the  terraform/infra/variables.tf  file defines the structure and the default models deployed by the sample: \n   \n   \n variable \"openai_deployments\" {\n description = \"(Optional) Specifies the deployments of the Azure OpenAI Service\"\n type = list(object({\n name = string\n model = object({\n name = string\n version = string\n })\n rai_policy_name = string \n }))\n default = [\n {\n name = \"gpt-35-turbo-16k\"\n model = {\n name = \"gpt-35-turbo-16k\"\n version = \"0613\"\n }\n rai_policy_name = \"\"\n },\n {\n name = \"text-embedding-ada-002\"\n model = {\n name = \"text-embedding-ada-002\"\n version = \"2\"\n }\n rai_policy_name = \"\"\n }\n ] \n} \n   \n   \n Alternatively, you can use the Terraform module for deploying Azure OpenAI Service. \n Private Endpoint Module \n The  terraform/infra/main.tf  module creates Azure Private Endpoints and Azure Private DNS Zones for each of the following resources: \n \n Azure OpenAI Service (AOAI) \n Azure Container Registry (ACR) \n \n In particular, it creates an Azure Private Endpoint and an Azure Private DNS Zone for the Azure OpenAI Service, as shown in the following code snippet: \n   \n   \n module \"openai_private_dns_zone\" {\n source = \"./modules/private_dns_zone\"\n name = \"privatelink.openai.azure.com\"\n resource_group_name = azurerm_resource_group.rg.name\n tags = var.tags\n virtual_networks_to_link = {\n (module.virtual_network.name) = {\n subscription_id = data.azurerm_client_config.current.subscription_id\n resource_group_name = azurerm_resource_group.rg.name\n }\n }\n}\n\nmodule \"openai_private_endpoint\" {\n source = \"./modules/private_endpoint\"\n name = \"${module.openai.name}PrivateEndpoint\"\n location = var.location\n resource_group_name = azurerm_resource_group.rg.name\n subnet_id = module.virtual_network.subnet_ids[var.vm_subnet_name]\n tags = var.tags\n private_connection_resource_id = module.openai.id\n is_manual_connection = false\n subresource_name = \"account\"\n private_dns_zone_group_name = \"AcrPrivateDnsZoneGroup\"\n private_dns_zone_group_ids = [module.openai_private_dns_zone.id]\n}\n \n   \n   \n Below you can read the code of the  terraform/infra/modules/private_endpoint/main.tf  module, which is used to create Azure Private Endpoints: \n   \n   \n resource \"azurerm_private_endpoint\" \"private_endpoint\" {\n name = var.name\n location = var.location\n resource_group_name = var.resource_group_name\n subnet_id = var.subnet_id\n tags = var.tags\n\n private_service_connection {\n name = \"${var.name}Connection\"\n private_connection_resource_id = var.private_connection_resource_id\n is_manual_connection = var.is_manual_connection\n subresource_names = try([var.subresource_name], null)\n request_message = try(var.request_message, null)\n }\n\n private_dns_zone_group {\n name = var.private_dns_zone_group_name\n private_dns_zone_ids = var.private_dns_zone_group_ids\n }\n\n lifecycle {\n ignore_changes = [\n tags\n ]\n }\n} \n   \n   
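Once the private endpoint and private DNS zone are in place, traffic to the Azure OpenAI custom subdomain should resolve to a private IP address from inside the virtual network. A quick, hypothetical way to verify this from a jumpbox or container running in the virtual network is to resolve the fully qualified domain name with the Python standard library (the hostname below is a placeholder for your resource name):

```python
# Quick check (run from inside the virtual network): the Azure OpenAI FQDN should
# resolve to the private IP address of the private endpoint, not a public address.
import socket

hostname = "<openai-resource-name>.openai.azure.com"  # placeholder
ip_address = socket.gethostbyname(hostname)
print(f"{hostname} resolves to {ip_address}")
```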
\n Private DNS Zone Module \n In the following box, you can read the code of the  terraform/infra/modules/private_dns_zone/main.tf  module, which is utilized to create the Azure Private DNS Zones. \n   \n   \n resource \"azurerm_private_dns_zone\" \"private_dns_zone\" {\n name = var.name\n resource_group_name = var.resource_group_name\n tags = var.tags\n\n lifecycle {\n ignore_changes = [\n tags\n ]\n }\n}\n\nresource \"azurerm_private_dns_zone_virtual_network_link\" \"link\" {\n for_each = var.virtual_networks_to_link\n\n name = \"link_to_${lower(basename(each.key))}\"\n resource_group_name = var.resource_group_name\n private_dns_zone_name = azurerm_private_dns_zone.private_dns_zone.name\n virtual_network_id = \"/subscriptions/${each.value.subscription_id}/resourceGroups/${each.value.resource_group_name}/providers/Microsoft.Network/virtualNetworks/${each.key}\"\n\n lifecycle {\n ignore_changes = [\n tags\n ]\n }\n} \n   \n   \n Workload Managed Identity Module \n Below you can read the code of the  terraform/infra/modules/managed_identity/main.tf  module, which is used to create the Azure Managed Identity used by the Azure Container Apps to pull container images from the Azure Container Registry, and by the chat applications to connect to the Azure OpenAI Service. You can use a system-assigned or user-assigned managed identity from Azure Active Directory (Azure AD) to let Azure Container Apps access any Azure AD-protected resource. For more information, see Managed identities in Azure Container Apps. You can pull container images from private repositories in an Azure Container Registry using system-assigned or user-assigned managed identities for authentication to avoid using administrative credentials. For more information, see Azure Container Apps image pull with managed identity. This user-defined managed identity is assigned the Cognitive Services User role on the Azure OpenAI Service namespace and the AcrPull role on the Azure Container Registry (ACR). By assigning the above roles, you grant the user-defined managed identity access to these resources. \n   \n   \n resource \"azurerm_user_assigned_identity\" \"workload_user_assigned_identity\" {\n name = var.name\n resource_group_name = var.resource_group_name\n location = var.location\n tags = var.tags\n\n lifecycle {\n ignore_changes = [\n tags\n ]\n }\n}\n\nresource \"azurerm_role_assignment\" \"cognitive_services_user_assignment\" {\n scope = var.openai_id\n role_definition_name = \"Cognitive Services User\"\n principal_id = azurerm_user_assigned_identity.workload_user_assigned_identity.principal_id\n skip_service_principal_aad_check = true\n}\n\nresource \"azurerm_role_assignment\" \"acr_pull_assignment\" {\n scope = var.acr_id\n role_definition_name = \"AcrPull\"\n principal_id = azurerm_user_assigned_identity.workload_user_assigned_identity.principal_id\n skip_service_principal_aad_check = true\n} \n   \n   \n Deploy the Applications \n Before deploying the Terraform modules in the  terraform/apps  folder, specify a value for the following variables in the terraform.tfvars variable definitions file. 
\n   \n   \n resource_group_name = \"BlueRG\"\ncontainer_app_environment_name = \"BlueEnvironment\"\ncontainer_registry_name = \"BlueRegistry\"\nworkload_managed_identity_name = \"BlueWorkloadIdentity\"\ncontainer_apps = [\n {\n name = \"chatapp\"\n revision_mode = \"Single\"\n ingress = {\n allow_insecure_connections = true\n external_enabled = true\n target_port = 8000\n transport = \"http\"\n traffic_weight = {\n label = \"default\"\n latest_revision = true\n revision_suffix = \"default\"\n percentage = 100\n }\n }\n template = {\n containers = [\n {\n name = \"chat\"\n image = \"chat:v1\"\n cpu = 0.5\n memory = \"1Gi\"\n env = [\n {\n name = \"TEMPERATURE\"\n value = 0.9\n },\n {\n name = \"AZURE_OPENAI_BASE\"\n value = \"https://blueopenai.openai.azure.com/\"\n },\n {\n name = \"AZURE_OPENAI_KEY\"\n value = \"\"\n },\n {\n name = \"AZURE_OPENAI_TYPE\"\n value = \"azure_ad\"\n },\n {\n name = \"AZURE_OPENAI_VERSION\"\n value = \"2023-06-01-preview\"\n },\n {\n name = \"AZURE_OPENAI_DEPLOYMENT\"\n value = \"gpt-35-turbo-16k\"\n },\n {\n name = \"AZURE_OPENAI_MODEL\"\n value = \"gpt-35-turbo-16k\"\n },\n {\n name = \"AZURE_OPENAI_SYSTEM_MESSAGE\"\n value = \"You are a helpful assistant.\"\n },\n {\n name = \"MAX_RETRIES\"\n value = 5\n },\n {\n name = \"BACKOFF_IN_SECONDS\"\n value = \"1\"\n },\n {\n name = \"TOKEN_REFRESH_INTERVAL\"\n value = 2700\n }\n ]\n liveness_probe = {\n failure_count_threshold = 3\n initial_delay = 30\n interval_seconds = 60\n path = \"/\"\n port = 8000\n timeout = 30\n transport = \"HTTP\"\n }\n readiness_probe = {\n failure_count_threshold = 3\n interval_seconds = 60\n path = \"/\"\n port = 8000\n success_count_threshold = 3\n timeout = 30\n transport = \"HTTP\"\n }\n startup_probe = {\n failure_count_threshold = 3\n interval_seconds = 60\n path = \"/\"\n port = 8000\n timeout = 30\n transport = \"HTTP\"\n }\n }\n ]\n min_replicas = 1\n max_replicas = 3\n }\n },\n {\n name = \"docapp\"\n revision_mode = \"Single\"\n ingress = {\n allow_insecure_connections = true\n external_enabled = true\n target_port = 8000\n transport = \"http\"\n traffic_weight = {\n label = \"default\"\n latest_revision = true\n revision_suffix = \"default\"\n percentage = 100\n }\n }\n template = {\n containers = [\n {\n name = \"doc\"\n image = \"doc:v1\"\n cpu = 0.5\n memory = \"1Gi\"\n env = [\n {\n name = \"TEMPERATURE\"\n value = 0.9\n },\n {\n name = \"AZURE_OPENAI_BASE\"\n value = \"https://blueopenai.openai.azure.com/\"\n },\n {\n name = \"AZURE_OPENAI_KEY\"\n value = \"\"\n },\n {\n name = \"AZURE_OPENAI_TYPE\"\n value = \"azure_ad\"\n },\n {\n name = \"AZURE_OPENAI_VERSION\"\n value = \"2023-06-01-preview\"\n },\n {\n name = \"AZURE_OPENAI_DEPLOYMENT\"\n value = \"gpt-35-turbo-16k\"\n },\n {\n name = \"AZURE_OPENAI_MODEL\"\n value = \"gpt-35-turbo-16k\"\n },\n {\n name = \"AZURE_OPENAI_ADA_DEPLOYMENT\"\n value = \"text-embedding-ada-002\"\n },\n {\n name = \"AZURE_OPENAI_SYSTEM_MESSAGE\"\n value = \"You are a helpful assistant.\"\n },\n {\n name = \"MAX_RETRIES\"\n value = 5\n },\n {\n name = \"CHAINLIT_MAX_FILES\"\n value = 10\n },\n {\n name = \"TEXT_SPLITTER_CHUNK_SIZE\"\n value = 1000\n },\n {\n name = \"TEXT_SPLITTER_CHUNK_OVERLAP\"\n value = 10\n },\n {\n name = \"EMBEDDINGS_CHUNK_SIZE\"\n value = 16\n },\n {\n name = \"BACKOFF_IN_SECONDS\"\n value = \"1\"\n },\n {\n name = \"CHAINLIT_MAX_SIZE_MB\"\n value = 100\n },\n {\n name = \"TOKEN_REFRESH_INTERVAL\"\n value = 2700\n }\n ]\n liveness_probe = {\n failure_count_threshold = 3\n initial_delay = 30\n interval_seconds = 
60\n path = \"/\"\n port = 8000\n timeout = 30\n transport = \"HTTP\"\n }\n readiness_probe = {\n failure_count_threshold = 3\n interval_seconds = 60\n path = \"/\"\n port = 8000\n success_count_threshold = 3\n timeout = 30\n transport = \"HTTP\"\n }\n startup_probe = {\n failure_count_threshold = 3\n interval_seconds = 60\n path = \"/\"\n port = 8000\n timeout = 30\n transport = \"HTTP\"\n }\n }\n ]\n min_replicas = 1\n max_replicas = 3\n }\n }] \n   \n   \n This is the definition of each variable: \n \n resource_group_name : specifies the name of the resource group that contains the infrastructure resources: Azure OpenAI Service, Azure Container Registry, Azure Container Apps Environment, Azure Log Analytics, and user-defined managed identity. \n container_app_environment_name : the name of the Azure Container Apps Environment in which to deploy the chat applications. \n container_registry_name : the name of Azure Container Registry used to hold the container images of the chat applications. \n workload_managed_identity_name : the name of the user-defined managed identity used by the chat applications to authenticate with Azure OpenAI Service and Azure Container Registry. \n container_apps : the definition of the two chat applications. The application configuration does not specify the following data because the  container_app  module later defines this information:\n \n image : This field contains the name and tag of the container image but not the login server of the Azure Container Registry. \n identity : The identity of the container app. \n registry : The registry hosting the container image for the application. \n AZURE_CLIENT_ID : The client ID of the user-defined managed identity used by the application to authenticate with Azure OpenAI Service and Azure Container Registry. \n AZURE_OPENAI_TYPE : This environment variable specifies the authentication type with Azure OpenAI Service: if you set the value of the  AZURE_OPENAI_TYPE  environment variable to  azure , you need to specify the OpenAI key as a value for the  AZURE_OPENAI_KEY  environment variable. Instead, if you set the value to  azure_ad  in the application code, assign an Azure AD security token to the  openai_api_key  property. For more information, see How to switch between OpenAI and Azure OpenAI endpoints with Python. \n \n \n \n   \n Container App Module \n The  terraform/apps/modules/container_app/main.tf  module is utilized to create the Azure Container Apps. The module defines and uses the following data source for the Azure Container Registry, Azure Container Apps Environment, and user-defined managed identity created when deploying the infrastructure. These data sources are used to access the properties of these Azure resources. 
\n   \n   \n data \"azurerm_container_app_environment\" \"container_app_environment\" {\n name = var.container_app_environment_name\n resource_group_name = var.resource_group_name\n}\n\ndata \"azurerm_container_registry\" \"container_registry\" {\n name = var.container_registry_name\n resource_group_name = var.resource_group_name\n}\n\ndata \"azurerm_user_assigned_identity\" \"workload_user_assigned_identity\" {\n name = var.workload_managed_identity_name\n resource_group_name = var.resource_group_name\n} \n   \n   \n The module creates and utilizes the following local variables: \n   \n   \n locals {\n identity = {\n type = \"UserAssigned\"\n identity_ids = [data.azurerm_user_assigned_identity.workload_user_assigned_identity.id]\n }\n identity_env = {\n name = \"AZURE_CLIENT_ID\"\n secret_name = null\n value = data.azurerm_user_assigned_identity.workload_user_assigned_identity.client_id\n }\n registry = {\n server = data.azurerm_container_registry.container_registry.login_server\n identity = data.azurerm_user_assigned_identity.workload_user_assigned_identity.id\n }\n} \n   \n   \n This is the explanation of each local variable: \n \n identity : uses the resource ID of the user-defined managed identity to define the  identity  block for each container app deployed by the module. \n identity_env : uses the client ID of the user-defined managed identity to define the value of the  AZURE_CLIENT_ID  environment variable that is appended to the list of environment variables of each container app deployed by the module. \n registry : uses the login server of the Azure Container Registry to define the  registry  block for each container app deployed by the module. \n \n Here is the complete Terraform code of the module: \n   \n   \n data \"azurerm_container_app_environment\" \"container_app_environment\" {\n name = var.container_app_environment_name\n resource_group_name = var.resource_group_name\n}\n\ndata \"azurerm_container_registry\" \"container_registry\" {\n name = var.container_registry_name\n resource_group_name = var.resource_group_name\n}\n\ndata \"azurerm_user_assigned_identity\" \"workload_user_assigned_identity\" {\n name = var.workload_managed_identity_name\n resource_group_name = var.resource_group_name\n}\n\nlocals {\n identity = {\n type = \"UserAssigned\"\n identity_ids = [data.azurerm_user_assigned_identity.workload_user_assigned_identity.id]\n }\n identity_env = {\n name = \"AZURE_CLIENT_ID\"\n secret_name = null\n value = data.azurerm_user_assigned_identity.workload_user_assigned_identity.client_id\n }\n registry = {\n server = data.azurerm_container_registry.container_registry.login_server\n identity = data.azurerm_user_assigned_identity.workload_user_assigned_identity.id\n }\n}\n\nresource \"azurerm_container_app\" \"container_app\" {\n for_each = {for app in var.container_apps: app.name => app}\n\n container_app_environment_id = data.azurerm_container_app_environment.container_app_environment.id\n name = each.key\n resource_group_name = var.resource_group_name\n revision_mode = each.value.revision_mode\n tags = each.value.tags\n\n template {\n max_replicas = each.value.template.max_replicas\n min_replicas = each.value.template.min_replicas\n revision_suffix = each.value.template.revision_suffix\n\n dynamic \"container\" {\n for_each = each.value.template.containers\n\n content {\n cpu = container.value.cpu\n image = \"${data.azurerm_container_registry.container_registry.login_server}/${container.value.image}\"\n memory = container.value.memory\n name = 
container.value.name\n args = container.value.args\n command = container.value.command\n\n dynamic \"env\" {\n for_each = container.value.env == null ? [local.identity_env] : concat(container.value.env, [local.identity_env])\n\n content {\n name = env.value.name\n secret_name = env.value.secret_name\n value = env.value.value\n }\n }\n\n dynamic \"liveness_probe\" {\n for_each = container.value.liveness_probe == null ? [] : [container.value.liveness_probe]\n\n content {\n port = liveness_probe.value.port\n transport = liveness_probe.value.transport\n failure_count_threshold = liveness_probe.value.failure_count_threshold\n host = liveness_probe.value.host\n initial_delay = liveness_probe.value.initial_delay\n interval_seconds = liveness_probe.value.interval_seconds\n path = liveness_probe.value.path\n timeout = liveness_probe.value.timeout\n\n dynamic \"header\" {\n for_each = liveness_probe.value.header == null ? [] : [liveness_probe.value.header]\n\n content {\n name = header.value.name\n value = header.value.value\n }\n }\n }\n }\n\n dynamic \"readiness_probe\" {\n for_each = container.value.readiness_probe == null ? [] : [container.value.readiness_probe]\n\n content {\n port = readiness_probe.value.port\n transport = readiness_probe.value.transport\n failure_count_threshold = readiness_probe.value.failure_count_threshold\n host = readiness_probe.value.host\n interval_seconds = readiness_probe.value.interval_seconds\n path = readiness_probe.value.path\n success_count_threshold = readiness_probe.value.success_count_threshold\n timeout = readiness_probe.value.timeout\n\n dynamic \"header\" {\n for_each = readiness_probe.value.header == null ? [] : [readiness_probe.value.header]\n\n content {\n name = header.value.name\n value = header.value.value\n }\n }\n }\n }\n\n dynamic \"startup_probe\" {\n for_each = container.value.startup_probe == null ? [] : [container.value.startup_probe]\n\n content {\n port = startup_probe.value.port\n transport = startup_probe.value.transport\n failure_count_threshold = startup_probe.value.failure_count_threshold\n host = startup_probe.value.host\n interval_seconds = startup_probe.value.interval_seconds\n path = startup_probe.value.path\n timeout = startup_probe.value.timeout\n\n dynamic \"header\" {\n for_each = startup_probe.value.header == null ? [] : [startup_probe.value.header]\n\n content {\n name = header.value.name\n value = header.value.name\n }\n }\n }\n }\n\n dynamic \"volume_mounts\" {\n for_each = container.value.volume_mounts == null ? [] : [container.value.volume_mounts]\n\n content {\n name = volume_mounts.value.name\n path = volume_mounts.value.path\n }\n }\n }\n }\n\n dynamic \"volume\" {\n for_each = each.value.template.volume == null ? [] : each.value.template.volume\n\n content {\n name = volume.value.name\n storage_name = volume.value.storage_name\n storage_type = volume.value.storage_type\n }\n }\n }\n\n dynamic \"dapr\" {\n for_each = each.value.dapr == null ? [] : [each.value.dapr]\n\n content {\n app_id = dapr.value.app_id\n app_port = dapr.value.app_port\n app_protocol = dapr.value.app_protocol\n }\n }\n\n dynamic \"identity\" {\n for_each = each.value.identity == null ? [local.identity] : [each.value.identity]\n\n content {\n type = identity.value.type\n identity_ids = identity.value.identity_ids\n }\n }\n\n dynamic \"ingress\" {\n for_each = each.value.ingress == null ? 
[] : [each.value.ingress]\n\n content {\n target_port = ingress.value.target_port\n allow_insecure_connections = ingress.value.allow_insecure_connections\n external_enabled = ingress.value.external_enabled\n transport = ingress.value.transport\n\n dynamic \"traffic_weight\" {\n for_each = ingress.value.traffic_weight == null ? [] : [ingress.value.traffic_weight]\n\n content {\n percentage = traffic_weight.value.percentage\n label = traffic_weight.value.label\n latest_revision = traffic_weight.value.latest_revision\n revision_suffix = traffic_weight.value.revision_suffix\n }\n }\n }\n }\n\n dynamic \"registry\" {\n for_each = each.value.registry == null ? [local.registry] : concat(each.value.registry, [local.registry])\n\n content {\n server = registry.value.server\n identity = registry.value.identity\n }\n }\n\n dynamic \"secret\" {\n for_each = nonsensitive(toset([for pair in lookup(var.container_app_secrets, each.key, []) : pair.name]))\n\n content {\n name = secret.key\n value = local.container_app_secrets[each.key][secret.key]\n }\n }\n}\n \n   \n   \n As you can see, the module uses the login server of the Azure Container Registry to create the fully qualified name of the container image of the current container app. \n   \n Managed identities in Azure Container Apps \n Each chat application makes use of a DefaultAzureCredential object to acquire a security token from Azure Active Directory and authenticate and authorize with Azure OpenAI Service (AOAI) and Azure Container Registry (ACR) using the credentials of the user-defined managed identity associated with the container app. \n You can use a managed identity in a running container app to authenticate and authorize with any service that supports Azure AD authentication. With managed identities: \n \n Container apps and applications connect to resources with the managed identity. You don't need to manage credentials in your container apps. \n You can use role-based access control to grant specific permissions to a managed identity. \n System-assigned identities are automatically created and managed. They are deleted when the container app is deleted. \n You can add and delete user-assigned identities and assign them to multiple resources. They are independent of the container app's lifecycle. \n You can use a managed identity to authenticate with a private Azure Container Registry without a username and password to pull containers for your container app. \n You can use a managed identity to create connections for Dapr-enabled applications via Dapr components. \n \n For more information, see Managed identities in Azure Container Apps. The workloads running in a container app can use the Azure Identity client libraries to acquire a security token from Azure Active Directory. You can choose one of the following approaches inside your code: \n \n Use  DefaultAzureCredential , which will attempt to use the  WorkloadIdentityCredential . \n Create a  ChainedTokenCredential  instance that includes  WorkloadIdentityCredential . \n Use  WorkloadIdentityCredential  directly. \n \n The following table provides the minimum package version required for each language's client library. 
\n   \n \n | Language | Library | Minimum Version | Example |\n| --- | --- | --- | --- |\n| .NET | Azure.Identity | 1.9.0 | Link |\n| Go | azidentity | 1.3.0 | Link |\n| Java | azure-identity | 1.9.0 | Link |\n| JavaScript | @azure/identity | 3.2.0 | Link |\n| Python | azure-identity | 1.13.0 | Link | \n \n   \n NOTE: When using the Azure Identity client library with Azure Container Apps, the client ID of the managed identity must be specified. When using  DefaultAzureCredential , you can explicitly specify the client ID of the container app managed identity in the  AZURE_CLIENT_ID  environment variable. \n   \n Simple Chat Application \n The Simple Chat Application is a large language model-based chatbot that allows users to submit general-purpose questions to a GPT model, which generates and streams back human-like and engaging conversational responses. The following picture shows the welcome screen of the chat application. \n \n   \n You can modify the welcome screen in markdown by editing the  chainlit.md  file at the project's root. If you do not want a welcome screen, leave the file empty. The following picture shows what happens when a user submits a new message in the chat. \n \n   \n Chainlit can render messages in markdown format as shown by the following prompt: \n \n   \n Chainlit also provides classes to support the following elements: \n   \n \n Audio: The  Audio  class allows you to display an audio player for a specific audio file in the chatbot user interface. You must provide either a URL or a path or content bytes. \n Avatar: The  Avatar  class allows you to display an avatar image next to a message instead of the author's name. You need to send the element once. Then, if an avatar's name matches an author's name, the avatar will be automatically displayed. You must provide either a URL or a path or content bytes. \n File: The  File  class allows you to display a button that lets users download the content of the file. You must provide either a URL or a path or content bytes. \n Image: The  Image  class is designed to create and handle image elements to be sent and displayed in the chatbot user interface. You must provide either a URL or a path or content bytes. \n Pdf: The  Pdf  class allows you to display a PDF hosted remotely or locally in the chatbot UI. This class either takes a URL of a PDF hosted online or the path of a local PDF. \n Pyplot: The  Pyplot  class allows you to display a Matplotlib pyplot chart in the chatbot UI. This class takes a pyplot figure. \n TaskList: The  TaskList  class allows you to display a task list next to the chatbot UI. \n Text: The  Text  class allows you to display a text element in the chatbot UI. This class takes a string and creates a text element that can be sent to the UI. It supports the markdown syntax for formatting text. You must provide either a URL or a path or content bytes. \n \n   \n You can click the user icon on the UI to access the chat settings and choose, for example, between the light and dark theme. \n \n   \n The application is built in Python. Let's take a look at the individual parts of the application code. In the following section, the Python code starts by importing the necessary packages/modules. 
\n   \n   \n # Import packages\nimport os\nimport sys\nfrom openai import AsyncAzureOpenAI\nimport logging\nimport chainlit as cl\nfrom azure.identity import DefaultAzureCredential, get_bearer_token_provider\nfrom dotenv import load_dotenv\nfrom dotenv import dotenv_values\n\n# Load environment variables from .env file\nif os.path.exists(\".env\"):\n load_dotenv(override=True)\n config = dotenv_values(\".env\") \n   \n   \n These are the libraries used by the chat application: \n   \n \n os : This module provides a way of interacting with the operating system, enabling the code to access environment variables, file paths, etc. \n sys : This module provides access to some variables used or maintained by the interpreter and functions that interact with the interpreter. \n openai : The OpenAI Python library provides convenient access to the OpenAI API from applications written in Python. It includes a pre-defined set of classes for API resources that initialize themselves dynamically from API responses which makes it compatible with a wide range of versions of the OpenAI API. You can find usage examples for the OpenAI Python library in our API reference and the OpenAI Cookbook. \n logging : This module provides flexible logging of messages. \n chainlit as cl : This imports the Chainlit library and aliases it as  cl . Chainlit is used to create the UI of the application. \n from azure.identity import DefaultAzureCredential, get_bearer_token_provider : when the  openai_type  property value is  azure_ad,  a  DefaultAzureCredential  object from the Azure Identity client library for Python is used to acquire security token from the Microsoft Entra ID using the credentials of the user-defined managed identity federated with the service account. \n load_dotenv  and  dotenv_values  from  dotenv : Python-dotenv reads key-value pairs from a  .env  file and can set them as environment variables. It helps in the development of applications following the 12-factor principles. \n \n   \n The  requirements.txt  file under the  src  folder contains the list of packages used by the chat applications. You can restore these packages in your environment using the following command: \n \n pip install -r requirements.txt --upgrade \n \n Next, the code reads the value of the environment variables used to initialize Azure OpenAI objects. In addition, it creates a token provider for Azure OpenAI. \n \n # Read environment variables\ntemperature = float(os.environ.get(\"TEMPERATURE\", 0.9))\napi_base = os.getenv(\"AZURE_OPENAI_BASE\")\napi_key = os.getenv(\"AZURE_OPENAI_KEY\")\napi_type = os.environ.get(\"AZURE_OPENAI_TYPE\", \"azure\")\napi_version = os.environ.get(\"AZURE_OPENAI_VERSION\", \"2023-12-01-preview\")\nengine = os.getenv(\"AZURE_OPENAI_DEPLOYMENT\")\nmodel = os.getenv(\"AZURE_OPENAI_MODEL\")\nsystem_content = os.getenv(\n \"AZURE_OPENAI_SYSTEM_MESSAGE\", \"You are a helpful assistant.\"\n)\nmax_retries = int(os.getenv(\"MAX_RETRIES\", 5))\ntimeout = int(os.getenv(\"TIMEOUT\", 30))\ndebug = os.getenv(\"DEBUG\", \"False\").lower() in (\"true\", \"1\", \"t\")\n\n# Create Token Provider\ntoken_provider = get_bearer_token_provider(\n DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n) \n \n Here's a brief explanation of each variable and related environment variable: \n   \n \n temperature : A float value representing the temperature for Create chat completion method of the OpenAI API. It is fetched from the environment variables with a default value of 0.9. 
\n api_base : The base URL for the OpenAI API. \n api_key : The API key for the OpenAI API. The value of this variable can be null when using a user-assigned managed identity to acquire a security token to access Azure OpenAI. \n api_type : A string representing the type of the OpenAI API. \n api_version : A string representing the version of the OpenAI API. \n engine : The engine used for OpenAI API calls. \n model : The model used for OpenAI API calls. \n system_content : The content of the system message used for OpenAI API calls. \n max_retries : The maximum number of retries for OpenAI API calls. \n timeout : The timeout in seconds. \n debug : When debug is equal to  true ,  t , or  1 , the logger writes the chat completion answers. \n \n   \n In the next section, the code creates the  AsyncAzureOpenAI  client object used by the application to communicate with the Azure OpenAI Service instance. When the  api_type  is equal to  azure , the code initializes the object with the API key. Otherwise, it initializes the  azure_ad_token_provider  property to the token provider created earlier. Then the code creates a logger. \n \n # Configure OpenAI\nif api_type == \"azure\":\n openai = AsyncAzureOpenAI(\n api_version=api_version,\n api_key=api_key,\n azure_endpoint=api_base,\n max_retries=max_retries,\n timeout=timeout,\n )\nelse:\n openai = AsyncAzureOpenAI(\n api_version=api_version,\n azure_endpoint=api_base,\n azure_ad_token_provider=token_provider,\n max_retries=max_retries,\n timeout=timeout\n )\n\n# Configure a logger\nlogging.basicConfig(\n stream=sys.stdout,\n format=\"[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s\",\n level=logging.INFO,\n)\nlogger = logging.getLogger(__name__) \n \n The backoff time is calculated using the  backoff_in_seconds  and  attempt  variables. It follows the formula  backoff_in_seconds * 2 ** attempt + random.uniform(0, 1) . This formula increases the backoff time exponentially with each attempt and adds a random value between 0 and 1 to avoid synchronized retries. \n Next, the code defines a function called  start_chat  that is used to initialize the UI when the user connects to the application or clicks the  New Chat  button. \n   \n   \n @cl.on_chat_start\nasync def start_chat():\n await cl.Avatar(\n name=\"Chatbot\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"Error\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"User\",\n url=\"https://media.architecturaldigest.com/photos/5f241de2c850b2a36b415024/master/w_1600%2Cc_limit/Luke-logo.png\",\n ).send()\n cl.user_session.set(\n \"message_history\",\n [{\"role\": \"system\", \"content\": system_content}],\n )\n \n   \n   \n Here is a brief explanation of the function steps: \n   \n \n cl.on_chat_start : The on_chat_start decorator registers a callback function  start_chat()  to be called when the Chainlit chat starts. It is used to set up the chat and send avatars for the Chatbot, Error, and User participants in the chat. \n cl.Avatar() : the Avatar class allows you to display an avatar image next to a message instead of the author name. You need to send the element once. Then, if the name of an avatar matches the name of an author, the avatar will be automatically displayed. You must provide either a URL or a path or content bytes. \n cl.user_session.set() : This API call sets a value in the user_session dictionary. 
In this case, it initializes the  message_history  in the user's session with a system content message, which indicates the start of the chat. \n \n Finally, the application defines the method called whenever the user sends a new message in the chat. \n \n @cl.on_message\nasync def on_message(message: cl.Message):\n message_history = cl.user_session.get(\"message_history\")\n message_history.append({\"role\": \"user\", \"content\": message.content})\n logger.info(\"Question: [%s]\", message.content)\n\n # Create the Chainlit response message\n msg = cl.Message(content=\"\")\n\n async for stream_resp in await openai.chat.completions.create(\n model=model,\n messages=message_history,\n temperature=temperature,\n stream=True,\n ):\n if stream_resp and len(stream_resp.choices) > 0:\n token = stream_resp.choices[0].delta.content or \"\"\n await msg.stream_token(token)\n\n if debug:\n logger.info(\"Answer: [%s]\", msg.content)\n\n message_history.append({\"role\": \"assistant\", \"content\": msg.content})\n await msg.send() \n \n Here is a detailed explanation of the function steps: \n   \n \n cl.on_message : The on_message decorator registers a callback function  on_message(message: cl.Message)  to be called when the user submits a new message in the chat. It is the main function responsible for handling the chat logic. \n cl.user_session.get() : This API call retrieves a value from the user's session data stored in the user_session dictionary. In this case, it fetches the  message_history  from the user's session to maintain the chat history. \n message_history.append() : This API call appends a new message to the  message_history  list. It is used to add the user's message and the assistant's response to the chat history. \n cl.Message() : This API call creates a Chainlit Message object. The  Message  class is designed to send, stream, edit, or remove messages in the chatbot user interface. In this sample, the  Message  object is used to stream the OpenAI response in the chat. \n msg.stream_token() : The stream_token method of the Message class streams a token to the response message. It is used to send the response from the OpenAI Chat API in chunks to ensure real-time streaming in the chat. \n await openai.chat.completions.create() : This API call sends a message to the OpenAI Chat API in an asynchronous mode and streams the response. It uses the provided  message_history  as context for generating the assistant's response. \n \n Below, you can read the complete code of the application. 
\n \n # Import packages\nimport os\nimport sys\nfrom openai import AsyncAzureOpenAI\nimport logging\nimport chainlit as cl\nfrom azure.identity import DefaultAzureCredential, get_bearer_token_provider\nfrom dotenv import load_dotenv\nfrom dotenv import dotenv_values\n\n# Load environment variables from .env file\nif os.path.exists(\".env\"):\n load_dotenv(override=True)\n config = dotenv_values(\".env\")\n\n# Read environment variables\ntemperature = float(os.environ.get(\"TEMPERATURE\", 0.9))\napi_base = os.getenv(\"AZURE_OPENAI_BASE\")\napi_key = os.getenv(\"AZURE_OPENAI_KEY\")\napi_type = os.environ.get(\"AZURE_OPENAI_TYPE\", \"azure\")\napi_version = os.environ.get(\"AZURE_OPENAI_VERSION\", \"2023-12-01-preview\")\nengine = os.getenv(\"AZURE_OPENAI_DEPLOYMENT\")\nmodel = os.getenv(\"AZURE_OPENAI_MODEL\")\nsystem_content = os.getenv(\n \"AZURE_OPENAI_SYSTEM_MESSAGE\", \"You are a helpful assistant.\"\n)\nmax_retries = int(os.getenv(\"MAX_RETRIES\", 5))\ntimeout = int(os.getenv(\"TIMEOUT\", 30))\ndebug = os.getenv(\"DEBUG\", \"False\").lower() in (\"true\", \"1\", \"t\")\n\n# Create Token Provider\ntoken_provider = get_bearer_token_provider(\n DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n)\n\n# Configure OpenAI\nif api_type == \"azure\":\n openai = AsyncAzureOpenAI(\n api_version=api_version,\n api_key=api_key,\n azure_endpoint=api_base,\n max_retries=max_retries,\n timeout=timeout,\n )\nelse:\n openai = AsyncAzureOpenAI(\n api_version=api_version,\n azure_endpoint=api_base,\n azure_ad_token_provider=token_provider,\n max_retries=max_retries,\n timeout=timeout,\n )\n\n# Configure a logger\nlogging.basicConfig(\n stream=sys.stdout,\n format=\"[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s\",\n level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)\n\n\n@cl.on_chat_start\nasync def start_chat():\n await cl.Avatar(\n name=\"Chatbot\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"Error\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"You\",\n url=\"https://media.architecturaldigest.com/photos/5f241de2c850b2a36b415024/master/w_1600%2Cc_limit/Luke-logo.png\",\n ).send()\n cl.user_session.set(\n \"message_history\",\n [{\"role\": \"system\", \"content\": system_content}],\n )\n\n\n@cl.on_message\nasync def on_message(message: cl.Message):\n message_history = cl.user_session.get(\"message_history\")\n message_history.append({\"role\": \"user\", \"content\": message.content})\n logger.info(\"Question: [%s]\", message.content)\n\n # Create the Chainlit response message\n msg = cl.Message(content=\"\")\n\n async for stream_resp in await openai.chat.completions.create(\n model=model,\n messages=message_history,\n temperature=temperature,\n stream=True,\n ):\n if stream_resp and len(stream_resp.choices) > 0:\n token = stream_resp.choices[0].delta.content or \"\"\n await msg.stream_token(token)\n\n if debug:\n logger.info(\"Answer: [%s]\", msg.content)\n\n message_history.append({\"role\": \"assistant\", \"content\": msg.content})\n await msg.send() \n \n You can run the application locally using the following command. The  -w  flag` indicates auto-reload whenever we make changes live in our application code. \n \n chainlit run app.py -w \n \n Documents QA Chat \n The Documents QA Chat application allows users to submit up to 10  .pdf  and  .docx  documents. 
The application processes the uploaded documents to create vector embeddings. These embeddings are stored in the ChromaDB vector database for efficient retrieval. Users can pose questions about the uploaded documents and view the Chain of Thought, enabling easy exploration of the reasoning process. The completion message contains links to the text chunks in the documents that were used as a source for the response. The following picture shows the chat application interface. As you can see, you can click the  Browse  button and choose up to 10  .pdf  and  .docx  documents to upload. Alternatively, you can just drag and drop the files over the control area. \n \n   \n After uploading the documents, the application creates and stores the embeddings in the ChromaDB vector database. During this phase, the UI shows the message  Processing <file-1>, <file-2>... , as shown in the following picture: \n \n   \n When the code has finished creating the embeddings, the UI is ready to receive the user's questions: \n \n   \n As your chat application grows in complexity, understanding the individual steps for generating a specific answer can become challenging. To solve this issue, Chainlit allows you to easily explore the reasoning process right from the user interface using the Chain of Thought. If you are using the LangChain integration, every intermediary step is automatically sent and displayed in the Chainlit UI; you just need to click and expand the steps, as shown in the following picture: \n \n   \n To see the text chunks that were used by the large language model to generate the response, you can click the source links, as shown in the following picture: \n \n   \n In the Chain of Thought, below the step used to invoke the OpenAI chat completion API, you can find an  Inspect in prompt playground  icon. Clicking on it opens the Prompt Playground dialog, which allows you to modify and iterate on the prompt as needed. \n \n   \n As shown in the following picture, you can click and edit the value of the highlighted variables in the user prompt: \n \n   \n You can then click and edit the user question. \n \n   \n Then, you can click the submit button to test the effect of your changes, as shown in the following picture. \n \n   \n Let's take a look at the individual parts of the application code. In the following section, the Python code starts by importing the necessary packages/modules. \n \n # Import packages\nimport os\nimport io\nimport sys\nimport logging\nimport chainlit as cl\nfrom chainlit.playground.config import AzureChatOpenAI\nfrom pypdf import PdfReader\nfrom docx import Document\nfrom azure.identity import DefaultAzureCredential, get_bearer_token_provider\nfrom dotenv import load_dotenv\nfrom dotenv import dotenv_values\nfrom langchain.embeddings import AzureOpenAIEmbeddings\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\nfrom langchain.vectorstores.chroma import Chroma\nfrom langchain.chains import RetrievalQAWithSourcesChain\nfrom langchain.chat_models import AzureChatOpenAI\nfrom langchain.prompts.chat import (\n ChatPromptTemplate,\n SystemMessagePromptTemplate,\n HumanMessagePromptTemplate,\n)\n\n# Load environment variables from .env file\nif os.path.exists(\".env\"):\n load_dotenv(override=True)\n config = dotenv_values(\".env\") \n \n These are the libraries used by the chat application: \n   \n \n os : This module provides a way of interacting with the operating system, enabling the code to access environment variables, file paths, etc. 
\n sys : This module provides access to some variables used or maintained by the interpreter and functions that interact with the interpreter. \n io : This module provides Python's core tools for working with streams, such as the in-memory binary streams used to read the content of the uploaded files. \n openai : The OpenAI Python library provides convenient access to the OpenAI API from applications written in the Python language. It includes a pre-defined set of classes for API resources that initialize themselves dynamically from API responses, which makes it compatible with a wide range of versions of the OpenAI API. You can find usage examples for the OpenAI Python library in our API reference and the OpenAI Cookbook. \n logging : This module provides flexible logging of messages. \n chainlit as cl : This imports the Chainlit library and aliases it as  cl . Chainlit is used to create the UI of the application. \n AzureChatOpenAI  from  chainlit.playground.config : you need to import  AzureChatOpenAI  from  chainlit.playground.config  to use the Chainlit Playground. \n DefaultAzureCredential  from  azure.identity : when the  openai_type  property value is  azure_ad , a  DefaultAzureCredential  object from the Azure Identity client library for Python (version 1.13.0 or later) is used to acquire a security token from Microsoft Entra ID using the credentials of the user-defined managed identity, whose client ID is defined in the  AZURE_CLIENT_ID  environment variable. \n load_dotenv  and  dotenv_values  from  dotenv : Python-dotenv reads key-value pairs from a  .env  file and can set them as environment variables. It helps in the development of applications following the 12-factor principles. \n langchain : Large language models (LLMs) are emerging as a transformative technology, enabling developers to build applications that they previously could not. However, using these LLMs in isolation is often insufficient for creating a truly powerful app - the real power comes when you can combine them with other sources of computation or knowledge. The LangChain library aims to assist in the development of those types of applications. \n \n The  requirements.txt  file under the  src  folder contains the list of packages used by the chat applications. You can restore these packages in your environment using the following command: \n \n pip install -r requirements.txt --upgrade \n \n Next, the code reads environment variables and configures the OpenAI settings. 
````python
# Read environment variables
temperature = float(os.environ.get("TEMPERATURE", 0.9))
api_base = os.getenv("AZURE_OPENAI_BASE")
api_key = os.getenv("AZURE_OPENAI_KEY")
api_type = os.environ.get("AZURE_OPENAI_TYPE", "azure")
api_version = os.environ.get("AZURE_OPENAI_VERSION", "2023-12-01-preview")
chat_completion_deployment = os.getenv("AZURE_OPENAI_DEPLOYMENT")
embeddings_deployment = os.getenv("AZURE_OPENAI_ADA_DEPLOYMENT")
model = os.getenv("AZURE_OPENAI_MODEL")
max_size_mb = int(os.getenv("CHAINLIT_MAX_SIZE_MB", 100))
max_files = int(os.getenv("CHAINLIT_MAX_FILES", 10))
text_splitter_chunk_size = int(os.getenv("TEXT_SPLITTER_CHUNK_SIZE", 1000))
text_splitter_chunk_overlap = int(os.getenv("TEXT_SPLITTER_CHUNK_OVERLAP", 10))
embeddings_chunk_size = int(os.getenv("EMBEDDINGS_CHUNK_SIZE", 16))
max_retries = int(os.getenv("MAX_RETRIES", 5))
retry_min_seconds = int(os.getenv("RETRY_MIN_SECONDS", 1))
retry_max_seconds = int(os.getenv("RETRY_MAX_SECONDS", 5))
timeout = int(os.getenv("TIMEOUT", 30))
debug = os.getenv("DEBUG", "False").lower() in ("true", "1", "t")

# Configure system prompt
system_template = """Use the following pieces of context to answer the users question.
If you don't know the answer, just say that you don't know, don't try to make up an answer.
ALWAYS return a "SOURCES" part in your answer.
The "SOURCES" part should be a reference to the source of the document from which you got your answer.

Example of your response should be:

```
The answer is foo
SOURCES: xyz
```

Begin!
----------------
{summaries}"""
messages = [
    SystemMessagePromptTemplate.from_template(system_template),
    HumanMessagePromptTemplate.from_template("{question}"),
]
prompt = ChatPromptTemplate.from_messages(messages)
chain_type_kwargs = {"prompt": prompt}

# Configure a logger
logging.basicConfig(
    stream=sys.stdout,
    format="[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s",
    level=logging.INFO,
)
logger = logging.getLogger(__name__)

# Create Token Provider
if api_type == "azure_ad":
    token_provider = get_bearer_token_provider(
        DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default"
    )

# Setting the environment variables for the playground
if api_type == "azure":
    os.environ["AZURE_OPENAI_API_KEY"] = api_key
os.environ["AZURE_OPENAI_API_VERSION"] = api_version
os.environ["AZURE_OPENAI_ENDPOINT"] = api_base
os.environ["AZURE_OPENAI_DEPLOYMENT_NAME"] = chat_completion_deployment
````

Here's a brief explanation of each variable and the related environment variable:

- `temperature`: a float value representing the temperature passed to the Create chat completion method of the OpenAI API. It is read from the environment variables with a default value of 0.9.
- `api_base`: the base URL of the Azure OpenAI resource.
- `api_key`: the API key of the Azure OpenAI resource. This value can be null when a user-assigned managed identity is used to acquire a security token to access Azure OpenAI.
- `api_type`: a string representing the type of the OpenAI API.
- `api_version`: a string representing the version of the OpenAI API.
- `chat_completion_deployment`: the name of the Azure OpenAI GPT model deployment used for chat completions.
- `embeddings_deployment`: the name of the Azure OpenAI deployment used for embeddings.
- `model`: the model used for chat completion calls (for example, `gpt-35-turbo-16k`).
- `max_size_mb`: the maximum size, in MB, of the uploaded documents.
- `max_files`: the maximum number of documents that can be uploaded.
- `text_splitter_chunk_size`: the maximum chunk size used by the `RecursiveCharacterTextSplitter` object.
- `text_splitter_chunk_overlap`: the maximum chunk overlap used by the `RecursiveCharacterTextSplitter` object.
- `embeddings_chunk_size`: the maximum chunk size used by the `OpenAIEmbeddings` object.
- `max_retries`: the maximum number of retries for OpenAI API calls.
- `retry_min_seconds`: the minimum number of seconds before a retry.
- `retry_max_seconds`: the maximum number of seconds before a retry.
- `timeout`: the timeout in seconds.
- `system_template`: the content of the system message used for OpenAI API calls.
- `debug`: when debug is equal to `true`, `t`, or `1`, the logger switches to verbose mode.

Next, the code defines a function called `start_chat` that is used to initialize the chat when the user connects to the application or clicks the `New Chat` button.

```python
@cl.on_chat_start
async def start_chat():
    # Sending Avatars for Chat Participants
    await cl.Avatar(
        name="Chatbot",
        url="https://cdn-icons-png.flaticon.com/512/8649/8649595.png"
    ).send()
    await cl.Avatar(
        name="Error",
        url="https://cdn-icons-png.flaticon.com/512/8649/8649595.png"
    ).send()
    await cl.Avatar(
        name="You",
        url="https://media.architecturaldigest.com/photos/5f241de2c850b2a36b415024/master/w_1600%2Cc_limit/Luke-logo.png"
    ).send()
```

Here is a brief explanation of the function steps:

- `cl.on_chat_start`: the on_chat_start decorator registers a callback function `start_chat()` to be called when the Chainlit chat starts. It is used to set up the chat and send avatars for the Chatbot, Error, and You participants in the chat.
- `cl.Avatar()`: the Avatar class allows you to display an avatar image next to a message instead of the author name. You need to send the element once; from then on, if the name of an avatar matches the name of an author, the avatar is automatically displayed. You must provide either a URL, a path, or content bytes.

The following code is used to initialize the large language model (LLM) chain used to reply to questions on the content of the uploaded documents.

```python
    # Initialize the file list to None
    files = None

    # Wait for the user to upload a file
    while files == None:
        files = await cl.AskFileMessage(
            content=f"Please upload up to {max_files} `.pdf` or `.docx` files to begin.",
            accept=[
                "application/pdf",
                "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
            ],
            max_size_mb=max_size_mb,
            max_files=max_files,
            timeout=86400,
            raise_on_timeout=False,
        ).send()
```

The AskFileMessage API call prompts the user to upload up to a specified number of `.pdf` or `.docx` files. The uploaded files are stored in the `files` variable. The process continues until the user uploads files. For more information, see AskFileMessage.

The following code processes each uploaded file by extracting its content.

- The text content of each file is stored in the list `all_texts`.
- The code performs text processing and chunking: it checks the file extension and reads the file content accordingly, depending on whether it is a `.pdf` or a `.docx` document.
- The text content is split into smaller chunks using the RecursiveCharacterTextSplitter LangChain object (a minimal splitter sketch follows this list).
- Metadata is created for each chunk and stored in the `metadatas` list.
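Before walking through the full processing loop below, the following is a small, self-contained sketch of what the splitter and the metadata scheme do. The chunk size and overlap are toy values chosen for illustration; they are not the application's configuration.

```python
# Minimal sketch: split a string into overlapping chunks with LangChain's
# RecursiveCharacterTextSplitter (toy chunk_size/chunk_overlap values).
from langchain.text_splitter import RecursiveCharacterTextSplitter

sample_text = "Azure Container Apps is a fully managed serverless container service. " * 5

text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=80,      # maximum characters per chunk (illustrative value)
    chunk_overlap=20,   # characters shared between consecutive chunks (illustrative value)
)
chunks = text_splitter.split_text(sample_text)

# Each chunk gets a simple positional source id, mirroring the "<index>-pl"
# metadata scheme used by the application.
metadatas = [{"source": f"{i}-pl"} for i in range(len(chunks))]
print(len(chunks), metadatas[:2])
```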
```python
    # Create a message to inform the user that the files are being processed
    content = ""
    if len(files) == 1:
        content = f"Processing `{files[0].name}`..."
    else:
        files_names = [f"`{f.name}`" for f in files]
        content = f"Processing {', '.join(files_names)}..."
    logger.info(content)
    msg = cl.Message(content=content, author="Chatbot")
    await msg.send()

    # Create a list to store the texts of each file
    all_texts = []

    # Process each file uploaded by the user
    for file in files:
        # Read file contents
        with open(file.path, "rb") as uploaded_file:
            file_contents = uploaded_file.read()

        logger.info("[%d] bytes were read from %s", len(file_contents), file.path)

        # Create an in-memory buffer from the file content
        bytes = io.BytesIO(file_contents)

        # Get file extension
        extension = file.name.split(".")[-1]

        # Initialize the text variable
        text = ""

        # Read the file
        if extension == "pdf":
            reader = PdfReader(bytes)
            for i in range(len(reader.pages)):
                text += reader.pages[i].extract_text()
                if debug:
                    logger.info("[%s] read from %s", text, file.path)
        elif extension == "docx":
            doc = Document(bytes)
            paragraph_list = []
            for paragraph in doc.paragraphs:
                paragraph_list.append(paragraph.text)
                if debug:
                    logger.info("[%s] read from %s", paragraph.text, file.path)
            text = "\n".join(paragraph_list)

        # Split the text into chunks
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=text_splitter_chunk_size,
            chunk_overlap=text_splitter_chunk_overlap,
        )
        texts = text_splitter.split_text(text)

        # Add the chunks and metadata to the list
        all_texts.extend(texts)

    # Create a metadata for each chunk
    metadatas = [{"source": f"{i}-pl"} for i in range(len(all_texts))]
```

The next piece of code performs the following steps:

- It creates an AzureOpenAIEmbeddings object configured to use the embeddings model deployed in the Azure OpenAI Service to create embeddings from the text chunks.
- It creates a ChromaDB vector database using the `OpenAIEmbeddings` object, the text chunks list, and the metadata list.
- It creates an AzureChatOpenAI LangChain object based on the GPT model hosted in Azure OpenAI Service.
- It creates a chain using the RetrievalQAWithSourcesChain.from_chain_type API call, which combines the LLM with the Chroma vector store exposed as a retriever.
- It stores the metadata and text chunks in the user session using the `cl.user_session.set()` API call.
- It creates a message to inform the user that the files are ready for queries, and finally stores the chain.
- The `cl.user_session.set("chain", chain)` call stores the LLM chain in the user_session dictionary for later use.

The next section creates the LangChain LLM chain.
```python
    # Create a Chroma vector store
    if api_type == "azure":
        embeddings = AzureOpenAIEmbeddings(
            openai_api_version=api_version,
            openai_api_type=api_type,
            openai_api_key=api_key,
            azure_endpoint=api_base,
            azure_deployment=embeddings_deployment,
            max_retries=max_retries,
            retry_min_seconds=retry_min_seconds,
            retry_max_seconds=retry_max_seconds,
            chunk_size=embeddings_chunk_size,
            timeout=timeout,
        )
    else:
        embeddings = AzureOpenAIEmbeddings(
            openai_api_version=api_version,
            openai_api_type=api_type,
            azure_endpoint=api_base,
            azure_ad_token_provider=token_provider,
            azure_deployment=embeddings_deployment,
            max_retries=max_retries,
            retry_min_seconds=retry_min_seconds,
            retry_max_seconds=retry_max_seconds,
            chunk_size=embeddings_chunk_size,
            timeout=timeout,
        )

    # Create a Chroma vector store
    db = await cl.make_async(Chroma.from_texts)(
        all_texts, embeddings, metadatas=metadatas
    )

    # Create an AzureChatOpenAI llm
    if api_type == "azure":
        llm = AzureChatOpenAI(
            openai_api_type=api_type,
            openai_api_version=api_version,
            openai_api_key=api_key,
            azure_endpoint=api_base,
            temperature=temperature,
            azure_deployment=chat_completion_deployment,
            streaming=True,
            max_retries=max_retries,
            timeout=timeout,
        )
    else:
        llm = AzureChatOpenAI(
            openai_api_type=api_type,
            openai_api_version=api_version,
            azure_endpoint=api_base,
            api_key=api_key,
            temperature=temperature,
            azure_deployment=chat_completion_deployment,
            azure_ad_token_provider=token_provider,
            streaming=True,
            max_retries=max_retries,
            timeout=timeout,
        )

    # Create a chain that uses the Chroma vector store
    chain = RetrievalQAWithSourcesChain.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=db.as_retriever(),
        return_source_documents=True,
        chain_type_kwargs=chain_type_kwargs,
    )

    # Save the metadata and texts in the user session
    cl.user_session.set("metadatas", metadatas)
    cl.user_session.set("texts", all_texts)

    # Create a message to inform the user that the files are ready for queries
    content = ""
    if len(files) == 1:
        content = f"`{files[0].name}` processed. You can now ask questions!"
        logger.info(content)
    else:
        files_names = [f"`{f.name}`" for f in files]
        content = f"{', '.join(files_names)} processed. You can now ask questions."
        logger.info(content)
    msg.content = content
    msg.author = "Chatbot"
    await msg.update()

    # Store the chain in the user session
    cl.user_session.set("chain", chain)
```

The following code handles the communication with the OpenAI API and incorporates retry logic in case the API calls fail due to specific errors.

- `cl.on_message`: the on_message decorator registers a callback function `main(message: cl.Message)` to be called when the user submits a new message in the chat. It is the main function responsible for handling the chat logic (a minimal form of this pattern is sketched right after this list).
- `cl.user_session.get("chain")`: this call retrieves the LLM chain from the user_session dictionary.
- `cl.AsyncLangchainCallbackHandler`: this call creates a LangChain callback handler.
- `await chain.acall`: the asynchronous call to RetrievalQAWithSourcesChain.acall executes the LLM chain with the user message as input.
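Before looking at the application's actual handler below, here is the smallest possible form of the on_message pattern: a standalone Chainlit handler that simply echoes the user's message back. It is illustrative only and is not part of the application code.

```python
# Minimal sketch of the @cl.on_message pattern (echo bot, illustrative only).
import chainlit as cl

@cl.on_message
async def echo(message: cl.Message):
    # message.content holds the text typed by the user;
    # cl.Message(...).send() displays a reply in the Chainlit UI.
    await cl.Message(content=f"You said: {message.content}").send()
```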
```python
@cl.on_message
async def main(message: cl.Message):
    # Retrieve the chain from the user session
    chain = cl.user_session.get("chain")

    # Create a callback handler
    cb = cl.AsyncLangchainCallbackHandler()

    # Get the response from the chain
    response = await chain.acall(message.content, callbacks=[cb])
    logger.info("Question: [%s]", message.content)
```

The code below extracts the answer and sources from the API response and formats them to be sent as a message.

- The `answer` and `sources` are obtained from the `response` dictionary.
- The sources are then processed to find the corresponding texts in the user session metadata (`metadatas`), and `source_elements` are created using `cl.Text()`.
- `cl.Message().send()`: the Message API creates and displays a message containing the answer and the sources, if available.
- The last command sets the `AZURE_OPENAI_API_KEY` environment variable to the security token returned by the token provider; this value is used by the Chainlit Prompt Playground when Azure OpenAI is accessed with a managed identity.

```python
    # Get the answer and sources from the response
    answer = response["answer"]
    sources = response["sources"].strip()
    source_elements = []

    if debug:
        logger.info("Answer: [%s]", answer)

    # Get the metadata and texts from the user session
    metadatas = cl.user_session.get("metadatas")
    all_sources = [m["source"] for m in metadatas]
    texts = cl.user_session.get("texts")

    if sources:
        found_sources = []

        # Add the sources to the message
        for source in sources.split(","):
            source_name = source.strip().replace(".", "")
            # Get the index of the source
            try:
                index = all_sources.index(source_name)
            except ValueError:
                continue
            text = texts[index]
            found_sources.append(source_name)
            # Create the text element referenced in the message
            source_elements.append(cl.Text(content=text, name=source_name))

        if found_sources:
            answer += f"\nSources: {', '.join(found_sources)}"
        else:
            answer += "\nNo sources found"

    await cl.Message(content=answer, elements=source_elements).send()

    # Setting the AZURE_OPENAI_API_KEY environment variable for the playground
    if api_type == "azure_ad":
        os.environ["AZURE_OPENAI_API_KEY"] = token_provider()
```

Together, the snippets above make up the complete code of the chat application.
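To make the source-matching logic easier to follow in isolation, here is a small, dependency-free sketch that uses toy data. The variable names mirror the ones used by the handler, but the chunk texts and the sources string are invented for illustration.

```python
# Minimal sketch of the source-matching logic (toy data, no Chainlit involved).
# The LLM is instructed to end its answer with a "SOURCES:" list such as "0-pl, 2-pl";
# each id is looked up in the metadata created at indexing time to recover the chunk text.
texts = ["First chunk of text.", "Second chunk of text.", "Third chunk of text."]
metadatas = [{"source": f"{i}-pl"} for i in range(len(texts))]

sources = "0-pl, 2-pl."            # example "sources" string returned by the chain
all_sources = [m["source"] for m in metadatas]

found_sources, source_elements = [], []
for source in sources.split(","):
    source_name = source.strip().replace(".", "")   # normalize "2-pl." -> "2-pl"
    try:
        index = all_sources.index(source_name)
    except ValueError:
        continue                                     # skip ids the model invented
    found_sources.append(source_name)
    source_elements.append({"name": source_name, "content": texts[index]})

print(found_sources)        # ['0-pl', '2-pl']
print(source_elements[0])   # {'name': '0-pl', 'content': 'First chunk of text.'}
```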
You can run the application locally using the following command. The `-w` flag enables auto-reload whenever you make live changes to the application code.
```
chainlit run app.py -w
```

Build Docker Images

You can use the `src/01-build-docker-images.sh` Bash script to build the Docker container image for each container app.

```bash
#!/bin/bash

# Variables
source ./00-variables.sh

# Use a for loop to build the docker images using the array index
for index in ${!images[@]}; do
  # Build the docker image
  docker build -t ${images[$index]}:$tag -f Dockerfile --build-arg FILENAME=${filenames[$index]} --build-arg PORT=$port .
done
```

Before running any script in the `src` folder, make sure to customize the value of the variables inside the `00-variables.sh` file located in the same folder. This file is sourced by all the scripts and contains the following variables:

```bash
# Variables

# Azure Container Registry
prefix="Blue"
acrName="${prefix}Registry"
acrResourceGroupName="${prefix}RG"
location="EastUS"

# Python Files
docAppFile="doc.py"
chatAppFile="chat.py"

# Docker Images
docImageName="doc"
chatImageName="chat"
tag="v1"
port="8000"

# Arrays
images=($docImageName $chatImageName)
filenames=($docAppFile $chatAppFile)
```

The `Dockerfile` under the `src` folder is parametric and can be used to build the container images of both chat applications.

```dockerfile
# app/Dockerfile

# Stage 1 - Install build dependencies

# A Dockerfile must start with a FROM instruction that sets the base image for the container.
# The Python images come in many flavors, each designed for a specific use case.
# The python:3.11-slim image is a good base image for most applications.
# It is a minimal image built on top of Debian Linux and includes only the necessary packages to run Python.
# The slim image is a good choice because it is small and contains only the packages needed to run Python.
# For more information, see:
# * https://hub.docker.com/_/python
# * https://docs.streamlit.io/knowledge-base/tutorials/deploy/docker
FROM python:3.11-slim AS builder

# The WORKDIR instruction sets the working directory for any RUN, CMD, ENTRYPOINT, COPY and ADD instructions that follow it in the Dockerfile.
# If the WORKDIR doesn't exist, it will be created even if it's not used in any subsequent Dockerfile instruction.
# For more information, see: https://docs.docker.com/engine/reference/builder/#workdir
WORKDIR /app

# Set environment variables.
# The ENV instruction sets the environment variable <key> to the value <value>.
# This value will be in the environment of all "descendant" Dockerfile commands and can be replaced inline in many as well.
# For more information, see: https://docs.docker.com/engine/reference/builder/#env
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1

# Install git so that we can clone the app code from a remote repo using the RUN instruction.
# The RUN command has two forms:
# * RUN <command> (shell form, the command is run in a shell, which by default is /bin/sh -c on Linux or cmd /S /C on Windows)
# * RUN ["executable", "param1", "param2"] (exec form)
# The RUN instruction will execute any commands in a new layer on top of the current image and commit the results.
# The resulting committed image will be used for the next step in the Dockerfile.
# For more information, see: https://docs.docker.com/engine/reference/builder/#run
RUN apt-get update && apt-get install -y \
    build-essential \
    curl \
    software-properties-common \
    git \
    && rm -rf /var/lib/apt/lists/*

# Create a virtualenv to keep dependencies together
RUN python -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# Copy the requirements.txt file, which contains the dependencies, to WORKDIR
# COPY has two forms:
# * COPY <src> <dest> (this copies the files from the local machine to the container's own filesystem)
# * COPY ["<src>",... "<dest>"] (this form is required for paths containing whitespace)
# For more information, see: https://docs.docker.com/engine/reference/builder/#copy
COPY requirements.txt .

# Install the Python dependencies
RUN pip install --no-cache-dir --no-deps -r requirements.txt

# Stage 2 - Copy only the necessary files to the runner stage

# The FROM instruction initializes a new build stage for the application
FROM python:3.11-slim

# Define the filename to copy as an argument
ARG FILENAME

# Define the port to run the application on as an argument
ARG PORT=8000

# Set an environment variable
ENV FILENAME=${FILENAME}

# Set the working directory to /app
WORKDIR /app

# Copy the virtual environment from the builder stage
COPY --from=builder /opt/venv /opt/venv

# Set environment variables
ENV PATH="/opt/venv/bin:$PATH"

# Copy the $FILENAME file containing the application code
COPY $FILENAME .

# Copy the chainlit.md file to the working directory
COPY chainlit.md .

# Copy the .chainlit folder to the working directory
COPY ./.chainlit ./.chainlit

# The EXPOSE instruction informs Docker that the container listens on the specified network ports at runtime.
# For more information, see: https://docs.docker.com/engine/reference/builder/#expose
EXPOSE $PORT

# The ENTRYPOINT instruction has two forms:
# * ENTRYPOINT ["executable", "param1", "param2"] (exec form, preferred)
# * ENTRYPOINT command param1 param2 (shell form)
# The ENTRYPOINT instruction allows you to configure a container that will run as an executable.
# For more information, see: https://docs.docker.com/engine/reference/builder/#entrypoint
# Here the shell form of CMD is used instead, so that $FILENAME and $PORT are expanded at runtime.
CMD chainlit run $FILENAME --port=$PORT
```

Test applications locally

You can use the `src/02-run-docker-container.sh` Bash script to test the containers of the `doc` and `chat` applications.
```bash
#!/bin/bash

# Variables
source ./00-variables.sh

# Print the menu
echo "===================================="
echo "Run Docker Container (1-3): "
echo "===================================="
options=(
  "Doc"
  "Chat"
  "Quit"
)
name=""
# Select an option
COLUMNS=0
select option in "${options[@]}"; do
  case $option in
  "Doc")
    docker run -it \
      --rm \
      -p $port:$port \
      -e AZURE_OPENAI_BASE=$AZURE_OPENAI_BASE \
      -e AZURE_OPENAI_KEY=$AZURE_OPENAI_KEY \
      -e AZURE_OPENAI_MODEL=$AZURE_OPENAI_MODEL \
      -e AZURE_OPENAI_DEPLOYMENT=$AZURE_OPENAI_DEPLOYMENT \
      -e AZURE_OPENAI_ADA_DEPLOYMENT=$AZURE_OPENAI_ADA_DEPLOYMENT \
      -e AZURE_OPENAI_VERSION=$AZURE_OPENAI_VERSION \
      -e AZURE_OPENAI_TYPE=$AZURE_OPENAI_TYPE \
      -e TEMPERATURE=$TEMPERATURE \
      --name $docImageName \
      $docImageName:$tag
    break
    ;;
  "Chat")
    docker run -it \
      --rm \
      -p $port:$port \
      -e AZURE_OPENAI_BASE=$AZURE_OPENAI_BASE \
      -e AZURE_OPENAI_KEY=$AZURE_OPENAI_KEY \
      -e AZURE_OPENAI_MODEL=$AZURE_OPENAI_MODEL \
      -e AZURE_OPENAI_DEPLOYMENT=$AZURE_OPENAI_DEPLOYMENT \
      -e AZURE_OPENAI_VERSION=$AZURE_OPENAI_VERSION \
      -e AZURE_OPENAI_TYPE=$AZURE_OPENAI_TYPE \
      -e TEMPERATURE=$TEMPERATURE \
      --name $chatImageName \
      $chatImageName:$tag
    break
    ;;
  "Quit")
    exit
    ;;
  *) echo "invalid option $REPLY" ;;
  esac
done
```

Push Docker containers to the Azure Container Registry

You can use the `src/03-push-docker-image.sh` Bash script to push the Docker container images of the `doc` and `chat` applications to the Azure Container Registry (ACR).

```bash
#!/bin/bash

# Variables
source ./00-variables.sh

# Login to ACR
echo "Logging in to [${acrName,,}] container registry..."
az acr login --name ${acrName,,}

# Retrieve ACR login server. Each container image needs to be tagged with the loginServer name of the registry.
echo "Retrieving login server for the [${acrName,,}] container registry..."
loginServer=$(az acr show --name ${acrName,,} --query loginServer --output tsv)

# Use a for loop to tag and push the local docker images to the Azure Container Registry
for index in ${!images[@]}; do
  # Tag the local image with the loginServer of ACR
  docker tag ${images[$index],,}:$tag $loginServer/${images[$index],,}:$tag

  # Push the container image to ACR
  docker push $loginServer/${images[$index],,}:$tag
done
```

Monitoring

Azure Container Apps provides several built-in observability features that together give you a holistic view of your container app's health throughout its application lifecycle. These features help you monitor and diagnose the state of your app to improve performance and respond to trends and critical problems.

You can use the Log Stream panel in the Azure portal to see the logs generated by a container app, as shown in the following screenshot.

Alternatively, you can open the Logs panel, as shown in the following screenshot, and use a Kusto Query Language (KQL) query to filter, project, and retrieve only the desired data.
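If you prefer to run this kind of KQL query from code rather than from the portal, the following is a minimal sketch based on the azure-monitor-query client library. The workspace ID placeholder and the ContainerAppConsoleLogs_CL table and column names are assumptions that depend on how your Log Analytics workspace collects Container Apps logs; adjust them to match your environment.

```python
# Sketch: run a KQL query against the Log Analytics workspace backing the
# Container Apps environment, using the azure-monitor-query library.
# <workspace-id> and the table/column names are placeholders/assumptions.
from datetime import timedelta
from azure.identity import DefaultAzureCredential
from azure.monitor.query import LogsQueryClient

credential = DefaultAzureCredential()
client = LogsQueryClient(credential)

query = """
ContainerAppConsoleLogs_CL
| where ContainerAppName_s == 'chat'
| project TimeGenerated, Log_s
| order by TimeGenerated desc
| take 20
"""

response = client.query_workspace(
    workspace_id="<workspace-id>",   # replace with your Log Analytics workspace GUID
    query=query,
    timespan=timedelta(hours=1),     # look at the last hour of logs
)
for table in response.tables:
    for row in table.rows:
        print(row)
```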
Review deployed resources

You can use the Azure portal to list the deployed resources in the resource group, as shown in the following picture:

You can also use the Azure CLI to list the deployed resources in the resource group:

```
az resource list --resource-group <resource-group-name>
```

You can also use the following PowerShell cmdlet to list the deployed resources in the resource group:

```
Get-AzResource -ResourceGroupName <resource-group-name>
```

Clean up resources

When you no longer need the resources you created, you can delete the resource group using the following Azure CLI command. This removes all the Azure resources.

```
az group delete --name <resource-group-name>
```

Alternatively, you can use the following PowerShell cmdlet to delete the resource group and all the Azure resources.

```
Remove-AzResourceGroup -Name <resource-group-name>
```
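If you script these housekeeping tasks from Python instead of the Azure CLI or PowerShell, a rough sketch using the azure-mgmt-resource management library could look like the following. The subscription ID and resource group name are placeholders, and deleting the group is destructive, so treat this as an illustration rather than a ready-made tool.

```python
# Sketch: list the resources in a resource group and optionally delete the group
# using the azure-mgmt-resource management library (placeholders, illustrative only).
from azure.identity import DefaultAzureCredential
from azure.mgmt.resource import ResourceManagementClient

subscription_id = "<subscription-id>"        # replace with your subscription ID
resource_group = "<resource-group-name>"     # replace with your resource group name

client = ResourceManagementClient(DefaultAzureCredential(), subscription_id)

# Equivalent of: az resource list --resource-group <resource-group-name>
for resource in client.resources.list_by_resource_group(resource_group):
    print(resource.name, resource.type)

# Equivalent of: az group delete --name <resource-group-name>
# Uncomment only when you really want to remove everything in the group.
# client.resource_groups.begin_delete(resource_group).result()
```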
reference","conversation":{"__ref":"Conversation:conversation:2984122"},"id":"message:2984122","revisionNum":1,"uid":2984122,"depth":0,"board":{"__ref":"Forum:board:SharePoint_General"},"author":{"__ref":"User:user:1223704"},"metrics":{"__typename":"MessageMetrics","views":81099},"postTime":"2021-11-19T00:40:30.273-08:00","lastPublishTime":"2021-11-19T00:40:30.273-08:00","body@stripHtml({\"removeProcessingText\":true,\"removeSpoilerMarkup\":true,\"removeTocMarkup\":true,\"truncateLength\":-1})":" Given: A SharePoint List (SPL) called Contractors with the following columns: ContractorID // Display name of the default ID column ContractorName   Another SPL called Reports with the following columns: ReportID // Display name of the default ID column ReportNumber // Calculated column Contractor_ID // Lookup column referring to the ContractorID column of the Contractors SPL   The calculated column, ReportNumber, would have a formula similar to: = [ReportID] & \"-\" & Contractor_ID   Question: Does SharePoint Lists support the use of a Lookup column in the formula of a calculated column? If so, I need help on the proper syntax for the formula. (The formula above is not working)   Thanks in Advance ","body@stripHtml({\"removeProcessingText\":true,\"removeSpoilerMarkup\":true,\"removeTocMarkup\":true,\"truncateLength\":-1})@stringLength":"757","kudosSumWeight":1,"repliesCount":7,"readOnly":false,"images":{"__typename":"AssociatedImageConnection","edges":[],"totalCount":0,"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}},"videos":{"__typename":"VideoConnection","edges":[],"totalCount":0,"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}}},"Conversation:conversation:1075544":{"__typename":"Conversation","id":"conversation:1075544","topic":{"__typename":"ForumTopicMessage","uid":1075544},"lastPostingActivityTime":"2024-07-15T21:35:58.701-07:00","solved":false},"User:user:494500":{"__typename":"User","uid":494500,"login":"wkeber","registrationData":{"__typename":"RegistrationData","status":null},"deleted":false,"avatar":{"__typename":"UserAvatar","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/dS00OTQ1MDAtMjUzNTE1aTQ3RTQyNTY0RjRBODZDMTI"},"id":"user:494500"},"ForumTopicMessage:message:1075544":{"__typename":"ForumTopicMessage","subject":"Force Office Files to Open in Desktop App (via link, from web parts.. in all cases)","conversation":{"__ref":"Conversation:conversation:1075544"},"id":"message:1075544","revisionNum":4,"uid":1075544,"depth":0,"board":{"__ref":"Forum:board:SharePoint_General"},"author":{"__ref":"User:user:494500"},"metrics":{"__typename":"MessageMetrics","views":76300},"postTime":"2019-12-19T22:56:40.509-08:00","lastPublishTime":"2019-12-20T12:29:03.864-08:00","body@stripHtml({\"removeProcessingText\":true,\"removeSpoilerMarkup\":true,\"removeTocMarkup\":true,\"truncateLength\":-1})":" I want to force Office files stored in SharePoint to open in the desktop app - NEVER in Office Online apps. I have configured the site and the Document Library but opening in the native app only seems to work when navigating to the file in the DL and clicking it directly. Web Parts seem to always open the web app, as do links copied via the \"Share\" or \"Copy Link\" options in the DL.    
I've made up quick video to show and tell: https://youtu.be/SvJd2_TLsmw   Note - while I was making the video, I stumbled upon a workaround for when we share links internally via e-mail (and maybe I can use the same workaround when sending files in Teams.)   I also fumble through trying to explain what I've been toying with as far as other workarounds.. the limitations of my technical capabilities will be apparent!   I already looked at several other posts, e.g.: https://techcommunity.microsoft.com/t5/SharePoint/Creating-a-shareable-document-link-that-forces-a-document-to/m-p/296647#M24291 ","body@stripHtml({\"removeProcessingText\":true,\"removeSpoilerMarkup\":true,\"removeTocMarkup\":true,\"truncateLength\":-1})@stringLength":"1029","kudosSumWeight":0,"repliesCount":10,"readOnly":false,"images":{"__typename":"AssociatedImageConnection","edges":[],"totalCount":0,"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}},"videos":{"__typename":"VideoConnection","edges":[],"totalCount":0,"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}}},"Conversation:conversation:3769764":{"__typename":"Conversation","id":"conversation:3769764","topic":{"__typename":"BlogTopicMessage","uid":3769764},"lastPostingActivityTime":"2024-04-19T14:31:15.025-07:00","solved":false},"User:user:878885":{"__typename":"User","uid":878885,"login":"ormeikop-msft","registrationData":{"__typename":"RegistrationData","status":null},"deleted":false,"avatar":{"__typename":"UserAvatar","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/dS04Nzg4ODUtMzU5NjAxaTM0NkJGRjNBRTc4MUJFODM"},"id":"user:878885"},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEzNWlBMzZCRURDRDA3RjE3MUY0?revision=11\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEzNWlBMzZCRURDRDA3RjE3MUY0?revision=11","title":"ormeikopmsft_0-1678973527542.png","associationType":"TEASER","width":693,"height":469,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTExNmk3NkQxNTMzRjYwNzAxNjc5?revision=11\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTExNmk3NkQxNTMzRjYwNzAxNjc5?revision=11","title":"ormeikopmsft_0-1678972065397.png","associationType":"BODY","width":330,"height":124,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTExN2k1NDZCOTc0RTE4OTRBMDBE?revision=11\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTExN2k1NDZCOTc0RTE4OTRBMDBE?revision=11","title":"ormeikopmsft_1-1678972065423.png","associationType":"BODY","width":886,"height":584,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTExOGlCRDMyNDY0NDIzM0Y5RDZD?revision=11\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTExOGlCRDMyNDY0NDIzM0Y5RDZD?revision=11","title":"ormeikopmsft_2-1678972065453.png","associationType":"BODY","width":1914,"height":1534,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyMWk3QjQ3NUU2MDA4MDVGMzEz?revision=11\"}":{"__typename":"Assoc
iatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyMWk3QjQ3NUU2MDA4MDVGMzEz?revision=11","title":"ormeikopmsft_3-1678972065462.png","associationType":"BODY","width":848,"height":282,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyMGk1ODFEREJDMkFBRjlENTUz?revision=11\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyMGk1ODFEREJDMkFBRjlENTUz?revision=11","title":"ormeikopmsft_4-1678972065473.png","associationType":"BODY","width":874,"height":584,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTExOWk3NzRDRjkyNTJBMjk3OTk5?revision=11\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTExOWk3NzRDRjkyNTJBMjk3OTk5?revision=11","title":"ormeikopmsft_5-1678972065481.png","associationType":"BODY","width":1008,"height":354,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyMmkyOTlBNkJBM0U2NjAzNjhF?revision=11\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyMmkyOTlBNkJBM0U2NjAzNjhF?revision=11","title":"ormeikopmsft_6-1678972065490.png","associationType":"BODY","width":1202,"height":348,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MjU0MWlDRjIyMjE3MjVDNjk5NThD?revision=11\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MjU0MWlDRjIyMjE3MjVDNjk5NThD?revision=11","title":"motasem13_1-1679401740307.png","associationType":"BODY","width":552,"height":750,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyNGlFQUM1QzQ4MzE4RkExMjE4?revision=11\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyNGlFQUM1QzQ4MzE4RkExMjE4?revision=11","title":"ormeikopmsft_8-1678972065514.png","associationType":"BODY","width":750,"height":638,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyNWlCMkVGN0I2NTJEMzdGMTk1?revision=11\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyNWlCMkVGN0I2NTJEMzdGMTk1?revision=11","title":"ormeikopmsft_9-1678972065519.png","associationType":"BODY","width":672,"height":328,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyNmk2MkFDMjMzMDVBM0Q1RDg3?revision=11\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyNmk2MkFDMjMzMDVBM0Q1RDg3?revision=11","title":"ormeikopmsft_10-1678972065522.png","associationType":"BODY","width":906,"height":202,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyN2kxNjFENDNBOTU4QTk4MEZB?revision=11\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyN2kxNjFENDNBOTU4QTk4MEZB?revision=11","title":"ormeikopmsft_11-1678972065526.png","associationType":"BODY","width":1112,"height":274,"altText":null},"Ass
ociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyOGkxRDNCMzAzQ0IxMjFERjI2?revision=11\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyOGkxRDNCMzAzQ0IxMjFERjI2?revision=11","title":"ormeikopmsft_12-1678972065530.png","associationType":"BODY","width":1226,"height":236,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyOWkyNTRGQjU1NjQzMUFCNUQy?revision=11\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyOWkyNTRGQjU1NjQzMUFCNUQy?revision=11","title":"ormeikopmsft_13-1678972065537.png","associationType":"BODY","width":608,"height":388,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEzMGlCQ0EzOTVDQTNGNEYzNUY2?revision=11\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEzMGlCQ0EzOTVDQTNGNEYzNUY2?revision=11","title":"ormeikopmsft_14-1678972065541.png","associationType":"BODY","width":1078,"height":268,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MjU0MGk4QTc3MUQ3NDc5REQ4NERD?revision=11\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MjU0MGk4QTc3MUQ3NDc5REQ4NERD?revision=11","title":"motasem13_0-1679401698743.png","associationType":"BODY","width":750,"height":467,"altText":null},"BlogTopicMessage:message:3769764":{"__typename":"BlogTopicMessage","subject":"Azure Functions - Part 2 - Unit and Integration Testing","conversation":{"__ref":"Conversation:conversation:3769764"},"id":"message:3769764","revisionNum":11,"uid":3769764,"depth":0,"board":{"__ref":"Blog:board:FastTrackforAzureBlog"},"author":{"__ref":"User:user:878885"},"teaser@stripHtml({\"removeProcessingText\":true,\"truncateLength\":-1})":" \n A unit test usually involves writing code to test a specific function or method, using input data and expected output data. The test code then executes the function or method with the input data, compares the resulting output to the expected output, and reports any discrepancies as errors or failures. This approach allows developers to test each unit of code in isolation, ensuring that their code is modular, well-designed, and functioning as intended. ","introduction":"","metrics":{"__typename":"MessageMetrics","views":70100},"postTime":"2023-03-23T06:26:53.945-07:00","lastPublishTime":"2023-11-09T11:09:52.936-08:00","body@stripHtml({\"removeProcessingText\":true,\"removeSpoilerMarkup\":true,\"removeTocMarkup\":true,\"truncateLength\":-1})":" Unit Testing \n Introduction \n   \n A unit test usually involves writing code to test a specific function or method, using input data and expected output data. The test code then executes the function or method with the input data, compares the resulting output to the expected output, and reports any discrepancies as errors or failures. This approach allows developers to test each unit of code in isolation, ensuring that their code is modular, well-designed, and functioning as intended. \n For example, if you have a method that converts a full name to a first name and a last name and the input is John Doe , what you would want to validate is that the output of that method is John and Doe . 
If that breaks, then your assertion breaks, and you would need to fix your code. That’s the main idea behind any type of testing. \n Unit testing is testing what is called the “unit”. A “unit” can be anything, but, usually, unit tests assess the method. \n In unit testing any input and output (I/O) calls are mocked. I/O calls include: \n \n External network API \n Database \n File system \n \n   \n The main idea behind mocking in unit testing is to avoid making calls to external components, such as APIs or databases, during testing. These calls can slow down unit tests, which should ideally be fast. Instead of setting up a whole environment to make these calls, developers can use mocks to simulate their behavior. Mocks replace the actual calls with simulated responses, allowing developers to test their code without relying on external components that may not always be available or consistent. \n   \n Why is unit testing important \n The short execution time feedback loop, mocking dependencies and then getting a quick response on how your system performs is extremely powerful. It can catch bugs before they make it near production. \n \n Isn't it enough to test the system manually instead of relying on automated unit testing? \n If you do this every single time, you can’t possibly test every single scenario, every single time you make a code change, manually. You need an automated way to do that. A unit test which is fast and small. If a bug gets deployed into production and is found by a customer, then it would cost your company a lot of money, even more compared to the money spent for developers to write those unit tests in the first place. \n Another advantage of unit tests is that they can be read as documentation. You can give very descriptive names in your unit tests and then you can just read the method names of the unit tests and understand what your application is doing. \n They also “force” the developers to write better and cleaner code because unit testing needs some very specific techniques to be good, like SOLID principles, Inversion of Control, and Dependency Injection. \n In most companies, unit tests are mandatory. They are run as part of the build pipeline and if at least one unit test fails, the whole build fails. You need to have a certain number of tests before you can push anything into production, thus proving that your application code is covered. \n Finally, some people think unit testing is so important, that they choose to do what’s called “Test Driven Development” (TDD). TDD is a practice where you write your tests before you write your actual code. \n Core unit testing concepts \n In unit testing, you have three core concepts: \n \n Testing Library:\n \n The unit testing framework to use for writing your unit tests. \n Examples for .NET:\n \n xUnit \n NUnit \n MSTest \n \n \n \n \n Mocking Library:\n \n The mocking framework to use for isolating your class / method under test from its dependencies and ensure that the proper methods on the dependent objects are being called. \n Examples for .NET:\n \n Moq \n NSubstitute \n \n \n \n \n Assertion Library:\n \n A set of (usually) extension methods that allow you to specify the expected outcome of a TDD or BDD-style unit test more naturally. \n Examples for .NET:\n \n Fluent Assertions \n \n \n \n \n \n Practical Example \n To apply the concepts presented in this article, a sample API will be used. This API was developed using the HTTP-Trigger based Azure Function Template (C#) in VS2022. 
Practical Example

To apply the concepts presented in this article, a sample API is used. The API was developed using the HTTP-trigger based Azure Functions template (C#) in Visual Studio 2022. It contains a single POST endpoint, which you can call to create a Note. As part of this create-note operation, the note is persisted in Cosmos DB and a simple noteCreated event is then sent to a third-party notification system.

The solution is structured as follows: the src solution folder contains the source code of the Azure Functions API project (Fta.DemoFunc.Api), and the tests solution folder contains the two test projects, one for unit tests (Fta.DemoFunc.Api.Tests.Unit) and one for integration tests (Fta.DemoFunc.Api.Tests.Integration).

To make unit tests easier to write, the dependency injection (DI) design pattern is applied in the Azure Functions project; DI is a technique for achieving Inversion of Control (IoC) between classes and their dependencies. The Startup.cs file contains the setup and configuration of the DI container.

The Azure Functions project contains a single function called NotesFunction. The "business logic" of the Azure Function has been extracted into a service called NoteService, which implements the INoteService interface.

The NotesFunction exposes a single POST endpoint that accepts a CreateNoteRequest object from the client and delegates the note creation process to the service implementing INoteService. Because DI is applied to this class, it is easy to write unit tests against it; the process is the same as writing unit tests for any other class.

As mentioned above, there are three core unit testing concepts: the testing, the mocking, and the assertion library. In this demo project, xUnit is used as the testing library, NSubstitute as the mocking library, and Fluent Assertions as the assertion library.

To begin writing unit tests for the NotesFunction class, a new class called NotesFunctionTests is created inside the Fta.DemoFunc.Api.Tests.Unit project.

Based on the implementation, we need three unit tests to cover all scenarios for the POST method of our "System Under Test" (SUT), the NotesFunction:

- The first unit test covers the scenario where the NoteService is called and completes successfully, so the function returns a CreatedResult (201 status code) to the client along with the details of the created note.
- The second unit test covers the scenario where the NoteService returns null, so the function returns a BadRequestObjectResult (400 status code) to the client along with an error message.
- Finally, the third unit test covers the scenario of an exception being thrown from the NoteService. In that case, the error is logged and an InternalServerErrorResult (500 status code) is returned to the client.

All unit tests have very descriptive names and read like documentation. Each unit test also follows the "AAA" pattern, which stands for "Arrange", "Act", and "Assert": in the "Arrange" part you write initialization code for the test, in the "Act" part you call the method of your SUT that you are testing, and in the "Assert" part you make your assertions based on the expected output.

All code, examples and details for this project can be found in this GitHub repo.
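The original post shows the function and its tests as screenshots, which are not reproduced here. Purely as a hedged sketch (the actual signatures, model types, result types, and logging in the sample repository may differ), the POST endpoint and the first unit test described above could look roughly like this:

using System;
using System.Threading.Tasks;
using FluentAssertions;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Mvc;
using Microsoft.Azure.WebJobs;
using Microsoft.Azure.WebJobs.Extensions.Http;
using Microsoft.Extensions.Logging;
using NSubstitute;
using Xunit;

// Minimal stand-ins for the sample's models and service contract.
public record CreateNoteRequest(string Title, string Body);
public record Note(string Id, string Title, string Body);
public interface INoteService
{
    Task<Note?> CreateAsync(CreateNoteRequest request);
}

// Sketch of the function class described above (simplified).
public class NotesFunction
{
    private readonly INoteService _noteService;
    private readonly ILogger<NotesFunction> _logger;

    public NotesFunction(INoteService noteService, ILogger<NotesFunction> logger)
    {
        _noteService = noteService;
        _logger = logger;
    }

    [FunctionName("CreateNote")]
    public async Task<IActionResult> CreateNote(
        [HttpTrigger(AuthorizationLevel.Function, "post", Route = "notes")] CreateNoteRequest request)
    {
        try
        {
            var note = await _noteService.CreateAsync(request);
            return note is null
                ? new BadRequestObjectResult("The note could not be created.")
                : new CreatedResult($"/notes/{note.Id}", note);
        }
        catch (Exception ex)
        {
            // The sample returns an InternalServerErrorResult; a plain 500 is used here.
            _logger.LogError(ex, "Failed to create note");
            return new StatusCodeResult(StatusCodes.Status500InternalServerError);
        }
    }
}

public class NotesFunctionTests
{
    private readonly INoteService _noteService = Substitute.For<INoteService>();
    private readonly ILogger<NotesFunction> _logger = Substitute.For<ILogger<NotesFunction>>();

    [Fact]
    public async Task CreateNote_ShouldReturnCreated_WhenNoteIsCreatedSuccessfully()
    {
        // Arrange
        var request = new CreateNoteRequest("Shopping", "Milk, eggs");
        var createdNote = new Note("1", "Shopping", "Milk, eggs");
        _noteService.CreateAsync(request).Returns(createdNote);
        var sut = new NotesFunction(_noteService, _logger);

        // Act
        var result = await sut.CreateNote(request);

        // Assert
        result.Should().BeOfType<CreatedResult>()
              .Which.Value.Should().BeEquivalentTo(createdNote);
    }
}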
Integration Testing

Introduction

Integration testing is the phase in software testing in which individual software modules are combined and evaluated as a group. It is conducted to evaluate the compliance of a system or component with specific functional requirements. It comes after unit testing and before end-to-end testing.

Integration tests usually check what we call the happy and the unhappy path. There is also value in calling the database or API dependencies to make sure that they behave correctly. This is where integration tests come into the picture, because they do call dependencies. This means that you either need a working, running environment, or you need to spin one up on demand (for example, in Docker). Integration tests are also bigger in scope than unit tests.

Why is integration testing important

If you follow the full testing flow and have your unit tests first, integration tests give you a better idea of how your system performs when integrating with other components. Unit tests make a lot of assumptions, and that is not ideal when you want a realistic representation of your system.

Scope of integration tests

Let's examine the scope of integration testing when it comes to the things you call or mock (the file system, network calls, database calls). For example, say you have an API that makes a few calls: one to the GitHub API, one to an internal API that you own, one to the database, and one to the file system.

In the context of an integration test, the call to the database must happen, so you use a realistic database and do not replace it with anything "mocked". The call to the file system must also happen.

The call to another API that your API works with needs some additional consideration. If it is an API that you own (i.e., another API in your system), you can choose whether to run and call it or to mock it. On the other hand, if it is an external API that you do not own (e.g., the GitHub API), it is out of scope for your integration tests because you have no control over it. Instead, you would replace it with an API that accepts requests and responds as if it were the GitHub API, ensuring that there is still an integration point with the same contracts, HTTP headers, models, and so on. You can also choose to mock it, as you would in a unit test.

Practical Example

Let's examine how to apply the above concepts in the context of the example Azure Functions HTTP-trigger based "Notes" API. To begin writing integration tests for the NotesFunction class, a new class called NotesFunctionTests is created inside the Fta.DemoFunc.Api.Tests.Integration project.

In this example, we have two dependencies in our code: the Cosmos DB component where notes are persisted, and the third-party API notification system. For the Cosmos DB case, as this is something you own and control, you could spin up this dependency locally, if possible, and run your tests against a local Cosmos DB instance instead of a remote one. This has a couple of advantages:

- Tests are faster to run locally than against a remote database.
- You run tests independently from other developers, because test data from other machines will not impact your database.
In our example, we run the integration tests against a local instance of Azure Cosmos DB, using the Azure Cosmos DB Emulator.

The crucial part of the integration test setup is configuring dependency injection. You need the following classes for the setup:

- TestStartup: a new TestStartup.cs class is introduced, deriving from the Azure Function's Startup class, to define dependency injection for the tests.
- Configuration: you should never store keys and secrets inside a git repository. For local development in an Azure Functions project, you can use a local.settings.json file to store configuration, which never leaves your local machine.
- TestsInitializer: a test host (TestsInitializer.cs) is used for the integration tests, built on top of TestStartup.
- We also need a collection definition (IntegrationTestsCollection.cs), created by implementing the ICollectionFixture interface.

After setting up all the above components, we can continue with creating the integration tests. Based on the implementation of the NotesFunction class, we need two integration tests to cover the "happy" and the "unhappy" path for the POST method of our "System Under Test" (SUT), the NotesFunction:

- The first integration test covers the "happy path" scenario, where the POST endpooint is called with valid note details, so the function returns a CreatedResult (201 status code) to the client along with the details of the created note.
- The second integration test covers the scenario where the POST endpoint is called with invalid note details, so the function returns a BadRequestObjectResult (400 status code) to the client along with an error message.

As with the unit tests, all integration tests have very descriptive names and read like documentation, and each one follows the "AAA" pattern.

All code, examples and details for this project can be found in this GitHub repo.

The Testing Pyramid

The "testing pyramid" is a visualization technique that shows how important the distinct types of testing are and how much of each you need in a project. The pyramid has three levels:

- At the foundation level, you have unit testing.
- In the middle level, you have integration testing.
- At the top level, you have end-to-end testing.

Unit tests are the largest number of tests you will have in your code, covering every scenario you need to validate. Integration tests sit a level higher, and you have fewer of them than unit tests, because each one tests a broader scenario in your application. Finally, end-to-end tests sit at the top and are the fewest, because they only evaluate the few things your application exposes to the outside.
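The setup classes above are shown in the original post as screenshots. Purely as a hedged sketch (reusing the hypothetical NotesFunction, INoteService, CreateNoteRequest and Note types from the unit testing sketch earlier, and leaving the actual registrations to the sample's TestStartup), the collection fixture and the happy-path test described above could be shaped like this:

using System.Threading.Tasks;
using FluentAssertions;
using Microsoft.AspNetCore.Mvc;
using Microsoft.Extensions.DependencyInjection;
using Xunit;

// Shared test host, created once for the whole collection.
public class TestsInitializer
{
    public ServiceProvider Services { get; }

    public TestsInitializer()
    {
        var services = new ServiceCollection();
        // Hypothetical wiring: in the sample project the registrations come from
        // TestStartup (deriving from the function app's Startup), which points the
        // Cosmos DB client at the local emulator via local.settings.json and
        // registers INoteService and NotesFunction. The details are omitted here.
        RegisterSampleServices(services);
        Services = services.BuildServiceProvider();
    }

    private static void RegisterSampleServices(IServiceCollection services)
    {
        // Placeholder for the TestStartup registrations described above.
    }
}

// xUnit collection definition so all integration tests share one initialized host.
[CollectionDefinition("IntegrationTests")]
public class IntegrationTestsCollection : ICollectionFixture<TestsInitializer>
{
}

[Collection("IntegrationTests")]
public class NotesFunctionIntegrationTests
{
    private readonly NotesFunction _sut;

    public NotesFunctionIntegrationTests(TestsInitializer initializer) =>
        _sut = initializer.Services.GetRequiredService<NotesFunction>();

    [Fact]
    public async Task CreateNote_ShouldReturnCreated_WhenNoteDetailsAreValid()
    {
        // Act: unlike the unit tests, this call really persists the note in the
        // Cosmos DB Emulator and talks to the faked notification endpoint.
        var result = await _sut.CreateNote(new CreateNoteRequest("Shopping", "Milk, eggs"));

        // Assert
        result.Should().BeOfType<CreatedResult>();
    }
}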
\n ","body@stripHtml({\"removeProcessingText\":true,\"removeSpoilerMarkup\":true,\"removeTocMarkup\":true,\"truncateLength\":-1})@stringLength":"14284","kudosSumWeight":1,"repliesCount":6,"readOnly":false,"images":{"__typename":"AssociatedImageConnection","edges":[{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDE","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEzNWlBMzZCRURDRDA3RjE3MUY0?revision=11\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDI","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTExNmk3NkQxNTMzRjYwNzAxNjc5?revision=11\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDM","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTExN2k1NDZCOTc0RTE4OTRBMDBE?revision=11\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDQ","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTExOGlCRDMyNDY0NDIzM0Y5RDZD?revision=11\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDU","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyMWk3QjQ3NUU2MDA4MDVGMzEz?revision=11\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDY","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyMGk1ODFEREJDMkFBRjlENTUz?revision=11\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDc","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTExOWk3NzRDRjkyNTJBMjk3OTk5?revision=11\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDg","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyMmkyOTlBNkJBM0U2NjAzNjhF?revision=11\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDk","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MjU0MWlDRjIyMjE3MjVDNjk5NThD?revision=11\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDEw","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyNGlFQUM1QzQ4MzE4RkExMjE4?revision=11\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDEx","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyNWlCMkVGN0I2NTJEMzdGMTk1?revision=11\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDEy","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyNmk2MkFDMjMzMDVBM0Q1RDg3?revision=11\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDEz","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zNzY5NzY0LTQ1MTEyN2kxNjFENDNBOTU4QTk4MEZB?revision=11\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDE0","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.c
","body@stripHtml({\"removeProcessingText\":true,\"removeSpoilerMarkup\":true,\"removeTocMarkup\":true,\"truncateLength\":-1})@stringLength":"598","kudosSumWeight":0,"repliesCount":4,"readOnly":false,"images":{"__typename":"AssociatedImageConnection","edges":[],"totalCount":0,"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}},"videos":{"__typename":"VideoConnection","edges":[],"totalCount":0,"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}}},"Conversation:conversation:1294061":{"__typename":"Conversation","id":"conversation:1294061","topic":{"__typename":"ForumTopicMessage","uid":1294061},"lastPostingActivityTime":"2024-01-17T16:45:59.309-08:00","solved":false},"Forum:board:MicrosoftTeams":{"__typename":"Forum","id":"board:MicrosoftTeams","displayId":"MicrosoftTeams","nodeType":"board","conversationStyle":"FORUM","title":"Microsoft Teams","shortTitle":"Microsoft Teams","parent":{"__ref":"Category:category:MicrosoftTeams"}},"User:user:81259":{"__typename":"User","uid":81259,"login":"Steven Miori","registrationData":{"__typename":"RegistrationData","status":null},"deleted":false,"avatar":{"__typename":"UserAvatar","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/m_assets/avatars/default/avatar-5.svg?time=0"},"id":"user:81259"},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0xMjk0MDYxLTE4MzE4OWlCRTg4REUzQjc3Qjc2NzM0?revision=1\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0xMjk0MDYxLTE4MzE4OWlCRTg4REUzQjc3Qjc2NzM0?revision=1","title":"IMG_0583.jpg","associationType":"BODY","width":1242,"height":2688,"altText":null},"ForumTopicMessage:message:1294061":{"__typename":"ForumTopicMessage","subject":"Can't Save or download Photo from Teams to iPhone Photos","conversation":{"__ref":"Conversation:conversation:1294061"},"id":"message:1294061","revisionNum":1,"uid":1294061,"depth":0,"board":{"__ref":"Forum:board:MicrosoftTeams"},"author":{"__ref":"User:user:81259"},"metrics":{"__typename":"MessageMetrics","views":54500},"postTime":"2020-04-08T14:45:00.487-07:00","lastPublishTime":"2020-04-08T14:45:00.487-07:00","body@stripHtml({\"removeProcessingText\":true,\"removeSpoilerMarkup\":true,\"removeTocMarkup\":true,\"truncateLength\":-1})":" If I open the share options in Teams for a jpg photo, it does not show an option to \"Save\" the photo. I need to save it to my iOS Photos, but cannot.    Any help greatly appreciated! I am on an iPhone 11 Pro Max, running the latest iOS 13 software and the most up to date Teams app.   
","body@stripHtml({\"removeProcessingText\":true,\"removeSpoilerMarkup\":true,\"removeTocMarkup\":true,\"truncateLength\":-1})@stringLength":"310","kudosSumWeight":2,"repliesCount":13,"readOnly":false,"images":{"__typename":"AssociatedImageConnection","edges":[{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDE","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0xMjk0MDYxLTE4MzE4OWlCRTg4REUzQjc3Qjc2NzM0?revision=1\"}"}}],"totalCount":1,"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}},"videos":{"__typename":"VideoConnection","edges":[],"totalCount":0,"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}}},"Conversation:conversation:3839611":{"__typename":"Conversation","id":"conversation:3839611","topic":{"__typename":"BlogTopicMessage","uid":3839611},"lastPostingActivityTime":"2023-11-09T02:03:39.666-08:00","solved":false},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODM5NjExLTQ3NjkxM2kxRDQ1Qzk2QThDRjlCMjJG?revision=3\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODM5NjExLTQ3NjkxM2kxRDQ1Qzk2QThDRjlCMjJG?revision=3","title":"architecture.png","associationType":"TEASER","width":690,"height":758,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODM5NjExLTQ3NjkxNGk1NzQwRDJEMDIzMzhDQTVE?revision=3\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODM5NjExLTQ3NjkxNGk1NzQwRDJEMDIzMzhDQTVE?revision=3","title":"architecture.png","associationType":"BODY","width":690,"height":758,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODM5NjExLTQ3NjkxNmk0QTg3MTVGNDVBQTcwNjZE?revision=3\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODM5NjExLTQ3NjkxNmk0QTg3MTVGNDVBQTcwNjZE?revision=3","title":"openai.png","associationType":"BODY","width":1012,"height":587,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODM5NjExLTQ3NjkxN2lGQjEwNTFDNDhDRTBFQjI2?revision=3\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODM5NjExLTQ3NjkxN2lGQjEwNTFDNDhDRTBFQjI2?revision=3","title":"federatedidentitycredentials.png","associationType":"BODY","width":876,"height":673,"altText":null},"BlogTopicMessage:message:3839611":{"__typename":"BlogTopicMessage","subject":"Deploy and run a Azure OpenAI/ChatGPT app on AKS with Terraform","conversation":{"__ref":"Conversation:conversation:3839611"},"id":"message:3839611","revisionNum":3,"uid":3839611,"depth":0,"board":{"__ref":"Blog:board:FastTrackforAzureBlog"},"author":{"__ref":"User:user:988334"},"teaser@stripHtml({\"removeProcessingText\":true,\"truncateLength\":-1})":" This article shows how to deploy an Azure Kubernetes Service(AKS) cluster and Azure OpenAI Service via Terraform and how to deploy a Terraform chatbot that authenticates against Azure OpenAI using Azure AD workload identity and calls the Chat Completion API of a ChatGPT model. 
\n \n   ","introduction":"","metrics":{"__typename":"MessageMetrics","views":50400},"postTime":"2023-06-05T03:10:12.868-07:00","lastPublishTime":"2023-11-09T02:03:39.666-08:00","body@stripHtml({\"removeProcessingText\":true,\"removeSpoilerMarkup\":true,\"removeTocMarkup\":true,\"truncateLength\":-1})":" \n \n \n \n \n \n \n \n \n This sample shows how to deploy an Azure Kubernetes Service(AKS) cluster and Azure OpenAI Service using Terraform modules with the Azure Provider Terraform Provider and how to deploy a Python chatbot that authenticates against Azure OpenAI using Azure AD workload identity and calls the Chat Completion API of a ChatGPT model. \n   \n You can find the code of the chatbot and Terraform modules to deploy the environment in this GitHub repository. For a Bicep version of the article and companion sample, see How to deploy and run an Azure OpenAI ChatGPT application on AKS via Bicep. \n   \n A chatbot is an application that simulates human-like conversations with users via chat. Its key task is to answer user questions with instant messages. Azure Kubernetes Service(AKS) cluster communicates with Azure OpenAI Service via an Azure Private Endpoint. The chatbot application simulates the original Magic 8 Ball plastic sphere, made to look like an oversized eight ball used for fortune-telling or seeking advice. \n   \n   \n AI applications can be used to perform tasks such as summarizing articles, writing stories, and engaging in long conversations with chatbots. This is made possible by large language models (LLMs) like OpenAI ChatGPT, which are deep learning algorithms capable of recognizing, summarizing, translating, predicting, and generating text and other content. LLMs leverage the knowledge acquired from extensive datasets, enabling them to perform tasks beyond teaching AI human languages. These models have succeeded in diverse domains, including understanding proteins, writing software code, and more. Apart from their applications in natural language processing, such as translation, chatbots, and AI assistants, large language models are also extensively employed in healthcare, software development, and various other fields. \n   \n For more information on Azure OpenAI Service and Large Language Models (LLMs), see the following articles: \n   \n \n What is Azure OpenAI Service? \n Azure OpenAI Service models \n Large Language Model \n Azure OpenAI Terraform deployment for sample chatbot \n Terraform module for deploying Azure OpenAI Service. \n \n   \n Prerequisites \n \n An active Azure subscription. If you don't have one, create a free Azure account before you begin. \n Visual Studio Code installed on one of the supported platforms along with the HashiCorp Terraform. \n Azure CLI version 2.49.0 or later installed. To install or upgrade, see Install Azure CLI. \n aks-preview  Azure CLI extension of version 0.5.140 or later installed \n \n   \n You can run  az --version  to verify above versions. \n To install the aks-preview extension, run the following command: \n az extension add --name aks-preview \n Run the following command to update to the latest version of the extension released: \n az extension update --name aks-preview \n Architecture \n This sample provides a set of Terraform modules to deploy an Azure Kubernetes Service(AKS) cluster and Azure OpenAI Service and how to deploy a Python chatbot that authenticates against Azure OpenAI using Azure AD workload identity and calls the Chat Completion API of the ChatGPT model. 
The AKS cluster communicates with Azure OpenAI Service via an Azure Private Endpoint. The architecture and network topology deployed by the sample are shown in the architecture diagram included in the repository.

Terraform modules are parametric, so you can choose any network plugin:

- Azure CNI with static IP allocation
- Azure CNI with dynamic IP allocation
- Azure CNI Powered by Cilium
- Azure CNI Overlay
- BYO CNI
- Kubenet

In a production environment, we strongly recommend deploying a private AKS cluster with Uptime SLA. For more information, see private AKS cluster with a Public DNS address. Alternatively, you can deploy a public AKS cluster and secure access to the API server using authorized IP address ranges.

The Terraform modules deploy the following Azure resources:

- Azure OpenAI Service: an Azure OpenAI Service instance with a GPT-3.5 model used by the chatbot application. Azure OpenAI Service gives customers advanced language AI with OpenAI GPT-4, GPT-3, Codex, and DALL-E models with the security and enterprise promise of Azure. Azure OpenAI co-develops the APIs with OpenAI, ensuring compatibility and a smooth transition from one to the other.
- User-defined Managed Identity: a user-defined managed identity used by the AKS cluster to create additional resources like load balancers and managed disks in Azure.
- User-defined Managed Identity: a user-defined managed identity used by the chatbot application to acquire a security token via Azure AD workload identity to call the Chat Completion API of the ChatGPT model provided by the Azure OpenAI Service.
- Azure Virtual Machine: the Terraform modules can optionally create a jump-box virtual machine to manage the private AKS cluster.
- Azure Bastion Host: a separate Azure Bastion is deployed in the AKS cluster virtual network to provide SSH connectivity to both agent nodes and virtual machines.
- Azure NAT Gateway: a bring-your-own (BYO) Azure NAT Gateway to manage outbound connections initiated by AKS-hosted workloads. The NAT Gateway is associated to the SystemSubnet, UserSubnet, and PodSubnet subnets. The outboundType property of the cluster is set to userAssignedNatGateway to specify that a BYO NAT Gateway is used for outbound connections. NOTE: you can update the outboundType after cluster creation, and this will deploy or remove resources as required to put the cluster into the new egress configuration. For more information, see Updating outboundType after cluster creation.
- Azure Storage Account: this storage account is used to store the boot diagnostics logs of both the service provider and service consumer virtual machines. Boot Diagnostics is a debugging feature that allows you to view console output and screenshots to diagnose virtual machine status.
- Azure Container Registry: an Azure Container Registry (ACR) to build, store, and manage container images and artifacts in a private registry for all container deployments.
- Azure Key Vault: an Azure Key Vault used to store secrets, certificates, and keys that can be mounted as files by pods using the Azure Key Vault Provider for Secrets Store CSI Driver. For more information, see Use the Azure Key Vault Provider for Secrets Store CSI Driver in an AKS cluster and Provide an identity to access the Azure Key Vault Provider for Secrets Store CSI Driver.
- Azure Private Endpoints: an Azure Private Endpoint is created for each of the following resources:
  - Azure OpenAI Service
  - Azure Container Registry
  - Azure Key Vault
  - Azure Storage Account
  - API Server, when deploying a private AKS cluster
- Azure Private DNS Zones: an Azure Private DNS Zone is created for each of the following resources:
  - Azure OpenAI Service
  - Azure Container Registry
  - Azure Key Vault
  - Azure Storage Account
  - API Server, when deploying a private AKS cluster
- Azure Network Security Groups: subnets hosting virtual machines and Azure Bastion Hosts are protected by Azure Network Security Groups that are used to filter inbound and outbound traffic.
- Azure Log Analytics Workspace: a centralized Azure Log Analytics workspace is used to collect the diagnostics logs and metrics from all the Azure resources:
  - Azure OpenAI Service
  - Azure Kubernetes Service cluster
  - Azure Key Vault
  - Azure Network Security Group
  - Azure Container Registry
  - Azure Storage Account
  - Azure jump-box virtual machine
- Azure Deployment Script: a deployment script is used to run the install-nginx-via-helm-and-create-sa.sh Bash script, which creates the namespace and service account for the sample application and installs the following packages to the AKS cluster via Helm (for more information on deployment scripts, see Use deployment scripts):
  - NGINX Ingress Controller
  - Cert-Manager
  - Prometheus

NOTE: You can find the architecture.vsdx file used for the diagram under the visio folder.

Azure Provider

The Azure Provider can be used to configure infrastructure in Microsoft Azure using the Azure Resource Manager APIs. For more information on the data sources and resources supported by the Azure Provider, see the documentation. To learn the basics of Terraform using this provider, follow the hands-on get started tutorials. If you are interested in the Azure Provider's latest features, see the changelog for version information and release notes.

What is Azure OpenAI Service?

The Azure OpenAI Service is a platform offered by Microsoft Azure that provides cognitive services powered by OpenAI models. One of the models available through this service is the ChatGPT model, which is designed for interactive conversational tasks. It allows developers to integrate natural language understanding and generation capabilities into their applications.

Azure OpenAI Service provides REST API access to OpenAI's powerful language models, including the GPT-3, Codex, and Embeddings model series. In addition, the new GPT-4 and ChatGPT model series have now reached general availability. These models can be easily adapted to your specific task, including but not limited to content generation, summarization, semantic search, and natural language-to-code translation. Users can access the service through REST APIs, the Python SDK, or the web-based interface in the Azure OpenAI Studio.

The Chat Completion API, part of the Azure OpenAI Service, provides a dedicated interface for interacting with the ChatGPT and GPT-4 models. This API is currently in preview and is the preferred method for accessing these models. The GPT-4 models can only be accessed through this API.

GPT-3, GPT-3.5, and GPT-4 models from OpenAI are prompt-based. With prompt-based models, the user interacts with the model by entering a text prompt, to which the model responds with a text completion. This completion is the model's continuation of the input text. While these models are extremely powerful, their behavior is also very sensitive to the prompt, which makes prompt construction a critical skill to develop. For more information, see Introduction to prompt engineering.

Prompt construction can be complex. In practice, the prompt acts to configure the model weights to complete the desired task, but it is more of an art than a science, often requiring experience and intuition to craft a successful prompt. The goal of this article is to help you get started with this learning process. It attempts to capture general concepts and patterns that apply to all GPT models. However, it is essential to understand that each model behaves differently, so the learnings may not apply equally to all models.

Prompt engineering refers to creating instructions, called prompts, for Large Language Models (LLMs) such as OpenAI's ChatGPT. Given the immense potential of LLMs to solve a wide range of tasks, prompt engineering can save significant time and facilitate the development of impressive applications. It holds the key to unleashing the full capabilities of these huge models, transforming how we interact with and benefit from them. For more information, see Prompt engineering techniques.
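To make the Chat Completion API described above concrete, here is a minimal sketch that calls it over plain REST with HttpClient from C#; the article's chatbot does the equivalent in Python via the OpenAI library. The endpoint, deployment name, API version, and key below are placeholders you would replace with your own values.

using System;
using System.Net.Http;
using System.Net.Http.Json;
using System.Text.Json;
using System.Threading.Tasks;

class ChatCompletionDemo
{
    static async Task Main()
    {
        // Placeholder values: use your own Azure OpenAI endpoint, deployment and key.
        var endpoint = "https://my-openai-resource.openai.azure.com";
        var deployment = "gpt-35-turbo";
        var apiVersion = "2023-05-15";
        var apiKey = Environment.GetEnvironmentVariable("AZURE_OPENAI_KEY");

        using var http = new HttpClient();
        http.DefaultRequestHeaders.Add("api-key", apiKey);

        // Chat Completion request body: a system prompt plus one user message.
        var request = new
        {
            messages = new object[]
            {
                new { role = "system", content = "You are the Magic 8 Ball. Answer briefly." },
                new { role = "user", content = "Will it rain tomorrow?" }
            },
            temperature = 0.9
        };

        var url = $"{endpoint}/openai/deployments/{deployment}/chat/completions?api-version={apiVersion}";
        var response = await http.PostAsJsonAsync(url, request);
        response.EnsureSuccessStatusCode();

        // The reply text lives at choices[0].message.content in the JSON payload.
        using var json = JsonDocument.Parse(await response.Content.ReadAsStringAsync());
        var reply = json.RootElement.GetProperty("choices")[0]
                        .GetProperty("message").GetProperty("content").GetString();
        Console.WriteLine(reply);
    }
}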
Deploy Terraform modules

Before deploying the Terraform modules in the terraform folder, specify a value for the following variables in the terraform.tfvars variable definitions file:

name_prefix = "magic8ball"
domain = "contoso.com"
subdomain = "magic"
namespace = "magic8ball"
service_account_name = "magic8ball-sa"
ssh_public_key = "XXXXXXX"
vm_enabled = true
location = "westeurope"
admin_group_object_ids = ["XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX"]

Description

- name_prefix: specifies a prefix for all the Azure resources.
- domain: specifies the domain part (e.g., subdomain.domain) of the hostname of the ingress object used to expose the chatbot via the NGINX Ingress Controller.
- subdomain: specifies the subdomain part of the hostname of the ingress object used to expose the chatbot via the NGINX Ingress Controller.
- namespace: specifies the namespace of the workload application that accesses the Azure OpenAI Service.
- service_account_name: specifies the name of the service account of the workload application that accesses the Azure OpenAI Service.
- ssh_public_key: specifies the SSH public key used for the AKS nodes and the jump-box virtual machine.
- vm_enabled: a boolean value that specifies whether to deploy a jump-box virtual machine in the same virtual network as the AKS cluster.
- location: specifies the region (e.g., westeurope) where the Azure resources are deployed.
- admin_group_object_ids: when deploying an AKS cluster with Azure AD and Azure RBAC integration, this array parameter contains the list of Azure AD group object IDs that will have the admin role on the cluster.

We suggest reading sensitive configuration data such as passwords or SSH keys from a pre-existing Azure Key Vault resource. For more information, see Referencing Azure Key Vault secrets in Terraform.

OpenAI Terraform Module

The following snippet shows the code of the openai.tf Terraform module used to deploy the Azure OpenAI Service.
\n resource \"azurerm_cognitive_account\" \"openai\" {\n name = var.name\n location = var.location\n resource_group_name = var.resource_group_name\n kind = \"OpenAI\"\n custom_subdomain_name = var.custom_subdomain_name\n sku_name = var.sku_name\n public_network_access_enabled = var.public_network_access_enabled\n tags = var.tags\n\n identity {\n type = \"SystemAssigned\"\n }\n\n lifecycle {\n ignore_changes = [\n tags\n ]\n }\n}\n\nresource \"azurerm_cognitive_deployment\" \"deployment\" {\n for_each = {for deployment in var.deployments: deployment.name => deployment}\n\n name = each.key\n cognitive_account_id = azurerm_cognitive_account.openai.id\n\n model {\n format = \"OpenAI\"\n name = each.value.model.name\n version = each.value.model.version\n }\n\n scale {\n type = \"Standard\"\n }\n}\n\nresource \"azurerm_monitor_diagnostic_setting\" \"settings\" {\n name = \"DiagnosticsSettings\"\n target_resource_id = azurerm_cognitive_account.openai.id\n log_analytics_workspace_id = var.log_analytics_workspace_id\n\n enabled_log {\n category = \"Audit\"\n\n retention_policy {\n enabled = true\n days = var.log_analytics_retention_days\n }\n }\n\n enabled_log {\n category = \"RequestResponse\"\n\n retention_policy {\n enabled = true\n days = var.log_analytics_retention_days\n }\n }\n\n enabled_log {\n category = \"Trace\"\n\n retention_policy {\n enabled = true\n days = var.log_analytics_retention_days\n }\n }\n\n metric {\n category = \"AllMetrics\"\n\n retention_policy {\n enabled = true\n days = var.log_analytics_retention_days\n }\n }\n} \n Azure Cognitive Services use custom subdomain names for each resource created through the Azure portal, Azure Cloud Shell, Azure CLI, Bicep, Azure Resource Manager (ARM), or Terraform. Unlike regional endpoints, which were common for all customers in a specific Azure region, custom subdomain names are unique to the resource. Custom subdomain names are required to enable features like Azure Active Directory (Azure AD) for authentication. In our case, we need to specify a custom subdomain for our Azure OpenAI Service as our chatbot application will use an Azure AD security token to access it. By default, the  main.tf  module sets the value of the  custom_subdomain_name  parameter to the lowercase name of the Azure OpenAI resource. For more information on custom subdomains, see Custom subdomain names for Cognitive Services. \n This terraform module allows you to pass an array containing the definition of one or more model deployments in the  deployments  parameter. For more information on model deployments, see Create a resource and deploy a model using Azure OpenAI. \n Alternatively, you can use the Terraform module for deploying Azure OpenAI Service. to deploy an Azure OpenAI Service resource. 
Private Endpoints

The main.tf module creates Azure Private Endpoints and Azure Private DNS Zones for each of the following resources:

- Azure OpenAI Service
- Azure Container Registry
- Azure Key Vault
- Azure Storage Account

In particular, it creates an Azure Private Endpoint and an Azure Private DNS Zone for the Azure OpenAI Service, as shown in the following code snippet:

module "openai_private_dns_zone" {
  source              = "./modules/private_dns_zone"
  name                = "privatelink.openai.azure.com"
  resource_group_name = azurerm_resource_group.rg.name
  tags                = var.tags
  virtual_networks_to_link = {
    (module.virtual_network.name) = {
      subscription_id     = data.azurerm_client_config.current.subscription_id
      resource_group_name = azurerm_resource_group.rg.name
    }
  }
}

module "openai_private_endpoint" {
  source                         = "./modules/private_endpoint"
  name                           = "${module.openai.name}PrivateEndpoint"
  location                       = var.location
  resource_group_name            = azurerm_resource_group.rg.name
  subnet_id                      = module.virtual_network.subnet_ids[var.vm_subnet_name]
  tags                           = var.tags
  private_connection_resource_id = module.openai.id
  is_manual_connection           = false
  subresource_name               = "account"
  private_dns_zone_group_name    = "AcrPrivateDnsZoneGroup"
  private_dns_zone_group_ids     = [module.acr_private_dns_zone.id]
}

Below you can read the code of the private_dns_zone and private_endpoint modules used, respectively, to create the Azure Private DNS Zones and the Azure Private Endpoints.

private_dns_zone

resource "azurerm_private_dns_zone" "private_dns_zone" {
  name                = var.name
  resource_group_name = var.resource_group_name
  tags                = var.tags

  lifecycle {
    ignore_changes = [
      tags
    ]
  }
}

resource "azurerm_private_dns_zone_virtual_network_link" "link" {
  for_each = var.virtual_networks_to_link

  name                  = "link_to_${lower(basename(each.key))}"
  resource_group_name   = var.resource_group_name
  private_dns_zone_name = azurerm_private_dns_zone.private_dns_zone.name
  virtual_network_id    = "/subscriptions/${each.value.subscription_id}/resourceGroups/${each.value.resource_group_name}/providers/Microsoft.Network/virtualNetworks/${each.key}"

  lifecycle {
    ignore_changes = [
      tags
    ]
  }
}

private_endpoint

resource "azurerm_private_endpoint" "private_endpoint" {
  name                = var.name
  location            = var.location
  resource_group_name = var.resource_group_name
  subnet_id           = var.subnet_id
  tags                = var.tags

  private_service_connection {
    name                           = "${var.name}Connection"
    private_connection_resource_id = var.private_connection_resource_id
    is_manual_connection           = var.is_manual_connection
    subresource_names              = try([var.subresource_name], null)
    request_message                = try(var.request_message, null)
  }

  private_dns_zone_group {
    name                 = var.private_dns_zone_group_name
    private_dns_zone_ids = var.private_dns_zone_group_ids
  }

  lifecycle {
    ignore_changes = [
      tags
    ]
  }
}

AKS Workload User-Defined Managed Identity

The following code snippet from the main.tf Terraform module creates the user-defined managed identity used by the chatbot to acquire a security token from Azure Active Directory via Azure AD workload identity.
\"${random_string.prefix.result}${var.workload_managed_identity_name}\" : \"${var.name_prefix}${var.workload_managed_identity_name}\"\n resource_group_name = azurerm_resource_group.rg.name\n location = var.location\n tags = var.tags\n\n lifecycle {\n ignore_changes = [\n tags\n ]\n }\n}\n\nresource \"azurerm_role_assignment\" \"cognitive_services_user_assignment\" {\n scope = module.openai.id\n role_definition_name = \"Cognitive Services User\"\n principal_id = azurerm_user_assigned_identity.aks_workload_identity.principal_id\n skip_service_principal_aad_check = true\n}\n\nresource \"azurerm_federated_identity_credential\" \"federated_identity_credential\" {\n name = \"${title(var.namespace)}FederatedIdentity\"\n resource_group_name = azurerm_resource_group.rg.name\n audience = [\"api://AzureADTokenExchange\"]\n issuer = module.aks_cluster.oidc_issuer_url\n parent_id = azurerm_user_assigned_identity.aks_workload_identity.id\n subject = \"system:serviceaccount:${var.namespace}:${var.service_account_name}\"\n} \n The above code snippets performs the following steps: \n   \n \n Creates a new user-defined managed identity. \n Assign the new managed identity to the Cognitive Services User role with the resource group as a scope. \n Federate the managed identity with the service account used by the chatbot. The following information are necessary to create the federated identity credentials:\n \n The Kubernetes service account name. \n The Kubernetes namespace that will host the chatbot application. \n The URL of the OpenID Connect (OIDC) token issuer endpoint for Azure AD workload identity \n \n \n \n For more information, see the following resources: \n \n How to configure Azure OpenAI Service with managed identities \n Use Azure AD workload identity with Azure Kubernetes Service (AKS) \n \n   \n Validate the deployment \n Open the Azure Portal, and navigate to the resource group. Open the Azure Open AI Service resource, navigate to  Keys and Endpoint , and check that the endpoint contains a custom subdomain rather than the regional Cognitive Services endpoint. \n   \n \n   \n   \n Open to the  <Prefix>WorkloadManagedIdentity  managed identity, navigate to the  Federated credentials , and verify that the federated identity credentials for the  magic8ball-sa  service account were created correctly, as shown in the following picture. \n   \n \n   \n Use Azure AD workload identity with Azure Kubernetes Service (AKS) \n Workloads deployed on an Azure Kubernetes Services (AKS) cluster require Azure Active Directory (Azure AD) application credentials or managed identities to access Azure AD-protected resources, such as Azure Key Vault and Microsoft Graph. Azure AD workload identity integrates with the capabilities native to Kubernetes to federate with external identity providers. \n Azure AD workload identity uses Service Account Token Volume Projection to enable pods to use a Kubernetes service account. When enabled, the AKS OIDC Issuer issues a service account security token to a workload, and OIDC federation enables the application to access Azure resources securely with Azure AD based on annotated service accounts. \n Azure AD workload identity works well with the Azure Identity client libraries and the Microsoft Authentication Library (MSAL) collection if you use a registered application instead of a managed identity. Your workload can use any of these libraries to authenticate and access Azure cloud resources seamlessly. 
For more information, see the following resources:

- Azure Workload Identity open-source project
- Use an Azure AD workload identity on Azure Kubernetes Service (AKS)
- Deploy and configure workload identity on an Azure Kubernetes Service (AKS) cluster
- Modernize application authentication with workload identity sidecar
- Tutorial: Use a workload identity with an application on Azure Kubernetes Service (AKS)
- Workload identity federation
- Use Azure AD Workload Identity for Kubernetes with a User-Assigned Managed Identity
- Use Azure AD workload identity for Kubernetes with an Azure AD registered application
- Azure Managed Identities with Workload Identity Federation
- Azure AD workload identity federation with Kubernetes
- Azure Active Directory Workload Identity Federation with external OIDC Identity Providers
- Minimal Azure AD Workload identity federation

Azure Identity client libraries

With the Azure Identity client libraries, you can choose one of the following approaches:

- Use DefaultAzureCredential, which will attempt to use the WorkloadIdentityCredential.
- Create a ChainedTokenCredential instance that includes WorkloadIdentityCredential.
- Use WorkloadIdentityCredential directly.

The following table provides the minimum package version required for each language's client library.

Language   | Library         | Minimum Version | Example
.NET       | Azure.Identity  | 1.9.0           | Link
Go         | azidentity      | 1.3.0           | Link
Java       | azure-identity  | 1.9.0           | Link
JavaScript | @azure/identity | 3.2.0           | Link
Python     | azure-identity  | 1.13.0          | Link

Microsoft Authentication Library (MSAL)

The following client libraries are the minimum versions required:

Language   | Library                                      | Image                                             | Example | Has Windows
.NET       | microsoft-authentication-library-for-dotnet | ghcr.io/azure/azure-workload-identity/msal-net    | Link    | Yes
Go         | microsoft-authentication-library-for-go     | ghcr.io/azure/azure-workload-identity/msal-go     | Link    | Yes
Java       | microsoft-authentication-library-for-java   | ghcr.io/azure/azure-workload-identity/msal-java   | Link    | No
JavaScript | microsoft-authentication-library-for-js     | ghcr.io/azure/azure-workload-identity/msal-node   | Link    | No
Python     | microsoft-authentication-library-for-python | ghcr.io/azure/azure-workload-identity/msal-python | Link    | No

Deployment Script

The sample makes use of a Deployment Script to run the install-nginx-via-helm-and-create-sa.sh Bash script, which creates the namespace and service account for the sample application and installs the following packages to the AKS cluster via Helm:

- NGINX Ingress Controller
- Cert-Manager
- Prometheus

This sample uses the NGINX Ingress Controller to expose the chatbot to the public internet.
\n # Install kubectl\naz aks install-cli --only-show-errors\n\n# Get AKS credentials\naz aks get-credentials \\\n --admin \\\n --name $clusterName \\\n --resource-group $resourceGroupName \\\n --subscription $subscriptionId \\\n --only-show-errors\n\n# Check if the cluster is private or not\nprivate=$(az aks show --name $clusterName \\\n --resource-group $resourceGroupName \\\n --subscription $subscriptionId \\\n --query apiServerAccessProfile.enablePrivateCluster \\\n --output tsv)\n\n# Install Helm\ncurl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 -o get_helm.sh -s\nchmod 700 get_helm.sh\n./get_helm.sh &>/dev/null\n\n# Add Helm repos\nhelm repo add prometheus-community https://prometheus-community.github.io/helm-charts\nhelm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx\nhelm repo add jetstack https://charts.jetstack.io\n\n# Update Helm repos\nhelm repo update\n\nif [[ $private == 'true' ]]; then\n # Log whether the cluster is public or private\n echo \"$clusterName AKS cluster is public\"\n\n # Install Prometheus\n command=\"helm install prometheus prometheus-community/kube-prometheus-stack \\\n --create-namespace \\\n --namespace prometheus \\\n --set prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues=false \\\n --set prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false\"\n\n az aks command invoke \\\n --name $clusterName \\\n --resource-group $resourceGroupName \\\n --subscription $subscriptionId \\\n --command \"$command\"\n\n # Install NGINX ingress controller using the internal load balancer\n command=\"helm install nginx-ingress ingress-nginx/ingress-nginx \\\n --create-namespace \\\n --namespace ingress-basic \\\n --set controller.replicaCount=3 \\\n --set controller.nodeSelector.\\\"kubernetes\\.io/os\\\"=linux \\\n --set defaultBackend.nodeSelector.\\\"kubernetes\\.io/os\\\"=linux \\\n --set controller.metrics.enabled=true \\\n --set controller.metrics.serviceMonitor.enabled=true \\\n --set controller.metrics.serviceMonitor.additionalLabels.release=\\\"prometheus\\\" \\\n --set controller.service.annotations.\\\"service\\.beta\\.kubernetes\\.io/azure-load-balancer-health-probe-request-path\\\"=/healthz\"\n\n az aks command invoke \\\n --name $clusterName \\\n --resource-group $resourceGroupName \\\n --subscription $subscriptionId \\\n --command \"$command\"\n\n # Install certificate manager\n command=\"helm install cert-manager jetstack/cert-manager \\\n --create-namespace \\\n --namespace cert-manager \\\n --set installCRDs=true \\\n --set nodeSelector.\\\"kubernetes\\.io/os\\\"=linux\"\n\n az aks command invoke \\\n --name $clusterName \\\n --resource-group $resourceGroupName \\\n --subscription $subscriptionId \\\n --command \"$command\"\n\n # Create cluster issuer\n command=\"cat <<EOF | kubectl apply -f -\napiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n name: letsencrypt-nginx\nspec:\n acme:\n server: https://acme-v02.api.letsencrypt.org/directory\n email: $email\n privateKeySecretRef:\n name: letsencrypt\n solvers:\n - http01:\n ingress:\n class: nginx\n podTemplate:\n spec:\n nodeSelector:\n \"kubernetes.io/os\": linux\nEOF\"\n\n az aks command invoke \\\n --name $clusterName \\\n --resource-group $resourceGroupName \\\n --subscription $subscriptionId \\\n --command \"$command\"\n\n # Create workload namespace\n command=\"kubectl create namespace $namespace\"\n\n az aks command invoke \\\n --name $clusterName \\\n --resource-group $resourceGroupName \\\n --subscription 
$subscriptionId \\\n --command \"$command\"\n\n # Create service account\n command=\"cat <<EOF | kubectl apply -f -\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n annotations:\n azure.workload.identity/client-id: $workloadManagedIdentityClientId\n azure.workload.identity/tenant-id: $tenantId\n labels:\n azure.workload.identity/use: \"true\"\n name: $serviceAccountName\n namespace: $namespace\nEOF\"\n\n az aks command invoke \\\n --name $clusterName \\\n --resource-group $resourceGroupName \\\n --subscription $subscriptionId \\\n --command \"$command\"\n\nelse\n # Log whether the cluster is public or private\n echo \"$clusterName AKS cluster is private\"\n\n # Install Prometheus\n helm install prometheus prometheus-community/kube-prometheus-stack \\\n --create-namespace \\\n --namespace prometheus \\\n --set prometheus.prometheusSpec.podMonitorSelectorNilUsesHelmValues=false \\\n --set prometheus.prometheusSpec.serviceMonitorSelectorNilUsesHelmValues=false\n\n # Install NGINX ingress controller using the internal load balancer\n helm install nginx-ingress ingress-nginx/ingress-nginx \\\n --create-namespace \\\n --namespace ingress-basic \\\n --set controller.replicaCount=3 \\\n --set controller.nodeSelector.\"kubernetes\\.io/os\"=linux \\\n --set defaultBackend.nodeSelector.\"kubernetes\\.io/os\"=linux \\\n --set controller.metrics.enabled=true \\\n --set controller.metrics.serviceMonitor.enabled=true \\\n --set controller.metrics.serviceMonitor.additionalLabels.release=\"prometheus\" \\\n --set controller.service.annotations.\"service\\.beta\\.kubernetes\\.io/azure-load-balancer-health-probe-request-path\"=/healthz\n\n helm install $nginxReleaseName $nginxRepoName/$nginxChartName \\\n --create-namespace \\\n --namespace $nginxNamespace\n\n # Install certificate manager\n helm install cert-manager jetstack/cert-manager \\\n --create-namespace \\\n --namespace cert-manager \\\n --set installCRDs=true \\\n --set nodeSelector.\"kubernetes\\.io/os\"=linux\n\n # Create cluster issuer\n cat <<EOF | kubectl apply -f -\napiVersion: cert-manager.io/v1\nkind: ClusterIssuer\nmetadata:\n name: letsencrypt-nginx\nspec:\n acme:\n server: https://acme-v02.api.letsencrypt.org/directory\n email: $email\n privateKeySecretRef:\n name: letsencrypt\n solvers:\n - http01:\n ingress:\n class: nginx\n podTemplate:\n spec:\n nodeSelector:\n \"kubernetes.io/os\": linux\nEOF\n\n # Create workload namespace\n kubectl create namespace $namespace\n\n # Create service account\n cat <<EOF | kubectl apply -f -\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n annotations:\n azure.workload.identity/client-id: $workloadManagedIdentityClientId\n azure.workload.identity/tenant-id: $tenantId\n labels:\n azure.workload.identity/use: \"true\"\n name: $serviceAccountName\n namespace: $namespace\nEOF\n\nfi\n\n# Create output as JSON file\necho '{}' |\n jq --arg x $namespace '.namespace=$x' |\n jq --arg x $serviceAccountName '.serviceAccountName=$x' |\n jq --arg x 'prometheus' '.prometheus=$x' |\n jq --arg x 'cert-manager' '.certManager=$x' |\n jq --arg x 'ingress-basic' '.nginxIngressController=$x' >$AZ_SCRIPTS_OUTPUT_PATH \n The  install-nginx-via-helm-and-create-sa.sh  Bash script can run on a public AKS cluster or on a private AKS cluster using the az aks command invoke. For more information, see Use command invoke to access a private Azure Kubernetes Service (AKS) cluster. 
\n The  install-nginx-via-helm-and-create-sa.sh  Bash script returns the following outputs to the deployment script: \n \n Namespace hosting the chatbot sample. You can change the default  magic8ball  namespace by assigning a different value to the  namespace  variable in the  terraform.tfvars  file. \n Service account name \n Prometheus namespace \n Cert-manager namespace \n NGINX ingress controller namespace \n \n   \n Chatbot Application \n The chatbot is a Python application inspired by the sample code in the It’s Time To Create A Private ChatGPT For Yourself Today arctiel. The application is contained in a single file called  app.py . The application makes use of the following libraries: \n   \n \n OpenAPI: The OpenAI Python library provides convenient access to the OpenAI API from applications written in the Python language. It includes a pre-defined set of classes for API resources that initialize themselves dynamically from API responses which makes it compatible with a wide range of versions of the OpenAI API. You can find usage examples for the OpenAI Python library in our API reference and the OpenAI Cookbook. \n Azure Identity: The Azure Identity library provides Azure Active Directory (Azure AD) token authentication support across the Azure SDK. It provides a set of TokenCredential implementations, which can be used to construct Azure SDK clients that support Azure AD token authentication. \n Streamlit: Streamlit is an open-source Python library that makes it easy to create and share beautiful, custom web apps for machine learning and data science. In just a few minutes you can build and deploy powerful data apps. For more information, see Streamlit documentation \n Streamlit-chat: a Streamlit component that provides a configurable user interface for chatbot applications. \n Dotenv: Python-dotenv reads key-value pairs from a .env file and can set them as environment variables. It helps in the development of applications following the 12-factor principles. \n \n   \n The  requirements.txt  file under the  scripts  folder contains the list of packages used by the  app.py  application that you can restore using the following command: \n pip install -r requirements.txt --upgrade \n The following table contains the code of the  app.py  chatbot: \n # Import packages\nimport os\nimport sys\nimport time\nimport openai\nimport logging\nimport streamlit as st\nfrom streamlit_chat import message\nfrom azure.identity import DefaultAzureCredential\nfrom dotenv import load_dotenv\nfrom dotenv import dotenv_values\n\n# Load environment variables from .env file\nif os.path.exists(\".env\"):\n load_dotenv(override=True)\n config = dotenv_values(\".env\")\n\n# Read environment variables\nassistan_profile = \"\"\"\nYou are the infamous Magic 8 Ball. You need to randomly reply to any question with one of the following answers:\n\n- It is certain.\n- It is decidedly so.\n- Without a doubt.\n- Yes definitely.\n- You may rely on it.\n- As I see it, yes.\n- Most likely.\n- Outlook good.\n- Yes.\n- Signs point to yes.\n- Reply hazy, try again.\n- Ask again later.\n- Better not tell you now.\n- Cannot predict now.\n- Concentrate and ask again.\n- Don't count on it.\n- My reply is no.\n- My sources say no.\n- Outlook not so good.\n- Very doubtful.\n\nAdd a short comment in a pirate style at the end! Follow your heart and be creative! 
\nFor mor information, see https://en.wikipedia.org/wiki/Magic_8_Ball\n\"\"\"\ntitle = os.environ.get(\"TITLE\", \"Magic 8 Ball\")\ntext_input_label = os.environ.get(\"TEXT_INPUT_LABEL\", \"Pose your question and cross your fingers!\")\nimage_file_name = os.environ.get(\"IMAGE_FILE_NAME\", \"magic8ball.png\")\nimage_width = int(os.environ.get(\"IMAGE_WIDTH\", 80))\ntemperature = float(os.environ.get(\"TEMPERATURE\", 0.9))\nsystem = os.environ.get(\"SYSTEM\", assistan_profile)\napi_base = os.getenv(\"AZURE_OPENAI_BASE\")\napi_key = os.getenv(\"AZURE_OPENAI_KEY\")\napi_type = os.environ.get(\"AZURE_OPENAI_TYPE\", \"azure\")\napi_version = os.environ.get(\"AZURE_OPENAI_VERSION\", \"2023-05-15\")\nengine = os.getenv(\"AZURE_OPENAI_DEPLOYMENT\")\nmodel = os.getenv(\"AZURE_OPENAI_MODEL\")\n\n# Configure OpenAI\nopenai.api_type = api_type\nopenai.api_version = api_version\nopenai.api_base = api_base \n\n# Set default Azure credential\ndefault_credential = DefaultAzureCredential() if openai.api_type == \"azure_ad\" else None\n\n# Configure a logger\nlogging.basicConfig(stream = sys.stdout, \n format = '[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s',\n level = logging.INFO)\nlogger = logging.getLogger(__name__)\n\n# Log variables\nlogger.info(f\"title: {title}\")\nlogger.info(f\"text_input_label: {text_input_label}\")\nlogger.info(f\"image_file_name: {image_file_name}\")\nlogger.info(f\"image_width: {image_width}\")\nlogger.info(f\"temperature: {temperature}\")\nlogger.info(f\"system: {system}\")\nlogger.info(f\"api_base: {api_base}\")\nlogger.info(f\"api_key: {api_key}\")\nlogger.info(f\"api_type: {api_type}\")\nlogger.info(f\"api_version: {api_version}\")\nlogger.info(f\"engine: {engine}\")\nlogger.info(f\"model: {model}\")\n\n# Authenticate to Azure OpenAI\nif openai.api_type == \"azure\":\n openai.api_key = api_key\nelif openai.api_type == \"azure_ad\":\n openai_token = default_credential.get_token(\"https://cognitiveservices.azure.com/.default\")\n openai.api_key = openai_token.token\n if 'openai_token' not in st.session_state:\n st.session_state['openai_token'] = openai_token\nelse:\n logger.error(\"Invalid API type. Please set the AZURE_OPENAI_TYPE environment variable to azure or azure_ad.\")\n raise ValueError(\"Invalid API type. 
Please set the AZURE_OPENAI_TYPE environment variable to azure or azure_ad.\")\n\n# Customize Streamlit UI using CSS\nst.markdown(\"\"\"\n<style>\n\ndiv.stButton > button:first-child {\n background-color: #eb5424;\n color: white;\n font-size: 20px;\n font-weight: bold;\n border-radius: 0.5rem;\n padding: 0.5rem 1rem;\n border: none;\n box-shadow: 0 0.5rem 1rem rgba(0,0,0,0.15);\n width: 300 px;\n height: 42px;\n transition: all 0.2s ease-in-out;\n} \n\ndiv.stButton > button:first-child:hover {\n transform: translateY(-3px);\n box-shadow: 0 1rem 2rem rgba(0,0,0,0.15);\n}\n\ndiv.stButton > button:first-child:active {\n transform: translateY(-1px);\n box-shadow: 0 0.5rem 1rem rgba(0,0,0,0.15);\n}\n\ndiv.stButton > button:focus:not(:focus-visible) {\n color: #FFFFFF;\n}\n\n@media only screen and (min-width: 768px) {\n /* For desktop: */\n div {\n font-family: 'Roboto', sans-serif;\n }\n\n div.stButton > button:first-child {\n background-color: #eb5424;\n color: white;\n font-size: 20px;\n font-weight: bold;\n border-radius: 0.5rem;\n padding: 0.5rem 1rem;\n border: none;\n box-shadow: 0 0.5rem 1rem rgba(0,0,0,0.15);\n width: 300 px;\n height: 42px;\n transition: all 0.2s ease-in-out;\n position: relative;\n bottom: -32px;\n right: 0px;\n } \n\n div.stButton > button:first-child:hover {\n transform: translateY(-3px);\n box-shadow: 0 1rem 2rem rgba(0,0,0,0.15);\n }\n\n div.stButton > button:first-child:active {\n transform: translateY(-1px);\n box-shadow: 0 0.5rem 1rem rgba(0,0,0,0.15);\n }\n\n div.stButton > button:focus:not(:focus-visible) {\n color: #FFFFFF;\n }\n\n input {\n border-radius: 0.5rem;\n padding: 0.5rem 1rem;\n border: none;\n box-shadow: 0 0.5rem 1rem rgba(0,0,0,0.15);\n transition: all 0.2s ease-in-out;\n height: 40px;\n }\n}\n</style>\n\"\"\", unsafe_allow_html=True)\n\n# Initialize Streamlit session state\nif 'prompts' not in st.session_state:\n st.session_state['prompts'] = [{\"role\": \"system\", \"content\": system}]\n\nif 'generated' not in st.session_state:\n st.session_state['generated'] = []\n\nif 'past' not in st.session_state:\n st.session_state['past'] = []\n\n# Refresh the OpenAI security token every 45 minutes\ndef refresh_openai_token():\n if st.session_state['openai_token'].expires_on < int(time.time()) - 45 * 60:\n st.session_state['openai_token'] = default_credential.get_token(\"https://cognitiveservices.azure.com/.default\")\n openai.api_key = st.session_state['openai_token'].token\n\n# Send user prompt to Azure OpenAI \ndef generate_response(prompt):\n try:\n st.session_state['prompts'].append({\"role\": \"user\", \"content\": prompt})\n\n if openai.api_type == \"azure_ad\":\n refresh_openai_token()\n\n completion = openai.ChatCompletion.create(\n engine = engine,\n model = model,\n messages = st.session_state['prompts'],\n temperature = temperature,\n )\n \n message = completion.choices[0].message.content\n return message\n except Exception as e:\n logging.exception(f\"Exception in generate_response: {e}\")\n\n# Reset Streamlit session state to start a new chat from scratch\ndef new_click():\n st.session_state['prompts'] = [{\"role\": \"system\", \"content\": system}]\n st.session_state['past'] = []\n st.session_state['generated'] = []\n st.session_state['user'] = \"\"\n\n# Handle on_change event for user input\ndef user_change():\n # Avoid handling the event twice when clicking the Send button\n chat_input = st.session_state['user']\n st.session_state['user'] = \"\"\n if (chat_input == '' or\n (len(st.session_state['past']) > 0 and chat_input == 
st.session_state['past'][-1])):\n return\n \n # Generate response invoking Azure OpenAI LLM\n if chat_input != '':\n output = generate_response(chat_input)\n \n # store the output\n st.session_state['past'].append(chat_input)\n st.session_state['generated'].append(output)\n st.session_state['prompts'].append({\"role\": \"assistant\", \"content\": output})\n\n# Create a 2-column layout. Note: Streamlit columns do not properly render on mobile devices.\n# For more information, see https://github.com/streamlit/streamlit/issues/5003\ncol1, col2 = st.columns([1, 7])\n\n# Display the robot image\nwith col1:\n st.image(image = os.path.join(\"images\", image_file_name), width = image_width)\n\n# Display the title\nwith col2:\n st.title(title)\n\n# Create a 3-column layout. Note: Streamlit columns do not properly render on mobile devices.\n# For more information, see https://github.com/streamlit/streamlit/issues/5003\ncol3, col4, col5 = st.columns([7, 1, 1])\n\n# Create text input in column 1\nwith col3:\n user_input = st.text_input(text_input_label, key = \"user\", on_change = user_change)\n\n# Create send button in column 2\nwith col4:\n st.button(label = \"Send\")\n\n# Create new button in column 3\nwith col5:\n st.button(label = \"New\", on_click = new_click)\n\n# Display the chat history in two separate tabs\n# - normal: display the chat history as a list of messages using the streamlit_chat message() function \n# - rich: display the chat history as a list of messages using the Streamlit markdown() function\nif st.session_state['generated']:\n tab1, tab2 = st.tabs([\"normal\", \"rich\"])\n with tab1:\n for i in range(len(st.session_state['generated']) - 1, -1, -1):\n message(st.session_state['past'][i], is_user = True, key = str(i) + '_user', avatar_style = \"fun-emoji\", seed = \"Nala\")\n message(st.session_state['generated'][i], key = str(i), avatar_style = \"bottts\", seed = \"Fluffy\")\n with tab2:\n for i in range(len(st.session_state['generated']) - 1, -1, -1):\n st.markdown(st.session_state['past'][i])\n st.markdown(st.session_state['generated'][i]) \n The application makes use of an internal cascading style sheet (CSS) inside an st.markdown element to add a unique style to the Streamlit chatbot for mobile and desktop devices. For more information on how to tweak the user interface of a Streamlit application, see 3 Tips to Customize your Streamlit App. \n streamlit run app.py \n Working with the ChatGPT and GPT-4 models \n The  generate_response  function creates and sends the prompt to the Chat Completion API of the ChatGPT model. \n def generate_response(prompt):\n try:\n st.session_state['prompts'].append({\"role\": \"user\", \"content\": prompt})\n\n if openai.api_type == \"azure_ad\":\n refresh_openai_token()\n\n completion = openai.ChatCompletion.create(\n engine = engine,\n model = model,\n messages = st.session_state['prompts'],\n temperature = temperature,\n )\n \n message = completion.choices[0].message.content\n return message\n except Exception as e:\n logging.exception(f\"Exception in generate_response: {e}\") \n OpenAI trained the ChatGPT and GPT-4 models to accept input formatted as a conversation. The messages parameter takes an array of dictionaries with a conversation organized by role or message: system, user, and assistant. 
The format of a basic Chat Completion is as follows: \n {\"role\": \"system\", \"content\": \"Provide some context and/or instructions to the model\"},\n{\"role\": \"user\", \"content\": \"The users messages goes here\"},\n{\"role\": \"assistant\", \"content\": \"The response message goes here.\"} \n The  system  role also known as the system message is included at the beginning of the array. This message provides the initial instructions to the model. You can provide various information in the system role including: \n   \n \n A brief description of the assistant \n Personality traits of the assistant \n Instructions or rules you would like the assistant to follow \n Data or information needed for the model, such as relevant questions from an FAQ \n You can customize the system role for your use case or just include basic instructions. \n \n   \n The  system  role or message is optional, but it's recommended to at least include a basic one to get the best results. The  user  role or message represents an input or inquiry from the user, while the  assistant  message corresponds to the response generated by the GPT API. This dialog exchange aims to simulate a human-like conversation, where the user message initiates the interaction and the assistant message provides a relevant and informative answer. This context helps the chat model generate a more appropriate response later on. The last user message refers to the prompt currently requested. For more information, see Learn how to work with the ChatGPT and GPT-4 models. \n   \n Application Configuration \n Make sure to provide a value for the following environment variables when testing the  app.py  Python app locally, for example in Visual Studio Code. You can eventually define environment variables in a  .env  file in the same folder as the  app.py  file. \n   \n \n AZURE_OPENAI_TYPE : specify  azure  if you want to let the application use the API key to authenticate against OpenAI. In this case, make sure to provide the Key in the  AZURE_OPENAI_KEY  environment variable. If you want to authenticate using an Azure AD security token, you need to specify  azure_ad  as a value. In this case, don't need to provide any value in the  AZURE_OPENAI_KEY  environment variable. \n AZURE_OPENAI_BASE : the URL of your Azure OpenAI resource. If you use the API key to authenticate against OpenAI, you can specify the regional endpoint of your Azure OpenAI Service (e.g., https://eastus.api.cognitive.microsoft.com/). If you instead plan to use Azure AD security tokens for authentication, you need to deploy your Azure OpenAI Service with a subdomain and specify the resource-specific endpoint url (e.g., https://myopenai.openai.azure.com/). \n AZURE_OPENAI_KEY : the key of your Azure OpenAI resource. \n AZURE_OPENAI_DEPLOYMENT : the name of the ChatGPT deployment used by your Azure OpenAI resource, for example  gpt-35-turbo . \n AZURE_OPENAI_MODEL : the name of the ChatGPT model used by your Azure OpenAI resource, for example  gpt-35-turbo . \n TITLE : the title of the Streamlit app. \n TEMPERATURE : the temperature used by the OpenAI API to generate the response. \n SYSTEM : give the model instructions about how it should behave and any context it should reference when generating a response. Used to describe the assistant's personality. \n \n   \n When deploying the application to Azure Kubernetes Service (AKS) these values are provided in a Kubernetes ConfigMap. For more information, see the next section. 
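As a concrete starting point for local testing, a .env file might look like the following sketch; every value is a placeholder and must match your own Azure OpenAI resource and deployment:

AZURE_OPENAI_TYPE=azure
AZURE_OPENAI_BASE=https://eastus.api.cognitive.microsoft.com/
AZURE_OPENAI_KEY=<your-azure-openai-key>
AZURE_OPENAI_DEPLOYMENT=gpt-35-turbo
AZURE_OPENAI_MODEL=gpt-35-turbo
TITLE=Magic 8 Ball
TEMPERATURE=0.9

If you set AZURE_OPENAI_TYPE to azure_ad instead, omit AZURE_OPENAI_KEY and use the custom-subdomain endpoint of your Azure OpenAI resource in AZURE_OPENAI_BASE.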
OpenAI Library

To use the openai library with Microsoft Azure endpoints, you need to set the api_type, api_base, and api_version in addition to the api_key. The api_type must be set to 'azure' and the other values correspond to the properties of your endpoint. In addition, the deployment name must be passed as the engine parameter. To authenticate to your Azure endpoint with an OpenAI key, set the api_type to azure and pass the key to api_key.

import openai
openai.api_type = "azure"
openai.api_key = "..."
openai.api_base = "https://example-endpoint.openai.azure.com"
openai.api_version = "2023-05-15"

# create a chat completion
chat_completion = openai.ChatCompletion.create(deployment_id="gpt-3.5-turbo", model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello world"}])

# print the completion
print(chat_completion.choices[0].message.content)

For a detailed example of how to use fine-tuning and other operations using Azure endpoints, please check out the following Jupyter notebooks:

Using Azure completions
Using Azure fine-tuning
Using Azure embeddings

To use Azure Active Directory to authenticate to your Azure endpoint, you need to set the api_type to azure_ad and pass the acquired credential token to api_key. The rest of the parameters need to be set as specified in the previous section.

from azure.identity import DefaultAzureCredential
import openai

# Request credential
default_credential = DefaultAzureCredential()
token = default_credential.get_token("https://cognitiveservices.azure.com/.default")

# Setup parameters
openai.api_type = "azure_ad"
openai.api_key = token.token
openai.api_base = "https://example-endpoint.openai.azure.com/"
openai.api_version = "2023-05-15"

# ...

You can use two different authentication methods in the magic8ball chatbot application:

API key: set the AZURE_OPENAI_TYPE environment variable to azure and the AZURE_OPENAI_KEY environment variable to the key of your Azure OpenAI resource. You can use the regional endpoint, such as https://eastus.api.cognitive.microsoft.com/, in the AZURE_OPENAI_BASE environment variable to connect to the Azure OpenAI resource.

Azure Active Directory: set the AZURE_OPENAI_TYPE environment variable to azure_ad and use a service principal or managed identity with the DefaultAzureCredential object to acquire a security token from Azure Active Directory. For more information on the DefaultAzureCredential in Python, see Authenticate Python apps to Azure services by using the Azure SDK for Python. Make sure to assign the Cognitive Services User role to the service principal or managed identity used to authenticate to your Azure OpenAI Service. For more information, see How to configure Azure OpenAI Service with managed identities. If you want to use Azure AD integrated security, you need to create a custom subdomain for your Azure OpenAI resource and use the specific endpoint containing the custom domain, such as https://myopenai.openai.azure.com/ where myopenai is the custom subdomain. If you specify the regional endpoint, you get an error like the following: Subdomain does not map to a resource. Hence, pass the custom domain endpoint in the AZURE_OPENAI_BASE environment variable. In this case, you also need to refresh the security token periodically.
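If you grant that role manually rather than through the scripts below, an illustrative Azure CLI command looks like the following; the principal ID and the Azure OpenAI resource ID are placeholders, and the 07-create-workload-managed-identity.sh script shown later performs the same assignment automatically:

az role assignment create \
  --assignee <managed-identity-principal-id> \
  --role "Cognitive Services User" \
  --scope <azure-openai-resource-id>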
\n \n   \n Build the container image \n You can build the container image using the  Dockerfile  and  01-build-docker-image.sh  in the  scripts  folder. \n Dockefile \n # app/Dockerfile\n\n# # Stage 1 - Install build dependencies\n\n# A Dockerfile must start with a FROM instruction which sets the base image for the container.\n# The Python images come in many flavors, each designed for a specific use case.\n# The python:3.11-slim image is a good base image for most applications.\n# It is a minimal image built on top of Debian Linux and includes only the necessary packages to run Python.\n# The slim image is a good choice because it is small and contains only the packages needed to run Python.\n# For more information, see: \n# * https://hub.docker.com/_/python \n# * https://docs.streamlit.io/knowledge-base/tutorials/deploy/docker\nFROM python:3.11-slim AS builder\n\n# The WORKDIR instruction sets the working directory for any RUN, CMD, ENTRYPOINT, COPY and ADD instructions that follow it in the Dockerfile.\n# If the WORKDIR doesn’t exist, it will be created even if it’s not used in any subsequent Dockerfile instruction.\n# For more information, see: https://docs.docker.com/engine/reference/builder/#workdir\nWORKDIR /app\n\n# Set environment variables. \n# The ENV instruction sets the environment variable <key> to the value <value>.\n# This value will be in the environment of all “descendant” Dockerfile commands and can be replaced inline in many as well.\n# For more information, see: https://docs.docker.com/engine/reference/builder/#env\nENV PYTHONDONTWRITEBYTECODE 1\nENV PYTHONUNBUFFERED 1\n\n# Install git so that we can clone the app code from a remote repo using the RUN instruction.\n# The RUN comand has 2 forms:\n# * RUN <command> (shell form, the command is run in a shell, which by default is /bin/sh -c on Linux or cmd /S /C on Windows)\n# * RUN [\"executable\", \"param1\", \"param2\"] (exec form)\n# The RUN instruction will execute any commands in a new layer on top of the current image and commit the results. \n# The resulting committed image will be used for the next step in the Dockerfile.\n# For more information, see: https://docs.docker.com/engine/reference/builder/#run\nRUN apt-get update && apt-get install -y \\\n build-essential \\\n curl \\\n software-properties-common \\\n git \\\n && rm -rf /var/lib/apt/lists/*\n\n# Create a virtualenv to keep dependencies together\nRUN python -m venv /opt/venv\nENV PATH=\"/opt/venv/bin:$PATH\"\n\n# Clone the requirements.txt which contains dependencies to WORKDIR\n# COPY has two forms:\n# * COPY <src> <dest> (this copies the files from the local machine to the container's own filesystem)\n# * COPY [\"<src>\",... 
\"<dest>\"] (this form is required for paths containing whitespace)\n# For more information, see: https://docs.docker.com/engine/reference/builder/#copy\nCOPY requirements.txt .\n\n# Install the Python dependencies\nRUN pip install --no-cache-dir --no-deps -r requirements.txt\n\n# Stage 2 - Copy only necessary files to the runner stage\n\n# The FROM instruction initializes a new build stage for the application\nFROM python:3.11-slim\n\n# Sets the working directory to /app\nWORKDIR /app\n\n# Copy the virtual environment from the builder stage\nCOPY --from=builder /opt/venv /opt/venv\n\n# Set environment variables\nENV PATH=\"/opt/venv/bin:$PATH\"\n\n# Clone the app.py containing the application code\nCOPY app.py .\n\n# Copy the images folder to WORKDIR\n# The ADD instruction copies new files, directories or remote file URLs from <src> and adds them to the filesystem of the image at the path <dest>.\n# For more information, see: https://docs.docker.com/engine/reference/builder/#add\nADD images ./images\n\n# The EXPOSE instruction informs Docker that the container listens on the specified network ports at runtime.\n# For more information, see: https://docs.docker.com/engine/reference/builder/#expose\nEXPOSE 8501\n\n# The HEALTHCHECK instruction has two forms:\n# * HEALTHCHECK [OPTIONS] CMD command (check container health by running a command inside the container)\n# * HEALTHCHECK NONE (disable any healthcheck inherited from the base image)\n# The HEALTHCHECK instruction tells Docker how to test a container to check that it is still working. \n# This can detect cases such as a web server that is stuck in an infinite loop and unable to handle new connections, \n# even though the server process is still running. For more information, see: https://docs.docker.com/engine/reference/builder/#healthcheck\nHEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health\n\n# The ENTRYPOINT instruction has two forms:\n# * ENTRYPOINT [\"executable\", \"param1\", \"param2\"] (exec form, preferred)\n# * ENTRYPOINT command param1 param2 (shell form)\n# The ENTRYPOINT instruction allows you to configure a container that will run as an executable.\n# For more information, see: https://docs.docker.com/engine/reference/builder/#entrypoint\nENTRYPOINT [\"streamlit\", \"run\", \"app.py\", \"--server.port=8501\", \"--server.address=0.0.0.0\"] \n 01-build-docker-image.sh \n #!/bin/bash\n\n# Variables\nsource ./00-variables.sh\n\n# Build the docker image\ndocker build -t $imageName:$tag -f Dockerfile . \n Before running any script, make sure to customize the value of the variables inside the  00-variables.sh  file. 
This file is embedded in all the scripts and contains the following variables: \n # Variables\nacrName=\"CoralAcr\"\nacrResourceGrougName=\"CoralRG\"\nlocation=\"FranceCentral\"\nattachAcr=false\nimageName=\"magic8ball\"\ntag=\"v2\"\ncontainerName=\"magic8ball\"\nimage=\"$acrName.azurecr.io/$imageName:$tag\"\nimagePullPolicy=\"IfNotPresent\" # Always, Never, IfNotPresent\nmanagedIdentityName=\"OpenAiManagedIdentity\"\nfederatedIdentityName=\"Magic8BallFederatedIdentity\"\n\n# Azure Subscription and Tenant\nsubscriptionId=$(az account show --query id --output tsv)\nsubscriptionName=$(az account show --query name --output tsv)\ntenantId=$(az account show --query tenantId --output tsv)\n\n# Parameters\ntitle=\"Magic 8 Ball\"\nlabel=\"Pose your question and cross your fingers!\"\ntemperature=\"0.9\"\nimageWidth=\"80\"\n\n# OpenAI\nopenAiName=\"CoralOpenAi \"\nopenAiResourceGroupName=\"CoralRG\"\nopenAiType=\"azure_ad\"\nopenAiBase=\"https://coralopenai.openai.azure.com/\"\nopenAiModel=\"gpt-35-turbo\"\nopenAiDeployment=\"gpt-35-turbo\"\n\n# Nginx Ingress Controller\nnginxNamespace=\"ingress-basic\"\nnginxRepoName=\"ingress-nginx\"\nnginxRepoUrl=\"https://kubernetes.github.io/ingress-nginx\"\nnginxChartName=\"ingress-nginx\"\nnginxReleaseName=\"nginx-ingress\"\nnginxReplicaCount=3\n\n# Certificate Manager\ncmNamespace=\"cert-manager\"\ncmRepoName=\"jetstack\"\ncmRepoUrl=\"https://charts.jetstack.io\"\ncmChartName=\"cert-manager\"\ncmReleaseName=\"cert-manager\"\n\n# Cluster Issuer\nemail=\"paolos@microsoft.com\"\nclusterIssuerName=\"letsencrypt-nginx\"\nclusterIssuerTemplate=\"cluster-issuer.yml\"\n\n# AKS Cluster\naksClusterName=\"CoralAks\"\naksResourceGroupName=\"CoralRG\"\n\n# Sample Application\nnamespace=\"magic8ball\"\nserviceAccountName=\"magic8ball-sa\"\ndeploymentTemplate=\"deployment.yml\"\nserviceTemplate=\"service.yml\"\nconfigMapTemplate=\"configMap.yml\"\nsecretTemplate=\"secret.yml\"\n\n# Ingress and DNS\ningressTemplate=\"ingress.yml\"\ningressName=\"magic8ball-ingress\"\ndnsZoneName=\"contoso.com\"\ndnsZoneResourceGroupName=\"DnsResourceGroup\"\nsubdomain=\"magic8ball\"\nhost=\"$subdomain.$dnsZoneName\" \n Upload Docker container image to Azure Container Registry (ACR) \n You can push the Docker container image to Azure Container Registry (ACR) using the  03-push-docker-image.sh  script in the  scripts  folder. \n 03-push-docker-image.sh \n #!/bin/bash\n\n# Variables\nsource ./00-variables.sh\n\n# Login to ACR\naz acr login --name $acrName \n\n# Retrieve ACR login server. Each container image needs to be tagged with the loginServer name of the registry. \nloginServer=$(az acr show --name $acrName --query loginServer --output tsv)\n\n# Tag the local image with the loginServer of ACR\ndocker tag ${imageName,,}:$tag $loginServer/${imageName,,}:$tag\n\n# Push latest container image to ACR\ndocker push $loginServer/${imageName,,}:$tag \n Deployment Scripts \n If you deployed the Azure infrastructure using the Terraform modules provided with this sample, you only need to deploy the application using the following scripts and YAML templates in the  scripts  folder. \n   \n Scripts \n   \n \n 09-deploy-app.sh \n 10-create-ingress.sh \n 11-configure-dns.sh \n \n YAML manifests \n   \n \n configMap.yml \n deployment.yml \n ingress.yml \n service.yml \n \n If you instead want to deploy the application in your AKS cluster, you can use the following scripts to configure your environment. 
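Before running the configuration scripts below, you can optionally verify that the image pushed by 03-push-docker-image.sh is available in the registry. This is just a sanity check; the registry and repository names are the ones defined in 00-variables.sh:

az acr repository show-tags \
  --name CoralAcr \
  --repository magic8ball \
  --output table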
\n   \n 04-create-nginx-ingress-controller.sh \n This script installs the  NGINX Ingress Controller  using Helm. \n #!/bin/bash\n\n# Variables\nsource ./00-variables.sh\n\n# Use Helm to deploy an NGINX ingress controller\nresult=$(helm list -n $nginxNamespace | grep $nginxReleaseName | awk '{print $1}')\n\nif [[ -n $result ]]; then\n echo \"[$nginxReleaseName] ingress controller already exists in the [$nginxNamespace] namespace\"\nelse\n # Check if the ingress-nginx repository is not already added\n result=$(helm repo list | grep $nginxRepoName | awk '{print $1}')\n\n if [[ -n $result ]]; then\n echo \"[$nginxRepoName] Helm repo already exists\"\n else\n # Add the ingress-nginx repository\n echo \"Adding [$nginxRepoName] Helm repo...\"\n helm repo add $nginxRepoName $nginxRepoUrl\n fi\n\n # Update your local Helm chart repository cache\n echo 'Updating Helm repos...'\n helm repo update\n\n # Deploy NGINX ingress controller\n echo \"Deploying [$nginxReleaseName] NGINX ingress controller to the [$nginxNamespace] namespace...\"\n helm install $nginxReleaseName $nginxRepoName/$nginxChartName \\\n --create-namespace \\\n --namespace $nginxNamespace \\\n --set controller.config.enable-modsecurity=true \\\n --set controller.config.enable-owasp-modsecurity-crs=true \\\n --set controller.config.modsecurity-snippet=\\\n'SecRuleEngine On\nSecRequestBodyAccess On\nSecAuditLog /dev/stdout\nSecAuditLogFormat JSON\nSecAuditEngine RelevantOnly\nSecRule REMOTE_ADDR \"@ipMatch 127.0.0.1\" \"id:87,phase:1,pass,nolog,ctl:ruleEngine=Off\"' \\\n --set controller.metrics.enabled=true \\\n --set controller.metrics.serviceMonitor.enabled=true \\\n --set controller.metrics.serviceMonitor.additionalLabels.release=\"prometheus\" \\\n --set controller.nodeSelector.\"kubernetes\\.io/os\"=linux \\\n --set controller.replicaCount=$replicaCount \\\n --set defaultBackend.nodeSelector.\"kubernetes\\.io/os\"=linux \\\n --set controller.service.annotations.\"service\\.beta\\.kubernetes\\.io/azure-load-balancer-health-probe-request-path\"=/healthz\nfi \n 05-install-cert-manager.sh \n This script installs the  cert-manager  using Helm. \n #/bin/bash\n\n# Variables\nsource ./00-variables.sh\n\n# Check if the ingress-nginx repository is not already added\nresult=$(helm repo list | grep $cmRepoName | awk '{print $1}')\n\nif [[ -n $result ]]; then\n echo \"[$cmRepoName] Helm repo already exists\"\nelse\n # Add the Jetstack Helm repository\n echo \"Adding [$cmRepoName] Helm repo...\"\n helm repo add $cmRepoName $cmRepoUrl\nfi\n\n# Update your local Helm chart repository cache\necho 'Updating Helm repos...'\nhelm repo update\n\n# Install cert-manager Helm chart\nresult=$(helm list -n $cmNamespace | grep $cmReleaseName | awk '{print $1}')\n\nif [[ -n $result ]]; then\n echo \"[$cmReleaseName] cert-manager already exists in the $cmNamespace namespace\"\nelse\n # Install the cert-manager Helm chart\n echo \"Deploying [$cmReleaseName] cert-manager to the $cmNamespace namespace...\"\n helm install $cmReleaseName $cmRepoName/$cmChartName \\\n --create-namespace \\\n --namespace $cmNamespace \\\n --set installCRDs=true \\\n --set nodeSelector.\"kubernetes\\.io/os\"=linux\nfi \n 06-create-cluster-issuer.sh \n This script creates a cluster issuer for the  NGINX Ingress Controller  based on the  Let's Encrypt  ACME certificate issuer. \n #/bin/bash\n\n# Variables\nsource ./00-variables.sh\n\n# Check if the cluster issuer already exists\nresult=$(kubectl get ClusterIssuer -o json | jq -r '.items[].metadata.name | select(. 
== \"'$clusterIssuerName'\")')\n\nif [[ -n $result ]]; then\n echo \"[$clusterIssuerName] cluster issuer already exists\"\n exit\nelse\n # Create the cluster issuer\n echo \"[$clusterIssuerName] cluster issuer does not exist\"\n echo \"Creating [$clusterIssuerName] cluster issuer...\"\n cat $clusterIssuerTemplate |\n yq \"(.spec.acme.email)|=\"\\\"\"$email\"\\\" |\n kubectl apply -f -\nfi \n 07-create-workload-managed-identity.sh \n This script creates the managed identity used by the  magic8ball chatbot and assigns it the  Cognitive Services User  role on the Azure OpenAI Service. \n #!/bin/bash\n\n# Variables\nsource ./00-variables.sh\n\n# Check if the user-assigned managed identity already exists\necho \"Checking if [$managedIdentityName] user-assigned managed identity actually exists in the [$aksResourceGroupName] resource group...\"\n\naz identity show \\\n --name $managedIdentityName \\\n --resource-group $aksResourceGroupName &>/dev/null\n\nif [[ $? != 0 ]]; then\n echo \"No [$managedIdentityName] user-assigned managed identity actually exists in the [$aksResourceGroupName] resource group\"\n echo \"Creating [$managedIdentityName] user-assigned managed identity in the [$aksResourceGroupName] resource group...\"\n\n # Create the user-assigned managed identity\n az identity create \\\n --name $managedIdentityName \\\n --resource-group $aksResourceGroupName \\\n --location $location \\\n --subscription $subscriptionId 1>/dev/null\n\n if [[ $? == 0 ]]; then\n echo \"[$managedIdentityName] user-assigned managed identity successfully created in the [$aksResourceGroupName] resource group\"\n else\n echo \"Failed to create [$managedIdentityName] user-assigned managed identity in the [$aksResourceGroupName] resource group\"\n exit\n fi\nelse\n echo \"[$managedIdentityName] user-assigned managed identity already exists in the [$aksResourceGroupName] resource group\"\nfi\n\n# Retrieve the clientId of the user-assigned managed identity\necho \"Retrieving clientId for [$managedIdentityName] managed identity...\"\nclientId=$(az identity show \\\n --name $managedIdentityName \\\n --resource-group $aksResourceGroupName \\\n --query clientId \\\n --output tsv)\n\nif [[ -n $clientId ]]; then\n echo \"[$clientId] clientId for the [$managedIdentityName] managed identity successfully retrieved\"\nelse\n echo \"Failed to retrieve clientId for the [$managedIdentityName] managed identity\"\n exit\nfi\n\n# Retrieve the principalId of the user-assigned managed identity\necho \"Retrieving principalId for [$managedIdentityName] managed identity...\"\nprincipalId=$(az identity show \\\n --name $managedIdentityName \\\n --resource-group $aksResourceGroupName \\\n --query principalId \\\n --output tsv)\n\nif [[ -n $principalId ]]; then\n echo \"[$principalId] principalId for the [$managedIdentityName] managed identity successfully retrieved\"\nelse\n echo \"Failed to retrieve principalId for the [$managedIdentityName] managed identity\"\n exit\nfi\n\n# Get the resource id of the Azure OpenAI resource\nopenAiId=$(az cognitiveservices account show \\\n --name $openAiName \\\n --resource-group $openAiResourceGroupName \\\n --query id \\\n --output tsv)\n\nif [[ -n $openAiId ]]; then\n echo \"Resource id for the [$openAiName] Azure OpenAI resource successfully retrieved\"\nelse\n echo \"Failed to the resource id for the [$openAiName] Azure OpenAI resource\"\n exit -1\nfi\n\n# Assign the Cognitive Services User role on the Azure OpenAI resource to the managed identity\nrole=\"Cognitive Services User\"\necho \"Checking 
if the [$managedIdentityName] managed identity has been assigned to [$role] role with [$openAiName] Azure OpenAI resource as a scope...\"\ncurrent=$(az role assignment list \\\n --assignee $principalId \\\n --scope $openAiId \\\n --query \"[?roleDefinitionName=='$role'].roleDefinitionName\" \\\n --output tsv 2>/dev/null)\n\nif [[ $current == $role ]]; then\n echo \"[$managedIdentityName] managed identity is already assigned to the [\"$current\"] role with [$openAiName] Azure OpenAI resource as a scope\"\nelse\n echo \"[$managedIdentityName] managed identity is not assigned to the [$role] role with [$openAiName] Azure OpenAI resource as a scope\"\n echo \"Assigning the [$role] role to the [$managedIdentityName] managed identity with [$openAiName] Azure OpenAI resource as a scope...\"\n\n az role assignment create \\\n --assignee $principalId \\\n --role \"$role\" \\\n --scope $openAiId 1>/dev/null\n\n if [[ $? == 0 ]]; then\n echo \"[$managedIdentityName] managed identity successfully assigned to the [$role] role with [$openAiName] Azure OpenAI resource as a scope\"\n else\n echo \"Failed to assign the [$managedIdentityName] managed identity to the [$role] role with [$openAiName] Azure OpenAI resource as a scope\"\n exit\n fi\nfi \n 08-create-service-account.sh` \n This script creates the namespace and service account for the  magic8ball  chatbot and federate the service account with the user-defined managed identity created in the previous step. \n #!/bin/bash\n\n# Variables for the user-assigned managed identity\nsource ./00-variables.sh\n\n# Check if the namespace already exists\nresult=$(kubectl get namespace -o 'jsonpath={.items[?(@.metadata.name==\"'$namespace'\")].metadata.name'})\n\nif [[ -n $result ]]; then\n echo \"[$namespace] namespace already exists\"\nelse\n # Create the namespace for your ingress resources\n echo \"[$namespace] namespace does not exist\"\n echo \"Creating [$namespace] namespace...\"\n kubectl create namespace $namespace\nfi\n\n# Check if the service account already exists\nresult=$(kubectl get sa -n $namespace -o 'jsonpath={.items[?(@.metadata.name==\"'$serviceAccountName'\")].metadata.name'})\n\nif [[ -n $result ]]; then\n echo \"[$serviceAccountName] service account already exists\"\nelse\n # Retrieve the resource id of the user-assigned managed identity\n echo \"Retrieving clientId for [$managedIdentityName] managed identity...\"\n managedIdentityClientId=$(az identity show \\\n --name $managedIdentityName \\\n --resource-group $aksResourceGroupName \\\n --query clientId \\\n --output tsv)\n\n if [[ -n $managedIdentityClientId ]]; then\n echo \"[$managedIdentityClientId] clientId for the [$managedIdentityName] managed identity successfully retrieved\"\n else\n echo \"Failed to retrieve clientId for the [$managedIdentityName] managed identity\"\n exit\n fi\n\n # Create the service account\n echo \"[$serviceAccountName] service account does not exist\"\n echo \"Creating [$serviceAccountName] service account...\"\n cat <<EOF | kubectl apply -f -\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n annotations:\n azure.workload.identity/client-id: $managedIdentityClientId\n azure.workload.identity/tenant-id: $tenantId\n labels:\n azure.workload.identity/use: \"true\"\n name: $serviceAccountName\n namespace: $namespace\nEOF\nfi\n\n# Show service account YAML manifest\necho \"Service Account YAML manifest\"\necho \"-----------------------------\"\nkubectl get sa $serviceAccountName -n $namespace -o yaml\n\n# Check if the federated identity credential already 
exists\necho \"Checking if [$federatedIdentityName] federated identity credential actually exists in the [$aksResourceGroupName] resource group...\"\n\naz identity federated-credential show \\\n --name $federatedIdentityName \\\n --resource-group $aksResourceGroupName \\\n --identity-name $managedIdentityName &>/dev/null\n\nif [[ $? != 0 ]]; then\n echo \"No [$federatedIdentityName] federated identity credential actually exists in the [$aksResourceGroupName] resource group\"\n\n # Get the OIDC Issuer URL\n aksOidcIssuerUrl=\"$(az aks show \\\n --only-show-errors \\\n --name $aksClusterName \\\n --resource-group $aksResourceGroupName \\\n --query oidcIssuerProfile.issuerUrl \\\n --output tsv)\"\n\n # Show OIDC Issuer URL\n if [[ -n $aksOidcIssuerUrl ]]; then\n echo \"The OIDC Issuer URL of the $aksClusterName cluster is $aksOidcIssuerUrl\"\n fi\n\n echo \"Creating [$federatedIdentityName] federated identity credential in the [$aksResourceGroupName] resource group...\"\n\n # Establish the federated identity credential between the managed identity, the service account issuer, and the subject.\n az identity federated-credential create \\\n --name $federatedIdentityName \\\n --identity-name $managedIdentityName \\\n --resource-group $aksResourceGroupName \\\n --issuer $aksOidcIssuerUrl \\\n --subject system:serviceaccount:$namespace:$serviceAccountName\n\n if [[ $? == 0 ]]; then\n echo \"[$federatedIdentityName] federated identity credential successfully created in the [$aksResourceGroupName] resource group\"\n else\n echo \"Failed to create [$federatedIdentityName] federated identity credential in the [$aksResourceGroupName] resource group\"\n exit\n fi\nelse\n echo \"[$federatedIdentityName] federated identity credential already exists in the [$aksResourceGroupName] resource group\"\nfi \n 09-deploy-app.sh` \n This script creates the Kubernetes config map, deployment, and service used by the  magic8ball  chatbot. 
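Once 09-deploy-app.sh has run, a quick way to confirm that the objects were created and the pods are healthy is a check like the following, assuming the default magic8ball namespace and resource names used in this sample:

kubectl get configmap,deployment,service -n magic8ball
kubectl rollout status deployment/magic8ball -n magic8ball

The full 09-deploy-app.sh script follows.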
\n #!/bin/bash\n\n# Variables\nsource ./00-variables.sh\n\n# Attach ACR to AKS cluster\nif [[ $attachAcr == true ]]; then\n echo \"Attaching ACR $acrName to AKS cluster $aksClusterName...\"\n az aks update \\\n --name $aksClusterName \\\n --resource-group $aksResourceGroupName \\\n --attach-acr $acrName\nfi\n\n# Check if namespace exists in the cluster\nresult=$(kubectl get namespace -o jsonpath=\"{.items[?(@.metadata.name=='$namespace')].metadata.name}\")\n\nif [[ -n $result ]]; then\n echo \"$namespace namespace already exists in the cluster\"\nelse\n echo \"$namespace namespace does not exist in the cluster\"\n echo \"creating $namespace namespace in the cluster...\"\n kubectl create namespace $namespace\nfi\n\n# Create config map\ncat $configMapTemplate |\n yq \"(.data.TITLE)|=\"\\\"\"$title\"\\\" |\n yq \"(.data.LABEL)|=\"\\\"\"$label\"\\\" |\n yq \"(.data.TEMPERATURE)|=\"\\\"\"$temperature\"\\\" |\n yq \"(.data.IMAGE_WIDTH)|=\"\\\"\"$imageWidth\"\\\" |\n yq \"(.data.AZURE_OPENAI_TYPE)|=\"\\\"\"$openAiType\"\\\" |\n yq \"(.data.AZURE_OPENAI_BASE)|=\"\\\"\"$openAiBase\"\\\" |\n yq \"(.data.AZURE_OPENAI_MODEL)|=\"\\\"\"$openAiModel\"\\\" |\n yq \"(.data.AZURE_OPENAI_DEPLOYMENT)|=\"\\\"\"$openAiDeployment\"\\\" |\n kubectl apply -n $namespace -f -\n\n# Create deployment\ncat $deploymentTemplate |\n yq \"(.spec.template.spec.containers[0].image)|=\"\\\"\"$image\"\\\" |\n yq \"(.spec.template.spec.containers[0].imagePullPolicy)|=\"\\\"\"$imagePullPolicy\"\\\" |\n yq \"(.spec.template.spec.serviceAccountName)|=\"\\\"\"$serviceAccountName\"\\\" |\n kubectl apply -n $namespace -f -\n\n# Create deployment\nkubectl apply -f $serviceTemplate -n $namespace \n 10-create-ingress.sh \n This script creates the ingress object to expose the service via the  NGINX Ingress Controller . \n #/bin/bash\n\n# Variables\nsource ./00-variables.sh\n\n# Create the ingress\necho \"[$ingressName] ingress does not exist\"\necho \"Creating [$ingressName] ingress...\"\ncat $ingressTemplate |\n yq \"(.spec.tls[0].hosts[0])|=\"\\\"\"$host\"\\\" |\n yq \"(.spec.rules[0].host)|=\"\\\"\"$host\"\\\" |\n kubectl apply -n $namespace -f - \n 11-configure-dns.sh \n This script creates an A record in the Azure DNS Zone to expose the application via a given subdomain (e.g., https://magic8ball.example.com). 
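After the script completes, you can verify the A record with the Azure CLI; the zone, resource group, and record set names below are the defaults from 00-variables.sh:

az network dns record-set a show \
  --zone-name contoso.com \
  --resource-group DnsResourceGroup \
  --name magic8ball \
  --query "arecords[].ipv4Address" \
  --output tsv

The full 11-configure-dns.sh script follows.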
\n # Variables\nsource ./00-variables.sh\n\n# Retrieve the public IP address from the ingress\necho \"Retrieving the external IP address from the [$ingressName] ingress...\"\npublicIpAddress=$(kubectl get ingress $ingressName -n $namespace -o jsonpath='{.status.loadBalancer.ingress[0].ip}')\n\nif [ -n $publicIpAddress ]; then\n echo \"[$publicIpAddress] external IP address of the application gateway ingress controller successfully retrieved from the [$ingressName] ingress\"\nelse\n echo \"Failed to retrieve the external IP address of the application gateway ingress controller from the [$ingressName] ingress\"\n exit\nfi\n\n# Check if an A record for todolist subdomain exists in the DNS Zone\necho \"Retrieving the A record for the [$subdomain] subdomain from the [$dnsZoneName] DNS zone...\"\nipv4Address=$(az network dns record-set a list \\\n --zone-name $dnsZoneName \\\n --resource-group $dnsZoneResourceGroupName \\\n --query \"[?name=='$subdomain'].arecords[].ipv4Address\" \\\n --output tsv)\n\nif [[ -n $ipv4Address ]]; then\n echo \"An A record already exists in [$dnsZoneName] DNS zone for the [$subdomain] subdomain with [$ipv4Address] IP address\"\n\n if [[ $ipv4Address == $publicIpAddress ]]; then\n echo \"The [$ipv4Address] ip address of the existing A record is equal to the ip address of the [$ingressName] ingress\"\n echo \"No additional step is required\"\n exit\n else\n echo \"The [$ipv4Address] ip address of the existing A record is different than the ip address of the [$ingressName] ingress\"\n fi\n\n # Retrieving name of the record set relative to the zone\n echo \"Retrieving the name of the record set relative to the [$dnsZoneName] zone...\"\n\n recordSetName=$(az network dns record-set a list \\\n --zone-name $dnsZoneName \\\n --resource-group $dnsZoneResourceGroupName \\\n --query \"[?name=='$subdomain'].name\" \\\n --output name 2>/dev/null)\n\n if [[ -n $recordSetName ]]; then\n \"[$recordSetName] record set name successfully retrieved\"\n else\n \"Failed to retrieve the name of the record set relative to the [$dnsZoneName] zone\"\n exit\n fi\n\n # Remove the a record\n echo \"Removing the A record from the record set relative to the [$dnsZoneName] zone...\"\n\n az network dns record-set a remove-record \\\n --ipv4-address $ipv4Address \\\n --record-set-name $recordSetName \\\n --zone-name $dnsZoneName \\\n --resource-group $dnsZoneResourceGroupName\n\n if [[ $? == 0 ]]; then\n echo \"[$ipv4Address] ip address successfully removed from the [$recordSetName] record set\"\n else\n echo \"Failed to remove the [$ipv4Address] ip address from the [$recordSetName] record set\"\n exit\n fi\nfi\n\n# Create the a record\necho \"Creating an A record in [$dnsZoneName] DNS zone for the [$subdomain] subdomain with [$publicIpAddress] IP address...\"\naz network dns record-set a add-record \\\n --zone-name $dnsZoneName \\\n --resource-group $dnsZoneResourceGroupName \\\n --record-set-name $subdomain \\\n --ipv4-address $publicIpAddress 1>/dev/null\n\nif [[ $? == 0 ]]; then\n echo \"A record for the [$subdomain] subdomain with [$publicIpAddress] IP address successfully created in [$dnsZoneName] DNS zone\"\nelse\n echo \"Failed to create an A record for the $subdomain subdomain with [$publicIpAddress] IP address in [$dnsZoneName] DNS zone\"\nfi \n The scripts used to deploy the YAML template use the yq tool to customize the manifests with the value of the variables defined in the  00-variables.sh  file. 
This tool is a lightweight and portable command-line YAML, JSON and XML processor that uses jq like syntax but works with YAML files as well as json, xml, properties, csv and tsv. It doesn't yet support everything jq does - but it does support the most common operations and functions, and more is being added continuously. \n   \n YAML manifests \n Below you can read the YAML manifests used to deploy the  magic8ball  chatbot to AKS. \n configmap.yml The  configmap.yml  defines a value for the environment variables passed to the application container. The configmap does not define any environment variable for the OpenAI key as the container. \n apiVersion: v1\nkind: ConfigMap\nmetadata:\n name: magic8ball-configmap\ndata:\n TITLE: \"Magic 8 Ball\"\n LABEL: \"Pose your question and cross your fingers!\"\n TEMPERATURE: \"0.9\"\n IMAGE_WIDTH: \"80\"\n AZURE_OPENAI_TYPE: azure_ad\n AZURE_OPENAI_BASE: https://myopenai.openai.azure.com/\n AZURE_OPENAI_KEY: \"\"\n AZURE_OPENAI_MODEL: gpt-35-turbo\n AZURE_OPENAI_DEPLOYMENT: magic8ballGPT \n These are the parameters defined by the configmap: \n   \n \n AZURE_OPENAI_TYPE : specify  azure  if you want to let the application use the API key to authenticate against OpenAI. In this case, make sure to provide the Key in the  AZURE_OPENAI_KEY  environment variable. If you want to authenticate using an Azure AD security token, you need to specify  azure_ad  as a value. In this case, don't need to provide any value in the  AZURE_OPENAI_KEY  environment variable. \n AZURE_OPENAI_BASE : the URL of your Azure OpenAI resource. If you use the API key to authenticate against OpenAI, you can specify the regional endpoint of your Azure OpenAI Service (e.g., https://eastus.api.cognitive.microsoft.com/). If you instead plan to use Azure AD security tokens for authentication, you need to deploy your Azure OpenAI Service with a subdomain and specify the resource-specific endpoint url (e.g., https://myopenai.openai.azure.com/). \n AZURE_OPENAI_KEY : the key of your Azure OpenAI resource. If you set  AZURE_OPENAI_TYPE  to  azure_ad  you can leave this parameter empty. \n AZURE_OPENAI_DEPLOYMENT : the name of the ChatGPT deployment used by your Azure OpenAI resource, for example  gpt-35-turbo . \n AZURE_OPENAI_MODEL : the name of the ChatGPT model used by your Azure OpenAI resource, for example  gpt-35-turbo . \n TITLE : the title of the Streamlit app. \n TEMPERATURE : the temperature used by the OpenAI API to generate the response. \n SYSTEM : give the model instructions about how it should behave and any context it should reference when generating a response. Used to describe the assistant's personality. \n \n   \n deployment.yml \n The  deployment.yml  manifest is used create a Kubernetes deployment that defines the application pods to create. azure.workload.identity/use label is required in the pod template spec. Only pods with this label will be mutated by the azure-workload-identity mutating admission webhook to inject the Azure specific environment variables and the projected service account token volume. 
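If you want to confirm that the mutating webhook actually injected the workload identity settings into the running pods, a sketch like the following can help; the namespace and label come from this sample, and the AZURE_CLIENT_ID, AZURE_TENANT_ID, and AZURE_FEDERATED_TOKEN_FILE variables are the ones Azure AD Workload Identity is expected to inject:

kubectl get pods -n magic8ball -l app=magic8ball
kubectl exec -n magic8ball deploy/magic8ball -- env | grep AZURE_

The full deployment.yml manifest follows.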
\n apiVersion: apps/v1\nkind: Deployment\nmetadata:\n name: magic8ball\n labels:\n app: magic8ball\nspec:\n replicas: 3\n selector:\n matchLabels:\n app: magic8ball\n azure.workload.identity/use: \"true\"\n strategy:\n rollingUpdate:\n maxSurge: 1\n maxUnavailable: 1\n minReadySeconds: 5\n template:\n metadata:\n labels:\n app: magic8ball\n azure.workload.identity/use: \"true\"\n prometheus.io/scrape: \"true\"\n spec:\n serviceAccountName: magic8ball-sa\n topologySpreadConstraints:\n - maxSkew: 1\n topologyKey: topology.kubernetes.io/zone\n whenUnsatisfiable: DoNotSchedule\n labelSelector:\n matchLabels:\n app: magic8ball\n - maxSkew: 1\n topologyKey: kubernetes.io/hostname\n whenUnsatisfiable: DoNotSchedule\n labelSelector:\n matchLabels:\n app: magic8ball\n nodeSelector:\n \"kubernetes.io/os\": linux\n containers:\n - name: magic8ball\n image: paolosalvatori.azurecr.io/magic8ball:v1\n imagePullPolicy: Always\n resources:\n requests:\n memory: \"128Mi\"\n cpu: \"250m\"\n limits:\n memory: \"256Mi\"\n cpu: \"500m\"\n ports:\n - containerPort: 8501\n livenessProbe:\n httpGet:\n path: /\n port: 8501\n failureThreshold: 1\n initialDelaySeconds: 60\n periodSeconds: 30\n timeoutSeconds: 5\n readinessProbe:\n httpGet:\n path: /\n port: 8501\n failureThreshold: 1\n initialDelaySeconds: 60\n periodSeconds: 30\n timeoutSeconds: 5\n startupProbe:\n httpGet:\n path: /\n port: 8501\n failureThreshold: 1\n initialDelaySeconds: 60\n periodSeconds: 30\n timeoutSeconds: 5\n env:\n - name: TITLE\n valueFrom:\n configMapKeyRef:\n name: magic8ball-configmap\n key: TITLE\n - name: IMAGE_WIDTH\n valueFrom:\n configMapKeyRef:\n name: magic8ball-configmap\n key: IMAGE_WIDTH\n - name: LABEL\n valueFrom:\n configMapKeyRef:\n name: magic8ball-configmap\n key: LABEL\n - name: TEMPERATURE\n valueFrom:\n configMapKeyRef:\n name: magic8ball-configmap\n key: TEMPERATURE\n - name: AZURE_OPENAI_TYPE\n valueFrom:\n configMapKeyRef:\n name: magic8ball-configmap\n key: AZURE_OPENAI_TYPE\n - name: AZURE_OPENAI_BASE\n valueFrom:\n configMapKeyRef:\n name: magic8ball-configmap\n key: AZURE_OPENAI_BASE\n - name: AZURE_OPENAI_KEY\n valueFrom:\n configMapKeyRef:\n name: magic8ball-configmap\n key: AZURE_OPENAI_KEY\n - name: AZURE_OPENAI_MODEL\n valueFrom:\n configMapKeyRef:\n name: magic8ball-configmap\n key: AZURE_OPENAI_MODEL\n - name: AZURE_OPENAI_DEPLOYMENT\n valueFrom:\n configMapKeyRef:\n name: magic8ball-configmap\n key: AZURE_OPENAI_DEPLOYMENT \n service.yml \n The application is exposed using a  ClusterIP  Kubernetes service. \n apiVersion: v1\nkind: Service\nmetadata:\n name: magic8ball\n labels:\n app: magic8ball\nspec:\n type: ClusterIP\n ports:\n - protocol: TCP\n port: 8501\n selector:\n app: magic8ball \n ingress.yml \n The  ingress.yml  manifest defines a Kubernetes ingress object used to expose the service via the NGINX Ingress Controller. 
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: magic8ball-ingress
  annotations:
    cert-manager.io/cluster-issuer: letsencrypt-nginx
    cert-manager.io/acme-challenge-type: http01
    nginx.ingress.kubernetes.io/proxy-connect-timeout: "360"
    nginx.ingress.kubernetes.io/proxy-send-timeout: "360"
    nginx.ingress.kubernetes.io/proxy-read-timeout: "360"
    nginx.ingress.kubernetes.io/proxy-next-upstream-timeout: "360"
    nginx.ingress.kubernetes.io/configuration-snippet: |
      more_set_headers "X-Frame-Options: SAMEORIGIN";
spec:
  ingressClassName: nginx
  tls:
  - hosts:
    - magic8ball.contoso.com
    secretName: tls-secret
  rules:
  - host: magic8ball.contoso.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: magic8ball
            port:
              number: 8501

The ingress object defines the following annotations:

cert-manager.io/cluster-issuer: specifies the name of a cert-manager.io ClusterIssuer to acquire the certificate required for this Ingress. It does not matter which namespace your Ingress resides in, as ClusterIssuers are non-namespaced resources. In this sample, cert-manager is instructed to use the letsencrypt-nginx ClusterIssuer that you can create using the 06-create-cluster-issuer.sh script.
cert-manager.io/acme-challenge-type: specifies the challenge type.
nginx.ingress.kubernetes.io/proxy-connect-timeout: specifies the connection timeout in seconds.
nginx.ingress.kubernetes.io/proxy-send-timeout: specifies the send timeout in seconds.
nginx.ingress.kubernetes.io/proxy-read-timeout: specifies the read timeout in seconds.
nginx.ingress.kubernetes.io/proxy-next-upstream-timeout: specifies the next upstream timeout in seconds.
nginx.ingress.kubernetes.io/configuration-snippet: allows additional configuration to the NGINX location.

Review deployed resources

Use the Azure portal, Azure CLI, or Azure PowerShell to list the deployed resources in the resource group.

Azure CLI

az resource list --resource-group <resource-group-name>

PowerShell

Get-AzResource -ResourceGroupName <resource-group-name>

Clean up resources

When you no longer need the resources you created, delete the resource group. This removes all the Azure resources it contains.
Azure CLI

az group delete --name <resource-group-name>

PowerShell

Remove-AzResourceGroup -Name <resource-group-name>
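If you prefer a non-blocking cleanup, the Azure CLI call also accepts the standard --yes and --no-wait flags:

az group delete --name <resource-group-name> --yes --no-wait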
PDFs","conversation":{"__ref":"Conversation:conversation:1929976"},"id":"message:1929976","revisionNum":1,"uid":1929976,"depth":0,"board":{"__ref":"Forum:board:EdgeInsiderDiscussions"},"author":{"__ref":"User:user:310193"},"metrics":{"__typename":"MessageMetrics","views":42899},"postTime":"2020-11-25T01:30:12.319-08:00","lastPublishTime":"2020-11-25T01:30:12.319-08:00","body@stripHtml({\"removeProcessingText\":true,\"removeSpoilerMarkup\":true,\"removeTocMarkup\":true,\"truncateLength\":-1})":" Android 9 pie and latest version of Edge Android beta, in the file manager when I \"Open with\" on PDF file, Edge isn't available.     I'd like the same features we have on desktop, Mac, Linux to be available on Android and IOS too.   if you want the same, please send a feedback through Edge browser about this. thank you ","body@stripHtml({\"removeProcessingText\":true,\"removeSpoilerMarkup\":true,\"removeTocMarkup\":true,\"truncateLength\":-1})@stringLength":"348","kudosSumWeight":5,"repliesCount":24,"readOnly":false,"images":{"__typename":"AssociatedImageConnection","edges":[{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDE","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0xOTI5OTc2LTIzNTk0Nmk4M0ZEQ0JDMUQ1RTU2Nzcx?revision=1\"}"}}],"totalCount":1,"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}},"videos":{"__typename":"VideoConnection","edges":[],"totalCount":0,"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}}},"Conversation:conversation:3054130":{"__typename":"Conversation","id":"conversation:3054130","topic":{"__typename":"BlogTopicMessage","uid":3054130},"lastPostingActivityTime":"2023-11-30T04:32:51.016-08:00","solved":false},"User:user:674981":{"__typename":"User","uid":674981,"login":"UmarMohamedUsman","registrationData":{"__typename":"RegistrationData","status":null},"deleted":false,"avatar":{"__typename":"UserAvatar","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/dS02NzQ5ODEtMzMyODAyaTUxNzZDMUI4MjZDMEU1QkI"},"id":"user:674981"},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zMDU0MTMwLTMzODU0M2kzMkFFNzI4QjRGNDBDMkU4?revision=33\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zMDU0MTMwLTMzODU0M2kzMkFFNzI4QjRGNDBDMkU4?revision=33","title":"UmarMohamedUsman_1-1641866870741.png","associationType":"BODY","width":672,"height":156,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zMDU0MTMwLTMzNzkzNWkyMEU5Q0NBREZFRTk0ODM0?revision=33\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zMDU0MTMwLTMzNzkzNWkyMEU5Q0NBREZFRTk0ODM0?revision=33","title":"UmarMohamedUsman_0-1641545350269.png","associationType":"BODY","width":1459,"height":599,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zMDU0MTMwLTMzNzk0NWlGQzlFQ0NEMUUxODYxRkI4?revision=33\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zMDU0MTMwLTMzNzk0NWlGQzlFQ0NEMUUxODYxRkI4?revision=33","title":"UmarMohamedUsman_4-1641546693328.png","associationType":"BODY","width":1141,"height":690,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zMDU0MTMwLTMzN

Protecting APIs in Azure API Management using OAuth 2.0 Client Credential Flow & test using Postman
FastTrack for Azure Blog · published 2022-01-10, last updated 2022-06-05 · 41,299 views

Challenge:
I recently helped a customer set up OAuth 2.0 with Azure AD to protect their API backend in Azure API Management. While this Azure doc covers the overall process, it uses the OAuth 2.0 authorization code flow so that APIM Developer Portal users can sign in and test APIs. That works well when applications call the APIs interactively, as the signed-in user. But what if an application runs as a background service or daemon, with no signed-in user, and still needs to call the APIs (for example, when you use a tool like Postman to test them)? In that case you need to set up the client application with the OAuth 2.0 Client Credentials Flow.

Solution:
The purpose of this blog is to walk through how to protect APIs published through Azure API Management using the OAuth 2.0 Client Credential Flow and how to test them with Postman. Again, use this Azure doc to complete steps 1 through 6 of the overall setup. If you are not interested in configuring the APIM Developer Portal as a client application, you can skip steps 2, 3, 4 and 5 and follow the steps below. For completeness, and to avoid going back and forth, I'm including some of the steps from that Azure doc in this blog.

High level steps:

Step 1: Register an application in Azure AD to represent the API
Step 2: Register another application in Azure AD to represent a client application
Step 3: Grant permissions in Azure AD
Step 4: Configure a JWT validation policy to pre-authorize requests
Step 5: Request a JWT token using Postman
Step 6: Inspect the token (optional step)
Step 7: Make the API call
Step 8: Build an application to call the API

Step 1: Register an application in Azure AD to represent the API

1. In the Azure Portal, go to Azure Active Directory and select App registrations.
2. Select New registration.
3. When the Register an application page appears, enter your application's registration information:
   - In the Name section, enter a meaningful application name that will be displayed to users of the app, such as backend-app.
   - In the Supported account types section, select an option that suits your scenario.
4. Leave the Redirect URI section empty.
5. Select Register to create the application.
6. On the app Overview page, find the Application (client) ID value and record it for later.
7. Under the Manage section of the side menu, select Expose an API and set the Application ID URI to the default value. Record this value for later.
8. Under the Manage section of the side menu, select App roles, then click Create app role:
   - Display name: enter a meaningful role name, for example AddRole
   - Allowed member types: select Applications
   - Value: for example AddRole
   - Description: as necessary
   - Do you want to enable this app role?: checked
   - Click Apply
   - Record the role ID for later
9. Repeat step 8 to add any additional app roles supported by your API. (A scripted alternative to this Portal procedure is sketched just below.)
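The original post performs Step 1 entirely in the Azure Portal. Purely as a rough illustration, and not part of the original walkthrough, the same registration and app role could be created programmatically through the Microsoft Graph applications endpoint. The graph_token value, the role description and the sign-in audience below are assumptions; the caller needs Application.ReadWrite.All permission.

import uuid
import requests  # assumes the requests package is installed

graph_token = "<token with Application.ReadWrite.All>"  # assumption: acquired separately

app_definition = {
    "displayName": "backend-app",
    "signInAudience": "AzureADMyOrg",  # assumption: single-tenant, adjust to your scenario
    "appRoles": [
        {
            "id": str(uuid.uuid4()),                 # new app role ID; record it for Step 3
            "allowedMemberTypes": ["Application"],
            "displayName": "AddRole",
            "description": "Allows the client to call the Add operation",  # placeholder text
            "value": "AddRole",
            "isEnabled": True,
        }
    ],
}

resp = requests.post(
    "https://graph.microsoft.com/v1.0/applications",
    headers={"Authorization": f"Bearer {graph_token}"},
    json=app_definition,
)
resp.raise_for_status()
backend_app = resp.json()
print("Application (client) ID:", backend_app["appId"])
# The Application ID URI (Expose an API) can still be set in the Portal afterwards,
# or by PATCHing identifierUris with api://<appId> once the appId is known.

Either route ends with the same two values you need later: the Application (client) ID and the app role ID.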

Step 2: Register another application in Azure AD to represent a client application

Register every client application that calls the API as an application in Azure AD. In this example, the client application is Postman, which we will use to test the APIs.

To register another application in Azure AD to represent Postman:

1. In the Azure Portal, go to Azure Active Directory and select App registrations.
2. Select New registration.
3. When the Register an application page appears, enter your application's registration information:
   - In the Name section, enter a meaningful application name that will be displayed to users of the app, such as client-app.
   - In the Supported account types section, select Accounts in this organizational directory only (<tenant name> only - Single tenant).
4. In the Redirect URI section, select Web and leave the URL field empty.
5. Select Register to create the application.
6. On the app Overview page, find the Application (client) ID value and record it for later.
7. Create a client secret for this application to use in a subsequent step:
   - Under the Manage section of the side menu, select Certificates & secrets.
   - Under Client secrets, select New client secret.
   - Under Add a client secret, provide a Description and choose when the key should expire.
   - Select Add.
   - When the secret is created, note the key value for use in a subsequent step. (Note: you can't see or copy this value once you move away from the page, but you can create a new client secret as needed.)
8. Repeat this step to register any additional clients/customers who will be consuming your API.

Step 3: Grant permissions in Azure AD

Now that you have registered two applications to represent the API and the Postman client app, grant permissions to allow the client-app (Postman) to call the backend-app (API).

1. In the Azure Portal, go to Azure Active Directory and select App registrations.
2. Choose your client app. Under the Manage section of the side menu, select API permissions.
3. Select Add a permission.
4. Under Select an API, select My APIs, then find and select your backend-app.
5. Select Application permissions, then select the appropriate role (for example AddRole) of your backend-app.
6. Select Add permissions.
7. Select Grant admin consent for <your-tenant-name> and select Yes. If you get a green tick mark under Status with the message Granted for <tenant name>, you are all set; move on to Step 4.

If you are not an Azure AD Global Admin, you can't grant this admin consent (the option will be grayed out). If you are the owner of the API app (in this case you are, because we set it up in Step 1), you can use Graph Explorer to grant it instead:

- Go to Graph Explorer (make sure to change the tenant name in the URL), for example: https://developer.microsoft.com/en-us/graph/graph-explorer?tenant=<tenantname>.onmicrosoft.com
- Sign in to Graph Explorer.
- In the Search sample queries box, enter: appRoleAssignment
- Select POST assign an appRoleAssignment to a servicePrincipal.
- Click the Modify permissions (Preview) tab and consent to the AppRoleAssignment.ReadWrite.All, Directory.AccessAsUser.All and Directory.ReadWrite.All permissions.
- Set the request body values:
  - principalId = the client app's service principal ID
  - resourceId = the API app's service principal ID
  - appRoleId = the ID of the app role (in this example, the AddRole we created in Step 1; you can get it from the Portal)
- To get the client/API app's service principal ID (object ID), use a CLI or PowerShell command (it is not shown in the Azure Portal). Using CLI: az ad sp list --display-name <Azure AD App Name>, then capture the objectId where objectType = ServicePrincipal.
- In the POST URL, make sure to replace {id}; it is the same as the resourceId above (i.e., the API app's service principal ID): https://graph.microsoft.com/v1.0/servicePrincipals/{id}/appRoleAssignments
- Run the query. If all goes well and you supplied the right IDs, you should get a Created - 201 response. Check the API permission again to make sure you now have the green tick mark under Status with the message Granted for <tenant name>. (A scripted equivalent of this Graph call is sketched after this list.)
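As a non-authoritative sketch of what Graph Explorer is doing in the step above, the same appRoleAssignment can be created with a direct Microsoft Graph call from Python. The graph_token and all ID values below are placeholders, not values from the original post.

import requests  # assumes the requests package is installed

graph_token = "<token with AppRoleAssignment.ReadWrite.All>"   # assumption: acquired separately
client_sp_id = "<client app's service principal object ID>"    # e.g. from: az ad sp list --display-name client-app
api_sp_id = "<API app's service principal object ID>"          # e.g. from: az ad sp list --display-name backend-app
add_role_id = "<AddRole app role ID from Step 1>"

resp = requests.post(
    f"https://graph.microsoft.com/v1.0/servicePrincipals/{api_sp_id}/appRoleAssignments",
    headers={"Authorization": f"Bearer {graph_token}"},
    json={
        "principalId": client_sp_id,  # the identity being granted the role (Postman client app)
        "resourceId": api_sp_id,      # the API (resource) that exposes the app role
        "appRoleId": add_role_id,     # which app role is being granted
    },
)
resp.raise_for_status()               # expect 201 Created on success
print(resp.json())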

Step 4: Configure a JWT validation policy to pre-authorize requests

Follow the instructions in Protect API backend in API Management using OAuth 2.0 and Azure Active Directory - Azure API Management | Microsoft Docs to add a Validate JWT policy to your API.

Add the following validate-jwt policy to the <inbound> policy section of your API. It checks the value of the audience claim in the access token obtained from Azure AD and returns an error message if the token is not valid.

- Update the aad-tenant value.
- Update the aud claim value, which is the Application (client) ID of the API app we created in Step 1.
- You can also validate additional claims carried by the token; in our case we added the AddRole app role in Step 1 (see the second example below).

<validate-jwt header-name="Authorization" failed-validation-httpcode="401" failed-validation-error-message="Unauthorized. Access token is missing or invalid.">
    <openid-config url="https://login.microsoftonline.com/{aad-tenant}/v2.0/.well-known/openid-configuration" />
    <required-claims>
        <claim name="aud">
            <value>{backend-api-application-client-id}</value>
        </claim>
    </required-claims>
</validate-jwt>

For example, to include an additional claim:

<validate-jwt header-name="Authorization" failed-validation-httpcode="401" failed-validation-error-message="Unauthorized. Access token is missing or invalid.">
    <openid-config url="https://login.microsoftonline.com/azurerampup.onmicrosoft.com/v2.0/.well-known/openid-configuration" />
    <required-claims>
        <claim name="aud">
            <value>3b0bd75a-d72f-46ec-99f1-040bab17d0ed</value>
        </claim>
        <claim name="roles" match="all">
            <value>AddRole</value>
        </claim>
    </required-claims>
</validate-jwt>

Step 5: Request a JWT token using Postman

Now that all the setup is done, let's get the JWT token before we make any API calls.

1. In Postman, create a POST request with the following values:
   - URL: go to your client app in Azure AD, click Endpoints, and copy the OAuth 2.0 token endpoint (v2) URL.
   - Select Body, then x-www-form-urlencoded.
   - Enter the following key/value pairs:
     - grant_type: client_credentials
     - client_id: your client app's Application (client) ID (from Step 2; you can copy it from the Portal)
     - client_secret: your client app's client secret
     - scope: <your API app's Application (client) ID>/.default (note: you must append /.default to the ID)
2. Send the request. If all goes well, you should get a JWT token back in the response.

Step 6: Inspect the token (optional step)

- Go to https://jwt.ms
- Copy the JWT token from the previous step and paste it in; the site will decode the token.
- The decoded token will include the roles claim (for example, roles: "AddRole").
- This proves that Postman (the client) obtained a token that carries the AddRole permission.

Step 7: Make the API call

Time to call our APIs from Postman with the JWT token we got in Step 5.

1. In Postman, create a GET/POST request based on your API operation.
2. Select Headers and enter the following values:
   - Ocp-Apim-Subscription-Key: your subscription key from APIM
   - Authorization: Bearer <JWT token from Step 5>
3. Click Send. If all goes well, APIM validates the JWT token, calls the backend API and returns the response.

Step 8: Build an application to call the API

In this blog we used Postman to test the API calls, but in production you or your customers would build an application and implement OAuth 2.0; see the Azure AD code samples. (A minimal sketch of such a client follows below.)
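To make Steps 5, 7 and 8 concrete, here is a minimal, hedged Python sketch of a daemon-style client: it acquires a token with the MSAL library's client credentials flow and then calls an APIM-fronted operation. The endpoint URL, operation path and all IDs and keys are placeholders, not values from the original post.

import msal       # pip install msal
import requests   # pip install requests

tenant_id = "<your tenant ID or xxx.onmicrosoft.com>"
client_id = "<client app's Application (client) ID>"            # from Step 2
client_secret = "<client app's client secret>"                  # from Step 2
backend_app_id = "<API app's Application (client) ID>"          # from Step 1
apim_url = "https://<your-apim>.azure-api.net/<api>/<operation>"  # placeholder operation URL
apim_subscription_key = "<APIM subscription key>"

# Step 5 equivalent: acquire an access token with the client credentials flow.
app = msal.ConfidentialClientApplication(
    client_id,
    authority=f"https://login.microsoftonline.com/{tenant_id}",
    client_credential=client_secret,
)
result = app.acquire_token_for_client(scopes=[f"{backend_app_id}/.default"])
if "access_token" not in result:
    raise RuntimeError(result.get("error_description"))

# Step 7 equivalent: call the API through APIM with the subscription key and bearer token.
resp = requests.get(
    apim_url,
    headers={
        "Ocp-Apim-Subscription-Key": apim_subscription_key,
        "Authorization": f"Bearer {result['access_token']}",
    },
)
print(resp.status_code, resp.text)

If the validate-jwt policy from Step 4 is in place, a request without a valid bearer token (or with a token missing the AddRole claim) should come back as 401 Unauthorized.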