"}},"componentScriptGroups({\"componentId\":\"custom.widget.Social_Sharing\"})":{"__typename":"ComponentScriptGroups","scriptGroups":{"__typename":"ComponentScriptGroupsDefinition","afterInteractive":{"__typename":"PageScriptGroupDefinition","group":"AFTER_INTERACTIVE","scriptIds":[]},"lazyOnLoad":{"__typename":"PageScriptGroupDefinition","group":"LAZY_ON_LOAD","scriptIds":[]}},"componentScripts":[]},"component({\"componentId\":\"custom.widget.MicrosoftFooter\"})":{"__typename":"Component","render({\"context\":{\"component\":{\"entities\":[],\"props\":{}},\"page\":{\"entities\":[\"board:FastTrackforAzureBlog\",\"message:3885602\"],\"name\":\"BlogMessagePage\",\"props\":{},\"url\":\"https://techcommunity.microsoft.com/blog/fasttrackforazureblog/create-an-azure-openai-langchain-chromadb-and-chainlit-chat-app-in-container-app/3885602\"}}})":{"__typename":"ComponentRenderResult","html":""}},"componentScriptGroups({\"componentId\":\"custom.widget.MicrosoftFooter\"})":{"__typename":"ComponentScriptGroups","scriptGroups":{"__typename":"ComponentScriptGroupsDefinition","afterInteractive":{"__typename":"PageScriptGroupDefinition","group":"AFTER_INTERACTIVE","scriptIds":[]},"lazyOnLoad":{"__typename":"PageScriptGroupDefinition","group":"LAZY_ON_LOAD","scriptIds":[]}},"componentScripts":[]},"cachedText({\"lastModified\":\"1743095130000\",\"locale\":\"en-US\",\"namespaces\":[\"components/community/NavbarDropdownToggle\"]})":[{"__ref":"CachedAsset:text:en_US-components/community/NavbarDropdownToggle-1743095130000"}],"cachedText({\"lastModified\":\"1743095130000\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/common/QueryHandler\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/common/QueryHandler-1743095130000"}],"cachedText({\"lastModified\":\"1743095130000\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageCoverImage\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageCoverImage-1743095130000"}],"cachedText({\"la
stModified\":\"1743095130000\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/nodes/NodeTitle\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/nodes/NodeTitle-1743095130000"}],"cachedText({\"lastModified\":\"1743095130000\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageTimeToRead\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageTimeToRead-1743095130000"}],"cachedText({\"lastModified\":\"1743095130000\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageSubject\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageSubject-1743095130000"}],"cachedText({\"lastModified\":\"1743095130000\",\"locale\":\"en-US\",\"namespaces\":[\"components/users/UserLink\"]})":[{"__ref":"CachedAsset:text:en_US-components/users/UserLink-1743095130000"}],"cachedText({\"lastModified\":\"1743095130000\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/users/UserRank\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/users/UserRank-1743095130000"}],"cachedText({\"lastModified\":\"1743095130000\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageTime\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageTime-1743095130000"}],"cachedText({\"lastModified\":\"1743095130000\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageBody\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageBody-1743095130000"}],"cachedText({\"lastModified\":\"1743095130000\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageCustomFields\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageCustomFields-1743095130000"}],"cachedText({\"lastModified\":\"1743095130000\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageRevision\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageRevision-1743095130000"}],"cachedText({\"lastModified\":\"1743095130000\",\"locale\":\"en-US\",\"namespaces
\":[\"components/messages/MessageReplyButton\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageReplyButton-1743095130000"}],"cachedText({\"lastModified\":\"1743095130000\",\"locale\":\"en-US\",\"namespaces\":[\"components/messages/MessageAuthorBio\"]})":[{"__ref":"CachedAsset:text:en_US-components/messages/MessageAuthorBio-1743095130000"}],"cachedText({\"lastModified\":\"1743095130000\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/users/UserAvatar\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/users/UserAvatar-1743095130000"}],"cachedText({\"lastModified\":\"1743095130000\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/ranks/UserRankLabel\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/ranks/UserRankLabel-1743095130000"}],"cachedText({\"lastModified\":\"1743095130000\",\"locale\":\"en-US\",\"namespaces\":[\"components/users/UserRegistrationDate\"]})":[{"__ref":"CachedAsset:text:en_US-components/users/UserRegistrationDate-1743095130000"}],"cachedText({\"lastModified\":\"1743095130000\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/nodes/NodeAvatar\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/nodes/NodeAvatar-1743095130000"}],"cachedText({\"lastModified\":\"1743095130000\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/nodes/NodeDescription\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/nodes/NodeDescription-1743095130000"}],"message({\"id\":\"message:4082983\"})":{"__ref":"BlogReplyMessage:message:4082983"},"message({\"id\":\"message:4081732\"})":{"__ref":"BlogReplyMessage:message:4081732"},"message({\"id\":\"message:4081684\"})":{"__ref":"BlogReplyMessage:message:4081684"},"message({\"id\":\"message:3956486\"})":{"__ref":"BlogReplyMessage:message:3956486"},"message({\"id\":\"message:3913401\"})":{"__ref":"BlogReplyMessage:message:3913401"},"message({\"id\":\"message:3913052\"})":{"__ref":"BlogReplyMessage:m
essage:3913052"},"message({\"id\":\"message:3899816\"})":{"__ref":"BlogReplyMessage:message:3899816"},"message({\"id\":\"message:3899784\"})":{"__ref":"BlogReplyMessage:message:3899784"},"message({\"id\":\"message:3885778\"})":{"__ref":"BlogReplyMessage:message:3885778"},"message({\"id\":\"message:3885752\"})":{"__ref":"BlogReplyMessage:message:3885752"},"cachedText({\"lastModified\":\"1743095130000\",\"locale\":\"en-US\",\"namespaces\":[\"components/tags/TagView/TagViewChip\"]})":[{"__ref":"CachedAsset:text:en_US-components/tags/TagView/TagViewChip-1743095130000"}],"cachedText({\"lastModified\":\"1743095130000\",\"locale\":\"en-US\",\"namespaces\":[\"shared/client/components/nodes/NodeIcon\"]})":[{"__ref":"CachedAsset:text:en_US-shared/client/components/nodes/NodeIcon-1743095130000"}]},"CachedAsset:pages-1743770138473":{"__typename":"CachedAsset","id":"pages-1743770138473","value":[{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"BlogViewAllPostsPage","type":"BLOG","urlPath":"/category/:categoryId/blog/:boardId/all-posts/(/:after|/:before)?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"CasePortalPage","type":"CASE_PORTAL","urlPath":"/caseportal","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"CreateGroupHubPage","type":"GROUP_HUB","urlPath":"/groups/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"CaseViewPage","type":"CASE_DETAILS","urlPath":"/case/:caseId/:caseNumber","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"InboxPage","type":"COMMUNITY","urlPath":"/inbox","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"HelpFAQPage","type":"COMMUNITY",
"urlPath":"/help","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"IdeaMessagePage","type":"IDEA_POST","urlPath":"/idea/:boardId/:messageSubject/:messageId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"IdeaViewAllIdeasPage","type":"IDEA","urlPath":"/category/:categoryId/ideas/:boardId/all-ideas/(/:after|/:before)?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"LoginPage","type":"USER","urlPath":"/signin","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"BlogPostPage","type":"BLOG","urlPath":"/category/:categoryId/blogs/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"UserBlogPermissions.Page","type":"COMMUNITY","urlPath":"/c/user-blog-permissions/page","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"ThemeEditorPage","type":"COMMUNITY","urlPath":"/designer/themes","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"TkbViewAllArticlesPage","type":"TKB","urlPath":"/category/:categoryId/kb/:boardId/all-articles/(/:after|/:before)?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1730819800000,"localOverride":null,"page":{"id":"AllEvents","type":"CUSTOM","urlPath":"/Events","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"OccasionEditPage","type":"EVENT","urlPath":"/event/:boardId/:messageSubject/:messageId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":174377013
8473,"localOverride":null,"page":{"id":"OAuthAuthorizationAllowPage","type":"USER","urlPath":"/auth/authorize/allow","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"PageEditorPage","type":"COMMUNITY","urlPath":"/designer/pages","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"PostPage","type":"COMMUNITY","urlPath":"/category/:categoryId/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"ForumBoardPage","type":"FORUM","urlPath":"/category/:categoryId/discussions/:boardId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"TkbBoardPage","type":"TKB","urlPath":"/category/:categoryId/kb/:boardId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"EventPostPage","type":"EVENT","urlPath":"/category/:categoryId/events/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"UserBadgesPage","type":"COMMUNITY","urlPath":"/users/:login/:userId/badges","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"GroupHubMembershipAction","type":"GROUP_HUB","urlPath":"/membership/join/:nodeId/:membershipType","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"MaintenancePage","type":"COMMUNITY","urlPath":"/maintenance","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"IdeaReplyPage","type":"IDEA_REPLY","urlPath":"/idea/:boardId/:messageSubject/:messageId/comments/:replyId","
__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"UserSettingsPage","type":"USER","urlPath":"/mysettings/:userSettingsTab","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"GroupHubsPage","type":"GROUP_HUB","urlPath":"/groups","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"ForumPostPage","type":"FORUM","urlPath":"/category/:categoryId/discussions/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"OccasionRsvpActionPage","type":"OCCASION","urlPath":"/event/:boardId/:messageSubject/:messageId/rsvp/:responseType","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"VerifyUserEmailPage","type":"USER","urlPath":"/verifyemail/:userId/:verifyEmailToken","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"AllOccasionsPage","type":"OCCASION","urlPath":"/category/:categoryId/events/:boardId/all-events/(/:after|/:before)?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"EventBoardPage","type":"EVENT","urlPath":"/category/:categoryId/events/:boardId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"TkbReplyPage","type":"TKB_REPLY","urlPath":"/kb/:boardId/:messageSubject/:messageId/comments/:replyId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"IdeaBoardPage","type":"IDEA","urlPath":"/category/:categoryId/ideas/:boardId","__typename":"PageDescriptor"},"__typename":"Page
Resource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"CommunityGuideLinesPage","type":"COMMUNITY","urlPath":"/communityguidelines","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"CaseCreatePage","type":"SALESFORCE_CASE_CREATION","urlPath":"/caseportal/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"TkbEditPage","type":"TKB","urlPath":"/kb/:boardId/:messageSubject/:messageId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"ForgotPasswordPage","type":"USER","urlPath":"/forgotpassword","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"IdeaEditPage","type":"IDEA","urlPath":"/idea/:boardId/:messageSubject/:messageId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"TagPage","type":"COMMUNITY","urlPath":"/tag/:tagName","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"BlogBoardPage","type":"BLOG","urlPath":"/category/:categoryId/blog/:boardId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"OccasionMessagePage","type":"OCCASION_TOPIC","urlPath":"/event/:boardId/:messageSubject/:messageId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"ManageContentPage","type":"COMMUNITY","urlPath":"/managecontent","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"ClosedMembershipNodeNonMembersPage","type":"GROUP_HUB","urlPath":"/closedgroup/:gro
upHubId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"CommunityPage","type":"COMMUNITY","urlPath":"/","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"ForumMessagePage","type":"FORUM_TOPIC","urlPath":"/discussions/:boardId/:messageSubject/:messageId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"IdeaPostPage","type":"IDEA","urlPath":"/category/:categoryId/ideas/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1730819800000,"localOverride":null,"page":{"id":"CommunityHub.Page","type":"CUSTOM","urlPath":"/Directory","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"BlogMessagePage","type":"BLOG_ARTICLE","urlPath":"/blog/:boardId/:messageSubject/:messageId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"RegistrationPage","type":"USER","urlPath":"/register","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"EditGroupHubPage","type":"GROUP_HUB","urlPath":"/group/:groupHubId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"ForumEditPage","type":"FORUM","urlPath":"/discussions/:boardId/:messageSubject/:messageId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"ResetPasswordPage","type":"USER","urlPath":"/resetpassword/:userId/:resetPasswordToken","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1730819800000,"localOverride":null,"page":{"id":"AllBlogs.Page","t
ype":"CUSTOM","urlPath":"/blogs","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"TkbMessagePage","type":"TKB_ARTICLE","urlPath":"/kb/:boardId/:messageSubject/:messageId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"BlogEditPage","type":"BLOG","urlPath":"/blog/:boardId/:messageSubject/:messageId/edit","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"ManageUsersPage","type":"USER","urlPath":"/users/manage/:tab?/:manageUsersTab?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"ForumReplyPage","type":"FORUM_REPLY","urlPath":"/discussions/:boardId/:messageSubject/:messageId/replies/:replyId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"PrivacyPolicyPage","type":"COMMUNITY","urlPath":"/privacypolicy","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"NotificationPage","type":"COMMUNITY","urlPath":"/notifications","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"UserPage","type":"USER","urlPath":"/users/:login/:userId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"OccasionReplyPage","type":"OCCASION_REPLY","urlPath":"/event/:boardId/:messageSubject/:messageId/comments/:replyId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"ManageMembersPage","type":"GROUP_HUB","urlPath":"/group/:groupHubId/manage/:tab?","__typename":"PageDescriptor"},"__typename":"PageResource"}
,{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"SearchResultsPage","type":"COMMUNITY","urlPath":"/search","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"BlogReplyPage","type":"BLOG_REPLY","urlPath":"/blog/:boardId/:messageSubject/:messageId/replies/:replyId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"GroupHubPage","type":"GROUP_HUB","urlPath":"/group/:groupHubId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"TermsOfServicePage","type":"COMMUNITY","urlPath":"/termsofservice","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"CategoryPage","type":"CATEGORY","urlPath":"/category/:categoryId","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"ForumViewAllTopicsPage","type":"FORUM","urlPath":"/category/:categoryId/discussions/:boardId/all-topics/(/:after|/:before)?","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"TkbPostPage","type":"TKB","urlPath":"/category/:categoryId/kbs/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"},{"lastUpdatedTime":1743770138473,"localOverride":null,"page":{"id":"GroupHubPostPage","type":"GROUP_HUB","urlPath":"/group/:groupHubId/:boardId/create","__typename":"PageDescriptor"},"__typename":"PageResource"}],"localOverride":false},"CachedAsset:text:en_US-components/context/AppContext/AppContextProvider-0":{"__typename":"CachedAsset","id":"text:en_US-components/context/AppContext/AppContextProvider-0","value":{"noCommunity":"Cannot find community","noUser":"Cannot find current user","noNode":"Cannot find node with id 
{nodeId}","noMessage":"Cannot find message with id {messageId}"},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/common/Loading/LoadingDot-0":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/common/Loading/LoadingDot-0","value":{"title":"Loading..."},"localOverride":false},"User:user:-1":{"__typename":"User","id":"user:-1","uid":-1,"login":"Deleted","email":"","avatar":null,"rank":null,"kudosWeight":1,"registrationData":{"__typename":"RegistrationData","status":"ANONYMOUS","registrationTime":null,"confirmEmailStatus":false,"registrationAccessLevel":"VIEW","ssoRegistrationFields":[]},"ssoId":null,"profileSettings":{"__typename":"ProfileSettings","dateDisplayStyle":{"__typename":"InheritableStringSettingWithPossibleValues","key":"layout.friendly_dates_enabled","value":"false","localValue":"true","possibleValues":["true","false"]},"dateDisplayFormat":{"__typename":"InheritableStringSetting","key":"layout.format_pattern_date","value":"MMM dd yyyy","localValue":"MM-dd-yyyy"},"language":{"__typename":"InheritableStringSettingWithPossibleValues","key":"profile.language","value":"en-US","localValue":"en","possibleValues":["en-US"]}},"deleted":false},"Theme:customTheme1":{"__typename":"Theme","id":"customTheme1"},"Category:category:FastTrack":{"__typename":"Category","id":"category:FastTrack","entityType":"CATEGORY","displayId":"FastTrack","nodeType":"category","depth":3,"title":"Microsoft FastTrack","shortTitle":"Microsoft 
FastTrack","parent":{"__ref":"Category:category:products-services"}},"Category:category:top":{"__typename":"Category","id":"category:top","displayId":"top","nodeType":"category","depth":0,"title":"Top","entityType":"CATEGORY","shortTitle":"Top"},"Category:category:communities":{"__typename":"Category","id":"category:communities","displayId":"communities","nodeType":"category","depth":1,"parent":{"__ref":"Category:category:top"},"title":"Communities","entityType":"CATEGORY","shortTitle":"Communities"},"Category:category:products-services":{"__typename":"Category","id":"category:products-services","displayId":"products-services","nodeType":"category","depth":2,"parent":{"__ref":"Category:category:communities"},"title":"Products","entityType":"CATEGORY","shortTitle":"Products"},"Blog:board:FastTrackforAzureBlog":{"__typename":"Blog","id":"board:FastTrackforAzureBlog","entityType":"BLOG","displayId":"FastTrackforAzureBlog","nodeType":"board","depth":4,"conversationStyle":"BLOG","title":"FastTrack for 
Azure","description":"","avatar":null,"profileSettings":{"__typename":"ProfileSettings","language":null},"parent":{"__ref":"Category:category:FastTrack"},"ancestors":{"__typename":"CoreNodeConnection","edges":[{"__typename":"CoreNodeEdge","node":{"__ref":"Community:community:gxcuf89792"}},{"__typename":"CoreNodeEdge","node":{"__ref":"Category:category:communities"}},{"__typename":"CoreNodeEdge","node":{"__ref":"Category:category:products-services"}},{"__typename":"CoreNodeEdge","node":{"__ref":"Category:category:FastTrack"}}]},"userContext":{"__typename":"NodeUserContext","canAddAttachments":false,"canUpdateNode":false,"canPostMessages":false,"isSubscribed":false},"boardPolicies":{"__typename":"BoardPolicies","canPublishArticleOnCreate":{"__typename":"PolicyResult","failureReason":{"__typename":"FailureReason","message":"error.lithium.policies.forums.policy_can_publish_on_create_workflow_action.accessDenied","key":"error.lithium.policies.forums.policy_can_publish_on_create_workflow_action.accessDenied","args":[]}}},"shortTitle":"FastTrack for 
Azure","repliesProperties":{"__typename":"RepliesProperties","sortOrder":"REVERSE_PUBLISH_TIME","repliesFormat":"threaded"},"eventPath":"category:FastTrack/category:products-services/category:communities/community:gxcuf89792board:FastTrackforAzureBlog/","tagProperties":{"__typename":"TagNodeProperties","tagsEnabled":{"__typename":"PolicyResult","failureReason":null}},"requireTags":true,"tagType":"PRESET_ONLY"},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/cmstNC05WEo0blc\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/cmstNC05WEo0blc","height":512,"width":512,"mimeType":"image/png"},"Rank:rank:4":{"__typename":"Rank","id":"rank:4","position":6,"name":"Microsoft","color":"333333","icon":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/cmstNC05WEo0blc\"}"},"rankStyle":"OUTLINE"},"User:user:988334":{"__typename":"User","id":"user:988334","uid":988334,"login":"paolosalvatori","deleted":false,"avatar":{"__typename":"UserAvatar","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/dS05ODgzMzQtMzg1MjYyaTE4QTU5MkIyQUVCMkM0MDE"},"rank":{"__ref":"Rank:rank:4"},"email":"","messagesCount":67,"biography":null,"topicsCount":30,"kudosReceivedCount":160,"kudosGivenCount":29,"kudosWeight":1,"registrationData":{"__typename":"RegistrationData","status":null,"registrationTime":"2021-03-05T07:56:49.951-08:00","confirmEmailStatus":null},"followersCount":null,"solutionsCount":0,"entityType":"USER","eventPath":"community:gxcuf89792/user:988334"},"BlogTopicMessage:message:3885602":{"__typename":"BlogTopicMessage","uid":3885602,"subject":"Create an Azure OpenAI, LangChain, ChromaDB, and Chainlit Chat App in Container Apps using 
Terraform","id":"message:3885602","revisionNum":8,"repliesCount":10,"author":{"__ref":"User:user:988334"},"depth":0,"hasGivenKudo":false,"board":{"__ref":"Blog:board:FastTrackforAzureBlog"},"conversation":{"__ref":"Conversation:conversation:3885602"},"messagePolicies":{"__typename":"MessagePolicies","canPublishArticleOnEdit":{"__typename":"PolicyResult","failureReason":{"__typename":"FailureReason","message":"error.lithium.policies.forums.policy_can_publish_on_edit_workflow_action.accessDenied","key":"error.lithium.policies.forums.policy_can_publish_on_edit_workflow_action.accessDenied","args":[]}},"canModerateSpamMessage":{"__typename":"PolicyResult","failureReason":{"__typename":"FailureReason","message":"error.lithium.policies.feature.moderation_spam.action.moderate_entity.allowed.accessDenied","key":"error.lithium.policies.feature.moderation_spam.action.moderate_entity.allowed.accessDenied","args":[]}}},"contentWorkflow":{"__typename":"ContentWorkflow","state":"PUBLISH","scheduledPublishTime":null,"scheduledTimezone":null,"userContext":{"__typename":"MessageWorkflowContext","canSubmitForReview":null,"canEdit":false,"canRecall":null,"canSubmitForPublication":null,"canReturnToAuthor":null,"canPublish":null,"canReturnToReview":null,"canSchedule":false},"shortScheduledTimezone":null},"readOnly":false,"editFrozen":false,"moderationData":{"__ref":"ModerationData:moderation_data:3885602"},"teaser":"
\n
This article and the companion sample show how to create two Azure Container Apps that use OpenAI, LangChain, ChromaDB, and Chainlit using Terraform.
\n
\n

โ€ƒ

\n
\n
","body":"

This article shows how to quickly build chat applications using Python and leveraging powerful technologies such as OpenAI ChatGPT models, Embedding models, LangChain framework, ChromaDB vector database, and Chainlit, an open-source Python package that is specifically designed to create user interfaces (UIs) for AI applications. These applications are hosted on Azure Container Apps, a fully managed environment that enables you to run microservices and containerized applications on a serverless platform.

\n\n

Both applications use a user-defined managed identity to authenticate and authorize against Azure OpenAI Service (AOAI) and Azure Container Registry (ACR) and use Azure Private Endpoints to connect privately and securely to these services. The chat UIs are built using Chainlit, an open-source Python package designed explicitly for creating AI applications. Chainlit seamlessly integrates with LangChain, LlamaIndex, and LangFlow, making it a powerful tool for easily developing ChatGPT-like applications.

\n

By following our example, you can quickly create sophisticated chat applications that utilize cutting-edge technologies, empowering users with intelligent conversational capabilities.

\n

 

\n

You can find the code and Visio diagrams in the companion GitHub repository. Also, check the following articles:

\n\n

 

\n

Prerequisites

\n\n

 

\n

Architecture

\n

The following diagram shows the architecture and network topology of the sample:

\n
 
\n

\n

 

\n

This sample provides two sets of Terraform modules to deploy the infrastructure and the chat applications.

\n

 

\n

Infrastructure Terraform Modules

\n

You can use the Terraform modules in the terraform/infra folder to deploy the infrastructure used by the sample, including the Azure Container Apps Environment, Azure OpenAI Service (AOAI), and Azure Container Registry (ACR), but not the Azure Container Apps (ACA). The Terraform modules in the terraform/infra folder deploy the following resources:

\n\n

 

\n

Application Terraform Modules

\n

You can use these Terraform modules in the terraform/apps To deploy the Azure Container Apps (ACA) using the Docker container images stored in the Azure Container Registry you deployed in the previous step.

\n\n

 

\n

Azure Container Apps

\n

Azure Container Apps (ACA) is a serverless compute service provided by Microsoft Azure that allows developers to easily deploy and manage containerized applications without the need to manage the underlying infrastructure. It provides a simplified and scalable solution for running applications in containers, leveraging the power and flexibility of the Azure ecosystem.

\n

With Azure Container Apps, developers can package their applications into containers using popular containerization technologies such as Docker. These containers encapsulate the application and its dependencies, ensuring consistent execution across different environments.

\n

Powered by Kubernetes and open-source technologies like Dapr, KEDA, and envoy, the service abstracts away the complexities of managing the infrastructure, including provisioning, scaling, and monitoring, allowing developers to focus solely on building and deploying their applications. Azure Container Apps handles automatic scaling, and load balancing, and natively integrates with other Azure services, such as Azure Monitor and Azure Container Registry (ACR), to provide a comprehensive and secure application deployment experience.

\n

Azure Container Apps offers benefits such as rapid deployment, easy scalability, cost-efficiency, and seamless integration with other Azure services, making it an attractive choice for modern application development and deployment scenarios.

\n

 

\n

Azure OpenAI Service

\n

The Azure OpenAI Service is a platform offered by Microsoft Azure that provides cognitive services powered by OpenAI models. One of the models available through this service is the ChatGPT model, which is designed for interactive conversational tasks. It allows developers to integrate natural language understanding and generation capabilities into their applications.

\n

Azure OpenAI Service provides REST API access to OpenAI's powerful language models including the GPT-3, Codex and Embeddings model series. In addition, the new GPT-4 and ChatGPT model series have now reached general availability. These models can be easily adapted to your specific task, including but not limited to content generation, summarization, semantic search, and natural language-to-code translation. Users can access the service through REST APIs, Python SDK, or our web-based interface in the Azure OpenAI Studio.

\n

You can use Embeddings model to transform raw data or inputs into meaningful and compact numerical representations called embeddings. Embeddings capture the semantic or contextual information of the input data in a lower-dimensional space, making it easier for machine learning algorithms to process and analyze the data effectively. Embeddings can be stored in a vector database, such as ChromaDB or Facebook AI Similarity Search (FAISS), explicitly designed for efficient storage, indexing, and retrieval of vector embeddings.

\n

The Chat Completion API, which is part of the Azure OpenAI Service, provides a dedicated interface for interacting with the ChatGPT and GPT-4 models. This API is currently in preview and is the preferred method for accessing these models. The GPT-4 models can only be accessed through this API.

\n

GPT-3, GPT-3.5, and GPT-4 models from OpenAI are prompt-based. With prompt-based models, the user interacts with the model by entering a text prompt, to which the model responds with a text completion. This completion is the model's continuation of the input text. While these models are compelling, their behavior is also very sensitive to the prompt. This makes prompt construction a critical skill to develop. For more information, see Introduction to prompt engineering.

\n

Prompt construction can be complex. In practice, the prompt acts to configure the model weights to complete the desired task, but it's more of an art than a science, often requiring experience and intuition to craft a successful prompt. The goal of this article is to help get you started with this learning process. It attempts to capture general concepts and patterns that apply to all GPT models. However, it's essential to understand that each model behaves differently, so the learnings may not apply equally to all models.

\n

Prompt engineering refers to the process of creating instructions called prompts for Large Language Models (LLMs), such as OpenAI's ChatGPT. With the immense potential of LLMs to solve a wide range of tasks, leveraging prompt engineering can empower us to save significant time and facilitate the development of impressive applications. It holds the key to unleashing the full capabilities of these huge models, transforming how we interact and benefit from them. For more information, see Prompt engineering techniques.

\n

 

\n

Vector Databases

\n

A vector database is a specialized database that goes beyond traditional storage by organizing information to simplify the search for similar items. Instead of merely storing words or numbers, it leverages vector embeddings - unique numerical representations of data. These embeddings capture meaning, context, and relationships. For instance, words are represented as vectors, whereas similar words have similar vector values.

\n

The applications of vector databases are numerous and powerful. In language processing, they facilitate the discovery of related documents or sentences. By comparing the vector embeddings of different texts, finding similar or related information becomes faster and more efficient. This capability benefits search engines and recommendation systems, which can suggest relevant articles or products based on user interests.

\n

In the realm of image analysis, vector databases excel in finding visually similar images. By representing images as vectors, a simple comparison of vector values can identify visually similar images. This capability is precious for tasks like reverse image search or content-based image retrieval.

\n

Additionally, vector databases find applications in fraud detection, anomaly detection, and clustering. By comparing vector embeddings of data points, unusual patterns can be detected, and similar items can be grouped together, aiding in effective data analysis and decision-making. This is a list of Azure services that are suitable for use as a vector database in a retrieval-augmented generation (RAG) solution:

\n

 

\n\n

 

\n

Here is a list of the most popular vector databases:

\n

 

\n\n

 

\n

This sample makes use of the ChromaDB vector database, but you can easily modify the code to use another vector database. You can even use Azure Cache for Redis Enterprise to store the vector embeddings and compute vector similarity with high performance and low latency. For more information, see Vector Similarity Search with Azure Cache for Redis Enterprise.

\n

 

\n

LangChain

\n

LangChain is a software framework designed to streamline the development of applications using large language models (LLMs). It serves as a language model integration framework, facilitating various applications like document analysis and summarization, chatbots, and code analysis.

\n

LangChain's integrations cover an extensive range of systems, tools, and services, making it a comprehensive solution for language model-based applications. LangChain integrates with the major cloud platforms such as Microsoft Azure, Amazon AWS, and Google, and with API wrappers for various purposes like news, movie information, and weather, as well as support for Bash, web scraping, and more. It also supports multiple language models, including those from OpenAI, Anthropic, and Hugging Face. Moreover, LangChain offers various functionalities for document handling, code generation, analysis, debugging, and interaction with databases and other data sources.

\n

 

\n

Chainlit

\n

Chainlit is an open-source Python package that is specifically designed to create user interfaces (UIs) for AI applications. It simplifies the process of building interactive chats and interfaces, making developing AI-powered applications faster and more efficient. While Streamlit is a general-purpose UI library, Chainlit is purpose-built for AI applications and seamlessly integrates with other AI technologies such as LangChain, LlamaIndex, and LangFlow.

\n

With Chainlit, developers can easily create intuitive UIs for their AI models, including ChatGPT-like applications. It provides a user-friendly interface for users to interact with AI models, enabling conversational experiences and information retrieval. Chainlit also offers unique features, such as displaying the Chain of Thought, which allows users to explore the reasoning process directly within the UI. This feature enhances transparency and enables users to understand how the AI arrives at its responses or recommendations.

\n

For more information, see the following resources:

\n\n

 

\n

Deploy the Infrastructure

\n

Before deploying the Terraform modules in the terraform/infra folder, specify a value for the following variables in the terraform.tfvars variable definitions file.

\n

 

\n

 

\n
name_prefix = \"Blue\"\nlocation    = \"EastUS\"
\n

 

\n

 

\n

This is the definition of each variable:

\n\n

NOTE: Make sure to select a region where Azure OpenAI Service (AOAI) supports both GPT-3.5/GPT-4 models like gpt-35-turbo-16k and Embeddings models like text-embedding-ada-002.

\n

 

\n

OpenAI Module

\n

The following table contains the code from the terraform/infra/modules/openai/main.tf Terraform module used to deploy the Azure OpenAI Service.

\n

 

\n

 

\n
resource \"azurerm_cognitive_account\" \"openai\" {\n  name                          = var.name\n  location                      = var.location\n  resource_group_name           = var.resource_group_name\n  kind                          = \"OpenAI\"\n  custom_subdomain_name         = var.custom_subdomain_name\n  sku_name                      = var.sku_name\n  public_network_access_enabled = var.public_network_access_enabled\n  tags                          = var.tags\n\n  identity {\n    type = \"SystemAssigned\"\n  }\n\n  lifecycle {\n    ignore_changes = [\n      tags\n    ]\n  }\n}\n\nresource \"azurerm_cognitive_deployment\" \"deployment\" {\n  for_each             = {for deployment in var.deployments: deployment.name => deployment}\n\n  name                 = each.key\n  cognitive_account_id = azurerm_cognitive_account.openai.id\n\n  model {\n    format  = \"OpenAI\"\n    name    = each.value.model.name\n    version = each.value.model.version\n  }\n\n  scale {\n    type = \"Standard\"\n  }\n}\n\nresource \"azurerm_monitor_diagnostic_setting\" \"settings\" {\n  name                       = \"DiagnosticsSettings\"\n  target_resource_id         = azurerm_cognitive_account.openai.id\n  log_analytics_workspace_id = var.log_analytics_workspace_id\n\n  enabled_log {\n    category = \"Audit\"\n\n    retention_policy {\n      enabled = true\n      days    = var.log_analytics_retention_days\n    }\n  }\n\n  enabled_log {\n    category = \"RequestResponse\"\n\n    retention_policy {\n      enabled = true\n      days    = var.log_analytics_retention_days\n    }\n  }\n\n  enabled_log {\n    category = \"Trace\"\n\n    retention_policy {\n      enabled = true\n      days    = var.log_analytics_retention_days\n    }\n  }\n\n  metric {\n    category = \"AllMetrics\"\n\n    retention_policy {\n      enabled = true\n      days    = var.log_analytics_retention_days\n    }\n  }\n}
\n

 

\n

 

\n

Azure Cognitive Services uses custom subdomain names for each resource created through the Azure portal, Azure Cloud Shell, Azure CLI, Bicep, Azure Resource Manager (ARM), or Terraform. Unlike regional endpoints, which were common for all customers in a specific Azure region, custom subdomain names are unique to the resource. Custom subdomain names are required to enable authentication features like Azure Active Directory (Azure AD). We need to specify a custom subdomain for our Azure OpenAI Service, as our chatbot applications will use an Azure AD security token to access it. By default, the terraform/infra/modules/openai/main.tf module sets the value of the custom_subdomain_name parameter to the lowercase name of the Azure OpenAI resource. For more information on custom subdomains, see Custom subdomain names for Cognitive Services.

\n

This Terraform module allows you to pass an array containing the definition of one or more model deployments in the deployments variable. For more information on model deployments, see Create a resource and deploy a model using Azure OpenAI. The openai_deployments variable in the terraform/infra/variables.tf file defines the structure and the default models deployed by the sample:

\n

 

\n

 

\n
variable \"openai_deployments\" {\n  description = \"(Optional) Specifies the deployments of the Azure OpenAI Service\"\n  type = list(object({\n    name = string\n    model = object({\n      name = string\n      version = string\n    })\n    rai_policy_name = string  \n  }))\n  default = [\n    {\n      name = \"gpt-35-turbo-16k\"\n      model = {\n        name = \"gpt-35-turbo-16k\"\n        version = \"0613\"\n      }\n      rai_policy_name = \"\"\n    },\n    {\n      name = \"text-embedding-ada-002\"\n      model = {\n        name = \"text-embedding-ada-002\"\n        version = \"2\"\n      }\n      rai_policy_name = \"\"\n    }\n  ] \n}
\n

 

\n

 

\n

Alternatively, you can use the Terraform module for deploying Azure OpenAI Service to deploy the Azure OpenAI Service.

\n

Private Endpoint Module

\n

The terraform/infra/main.tf module creates Azure Private Endpoints and Azure Private DNS Zones for each of the following resources:

\n\n

In particular, it creates an Azure Private Endpoint and Azure Private DNS Zone for the Azure OpenAI Service as shown in the following code snippet:

\n

 

\n

 

\n
module \"openai_private_dns_zone\" {\n  source                       = \"./modules/private_dns_zone\"\n  name                         = \"privatelink.openai.azure.com\"\n  resource_group_name          = azurerm_resource_group.rg.name\n  tags                         = var.tags\n  virtual_networks_to_link     = {\n    (module.virtual_network.name) = {\n      subscription_id = data.azurerm_client_config.current.subscription_id\n      resource_group_name = azurerm_resource_group.rg.name\n    }\n  }\n}\n\nmodule \"openai_private_endpoint\" {\n  source                         = \"./modules/private_endpoint\"\n  name                           = \"${module.openai.name}PrivateEndpoint\"\n  location                       = var.location\n  resource_group_name            = azurerm_resource_group.rg.name\n  subnet_id                      = module.virtual_network.subnet_ids[var.vm_subnet_name]\n  tags                           = var.tags\n  private_connection_resource_id = module.openai.id\n  is_manual_connection           = false\n  subresource_name               = \"account\"\n  private_dns_zone_group_name    = \"AcrPrivateDnsZoneGroup\"\n  private_dns_zone_group_ids     = [module.openai_private_dns_zone.id]\n}\n
\n

 

\n

 

\n

Below you can read the code of the terraform/infra/modules/private_endpoint/main.tf module, which is used to create Azure Private Endpoints:

\n

 

\n

 

\n
resource \"azurerm_private_endpoint\" \"private_endpoint\" {\n  name                = var.name\n  location            = var.location\n  resource_group_name = var.resource_group_name\n  subnet_id           = var.subnet_id\n  tags                = var.tags\n\n  private_service_connection {\n    name                           = \"${var.name}Connection\"\n    private_connection_resource_id = var.private_connection_resource_id\n    is_manual_connection           = var.is_manual_connection\n    subresource_names              = try([var.subresource_name], null)\n    request_message                = try(var.request_message, null)\n  }\n\n  private_dns_zone_group {\n    name                 = var.private_dns_zone_group_name\n    private_dns_zone_ids = var.private_dns_zone_group_ids\n  }\n\n  lifecycle {\n    ignore_changes = [\n      tags\n    ]\n  }\n}
\n

 

\n

 

\n

Private DNS Zone Module

\n

In the following box, you can read the code of the terraform/infra/modules/private_dns_zone/main.tf module, which is utilized to create the Azure Private DNS Zones.

\n

 

\n

 

\n
resource \"azurerm_private_dns_zone\" \"private_dns_zone\" {\n  name                = var.name\n  resource_group_name = var.resource_group_name\n  tags                = var.tags\n\n  lifecycle {\n    ignore_changes = [\n      tags\n    ]\n  }\n}\n\nresource \"azurerm_private_dns_zone_virtual_network_link\" \"link\" {\n  for_each = var.virtual_networks_to_link\n\n  name                  = \"link_to_${lower(basename(each.key))}\"\n  resource_group_name   = var.resource_group_name\n  private_dns_zone_name = azurerm_private_dns_zone.private_dns_zone.name\n  virtual_network_id    = \"/subscriptions/${each.value.subscription_id}/resourceGroups/${each.value.resource_group_name}/providers/Microsoft.Network/virtualNetworks/${each.key}\"\n\n  lifecycle {\n    ignore_changes = [\n      tags\n    ]\n  }\n}
\n

 

\n

 

\n

Workload Managed Identity Module

\n

Below you can read the code of the terraform/infra/modules/managed_identity/main.tf module, which is used to create the Azure Managed Identity used by the Azure Container Apps to pull container images from the Azure Container Registry, and by the chat applications to connect to the Azure OpenAI Service. You can use a system-assigned or user-assigned managed identity from Azure Active Directory (Azure AD) to let Azure Container Apps access any Azure AD-protected resource. For more information, see Managed identities in Azure Container Apps. You can pull container images from private repositories in an Azure Container Registry using user-assigned or user-assigned managed identities for authentication to avoid using administrative credentials. For more information, see Azure Container Apps image pull with managed identity. This user-defined managed identity is assigned the Cognitive Services User role on the Azure OpenAI Service namespace and ACRPull role on the Azure Container Registry (ACR). By assigning the above roles, you grant the user-defined managed identity access to these resources.

\n

 

\n

 

\n
resource \"azurerm_user_assigned_identity\" \"workload_user_assigned_identity\" {\n  name                = var.name\n  resource_group_name = var.resource_group_name\n  location            = var.location\n  tags                = var.tags\n\n  lifecycle {\n    ignore_changes = [\n      tags\n    ]\n  }\n}\n\nresource \"azurerm_role_assignment\" \"cognitive_services_user_assignment\" {\n  scope                = var.openai_id\n  role_definition_name = \"Cognitive Services User\"\n  principal_id         = azurerm_user_assigned_identity.workload_user_assigned_identity.principal_id\n  skip_service_principal_aad_check = true\n}\n\nresource \"azurerm_role_assignment\" \"acr_pull_assignment\" {\n  scope                = var.acr_id\n  role_definition_name = \"AcrPull\"\n  principal_id         = azurerm_user_assigned_identity.workload_user_assigned_identity.principal_id\n  skip_service_principal_aad_check = true\n}
\n

 

\n

 

\n

Deploy the Applications

\n

Before deploying the Terraform modules in the terraform/apps folder, specify a value for the following variables in the terraform.tfvars variable definitions file.

\n

 

\n

 

\n
resource_group_name            = \"BlueRG\"\ncontainer_app_environment_name = \"BlueEnvironment\"\ncontainer_registry_name        = \"BlueRegistry\"\nworkload_managed_identity_name = \"BlueWorkloadIdentity\"\ncontainer_apps                 = [\n  {\n    name                            = \"chatapp\"\n    revision_mode                   = \"Single\"\n    ingress                         = {\n      allow_insecure_connections    = true\n      external_enabled              = true\n      target_port                   = 8000\n      transport                     = \"http\"\n      traffic_weight                = {\n        label                       = \"default\"\n        latest_revision             = true\n        revision_suffix             = \"default\"\n        percentage                  = 100\n      }\n    }\n    template                        = {\n      containers                    = [\n        {\n          name                      = \"chat\"\n          image                     = \"chat:v1\"\n          cpu                       = 0.5\n          memory                    = \"1Gi\"\n          env                       = [\n            {\n              name                  = \"TEMPERATURE\"\n              value                 = 0.9\n            },\n            {\n              name                  = \"AZURE_OPENAI_BASE\"\n              value                 = \"https://blueopenai.openai.azure.com/\"\n            },\n            {\n              name                  = \"AZURE_OPENAI_KEY\"\n              value                 = \"\"\n            },\n            {\n              name                  = \"AZURE_OPENAI_TYPE\"\n              value                 = \"azure_ad\"\n            },\n            {\n              name                  = \"AZURE_OPENAI_VERSION\"\n              value                 = \"2023-06-01-preview\"\n            },\n            {\n              name                  = \"AZURE_OPENAI_DEPLOYMENT\"\n              value                 = 
\"gpt-35-turbo-16k\"\n            },\n            {\n              name                  = \"AZURE_OPENAI_MODEL\"\n              value                 = \"gpt-35-turbo-16k\"\n            },\n            {\n              name                  = \"AZURE_OPENAI_SYSTEM_MESSAGE\"\n              value                 = \"You are a helpful assistant.\"\n            },\n            {\n              name                  = \"MAX_RETRIES\"\n              value                 = 5\n            },\n            {\n              name                  = \"BACKOFF_IN_SECONDS\"\n              value                 = \"1\"\n            },\n            {\n              name                  = \"TOKEN_REFRESH_INTERVAL\"\n              value                 = 2700\n            }\n          ]\n          liveness_probe            = {\n            failure_count_threshold = 3\n            initial_delay           = 30\n            interval_seconds        = 60\n            path                    = \"/\"\n            port                    = 8000\n            timeout                 = 30\n            transport               = \"HTTP\"\n          }\n          readiness_probe = {\n            failure_count_threshold = 3\n            interval_seconds        = 60\n            path                    = \"/\"\n            port                    = 8000\n            success_count_threshold = 3\n            timeout                 = 30\n            transport               = \"HTTP\"\n          }\n          startup_probe = {\n            failure_count_threshold = 3\n            interval_seconds        = 60\n            path                    = \"/\"\n            port                    = 8000\n            timeout                 = 30\n            transport               = \"HTTP\"\n          }\n        }\n      ]\n      min_replicas                  = 1\n      max_replicas                  = 3\n    }\n  },\n  {\n    name                            = \"docapp\"\n    revision_mode                   = 
\"Single\"\n    ingress                         = {\n      allow_insecure_connections    = true\n      external_enabled              = true\n      target_port                   = 8000\n      transport                     = \"http\"\n      traffic_weight                = {\n        label                       = \"default\"\n        latest_revision             = true\n        revision_suffix             = \"default\"\n        percentage                  = 100\n      }\n    }\n    template                        = {\n      containers                    = [\n        {\n          name                      = \"doc\"\n          image                     = \"doc:v1\"\n          cpu                       = 0.5\n          memory                    = \"1Gi\"\n          env                       = [\n            {\n              name                  = \"TEMPERATURE\"\n              value                 = 0.9\n            },\n            {\n              name                  = \"AZURE_OPENAI_BASE\"\n              value                 = \"https://blueopenai.openai.azure.com/\"\n            },\n            {\n              name                  = \"AZURE_OPENAI_KEY\"\n              value                 = \"\"\n            },\n            {\n              name                  = \"AZURE_OPENAI_TYPE\"\n              value                 = \"azure_ad\"\n            },\n            {\n              name                  = \"AZURE_OPENAI_VERSION\"\n              value                 = \"2023-06-01-preview\"\n            },\n            {\n              name                  = \"AZURE_OPENAI_DEPLOYMENT\"\n              value                 = \"gpt-35-turbo-16k\"\n            },\n            {\n              name                  = \"AZURE_OPENAI_MODEL\"\n              value                 = \"gpt-35-turbo-16k\"\n            },\n            {\n              name                  = \"AZURE_OPENAI_ADA_DEPLOYMENT\"\n              value                 = 
\"text-embedding-ada-002\"\n            },\n            {\n              name                  = \"AZURE_OPENAI_SYSTEM_MESSAGE\"\n              value                 = \"You are a helpful assistant.\"\n            },\n            {\n              name                  = \"MAX_RETRIES\"\n              value                 = 5\n            },\n            {\n              name                  = \"CHAINLIT_MAX_FILES\"\n              value                 = 10\n            },\n            {\n              name                  = \"TEXT_SPLITTER_CHUNK_SIZE\"\n              value                 = 1000\n            },\n            {\n              name                  = \"TEXT_SPLITTER_CHUNK_OVERLAP\"\n              value                 = 10\n            },\n            {\n              name                  = \"EMBEDDINGS_CHUNK_SIZE\"\n              value                 = 16\n            },\n            {\n              name                  = \"BACKOFF_IN_SECONDS\"\n              value                 = \"1\"\n            },\n            {\n              name                  = \"CHAINLIT_MAX_SIZE_MB\"\n              value                 = 100\n            },\n            {\n              name                  = \"TOKEN_REFRESH_INTERVAL\"\n              value                 = 2700\n            }\n          ]\n          liveness_probe = {\n            failure_count_threshold = 3\n            initial_delay           = 30\n            interval_seconds        = 60\n            path                    = \"/\"\n            port                    = 8000\n            timeout                 = 30\n            transport               = \"HTTP\"\n          }\n          readiness_probe = {\n            failure_count_threshold = 3\n            interval_seconds        = 60\n            path                    = \"/\"\n            port                    = 8000\n            success_count_threshold = 3\n            timeout                 = 30\n            transport            
   = \"HTTP\"\n          }\n          startup_probe = {\n            failure_count_threshold = 3\n            interval_seconds        = 60\n            path                    = \"/\"\n            port                    = 8000\n            timeout                 = 30\n            transport               = \"HTTP\"\n          }\n        }\n      ]\n      min_replicas                  = 1\n      max_replicas                  = 3\n    }\n  }]
\n

 

\n

 

\n

This is the definition of each variable:

\n\n

 

\n

Container App Module

\n

The terraform/apps/modules/container_app/main.tf module is utilized to create the Azure Container Apps. The module defines and uses the following data source for the Azure Container Registry, Azure Container Apps Environment, and user-defined managed identity created when deploying the infrastructure. These data sources are used to access the properties of these Azure resources.

\n

 

\n

 

\n
data \"azurerm_container_app_environment\" \"container_app_environment\" {\n  name                 = var.container_app_environment_name\n  resource_group_name  = var.resource_group_name\n}\n\ndata \"azurerm_container_registry\" \"container_registry\" {\n  name                 = var.container_registry_name\n  resource_group_name  = var.resource_group_name\n}\n\ndata \"azurerm_user_assigned_identity\" \"workload_user_assigned_identity\" {\n  name                = var.workload_managed_identity_name\n  resource_group_name = var.resource_group_name\n}
\n

 

\n

 

\n

The module creates and utilizes the following local variables:

\n

 

\n

 

\n
locals {\n  identity = {\n    type         = \"UserAssigned\"\n    identity_ids = [data.azurerm_user_assigned_identity.workload_user_assigned_identity.id]\n  }\n  identity_env = {\n    name         = \"AZURE_CLIENT_ID\"\n    secret_name  = null\n    value        = data.azurerm_user_assigned_identity.workload_user_assigned_identity.client_id\n  }\n  registry = {\n    server       = data.azurerm_container_registry.container_registry.login_server\n    identity     = data.azurerm_user_assigned_identity.workload_user_assigned_identity.id\n  }\n}
\n

 

\n

 

\n

This is the explanation of each local variable:

\n\n

Here is the complete Terraform code of the module:

\n

 

\n

 

\n
data \"azurerm_container_app_environment\" \"container_app_environment\" {\n  name                 = var.container_app_environment_name\n  resource_group_name  = var.resource_group_name\n}\n\ndata \"azurerm_container_registry\" \"container_registry\" {\n  name                 = var.container_registry_name\n  resource_group_name  = var.resource_group_name\n}\n\ndata \"azurerm_user_assigned_identity\" \"workload_user_assigned_identity\" {\n  name                = var.workload_managed_identity_name\n  resource_group_name = var.resource_group_name\n}\n\nlocals {\n  identity = {\n    type         = \"UserAssigned\"\n    identity_ids = [data.azurerm_user_assigned_identity.workload_user_assigned_identity.id]\n  }\n  identity_env = {\n    name         = \"AZURE_CLIENT_ID\"\n    secret_name  = null\n    value        = data.azurerm_user_assigned_identity.workload_user_assigned_identity.client_id\n  }\n  registry = {\n    server       = data.azurerm_container_registry.container_registry.login_server\n    identity     = data.azurerm_user_assigned_identity.workload_user_assigned_identity.id\n  }\n}\n\nresource \"azurerm_container_app\" \"container_app\" {\n  for_each                     = {for app in var.container_apps: app.name => app}\n\n  container_app_environment_id = data.azurerm_container_app_environment.container_app_environment.id\n  name                         = each.key\n  resource_group_name          = var.resource_group_name\n  revision_mode                = each.value.revision_mode\n  tags                         = each.value.tags\n\n  template {\n    max_replicas    = each.value.template.max_replicas\n    min_replicas    = each.value.template.min_replicas\n    revision_suffix = each.value.template.revision_suffix\n\n    dynamic \"container\" {\n      for_each = each.value.template.containers\n\n      content {\n        cpu     = container.value.cpu\n        image   = \"${data.azurerm_container_registry.container_registry.login_server}/${container.value.image}\"\n 
       memory  = container.value.memory\n        name    = container.value.name\n        args    = container.value.args\n        command = container.value.command\n\n        dynamic \"env\" {\n          for_each = container.value.env == null ? [local.identity_env] : concat(container.value.env, [local.identity_env])\n\n          content {\n            name        = env.value.name\n            secret_name = env.value.secret_name\n            value       = env.value.value\n          }\n        }\n\n        dynamic \"liveness_probe\" {\n          for_each = container.value.liveness_probe == null ? [] : [container.value.liveness_probe]\n\n          content {\n            port                    = liveness_probe.value.port\n            transport               = liveness_probe.value.transport\n            failure_count_threshold = liveness_probe.value.failure_count_threshold\n            host                    = liveness_probe.value.host\n            initial_delay           = liveness_probe.value.initial_delay\n            interval_seconds        = liveness_probe.value.interval_seconds\n            path                    = liveness_probe.value.path\n            timeout                 = liveness_probe.value.timeout\n\n            dynamic \"header\" {\n              for_each = liveness_probe.value.header == null ? [] : [liveness_probe.value.header]\n\n              content {\n                name  = header.value.name\n                value = header.value.value\n              }\n            }\n          }\n        }\n\n        dynamic \"readiness_probe\" {\n          for_each = container.value.readiness_probe == null ? 
[] : [container.value.readiness_probe]\n\n          content {\n            port                    = readiness_probe.value.port\n            transport               = readiness_probe.value.transport\n            failure_count_threshold = readiness_probe.value.failure_count_threshold\n            host                    = readiness_probe.value.host\n            interval_seconds        = readiness_probe.value.interval_seconds\n            path                    = readiness_probe.value.path\n            success_count_threshold = readiness_probe.value.success_count_threshold\n            timeout                 = readiness_probe.value.timeout\n\n            dynamic \"header\" {\n              for_each = readiness_probe.value.header == null ? [] : [readiness_probe.value.header]\n\n              content {\n                name  = header.value.name\n                value = header.value.value\n              }\n            }\n          }\n        }\n\n        dynamic \"startup_probe\" {\n          for_each = container.value.startup_probe == null ? [] : [container.value.startup_probe]\n\n          content {\n            port                    = startup_probe.value.port\n            transport               = startup_probe.value.transport\n            failure_count_threshold = startup_probe.value.failure_count_threshold\n            host                    = startup_probe.value.host\n            interval_seconds        = startup_probe.value.interval_seconds\n            path                    = startup_probe.value.path\n            timeout                 = startup_probe.value.timeout\n\n            dynamic \"header\" {\n              for_each = startup_probe.value.header == null ? 
[] : [startup_probe.value.header]\n\n              content {\n                name  = header.value.name\n                value = header.value.name\n              }\n            }\n          }\n        }\n\n        dynamic \"volume_mounts\" {\n          for_each = container.value.volume_mounts == null ? [] : [container.value.volume_mounts]\n\n          content {\n            name = volume_mounts.value.name\n            path = volume_mounts.value.path\n          }\n        }\n      }\n    }\n\n    dynamic \"volume\" {\n      for_each = each.value.template.volume == null ? [] : each.value.template.volume\n\n      content {\n        name         = volume.value.name\n        storage_name = volume.value.storage_name\n        storage_type = volume.value.storage_type\n      }\n    }\n  }\n\n  dynamic \"dapr\" {\n    for_each = each.value.dapr == null ? [] : [each.value.dapr]\n\n    content {\n      app_id       = dapr.value.app_id\n      app_port     = dapr.value.app_port\n      app_protocol = dapr.value.app_protocol\n    }\n  }\n\n  dynamic \"identity\" {\n    for_each = each.value.identity == null ? [local.identity] : [each.value.identity]\n\n    content {\n      type         = identity.value.type\n      identity_ids = identity.value.identity_ids\n    }\n  }\n\n  dynamic \"ingress\" {\n    for_each = each.value.ingress == null ? [] : [each.value.ingress]\n\n    content {\n      target_port                = ingress.value.target_port\n      allow_insecure_connections = ingress.value.allow_insecure_connections\n      external_enabled           = ingress.value.external_enabled\n      transport                  = ingress.value.transport\n\n      dynamic \"traffic_weight\" {\n        for_each = ingress.value.traffic_weight == null ? 
[] : [ingress.value.traffic_weight]\n\n        content {\n          percentage      = traffic_weight.value.percentage\n          label           = traffic_weight.value.label\n          latest_revision = traffic_weight.value.latest_revision\n          revision_suffix = traffic_weight.value.revision_suffix\n        }\n      }\n    }\n  }\n\n  dynamic \"registry\" {\n    for_each = each.value.registry == null ? [local.registry] : concat(each.value.registry, [local.registry])\n\n    content {\n      server   = registry.value.server\n      identity = registry.value.identity\n    }\n  }\n\n  dynamic \"secret\" {\n    for_each = nonsensitive(toset([for pair in lookup(var.container_app_secrets, each.key, []) : pair.name]))\n\n    content {\n      name  = secret.key\n      value = local.container_app_secrets[each.key][secret.key]\n    }\n  }\n}\n
\n

 

\n

 

\n

As you can notice, the module uses the login server of the Azure Container Registry to create the fully qualified name of the container image of the current container app.

\n

 

\n

Managed identities in Azure Container Apps

\n

Each chat application makes use of a DefaultAzureCredential object to acquire a security token from Azure Active Directory and authenticate and authorize with Azure OpenAI Service (AOAI) and Azure Container Registry (ACR) using the credentials of the user-defined managed identity associated with the container app.

\n

You can use a managed identity in a running container app to authenticate and authorize with any service that supports Azure AD authentication. With managed identities:

\n\n

For more information, see Managed identities in Azure Container Apps. The workloads running in a container app can use the Azure Identity client libraries to acquire a security token from the Azure Active Directory. You can choose one of the following approaches inside your code:

\n\n

The following table provides the minimum package version required for each language's client library.

\n

 

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
LanguageLibraryMinimum VersionExample
.NETAzure.Identity1.9.0Link
Goazidentity1.3.0Link
Javaazure-identity1.9.0Link
JavaScript@azure/identity3.2.0Link
Pythonazure-identity1.13.0Link
\n

 

\n

NOTE: When using Azure Identity client library with Azure Container Apps, the client ID of the managed identity must be specified. When using the DefaultAzureCredential, you can explicitly specify the client ID of the container app managed identity in the AZURE_CLIENT_ID environment variable.

\n

 

\n

Simple Chat Application

\n

The Simple Chat Application is a large language model-based chatbot that allows users to submit general-purpose questions to a GPT model, which generates and streams back human-like and engaging conversational responses. The following picture shows the welcome screen of the chat application.

\n

\n

โ€ƒ

\n

You can modify the welcome screen in markdown by editing the chainlit.md file at the project's root. If you do not want a welcome screen, leave the file empty. The following picture shows what happens when a user submits a new message in the chat.

\n

\n

โ€ƒ

\n

Chainlit can render messages in markdown format as shown by the following prompt:

\n

\n

โ€ƒ

\n

Chainlit also provides classes to support the following elements:

\n

 

\n\n

 

\n

You can click the user icon on the UI to access the chat settings and choose, for example, between the light and dark theme.

\n

\n

โ€ƒ

\n

The application is built in Python. Let's take a look at the individual parts of the application code. In the following section, the Python code starts by importing the necessary packages/modules.

\n

 

\n

 

\n
# Import packages\nimport os\nimport sys\nfrom openai import AsyncAzureOpenAI\nimport logging\nimport chainlit as cl\nfrom azure.identity import DefaultAzureCredential, get_bearer_token_provider\nfrom dotenv import load_dotenv\nfrom dotenv import dotenv_values\n\n# Load environment variables from .env file\nif os.path.exists(\".env\"):\n    load_dotenv(override=True)\n    config = dotenv_values(\".env\")
\n

 

\n

 

\n

These are the libraries used by the chat application:

\n

 

\n
    \n
  1. os: This module provides a way of interacting with the operating system, enabling the code to access environment variables, file paths, etc.
  2. \n
  3. sys: This module provides access to some variables used or maintained by the interpreter and functions that interact with the interpreter.
  4. \n
  5. openai: The OpenAI Python library provides convenient access to the OpenAI API from applications written in Python. It includes a pre-defined set of classes for API resources that initialize themselves dynamically from API responses which makes it compatible with a wide range of versions of the OpenAI API. You can find usage examples for the OpenAI Python library in our API reference and the OpenAI Cookbook.
  6. \n
  7. logging: This module provides flexible logging of messages.
  8. \n
  9. chainlit as cl: This imports the Chainlit library and aliases it as cl. Chainlit is used to create the UI of the application.
  10. \n
  11. from azure.identity import DefaultAzureCredential, get_bearer_token_provider: when the openai_type property value is azure_ad, a DefaultAzureCredential object from the Azure Identity client library for Python is used to acquire a security token from Microsoft Entra ID using the credentials of the user-defined managed identity federated with the service account.
  12. \n
  13. load_dotenv and dotenv_values from dotenv: Python-dotenv reads key-value pairs from a .env file and can set them as environment variables. It helps in the development of applications following the 12-factor principles.
  14. \n
\n

 

\n

The requirements.txt file under the src folder contains the list of packages used by the chat applications. You can restore these packages in your environment using the following command:

\n
\n
pip install -r requirements.txt --upgrade
\n
\n

Next, the code reads the value of the environment variables used to initialize Azure OpenAI objects. In addition, it creates a token provider for Azure OpenAI.

\n
\n
# Read environment variables\ntemperature = float(os.environ.get(\"TEMPERATURE\", 0.9))\napi_base = os.getenv(\"AZURE_OPENAI_BASE\")\napi_key = os.getenv(\"AZURE_OPENAI_KEY\")\napi_type = os.environ.get(\"AZURE_OPENAI_TYPE\", \"azure\")\napi_version = os.environ.get(\"AZURE_OPENAI_VERSION\", \"2023-12-01-preview\")\nengine = os.getenv(\"AZURE_OPENAI_DEPLOYMENT\")\nmodel = os.getenv(\"AZURE_OPENAI_MODEL\")\nsystem_content = os.getenv(\n    \"AZURE_OPENAI_SYSTEM_MESSAGE\", \"You are a helpful assistant.\"\n)\nmax_retries = int(os.getenv(\"MAX_RETRIES\", 5))\ntimeout = int(os.getenv(\"TIMEOUT\", 30))\ndebug = os.getenv(\"DEBUG\", \"False\").lower() in (\"true\", \"1\", \"t\")\n\n# Create Token Provider\ntoken_provider = get_bearer_token_provider(\n    DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n)
\n
\n

Here's a brief explanation of each variable and related environment variable:

\n

 

\n
    \n
  1. temperature: A float value representing the temperature for Create chat completion method of the OpenAI API. It is fetched from the environment variables with a default value of 0.9.
  2. \n
  3. api_base: The base URL for the OpenAI API.
  4. \n
  5. api_key: The API key for the OpenAI API. The value of this variable can be null when using a user-assigned managed identity to acquire a security token to access Azure OpenAI.
  6. \n
  7. api_type: A string representing the type of the OpenAI API.
  8. \n
  9. api_version: A string representing the version of the OpenAI API.
  10. \n
  11. engine: The engine used for OpenAI API calls.
  12. \n
  13. model: The model used for OpenAI API calls.
  14. \n
  15. system_content: The content of the system message used for OpenAI API calls.
  16. \n
  17. max_retries: The maximum number of retries for OpenAI API calls.
  18. \n
  19. timeout: The timeout in seconds.
  20. \n
  21. debug: When debug is equal to true, t, or 1, the logger writes the chat completion answers.
  22. \n
\n

 

\n

In the next section, the code creates the AsyncAzureOpenAI client object used by the application to communicate with the Azure OpenAI Service instance. When the api_type is equal to azure, the code initializes the object with the API key. Otherwise, it initializes the azure_ad_token_provider property to the token provider created earlier. Then the code creates a logger.

\n
\n
# Configure OpenAI\nif api_type == \"azure\":\n    openai = AsyncAzureOpenAI(\n        api_version=api_version,\n        api_key=api_key,\n        azure_endpoint=api_base,\n        max_retries=max_retries,\n        timeout=timeout,\n    )\nelse:\n    openai = AsyncAzureOpenAI(\n        api_version=api_version,\n        azure_endpoint=api_base,\n        azure_ad_token_provider=token_provider,\n        max_retries=max_retries,\n        timeout=timeout\n    )\n\n# Configure a logger\nlogging.basicConfig(\n    stream=sys.stdout,\n    format=\"[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s\",\n    level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)
\n
\n

The backoff time is calculated using the backoff_in_seconds and attempt variables. It follows the formula backoff_in_seconds * 2 ** attempt + random.uniform(0, 1). This formula increases the backoff time exponentially with each attempt and adds a random value between 0 and 1 to avoid synchronized retries.

\n

Next, the code defines a function called start_chat that is used to initialize the UI when the user connects to the application or clicks the New Chat button.

\n

 

\n

 

\n
.on_chat_start\nasync def start_chat():\n    await cl.Avatar(\n        name=\"Chatbot\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n    ).send()\n    await cl.Avatar(\n        name=\"Error\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n    ).send()\n    await cl.Avatar(\n        name=\"User\",\n        url=\"https://media.architecturaldigest.com/photos/5f241de2c850b2a36b415024/master/w_1600%2Cc_limit/Luke-logo.png\",\n    ).send()\n    cl.user_session.set(\n        \"message_history\",\n        [{\"role\": \"system\", \"content\": system_content}],\n    )\n
\n

 

\n

 

\n

Here is a brief explanation of the function steps:

\n

 

\n\n

Finally, the application defines the method called whenever the user sends a new message in the chat.

\n
\n
@cl.on_message\nasync def on_message(message: cl.Message):\n    message_history = cl.user_session.get(\"message_history\")\n    message_history.append({\"role\": \"user\", \"content\": message.content})\n    logger.info(\"Question: [%s]\", message.content)\n\n    # Create the Chainlit response message\n    msg = cl.Message(content=\"\")\n\n    async for stream_resp in await openai.chat.completions.create(\n        model=model,\n        messages=message_history,\n        temperature=temperature,\n        stream=True,\n    ):\n        if stream_resp and len(stream_resp.choices) > 0:\n            token = stream_resp.choices[0].delta.content or \"\"\n            await msg.stream_token(token)\n\n    if debug:\n        logger.info(\"Answer: [%s]\", msg.content)\n\n    message_history.append({\"role\": \"assistant\", \"content\": msg.content})\n    await msg.send()
\n
\n

Here is a detailed explanation of the function steps:

\n

 

\n\n

Below, you can read the complete code of the application.

\n
\n
# Import packages\nimport os\nimport sys\nfrom openai import AsyncAzureOpenAI\nimport logging\nimport chainlit as cl\nfrom azure.identity import DefaultAzureCredential, get_bearer_token_provider\nfrom dotenv import load_dotenv\nfrom dotenv import dotenv_values\n\n# Load environment variables from .env file\nif os.path.exists(\".env\"):\n    load_dotenv(override=True)\n    config = dotenv_values(\".env\")\n\n# Read environment variables\ntemperature = float(os.environ.get(\"TEMPERATURE\", 0.9))\napi_base = os.getenv(\"AZURE_OPENAI_BASE\")\napi_key = os.getenv(\"AZURE_OPENAI_KEY\")\napi_type = os.environ.get(\"AZURE_OPENAI_TYPE\", \"azure\")\napi_version = os.environ.get(\"AZURE_OPENAI_VERSION\", \"2023-12-01-preview\")\nengine = os.getenv(\"AZURE_OPENAI_DEPLOYMENT\")\nmodel = os.getenv(\"AZURE_OPENAI_MODEL\")\nsystem_content = os.getenv(\n    \"AZURE_OPENAI_SYSTEM_MESSAGE\", \"You are a helpful assistant.\"\n)\nmax_retries = int(os.getenv(\"MAX_RETRIES\", 5))\ntimeout = int(os.getenv(\"TIMEOUT\", 30))\ndebug = os.getenv(\"DEBUG\", \"False\").lower() in (\"true\", \"1\", \"t\")\n\n# Create Token Provider\ntoken_provider = get_bearer_token_provider(\n    DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n)\n\n# Configure OpenAI\nif api_type == \"azure\":\n    openai = AsyncAzureOpenAI(\n        api_version=api_version,\n        api_key=api_key,\n        azure_endpoint=api_base,\n        max_retries=max_retries,\n        timeout=timeout,\n    )\nelse:\n    openai = AsyncAzureOpenAI(\n        api_version=api_version,\n        azure_endpoint=api_base,\n        azure_ad_token_provider=token_provider,\n        max_retries=max_retries,\n        timeout=timeout,\n    )\n\n# Configure a logger\nlogging.basicConfig(\n    stream=sys.stdout,\n    format=\"[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s\",\n    level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)\n\n\n@cl.on_chat_start\nasync def start_chat():\n    await 
cl.Avatar(\n        name=\"Chatbot\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n    ).send()\n    await cl.Avatar(\n        name=\"Error\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n    ).send()\n    await cl.Avatar(\n        name=\"You\",\n        url=\"https://media.architecturaldigest.com/photos/5f241de2c850b2a36b415024/master/w_1600%2Cc_limit/Luke-logo.png\",\n    ).send()\n    cl.user_session.set(\n        \"message_history\",\n        [{\"role\": \"system\", \"content\": system_content}],\n    )\n\n\n@cl.on_message\nasync def on_message(message: cl.Message):\n    message_history = cl.user_session.get(\"message_history\")\n    message_history.append({\"role\": \"user\", \"content\": message.content})\n    logger.info(\"Question: [%s]\", message.content)\n\n    # Create the Chainlit response message\n    msg = cl.Message(content=\"\")\n\n    async for stream_resp in await openai.chat.completions.create(\n        model=model,\n        messages=message_history,\n        temperature=temperature,\n        stream=True,\n    ):\n        if stream_resp and len(stream_resp.choices) > 0:\n            token = stream_resp.choices[0].delta.content or \"\"\n            await msg.stream_token(token)\n\n    if debug:\n        logger.info(\"Answer: [%s]\", msg.content)\n\n    message_history.append({\"role\": \"assistant\", \"content\": msg.content})\n    await msg.send()
\n
\n

You can run the application locally using the following command. The -w flag indicates auto-reload whenever we make changes live in our application code.

\n
\n
chainlit run app.py -w
\n
\n

Documents QA Chat

\n

The Documents QA Chat application allows users to submit up to 10 .pdf and .docx documents. The application processes the uploaded documents to create vector embeddings. These embeddings are stored in ChromaDB vector database for efficient retrieval. Users can pose questions about the uploaded documents and view the Chain of Thought, enabling easy exploration of the reasoning process. The completion message contains links to the text chunks in the documents that were used as a source for the response. The following picture shows the chat application interface. As you can see, you can click the Browse button and choose up to 10 .pdf and .docx documents to upload. Alternatively, you can just drag and drop the files over the control area.

\n

\n

โ€ƒ

\n

After uploading the documents, the application creates and stores embeddings in the ChromaDB vector database. During this phase, the UI shows a Processing <file-1>, <file-2>... message, as shown in the following picture:

\n

\n

โ€ƒ

\n

When the code has finished creating the embeddings, the UI is ready to receive the user's questions:

\n

\n

โ€ƒ

\n

As your chat application grows in complexity, understanding the individual steps for generating a specific answer can become challenging. To solve this issue, Chainlit allows you to easily explore the reasoning process right from the user interface using the Chain of Thought. If you are using the LangChain integration, every intermediary step is automatically sent and displayed in the Chainlit UI just clicking and expanding the steps, as shown in the following picture:

\n

\n

โ€ƒ

\n

To see the text chunks that were used by the large language model to originate the response, you can click the sources links, as shown in the following picture:

\n

\n

โ€ƒ

\n

In the Chain of Thought, below the step used to invoke the OpenAI chat completion API, you can find an

\n

 Inspect in prompt playground  icon. Clicking on it opens the Prompt Playground dialog which allows you to modify and iterate on the prompt as needed.

\n

\n

โ€ƒ

\n

As shown in the following picture, you can click and edit the value of the highlighted variables in the user prompt:

\n

\n

โ€ƒ

\n

You can then click and edit the user question.

\n

\n

โ€ƒ

\n

Then, you can click the submit button to test the effect of your changes, as shown in the following picture.

\n

\n

โ€ƒ

\n

Let's take a look at the individual parts of the application code. In the following section, the Python code starts by importing the necessary packages/modules.

\n
\n
# Import packages\nimport os\nimport io\nimport sys\nimport logging\nimport chainlit as cl\nfrom chainlit.playground.config import AzureChatOpenAI\nfrom pypdf import PdfReader\nfrom docx import Document\nfrom azure.identity import DefaultAzureCredential, get_bearer_token_provider\nfrom dotenv import load_dotenv\nfrom dotenv import dotenv_values\nfrom langchain.embeddings import AzureOpenAIEmbeddings\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\nfrom langchain.vectorstores.chroma import Chroma\nfrom langchain.chains import RetrievalQAWithSourcesChain\nfrom langchain.chat_models import AzureChatOpenAI\nfrom langchain.prompts.chat import (\n    ChatPromptTemplate,\n    SystemMessagePromptTemplate,\n    HumanMessagePromptTemplate,\n)\n\n# Load environment variables from .env file\nif os.path.exists(\".env\"):\n    load_dotenv(override=True)\n    config = dotenv_values(\".env\")
\n
\n

These are the libraries used by the chat application:

\n

 

\n
    \n
  1. os: This module provides a way of interacting with the operating system, enabling the code to access environment variables, file paths, etc.
  2. \n
  3. sys: This module provides access to some variables used or maintained by the interpreter and functions that interact with the interpreter.
  4. \n
  5. time: This module provides various time-related functions for time manipulation and measurement.
  6. \n
  7. openai: the OpenAI Python library provides convenient access to the OpenAI API from applications written in the Python language. It includes a pre-defined set of classes for API resources that initialize themselves dynamically from API responses, which makes it compatible with a wide range of versions of the OpenAI API. You can find usage examples for the OpenAI Python library in our API reference and the OpenAI Cookbook.
  8. \n
  9. logging: This module provides flexible logging of messages.
  10. \n
  11. chainlit as cl: This imports the Chainlit library and aliases it as cl. Chainlit is used to create the UI of the application.
  12. \n
  13. AzureChatOpenAI from chainlit.playground.config import: you need to import AzureChatOpenAI from chainlit.playground.config to use the Chainlit Playground.
  14. \n
  15. DefaultAzureCredential from azure.identity: when the openai_type property value is azure_ad, a DefaultAzureCredential object from the Azure Identity client library for Python - version 1.13.0 is used to acquire security token from the Microsoft Entra ID using the credentials of the user-defined managed identity, whose client ID is defined in the AZURE_CLIENT_ID environment variable.
  16. \n
  17. load_dotenv and dotenv_values from dotenv: Python-dotenv reads key-value pairs from a .env file and can set them as environment variables. It helps in the development of applications following the 12-factor principles.
  18. \n
  19. langchain: Large language models (LLMs) are emerging as a transformative technology, enabling developers to build applications that they previously could not. However, using these LLMs in isolation is often insufficient for creating a truly powerful app - the real power comes when you can combine them with other sources of computation or knowledge. LangChain library aims to assist in the development of those types of applications.
  20. \n
\n

The requirements.txt file under the src folder contains the list of packages used by the chat applications. You can restore these packages in your environment using the following command:

\n
\n
pip install -r requirements.txt --upgrade
\n
\n

Next, the code reads environment variables and configures the OpenAI settings.

\n
\n
# Read environment variables\ntemperature = float(os.environ.get(\"TEMPERATURE\", 0.9))\napi_base = os.getenv(\"AZURE_OPENAI_BASE\")\napi_key = os.getenv(\"AZURE_OPENAI_KEY\")\napi_type = os.environ.get(\"AZURE_OPENAI_TYPE\", \"azure\")\napi_version = os.environ.get(\"AZURE_OPENAI_VERSION\", \"2023-12-01-preview\")\nchat_completion_deployment = os.getenv(\"AZURE_OPENAI_DEPLOYMENT\")\nembeddings_deployment = os.getenv(\"AZURE_OPENAI_ADA_DEPLOYMENT\")\nmodel = os.getenv(\"AZURE_OPENAI_MODEL\")\nmax_size_mb = int(os.getenv(\"CHAINLIT_MAX_SIZE_MB\", 100))\nmax_files = int(os.getenv(\"CHAINLIT_MAX_FILES\", 10))\ntext_splitter_chunk_size = int(os.getenv(\"TEXT_SPLITTER_CHUNK_SIZE\", 1000))\ntext_splitter_chunk_overlap = int(os.getenv(\"TEXT_SPLITTER_CHUNK_OVERLAP\", 10))\nembeddings_chunk_size = int(os.getenv(\"EMBEDDINGS_CHUNK_SIZE\", 16))\nmax_retries = int(os.getenv(\"MAX_RETRIES\", 5))\nretry_min_seconds = int(os.getenv(\"RETRY_MIN_SECONDS\", 1))\nretry_max_seconds = int(os.getenv(\"RETRY_MAX_SECONDS\", 5))\ntimeout = int(os.getenv(\"TIMEOUT\", 30))\ndebug = os.getenv(\"DEBUG\", \"False\").lower() in (\"true\", \"1\", \"t\")\n\n# Configure system prompt\nsystem_template = \"\"\"Use the following pieces of context to answer the users question.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\nALWAYS return a \"SOURCES\" part in your answer.\nThe \"SOURCES\" part should be a reference to the source of the document from which you got your answer.\n\nExample of your response should be:\n\n\\`\\`\\`\nThe answer is foo\nSOURCES: xyz\n\\`\\`\\`\n\nBegin!\n----------------\n{summaries}\"\"\"\nmessages = [\n    SystemMessagePromptTemplate.from_template(system_template),\n    HumanMessagePromptTemplate.from_template(\"{question}\"),\n]\nprompt = ChatPromptTemplate.from_messages(messages)\nchain_type_kwargs = {\"prompt\": prompt}\n\n# Configure a logger\nlogging.basicConfig(\n    stream=sys.stdout,\n    format=\"[%(asctime)s] 
{%(filename)s:%(lineno)d} %(levelname)s - %(message)s\",\n    level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)\n\n# Create Token Provider\nif api_type == \"azure_ad\":\n    token_provider = get_bearer_token_provider(\n        DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n    )\n\n# Setting the environment variables for the playground\nif api_type == \"azure\":\n    os.environ[\"AZURE_OPENAI_API_KEY\"] = api_key\nos.environ[\"AZURE_OPENAI_API_VERSION\"] = api_version\nos.environ[\"AZURE_OPENAI_ENDPOINT\"] = api_base\nos.environ[\"AZURE_OPENAI_DEPLOYMENT_NAME\"] = chat_completion_deployment
\n
\n

Here's a brief explanation of each variable and related environment variable:

\n

 

\n
    \n
  1. temperature: A float value representing the temperature for Create chat completion method of the OpenAI API. It is fetched from the environment variables with a default value of 0.9.
  2. \n
  3. api_base: The base URL for the OpenAI API.
  4. \n
  5. api_key: The API key for the OpenAI API. The value of this variable can be null when using a user-assigned managed identity to acquire a security token to access Azure OpenAI.
  6. \n
  7. api_type: A string representing the type of the OpenAI API.
  8. \n
  9. api_version: A string representing the version of the OpenAI API.
  10. \n
  11. chat_completion_deployment: the name of the Azure OpenAI GPT model for chat completion.
  12. \n
  13. embeddings_deployment: the name of the Azure OpenAI deployment for embeddings.
  14. \n
  15. model: The model used for chat completion calls (e.g, gpt-35-turbo-16k).
  16. \n
  17. max_size_mb: the maximum size for the uploaded documents.
  18. \n
  19. max_files: the maximum number of documents that can be uploaded.
  20. \n
  21. text_splitter_chunk_size: the maximum chunk size used by the RecursiveCharacterTextSplitter object.
  22. \n
  23. text_splitter_chunk_overlap: the maximum chunk overlap used by the RecursiveCharacterTextSplitter object.
  24. \n
  25. embeddings_chunk_size: the maximum chunk size used by the OpenAIEmbeddings object.
  26. \n
  27. max_retries: The maximum number of retries for OpenAI API calls.
  28. \n
  29. retry_min_seconds: the minimum number of seconds before a retry.
  30. \n
  31. retry_max_seconds: the maximum number of seconds before a retry.
  32. \n
  33. timeout: The timeout in seconds.
  34. \n
  35. system_template: The content of the system message used for OpenAI API calls.
  36. \n
  37. debug: When debug is equal to true, t, or 1, the logger switches to verbose mode.
  38. \n
\n

 

\n

Next, the code defines a function called start_chat that is used to initialize the UI when the user connects to the application or clicks the New Chat button.

\n
\n
@cl.on_chat_start\nasync def start_chat():\n    # Sending Avatars for Chat Participants\n    await cl.Avatar(\n        name=\"Chatbot\",\n        url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n    ).send()\n    await cl.Avatar(\n        name=\"Error\",\n        url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n    ).send()\n    await cl.Avatar(\n        name=\"You\",\n        url=\"https://media.architecturaldigest.com/photos/5f241de2c850b2a36b415024/master/w_1600%2Cc_limit/Luke-logo.png\"\n    ).send()
\n
\n

Here is a brief explanation of the function steps:

\n

 

\n\n

 

\n

The following code is used to initialize the large language model (LLM) chain used to reply to questions on the content of the uploaded documents.

\n
\n
# Initialize the file list to None\n    files = None\n\n    # Wait for the user to upload a file\n    while files == None:\n        files = await cl.AskFileMessage(\n            content=f\"Please upload up to {max_files} `.pdf` or `.docx` files to begin.\",\n            accept=[\n                \"application/pdf\",\n                \"application/vnd.openxmlformats-officedocument.wordprocessingml.document\",\n            ],\n            max_size_mb=max_size_mb,\n            max_files=max_files,\n            timeout=86400,\n            raise_on_timeout=False,\n        ).send()
\n
\n

The AskFileMessage API call prompts the user to upload up to a specified number of .pdf or .docx files. The uploaded files are stored in the files variable. The process continues until the user uploads files. For more information, see AskFileMessage.

\n

The following code processes each uploaded file by extracting its content.

\n

 

\n
    \n
  1. The text content of each file is stored in the list all_texts.
  2. \n
  3. This code performs text processing and chunking. It checks the file extension to read the file content accordingly, depending on if it's a .pdf or a .docx document.
  4. \n
  5. The text content is split into smaller chunks using the RecursiveCharacterTextSplitter LangChain object.
  6. \n
  7. Metadata is created for each chunk and stored in the metadatas list.
  8. \n
\n
\n
    # Create a message to inform the user that the files are being processed\n    content = \"\"\n    if len(files) == 1:\n        content = f\"Processing `{files[0].name}`...\"\n    else:\n        files_names = [f\"`{f.name}`\" for f in files]\n        content = f\"Processing {', '.join(files_names)}...\"\n    logger.info(content)\n    msg = cl.Message(content=content, author=\"Chatbot\")\n    await msg.send()\n\n    # Create a list to store the texts of each file\n    all_texts = []\n\n    # Process each file uplodaded by the user\n    for file in files:\n        # Read file contents\n        with open(file.path, \"rb\") as uploaded_file:\n            file_contents = uploaded_file.read()\n\n        logger.info(\"[%d] bytes were read from %s\", len(file_contents), file.path)\n\n        # Create an in-memory buffer from the file content\n        bytes = io.BytesIO(file_contents)\n\n        # Get file extension\n        extension = file.name.split(\".\")[-1]\n\n        # Initialize the text variable\n        text = \"\"\n\n        # Read the file\n        if extension == \"pdf\":\n            reader = PdfReader(bytes)\n            for i in range(len(reader.pages)):\n                text += reader.pages[i].extract_text()\n                if debug:\n                    logger.info(\"[%s] read from %s\", text, file.path)\n        elif extension == \"docx\":\n            doc = Document(bytes)\n            paragraph_list = []\n            for paragraph in doc.paragraphs:\n                paragraph_list.append(paragraph.text)\n                if debug:\n                    logger.info(\"[%s] read from %s\", paragraph.text, file.path)\n            text = \"\\n\".join(paragraph_list)\n\n        # Split the text into chunks\n        text_splitter = RecursiveCharacterTextSplitter(\n            chunk_size=text_splitter_chunk_size,\n            chunk_overlap=text_splitter_chunk_overlap,\n        )\n        texts = text_splitter.split_text(text)\n\n        # Add the chunks and 
metadata to the list\n        all_texts.extend(texts)\n\n    # Create a metadata for each chunk\n    metadatas = [{\"source\": f\"{i}-pl\"} for i in range(len(all_texts))]
\n
\n

The next piece of code performs the following steps:

\n

 

\n
    \n
  1. It creates an AzureOpenAIEmbeddings configured to use the embeddings model in the Azure OpenAI Service to create embeddings from text chunks.
  2. \n
  3. It creates a ChromaDB vector database using the OpenAIEmbeddings object, the text chunks list, and the metadata list.
  4. \n
  5. It creates an AzureChatOpenAI LangChain object based on the GPT model hosted in Azure OpenAI Service.
  6. \n
  7. It creates a chain using the RetrievalQAWithSourcesChain.from_chain_type API call, which uses the previously created models and stores them as retrievers.
  8. \n
  9. It stores the metadata and text chunks in the user session using the cl.user_session.set() API call.
  10. \n
  11. It creates a message to inform the user that the files are ready for queries, and finally returns the chain.
  12. \n
  13. The cl.user_session.set(\"chain\", chain) call stores the LLM chain in the user_session dictionary for later use.
  14. \n
\n

The next section creates the LangChain LLM chain.

\n
\n
    # Create a Chroma vector store\n    if api_type == \"azure\":\n        embeddings = AzureOpenAIEmbeddings(\n            openai_api_version=api_version,\n            openai_api_type=api_type,\n            openai_api_key=api_key,\n            azure_endpoint=api_base,\n            azure_deployment=embeddings_deployment,\n            max_retries=max_retries,\n            retry_min_seconds=retry_min_seconds,\n            retry_max_seconds=retry_max_seconds,\n            chunk_size=embeddings_chunk_size,\n            timeout=timeout,\n        )\n    else:\n        embeddings = AzureOpenAIEmbeddings(\n            openai_api_version=api_version,\n            openai_api_type=api_type,\n            azure_endpoint=api_base,\n            azure_ad_token_provider=token_provider,\n            azure_deployment=embeddings_deployment,\n            max_retries=max_retries,\n            retry_min_seconds=retry_min_seconds,\n            retry_max_seconds=retry_max_seconds,\n            chunk_size=embeddings_chunk_size,\n            timeout=timeout,\n        )\n\n    # Create a Chroma vector store\n    db = await cl.make_async(Chroma.from_texts)(\n        all_texts, embeddings, metadatas=metadatas\n    )\n\n    # Create an AzureChatOpenAI llm\n    if api_type == \"azure\":\n        llm = AzureChatOpenAI(\n            openai_api_type=api_type,\n            openai_api_version=api_version,\n            openai_api_key=api_key,\n            azure_endpoint=api_base,\n            temperature=temperature,\n            azure_deployment=chat_completion_deployment,\n            streaming=True,\n            max_retries=max_retries,\n            timeout=timeout,\n        )\n    else:\n        llm = AzureChatOpenAI(\n            openai_api_type=api_type,\n            openai_api_version=api_version,\n            azure_endpoint=api_base,\n            api_key=api_key,\n            temperature=temperature,\n            azure_deployment=chat_completion_deployment,\n            
azure_ad_token_provider=token_provider,\n            streaming=True,\n            max_retries=max_retries,\n            timeout=timeout,\n        )\n\n    # Create a chain that uses the Chroma vector store\n    chain = RetrievalQAWithSourcesChain.from_chain_type(\n        llm=llm,\n        chain_type=\"stuff\",\n        retriever=db.as_retriever(),\n        return_source_documents=True,\n        chain_type_kwargs=chain_type_kwargs,\n    )\n\n    # Save the metadata and texts in the user session\n    cl.user_session.set(\"metadatas\", metadatas)\n    cl.user_session.set(\"texts\", all_texts)\n\n    # Create a message to inform the user that the files are ready for queries\n    content = \"\"\n    if len(files) == 1:\n        content = f\"`{files[0].name}` processed. You can now ask questions!\"\n        logger.info(content)\n    else:\n        files_names = [f\"`{f.name}`\" for f in files]\n        content = f\"{', '.join(files_names)} processed. You can now ask questions.\"\n        logger.info(content)\n    msg.content = content\n    msg.author = \"Chatbot\"\n    await msg.update()\n\n    # Store the chain in the user session\n    cl.user_session.set(\"chain\", chain)
\n
\n

The following code handles the communication with the OpenAI API and incorporates retrying logic in case the API calls fail due to specific errors.

\n

 

\n\n
\n
@cl.on_message\nasync def main(message: cl.Message):\n    # Retrieve the chain from the user session\n    chain = cl.user_session.get(\"chain\")\n\n    # Create a callback handler\n    cb = cl.AsyncLangchainCallbackHandler()\n\n    # Get the response from the chain\n    response = await chain.acall(message.content, callbacks=[cb])\n    logger.info(\"Question: [%s]\", message.content)
\n
\n

The code below extracts the answers and sources from the API response and formats them to be sent as a message.

\n\n

 

\n

 

\n
    # Get the answer and sources from the response\n    answer = response[\"answer\"]\n    sources = response[\"sources\"].strip()\n    source_elements = []\n\n    if debug:\n        logger.info(\"Answer: [%s]\", answer)\n\n    # Get the metadata and texts from the user session\n    metadatas = cl.user_session.get(\"metadatas\")\n    all_sources = [m[\"source\"] for m in metadatas]\n    texts = cl.user_session.get(\"texts\")\n\n    if sources:\n        found_sources = []\n\n        # Add the sources to the message\n        for source in sources.split(\",\"):\n            source_name = source.strip().replace(\".\", \"\")\n            # Get the index of the source\n            try:\n                index = all_sources.index(source_name)\n            except ValueError:\n                continue\n            text = texts[index]\n            found_sources.append(source_name)\n            # Create the text element referenced in the message\n            source_elements.append(cl.Text(content=text, name=source_name))\n\n        if found_sources:\n            answer += f\"\\nSources: {', '.join(found_sources)}\"\n        else:\n            answer += \"\\nNo sources found\"\n\n    await cl.Message(content=answer, elements=source_elements).send()\n\n    # Setting the AZURE_OPENAI_API_KEY environment variable for the playground\n    if api_type == \"azure_ad\":\n        os.environ[\"AZURE_OPENAI_API_KEY\"] = token_provider()\n
\n

 

\n

 

\n

 

\n

 

\n

Below, you can read the complete code of the application.

\n
\n
# Import packages\nimport os\nimport io\nimport sys\nimport logging\nimport chainlit as cl\nfrom chainlit.playground.config import AzureChatOpenAI\nfrom pypdf import PdfReader\nfrom docx import Document\nfrom azure.identity import DefaultAzureCredential, get_bearer_token_provider\nfrom dotenv import load_dotenv\nfrom dotenv import dotenv_values\nfrom langchain.embeddings import AzureOpenAIEmbeddings\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\nfrom langchain.vectorstores.chroma import Chroma\nfrom langchain.chains import RetrievalQAWithSourcesChain\nfrom langchain.chat_models import AzureChatOpenAI\nfrom langchain.prompts.chat import (\n    ChatPromptTemplate,\n    SystemMessagePromptTemplate,\n    HumanMessagePromptTemplate,\n)\n\n# Load environment variables from .env file\nif os.path.exists(\".env\"):\n    load_dotenv(override=True)\n    config = dotenv_values(\".env\")\n\n# Read environment variables\ntemperature = float(os.environ.get(\"TEMPERATURE\", 0.9))\napi_base = os.getenv(\"AZURE_OPENAI_BASE\")\napi_key = os.getenv(\"AZURE_OPENAI_KEY\")\napi_type = os.environ.get(\"AZURE_OPENAI_TYPE\", \"azure\")\napi_version = os.environ.get(\"AZURE_OPENAI_VERSION\", \"2023-12-01-preview\")\nchat_completion_deployment = os.getenv(\"AZURE_OPENAI_DEPLOYMENT\")\nembeddings_deployment = os.getenv(\"AZURE_OPENAI_ADA_DEPLOYMENT\")\nmodel = os.getenv(\"AZURE_OPENAI_MODEL\")\nmax_size_mb = int(os.getenv(\"CHAINLIT_MAX_SIZE_MB\", 100))\nmax_files = int(os.getenv(\"CHAINLIT_MAX_FILES\", 10))\nmax_files = int(os.getenv(\"CHAINLIT_MAX_FILES\", 10))\ntext_splitter_chunk_size = int(os.getenv(\"TEXT_SPLITTER_CHUNK_SIZE\", 1000))\ntext_splitter_chunk_overlap = int(os.getenv(\"TEXT_SPLITTER_CHUNK_OVERLAP\", 10))\nembeddings_chunk_size = int(os.getenv(\"EMBEDDINGS_CHUNK_SIZE\", 16))\nmax_retries = int(os.getenv(\"MAX_RETRIES\", 5))\nretry_min_seconds = int(os.getenv(\"RETRY_MIN_SECONDS\", 1))\nretry_max_seconds = int(os.getenv(\"RETRY_MAX_SECONDS\", 5))\ntimeout 
= int(os.getenv(\"TIMEOUT\", 30))\ndebug = os.getenv(\"DEBUG\", \"False\").lower() in (\"true\", \"1\", \"t\")\n\n# Configure system prompt\nsystem_template = \"\"\"Use the following pieces of context to answer the users question.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\nALWAYS return a \"SOURCES\" part in your answer.\nThe \"SOURCES\" part should be a reference to the source of the document from which you got your answer.\n\nExample of your response should be:\n\n```\nThe answer is foo\nSOURCES: xyz\n```\n\nBegin!\n----------------\n{summaries}\"\"\"\nmessages = [\n    SystemMessagePromptTemplate.from_template(system_template),\n    HumanMessagePromptTemplate.from_template(\"{question}\"),\n]\nprompt = ChatPromptTemplate.from_messages(messages)\nchain_type_kwargs = {\"prompt\": prompt}\n\n# Configure a logger\nlogging.basicConfig(\n    stream=sys.stdout,\n    format=\"[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s\",\n    level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)\n\n# Create Token Provider\nif api_type == \"azure_ad\":\n    token_provider = get_bearer_token_provider(\n        DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n    )\n\n# Setting the environment variables for the playground\nif api_type == \"azure\":\n    os.environ[\"AZURE_OPENAI_API_KEY\"] = api_key\nos.environ[\"AZURE_OPENAI_API_VERSION\"] = api_version\nos.environ[\"AZURE_OPENAI_ENDPOINT\"] = api_base\nos.environ[\"AZURE_OPENAI_DEPLOYMENT_NAME\"] = chat_completion_deployment\n\n\n@cl.on_chat_start\nasync def start():\n    await cl.Avatar(\n        name=\"Chatbot\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n    ).send()\n    await cl.Avatar(\n        name=\"Error\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n    ).send()\n    await cl.Avatar(\n        name=\"You\",\n        
url=\"https://media.architecturaldigest.com/photos/5f241de2c850b2a36b415024/master/w_1600%2Cc_limit/Luke-logo.png\",\n    ).send()\n\n    # Initialize the file list to None\n    files = None\n\n    # Wait for the user to upload a file\n    while files == None:\n        files = await cl.AskFileMessage(\n            content=f\"Please upload up to {max_files} `.pdf` or `.docx` files to begin.\",\n            accept=[\n                \"application/pdf\",\n                \"application/vnd.openxmlformats-officedocument.wordprocessingml.document\",\n            ],\n            max_size_mb=max_size_mb,\n            max_files=max_files,\n            timeout=86400,\n            raise_on_timeout=False,\n        ).send()\n\n    # Create a message to inform the user that the files are being processed\n    content = \"\"\n    if len(files) == 1:\n        content = f\"Processing `{files[0].name}`...\"\n    else:\n        files_names = [f\"`{f.name}`\" for f in files]\n        content = f\"Processing {', '.join(files_names)}...\"\n    logger.info(content)\n    msg = cl.Message(content=content, author=\"Chatbot\")\n    await msg.send()\n\n    # Create a list to store the texts of each file\n    all_texts = []\n\n    # Process each file uplodaded by the user\n    for file in files:\n        # Read file contents\n        with open(file.path, \"rb\") as uploaded_file:\n            file_contents = uploaded_file.read()\n\n        logger.info(\"[%d] bytes were read from %s\", len(file_contents), file.path)\n\n        # Create an in-memory buffer from the file content\n        bytes = io.BytesIO(file_contents)\n\n        # Get file extension\n        extension = file.name.split(\".\")[-1]\n\n        # Initialize the text variable\n        text = \"\"\n\n        # Read the file\n        if extension == \"pdf\":\n            reader = PdfReader(bytes)\n            for i in range(len(reader.pages)):\n                text += reader.pages[i].extract_text()\n                if debug:\n         
           logger.info(\"[%s] read from %s\", text, file.path)\n        elif extension == \"docx\":\n            doc = Document(bytes)\n            paragraph_list = []\n            for paragraph in doc.paragraphs:\n                paragraph_list.append(paragraph.text)\n                if debug:\n                    logger.info(\"[%s] read from %s\", paragraph.text, file.path)\n            text = \"\\n\".join(paragraph_list)\n\n        # Split the text into chunks\n        text_splitter = RecursiveCharacterTextSplitter(\n            chunk_size=text_splitter_chunk_size,\n            chunk_overlap=text_splitter_chunk_overlap,\n        )\n        texts = text_splitter.split_text(text)\n\n        # Add the chunks and metadata to the list\n        all_texts.extend(texts)\n\n    # Create a metadata for each chunk\n    metadatas = [{\"source\": f\"{i}-pl\"} for i in range(len(all_texts))]\n\n    # Create a Chroma vector store\n    if api_type == \"azure\":\n        embeddings = AzureOpenAIEmbeddings(\n            openai_api_version=api_version,\n            openai_api_type=api_type,\n            openai_api_key=api_key,\n            azure_endpoint=api_base,\n            azure_deployment=embeddings_deployment,\n            max_retries=max_retries,\n            retry_min_seconds=retry_min_seconds,\n            retry_max_seconds=retry_max_seconds,\n            chunk_size=embeddings_chunk_size,\n            timeout=timeout,\n        )\n    else:\n        embeddings = AzureOpenAIEmbeddings(\n            openai_api_version=api_version,\n            openai_api_type=api_type,\n            azure_endpoint=api_base,\n            azure_ad_token_provider=token_provider,\n            azure_deployment=embeddings_deployment,\n            max_retries=max_retries,\n            retry_min_seconds=retry_min_seconds,\n            retry_max_seconds=retry_max_seconds,\n            chunk_size=embeddings_chunk_size,\n            timeout=timeout,\n        )\n\n    # Create a Chroma vector store\n    
db = await cl.make_async(Chroma.from_texts)(\n        all_texts, embeddings, metadatas=metadatas\n    )\n\n    # Create an AzureChatOpenAI llm\n    if api_type == \"azure\":\n        llm = AzureChatOpenAI(\n            openai_api_type=api_type,\n            openai_api_version=api_version,\n            openai_api_key=api_key,\n            azure_endpoint=api_base,\n            temperature=temperature,\n            azure_deployment=chat_completion_deployment,\n            streaming=True,\n            max_retries=max_retries,\n            timeout=timeout,\n        )\n    else:\n        llm = AzureChatOpenAI(\n            openai_api_type=api_type,\n            openai_api_version=api_version,\n            azure_endpoint=api_base,\n            api_key=api_key,\n            temperature=temperature,\n            azure_deployment=chat_completion_deployment,\n            azure_ad_token_provider=token_provider,\n            streaming=True,\n            max_retries=max_retries,\n            timeout=timeout,\n        )\n\n    # Create a chain that uses the Chroma vector store\n    chain = RetrievalQAWithSourcesChain.from_chain_type(\n        llm=llm,\n        chain_type=\"stuff\",\n        retriever=db.as_retriever(),\n        return_source_documents=True,\n        chain_type_kwargs=chain_type_kwargs,\n    )\n\n    # Save the metadata and texts in the user session\n    cl.user_session.set(\"metadatas\", metadatas)\n    cl.user_session.set(\"texts\", all_texts)\n\n    # Create a message to inform the user that the files are ready for queries\n    content = \"\"\n    if len(files) == 1:\n        content = f\"`{files[0].name}` processed. You can now ask questions!\"\n        logger.info(content)\n    else:\n        files_names = [f\"`{f.name}`\" for f in files]\n        content = f\"{', '.join(files_names)} processed. 
You can now ask questions.\"\n        logger.info(content)\n    msg.content = content\n    msg.author = \"Chatbot\"\n    await msg.update()\n\n    # Store the chain in the user session\n    cl.user_session.set(\"chain\", chain)\n\n\n@cl.on_message\nasync def main(message: cl.Message):\n    # Retrieve the chain from the user session\n    chain = cl.user_session.get(\"chain\")\n\n    # Create a callback handler\n    cb = cl.AsyncLangchainCallbackHandler()\n\n    # Get the response from the chain\n    response = await chain.acall(message.content, callbacks=[cb])\n    logger.info(\"Question: [%s]\", message.content)\n\n    # Get the answer and sources from the response\n    answer = response[\"answer\"]\n    sources = response[\"sources\"].strip()\n    source_elements = []\n\n    if debug:\n        logger.info(\"Answer: [%s]\", answer)\n\n    # Get the metadata and texts from the user session\n    metadatas = cl.user_session.get(\"metadatas\")\n    all_sources = [m[\"source\"] for m in metadatas]\n    texts = cl.user_session.get(\"texts\")\n\n    if sources:\n        found_sources = []\n\n        # Add the sources to the message\n        for source in sources.split(\",\"):\n            source_name = source.strip().replace(\".\", \"\")\n            # Get the index of the source\n            try:\n                index = all_sources.index(source_name)\n            except ValueError:\n                continue\n            text = texts[index]\n            found_sources.append(source_name)\n            # Create the text element referenced in the message\n            source_elements.append(cl.Text(content=text, name=source_name))\n\n        if found_sources:\n            answer += f\"\\nSources: {', '.join(found_sources)}\"\n        else:\n            answer += \"\\nNo sources found\"\n\n    await cl.Message(content=answer, elements=source_elements).send()\n\n    # Setting the AZURE_OPENAI_API_KEY environment variable for the playground\n    if api_type == \"azure_ad\":\n    
    os.environ[\"AZURE_OPENAI_API_KEY\"] = token_provider()
\n
\n

You can run the application locally using the following command. The `-w` flag indicates auto-reload whenever we make changes live in our application code.

\n
\n
chainlit run app.py -w
\n
\n

Build Docker Images

\n

You can use the src/01-build-docker-images.sh Bash script to build the Docker container image for each container app.

\n

 

\n

 

\n
#!/bin/bash\n\n# Variables\nsource ./00-variables.sh\n\n# Use a for loop to build the docker images using the array index\nfor index in ${!images[@]}; do\n  # Build the docker image\n  docker build -t ${images[$index]}:$tag -f Dockerfile --build-arg FILENAME=${filenames[$index]} --build-arg PORT=$port .\ndone
\n

 

\n

 

\n

 

\n

 

\n

Before running any script in the src folder, make sure to customize the value of the variables inside the 00-variables.sh file located in the same folder. This file is embedded in all the scripts and contains the following variables:

\n

 

\n

 

\n
# Variables\n\n# Azure Container Registry\nprefix=\"Blue\"\nacrName=\"${prefix}Registry\"\nacrResourceGrougName=\"${prefix}RG\"\nlocation=\"EastUS\"\n\n# Python Files\ndocAppFile=\"doc.py\"\nchatAppFile=\"chat.py\"\n\n# Docker Images\ndocImageName=\"doc\"\nchatImageName=\"chat\"\ntag=\"v1\"\nport=\"8000\"\n\n# Arrays\nimages=($docImageName $chatImageName)\nfilenames=($docAppFile $chatAppFile)
\n

 

\n

 

\n

The Dockerfile under the src folder is parametric and can be used to build the container images for both chat applications.

\n
\n
# app/Dockerfile\n\n# # Stage 1 - Install build dependencies\n\n# A Dockerfile must start with a FROM instruction that sets the base image for the container.\n# The Python images come in many flavors, each designed for a specific use case.\n# The python:3.11-slim image is a good base image for most applications.\n# It is a minimal image built on top of Debian Linux and includes only the necessary packages to run Python.\n# The slim image is a good choice because it is small and contains only the packages needed to run Python.\n# For more information, see: \n# * https://hub.docker.com/_/python \n# * https://docs.streamlit.io/knowledge-base/tutorials/deploy/docker\nFROM python:3.11-slim AS builder\n\n# The WORKDIR instruction sets the working directory for any RUN, CMD, ENTRYPOINT, COPY and ADD instructions that follow it in the Dockerfile.\n# If the WORKDIR doesnโ€™t exist, it will be created even if itโ€™s not used in any subsequent Dockerfile instruction.\n# For more information, see: https://docs.docker.com/engine/reference/builder/#workdir\nWORKDIR /app\n\n# Set environment variables. \n# The ENV instruction sets the environment variable <key> to the value <value>.\n# This value will be in the environment of all โ€œdescendantโ€ Dockerfile commands and can be replaced inline in many as well.\n# For more information, see: https://docs.docker.com/engine/reference/builder/#env\nENV PYTHONDONTWRITEBYTECODE 1\nENV PYTHONUNBUFFERED 1\n\n# Install git so that we can clone the app code from a remote repo using the RUN instruction.\n# The RUN comand has 2 forms:\n# * RUN <command> (shell form, the command is run in a shell, which by default is /bin/sh -c on Linux or cmd /S /C on Windows)\n# * RUN [\"executable\", \"param1\", \"param2\"] (exec form)\n# The RUN instruction will execute any commands in a new layer on top of the current image and commit the results. 
\n# The resulting committed image will be used for the next step in the Dockerfile.\n# For more information, see: https://docs.docker.com/engine/reference/builder/#run\nRUN apt-get update && apt-get install -y \\\n  build-essential \\\n  curl \\\n  software-properties-common \\\n  git \\\n  && rm -rf /var/lib/apt/lists/*\n\n# Create a virtualenv to keep dependencies together\nRUN python -m venv /opt/venv\nENV PATH=\"/opt/venv/bin:$PATH\"\n\n# Clone the requirements.txt which contains dependencies to WORKDIR\n# COPY has two forms:\n# * COPY <src> <dest> (this copies the files from the local machine to the container's own filesystem)\n# * COPY [\"<src>\",... \"<dest>\"] (this form is required for paths containing whitespace)\n# For more information, see: https://docs.docker.com/engine/reference/builder/#copy\nCOPY requirements.txt .\n\n# Install the Python dependencies\nRUN pip install --no-cache-dir --no-deps -r requirements.txt\n\n# Stage 2 - Copy only necessary files to the runner stage\n\n# The FROM instruction initializes a new build stage for the application\nFROM python:3.11-slim\n\n# Define the filename to copy as an argument\nARG FILENAME\n\n# Deefine the port to run the application on as an argument\nARG PORT=8000\n\n# Set an environment variable\nENV FILENAME=${FILENAME}\n\n# Sets the working directory to /app\nWORKDIR /app\n\n# Copy the virtual environment from the builder stage\nCOPY --from=builder /opt/venv /opt/venv\n\n# Set environment variables\nENV PATH=\"/opt/venv/bin:$PATH\"\n\n# Clone the $FILENAME containing the application code\nCOPY $FILENAME .\n\n# Copy the chainlit.md file to the working directory\nCOPY chainlit.md .\n\n# Copy the .chainlit folder to the working directory\nCOPY ./.chainlit ./.chainlit\n\n# The EXPOSE instruction informs Docker that the container listens on the specified network ports at runtime.\n# For more information, see: https://docs.docker.com/engine/reference/builder/#expose\nEXPOSE $PORT\n\n# The ENTRYPOINT 
instruction has two forms:\n# * ENTRYPOINT [\"executable\", \"param1\", \"param2\"] (exec form, preferred)\n# * ENTRYPOINT command param1 param2 (shell form)\n# The ENTRYPOINT instruction allows you to configure a container that will run as an executable.\n# For more information, see: https://docs.docker.com/engine/reference/builder/#entrypoint\nCMD chainlit run $FILENAME --port=$PORT
\n
\n

Test applications locally

\n

You can use the src/02-run-docker-container.sh Bash script to test the containers for the doc and chat applications.

\n
\n
#!/bin/bash\n\n# Variables\nsource ./00-variables.sh\n\n# Print the menu\necho \"====================================\"\necho \"Run Docker Container (1-3): \"\necho \"====================================\"\noptions=(\n  \"Doc\"\n  \"Chat\"\n)\nname=\"\"\n# Select an option\nCOLUMNS=0\nselect option in \"${options[@]}\"; do\n  case $option in\n    \"Doc\")\n      docker run -it \\\n      --rm \\\n      -p $port:$port \\\n      -e AZURE_OPENAI_BASE=$AZURE_OPENAI_BASE \\\n      -e AZURE_OPENAI_KEY=$AZURE_OPENAI_KEY \\\n      -e AZURE_OPENAI_MODEL=$AZURE_OPENAI_MODEL \\\n      -e AZURE_OPENAI_DEPLOYMENT=$AZURE_OPENAI_DEPLOYMENT \\\n      -e AZURE_OPENAI_ADA_DEPLOYMENT=$AZURE_OPENAI_ADA_DEPLOYMENT \\\n      -e AZURE_OPENAI_VERSION=$AZURE_OPENAI_VERSION \\\n      -e AZURE_OPENAI_TYPE=$AZURE_OPENAI_TYPE \\\n      -e TEMPERATURE=$TEMPERATURE \\\n      --name $docImageName \\\n      $docImageName:$tag\n      break\n    ;;\n    \"Chat\")\n      docker run -it \\\n      --rm \\\n      -p $port:$port \\\n      -e AZURE_OPENAI_BASE=$AZURE_OPENAI_BASE \\\n      -e AZURE_OPENAI_KEY=$AZURE_OPENAI_KEY \\\n      -e AZURE_OPENAI_MODEL=$AZURE_OPENAI_MODEL \\\n      -e AZURE_OPENAI_DEPLOYMENT=$AZURE_OPENAI_DEPLOYMENT \\\n      -e AZURE_OPENAI_VERSION=$AZURE_OPENAI_VERSION \\\n      -e AZURE_OPENAI_TYPE=$AZURE_OPENAI_TYPE \\\n      -e TEMPERATURE=$TEMPERATURE \\\n      --name $chatImageName \\\n      $chatImageName:$tag\n      break\n    ;;\n    \"Quit\")\n      exit\n    ;;\n    *) echo \"invalid option $REPLY\" ;;\n  esac\ndone
\n
\n

Push Docker containers to the Azure Container Registry

\n

You can use the src/03-push-docker-image.sh Bash script to push the Docker container images for the doc and chat applications to the Azure Container Registry (ACR).

\n

 

\n

 

\n
#!/bin/bash\n\n# Variables\nsource ./00-variables.sh\n\n# Login to ACR\necho \"Logging in to [${acrName,,}] container registry...\"\naz acr login --name ${acrName,,}\n\n# Retrieve ACR login server. Each container image needs to be tagged with the loginServer name of the registry. \necho \"Retrieving login server for the [${acrName,,}] container registry...\"\nloginServer=$(az acr show --name ${acrName,,} --query loginServer --output tsv)\n\n# Use a for loop to tag and push the local docker images to the Azure Container Registry\nfor index in ${!images[@]}; do\n  # Tag the local sender image with the loginServer of ACR\n  docker tag ${images[$index],,}:$tag $loginServer/${images[$index],,}:$tag\n\n  # Push the container image to ACR\n  docker push $loginServer/${images[$index],,}:$tag\ndone
\n

 

\n

 

\n

Monitoring

\n

Azure Container Apps provides several built-in observability features that together give you a holistic view of your container appโ€™s health throughout its application lifecycle. These features help you monitor and diagnose the state of your app to improve performance and respond to trends and critical problems.

\n

You can use the Log Stream panel on the Azure Portal to see the logs generated by a container app, as shown in the following screenshot.

\n
 
\n

\n

 

\n

Alternatively, you can click open the Logs panel, as shown in the following screenshot, and use a Kusto Query Language (KQL) query to filter, project, and retrieve only the desired data.

\n

 

\n

\n

 

\n

Review deployed resources

\n

You can use the Azure portal to list the deployed resources in the resource group, as shown in the following picture:

\n

 

\n

\n

โ€ƒ

\n

You can also use Azure CLI to list the deployed resources in the resource group:

\n
\n
az resource list --resource-group <resource-group-name>
\n
\n

You can also use the following PowerShell cmdlet to list the deployed resources in the resource group:

\n
\n
Get-AzResource -ResourceGroupName <resource-group-name>
\n
\n

Clean up resources

\n

You can delete the resource group using the following Azure CLI command when you no longer need the resources you created. This will remove all the Azure resources.

\n
\n
az group delete --name <resource-group-name>
\n
\n

Alternatively, you can use the following PowerShell cmdlet to delete the resource group and all the Azure resources.

\n

 

\n

 

\n

 

\n

 

\n
Remove-AzResourceGroup -Name <resource-group-name>
\n

 

\n

 

\n

 

\n

 

\n

 

\n

 

\n

 

\n

 

","body@stringLength":"188813","rawBody":"

This article shows how to quickly build chat applications using Python and leveraging powerful technologies such as OpenAI ChatGPT models, Embedding models, LangChain framework, ChromaDB vector database, and Chainlit, an open-source Python package that is specifically designed to create user interfaces (UIs) for AI applications. These applications are hosted on Azure Container Apps, a fully managed environment that enables you to run microservices and containerized applications on a serverless platform.

\n\n

Both applications use a user-defined managed identity to authenticate and authorize against Azure OpenAI Service (AOAI) and Azure Container Registry (ACR) and use Azure Private Endpoints to connect privately and securely to these services. The chat UIs are built using Chainlit, an open-source Python package designed explicitly for creating AI applications. Chainlit seamlessly integrates with LangChain, LlamaIndex, and LangFlow, making it a powerful tool for easily developing ChatGPT-like applications.

\n

By following our example, you can quickly create sophisticated chat applications that utilize cutting-edge technologies, empowering users with intelligent conversational capabilities.

\n

 

\n

You can find the code and Visio diagrams in the companion GitHub repository. Also, check the following articles:

\n\n

 

\n

Prerequisites

\n\n

 

\n

Architecture

\n

The following diagram shows the architecture and network topology of the sample:

\n
 
\n

\n

 

\n

This sample provides two sets of Terraform modules to deploy the infrastructure and the chat applications.

\n

 

\n

Infrastructure Terraform Modules

\n

You can use the Terraform modules in the terraform/infra folder to deploy the infrastructure used by the sample, including the Azure Container Apps Environment, Azure OpenAI Service (AOAI), and Azure Container Registry (ACR), but not the Azure Container Apps (ACA). The Terraform modules in the terraform/infra folder deploy the following resources:

\n\n

 

\n

Application Terraform Modules

\n

You can use the Terraform modules in the terraform/apps folder to deploy the Azure Container Apps (ACA) using the Docker container images stored in the Azure Container Registry you deployed in the previous step.

\n\n

 

\n

Azure Container Apps

\n

Azure Container Apps (ACA) is a serverless compute service provided by Microsoft Azure that allows developers to easily deploy and manage containerized applications without the need to manage the underlying infrastructure. It provides a simplified and scalable solution for running applications in containers, leveraging the power and flexibility of the Azure ecosystem.

\n

With Azure Container Apps, developers can package their applications into containers using popular containerization technologies such as Docker. These containers encapsulate the application and its dependencies, ensuring consistent execution across different environments.

\n

Powered by Kubernetes and open-source technologies like Dapr, KEDA, and envoy, the service abstracts away the complexities of managing the infrastructure, including provisioning, scaling, and monitoring, allowing developers to focus solely on building and deploying their applications. Azure Container Apps handles automatic scaling, and load balancing, and natively integrates with other Azure services, such as Azure Monitor and Azure Container Registry (ACR), to provide a comprehensive and secure application deployment experience.

\n

Azure Container Apps offers benefits such as rapid deployment, easy scalability, cost-efficiency, and seamless integration with other Azure services, making it an attractive choice for modern application development and deployment scenarios.

\n

 

\n

Azure OpenAI Service

\n

The Azure OpenAI Service is a platform offered by Microsoft Azure that provides cognitive services powered by OpenAI models. One of the models available through this service is the ChatGPT model, which is designed for interactive conversational tasks. It allows developers to integrate natural language understanding and generation capabilities into their applications.

\n

Azure OpenAI Service provides REST API access to OpenAI's powerful language models including the GPT-3, Codex and Embeddings model series. In addition, the new GPT-4 and ChatGPT model series have now reached general availability. These models can be easily adapted to your specific task, including but not limited to content generation, summarization, semantic search, and natural language-to-code translation. Users can access the service through REST APIs, Python SDK, or our web-based interface in the Azure OpenAI Studio.

\n

You can use Embeddings model to transform raw data or inputs into meaningful and compact numerical representations called embeddings. Embeddings capture the semantic or contextual information of the input data in a lower-dimensional space, making it easier for machine learning algorithms to process and analyze the data effectively. Embeddings can be stored in a vector database, such as ChromaDB or Facebook AI Similarity Search (FAISS), explicitly designed for efficient storage, indexing, and retrieval of vector embeddings.

\n

The Chat Completion API, which is part of the Azure OpenAI Service, provides a dedicated interface for interacting with the ChatGPT and GPT-4 models. This API is currently in preview and is the preferred method for accessing these models. The GPT-4 models can only be accessed through this API.

\n

GPT-3, GPT-3.5, and GPT-4 models from OpenAI are prompt-based. With prompt-based models, the user interacts with the model by entering a text prompt, to which the model responds with a text completion. This completion is the model's continuation of the input text. While these models are compelling, their behavior is also very sensitive to the prompt. This makes prompt construction a critical skill to develop. For more information, see Introduction to prompt engineering.

\n

Prompt construction can be complex. In practice, the prompt acts to configure the model weights to complete the desired task, but it's more of an art than a science, often requiring experience and intuition to craft a successful prompt. The goal of this article is to help get you started with this learning process. It attempts to capture general concepts and patterns that apply to all GPT models. However, it's essential to understand that each model behaves differently, so the learnings may not apply equally to all models.

\n

Prompt engineering refers to the process of creating instructions called prompts for Large Language Models (LLMs), such as OpenAI's ChatGPT. With the immense potential of LLMs to solve a wide range of tasks, leveraging prompt engineering can empower us to save significant time and facilitate the development of impressive applications. It holds the key to unleashing the full capabilities of these huge models, transforming how we interact and benefit from them. For more information, see Prompt engineering techniques.

\n

 

\n

Vector Databases

\n

A vector database is a specialized database that goes beyond traditional storage by organizing information to simplify the search for similar items. Instead of merely storing words or numbers, it leverages vector embeddings - unique numerical representations of data. These embeddings capture meaning, context, and relationships. For instance, words are represented as vectors, whereas similar words have similar vector values.

\n

The applications of vector databases are numerous and powerful. In language processing, they facilitate the discovery of related documents or sentences. By comparing the vector embeddings of different texts, finding similar or related information becomes faster and more efficient. This capability benefits search engines and recommendation systems, which can suggest relevant articles or products based on user interests.

\n

In the realm of image analysis, vector databases excel in finding visually similar images. By representing images as vectors, a simple comparison of vector values can identify visually similar images. This capability is precious for tasks like reverse image search or content-based image retrieval.

\n

Additionally, vector databases find applications in fraud detection, anomaly detection, and clustering. By comparing vector embeddings of data points, unusual patterns can be detected, and similar items can be grouped together, aiding in effective data analysis and decision-making.  This is a list of Azure services that are suitable for use as a vector database in a retrieval-augmented generation (RAG) solution:

\n

 

\n\n

 

\n

Here is a list of the most popular vector databases:

\n

 

\n\n

 

\n

This sample makes use of the ChromaDB vector database, but you can easily modify the code to use another vector database. You can even use Azure Cache for Redis Enterprise to store the vector embeddings and compute vector similarity with high performance and low latency. For more information, see Vector Similarity Search with Azure Cache for Redis Enterprise.

\n

 

\n

LangChain

\n

LangChain is a software framework designed to streamline the development of applications using large language models (LLMs). It serves as a language model integration framework, facilitating various applications like document analysis and summarization, chatbots, and code analysis.

\n

LangChain's integrations cover an extensive range of systems, tools, and services, making it a comprehensive solution for language model-based applications. LangChain integrates with the major cloud platforms such as Microsoft Azure, Amazon AWS, and Google, and with API wrappers for various purposes like news, movie information, and weather, as well as support for Bash, web scraping, and more. It also supports multiple language models, including those from OpenAI, Anthropic, and Hugging Face. Moreover, LangChain offers various functionalities for document handling, code generation, analysis, debugging, and interaction with databases and other data sources.

\n

 

\n

Chainlit

\n

Chainlit is an open-source Python package that is specifically designed to create user interfaces (UIs) for AI applications. It simplifies the process of building interactive chats and interfaces, making developing AI-powered applications faster and more efficient. While Streamlit is a general-purpose UI library, Chainlit is purpose-built for AI applications and seamlessly integrates with other AI technologies such as LangChain, LlamaIndex, and LangFlow.

\n

With Chainlit, developers can easily create intuitive UIs for their AI models, including ChatGPT-like applications. It provides a user-friendly interface for users to interact with AI models, enabling conversational experiences and information retrieval. Chainlit also offers unique features, such as displaying the Chain of Thought, which allows users to explore the reasoning process directly within the UI. This feature enhances transparency and enables users to understand how the AI arrives at its responses or recommendations.

\n

For more information, see the following resources:

\n\n

 

\n

Deploy the Infrastructure

\n

Before deploying the Terraform modules in the terraform/infra folder, specify a value for the following variables in the terraform.tfvars variable definitions file.

\n

 

\n

 

\nname_prefix = \"Blue\"\nlocation = \"EastUS\"\n

 

\n

 

\n

This is the definition of each variable:

\n\n

NOTE: Make sure to select a region where Azure OpenAI Service (AOAI) supports both GPT-3.5/GPT-4 models like gpt-35-turbo-16k and Embeddings models like text-embedding-ada-002.

\n

 

\n

OpenAI Module

\n

The following table contains the code from the terraform/infra/modules/openai/main.tf Terraform module used to deploy the Azure OpenAI Service.

\n

 

\n

 

\nresource \"azurerm_cognitive_account\" \"openai\" {\n name = var.name\n location = var.location\n resource_group_name = var.resource_group_name\n kind = \"OpenAI\"\n custom_subdomain_name = var.custom_subdomain_name\n sku_name = var.sku_name\n public_network_access_enabled = var.public_network_access_enabled\n tags = var.tags\n\n identity {\n type = \"SystemAssigned\"\n }\n\n lifecycle {\n ignore_changes = [\n tags\n ]\n }\n}\n\nresource \"azurerm_cognitive_deployment\" \"deployment\" {\n for_each = {for deployment in var.deployments: deployment.name => deployment}\n\n name = each.key\n cognitive_account_id = azurerm_cognitive_account.openai.id\n\n model {\n format = \"OpenAI\"\n name = each.value.model.name\n version = each.value.model.version\n }\n\n scale {\n type = \"Standard\"\n }\n}\n\nresource \"azurerm_monitor_diagnostic_setting\" \"settings\" {\n name = \"DiagnosticsSettings\"\n target_resource_id = azurerm_cognitive_account.openai.id\n log_analytics_workspace_id = var.log_analytics_workspace_id\n\n enabled_log {\n category = \"Audit\"\n\n retention_policy {\n enabled = true\n days = var.log_analytics_retention_days\n }\n }\n\n enabled_log {\n category = \"RequestResponse\"\n\n retention_policy {\n enabled = true\n days = var.log_analytics_retention_days\n }\n }\n\n enabled_log {\n category = \"Trace\"\n\n retention_policy {\n enabled = true\n days = var.log_analytics_retention_days\n }\n }\n\n metric {\n category = \"AllMetrics\"\n\n retention_policy {\n enabled = true\n days = var.log_analytics_retention_days\n }\n }\n}\n

 

\n

 

\n

Azure Cognitive Services uses custom subdomain names for each resource created through the Azure portal, Azure Cloud Shell, Azure CLI, Bicep, Azure Resource Manager (ARM), or Terraform. Unlike regional endpoints, which were common for all customers in a specific Azure region, custom subdomain names are unique to the resource. Custom subdomain names are required to enable authentication features like Azure Active Directory (Azure AD). We need to specify a custom subdomain for our Azure OpenAI Service, as our chatbot applications will use an Azure AD security token to access it. By default, the terraform/infra/modules/openai/main.tf module sets the value of the custom_subdomain_name parameter to the lowercase name of the Azure OpenAI resource. For more information on custom subdomains, see Custom subdomain names for Cognitive Services.

\n

This Terraform module allows you to pass an array containing the definition of one or more model deployments in the deployments variable. For more information on model deployments, see Create a resource and deploy a model using Azure OpenAI. The openai_deployments variable in the terraform/infra/variables.tf file defines the structure and the default models deployed by the sample:

\n

 

\n

 

\nvariable \"openai_deployments\" {\n description = \"(Optional) Specifies the deployments of the Azure OpenAI Service\"\n type = list(object({\n name = string\n model = object({\n name = string\n version = string\n })\n rai_policy_name = string \n }))\n default = [\n {\n name = \"gpt-35-turbo-16k\"\n model = {\n name = \"gpt-35-turbo-16k\"\n version = \"0613\"\n }\n rai_policy_name = \"\"\n },\n {\n name = \"text-embedding-ada-002\"\n model = {\n name = \"text-embedding-ada-002\"\n version = \"2\"\n }\n rai_policy_name = \"\"\n }\n ] \n}\n

 

\n

 

\n

Alternatively, you can use the Terraform module for deploying Azure OpenAI Service to deploy the Azure OpenAI Service.

\n

Private Endpoint Module

\n

The terraform/infra/main.tf module creates Azure Private Endpoints and Azure Private DNS Zones for each of the following resources:

\n\n

In particular, it creates an Azure Private Endpoint and an Azure Private DNS Zone for the Azure OpenAI Service, as shown in the following code snippet:

\n

 

\n

 

\nmodule \"openai_private_dns_zone\" {\n source = \"./modules/private_dns_zone\"\n name = \"privatelink.openai.azure.com\"\n resource_group_name = azurerm_resource_group.rg.name\n tags = var.tags\n virtual_networks_to_link = {\n (module.virtual_network.name) = {\n subscription_id = data.azurerm_client_config.current.subscription_id\n resource_group_name = azurerm_resource_group.rg.name\n }\n }\n}\n\nmodule \"openai_private_endpoint\" {\n source = \"./modules/private_endpoint\"\n name = \"${module.openai.name}PrivateEndpoint\"\n location = var.location\n resource_group_name = azurerm_resource_group.rg.name\n subnet_id = module.virtual_network.subnet_ids[var.vm_subnet_name]\n tags = var.tags\n private_connection_resource_id = module.openai.id\n is_manual_connection = false\n subresource_name = \"account\"\n private_dns_zone_group_name = \"AcrPrivateDnsZoneGroup\"\n private_dns_zone_group_ids = [module.openai_private_dns_zone.id]\n}\n\n

 

\n

 

\n

Below you can read the code of the terraform/infra/modules/private_endpoint/main.tf module, which is used to create Azure Private Endpoints:

\n

 

\n

 

\nresource \"azurerm_private_endpoint\" \"private_endpoint\" {\n name = var.name\n location = var.location\n resource_group_name = var.resource_group_name\n subnet_id = var.subnet_id\n tags = var.tags\n\n private_service_connection {\n name = \"${var.name}Connection\"\n private_connection_resource_id = var.private_connection_resource_id\n is_manual_connection = var.is_manual_connection\n subresource_names = try([var.subresource_name], null)\n request_message = try(var.request_message, null)\n }\n\n private_dns_zone_group {\n name = var.private_dns_zone_group_name\n private_dns_zone_ids = var.private_dns_zone_group_ids\n }\n\n lifecycle {\n ignore_changes = [\n tags\n ]\n }\n}\n

 

\n

 

\n

Private DNS Zone Module

\n

In the following box, you can read the code of the terraform/infra/modules/private_dns_zone/main.tf module, which is utilized to create the Azure Private DNS Zones.

\n

 

\n

 

\nresource \"azurerm_private_dns_zone\" \"private_dns_zone\" {\n name = var.name\n resource_group_name = var.resource_group_name\n tags = var.tags\n\n lifecycle {\n ignore_changes = [\n tags\n ]\n }\n}\n\nresource \"azurerm_private_dns_zone_virtual_network_link\" \"link\" {\n for_each = var.virtual_networks_to_link\n\n name = \"link_to_${lower(basename(each.key))}\"\n resource_group_name = var.resource_group_name\n private_dns_zone_name = azurerm_private_dns_zone.private_dns_zone.name\n virtual_network_id = \"/subscriptions/${each.value.subscription_id}/resourceGroups/${each.value.resource_group_name}/providers/Microsoft.Network/virtualNetworks/${each.key}\"\n\n lifecycle {\n ignore_changes = [\n tags\n ]\n }\n}\n

 

\n

 

\n

Workload Managed Identity Module

\n

Below you can read the code of the terraform/infra/modules/managed_identity/main.tf module, which is used to create the Azure Managed Identity used by the Azure Container Apps to pull container images from the Azure Container Registry, and by the chat applications to connect to the Azure OpenAI Service. You can use a system-assigned or user-assigned managed identity from Azure Active Directory (Azure AD) to let Azure Container Apps access any Azure AD-protected resource. For more information, see Managed identities in Azure Container Apps. You can pull container images from private repositories in an Azure Container Registry using system-assigned or user-assigned managed identities for authentication to avoid using administrative credentials. For more information, see Azure Container Apps image pull with managed identity. This user-defined managed identity is assigned the Cognitive Services User role on the Azure OpenAI Service namespace and ACRPull role on the Azure Container Registry (ACR). By assigning the above roles, you grant the user-defined managed identity access to these resources.

\n

 

\n

 

\nresource \"azurerm_user_assigned_identity\" \"workload_user_assigned_identity\" {\n name = var.name\n resource_group_name = var.resource_group_name\n location = var.location\n tags = var.tags\n\n lifecycle {\n ignore_changes = [\n tags\n ]\n }\n}\n\nresource \"azurerm_role_assignment\" \"cognitive_services_user_assignment\" {\n scope = var.openai_id\n role_definition_name = \"Cognitive Services User\"\n principal_id = azurerm_user_assigned_identity.workload_user_assigned_identity.principal_id\n skip_service_principal_aad_check = true\n}\n\nresource \"azurerm_role_assignment\" \"acr_pull_assignment\" {\n scope = var.acr_id\n role_definition_name = \"AcrPull\"\n principal_id = azurerm_user_assigned_identity.workload_user_assigned_identity.principal_id\n skip_service_principal_aad_check = true\n}\n

 

\n

 

\n

Deploy the Applications

\n

Before deploying the Terraform modules in the terraform/apps folder, specify a value for the following variables in the terraform.tfvars variable definitions file.

\n

 

\n

 

\nresource_group_name = \"BlueRG\"\ncontainer_app_environment_name = \"BlueEnvironment\"\ncontainer_registry_name = \"BlueRegistry\"\nworkload_managed_identity_name = \"BlueWorkloadIdentity\"\ncontainer_apps = [\n {\n name = \"chatapp\"\n revision_mode = \"Single\"\n ingress = {\n allow_insecure_connections = true\n external_enabled = true\n target_port = 8000\n transport = \"http\"\n traffic_weight = {\n label = \"default\"\n latest_revision = true\n revision_suffix = \"default\"\n percentage = 100\n }\n }\n template = {\n containers = [\n {\n name = \"chat\"\n image = \"chat:v1\"\n cpu = 0.5\n memory = \"1Gi\"\n env = [\n {\n name = \"TEMPERATURE\"\n value = 0.9\n },\n {\n name = \"AZURE_OPENAI_BASE\"\n value = \"https://blueopenai.openai.azure.com/\"\n },\n {\n name = \"AZURE_OPENAI_KEY\"\n value = \"\"\n },\n {\n name = \"AZURE_OPENAI_TYPE\"\n value = \"azure_ad\"\n },\n {\n name = \"AZURE_OPENAI_VERSION\"\n value = \"2023-06-01-preview\"\n },\n {\n name = \"AZURE_OPENAI_DEPLOYMENT\"\n value = \"gpt-35-turbo-16k\"\n },\n {\n name = \"AZURE_OPENAI_MODEL\"\n value = \"gpt-35-turbo-16k\"\n },\n {\n name = \"AZURE_OPENAI_SYSTEM_MESSAGE\"\n value = \"You are a helpful assistant.\"\n },\n {\n name = \"MAX_RETRIES\"\n value = 5\n },\n {\n name = \"BACKOFF_IN_SECONDS\"\n value = \"1\"\n },\n {\n name = \"TOKEN_REFRESH_INTERVAL\"\n value = 2700\n }\n ]\n liveness_probe = {\n failure_count_threshold = 3\n initial_delay = 30\n interval_seconds = 60\n path = \"/\"\n port = 8000\n timeout = 30\n transport = \"HTTP\"\n }\n readiness_probe = {\n failure_count_threshold = 3\n interval_seconds = 60\n path = \"/\"\n port = 8000\n success_count_threshold = 3\n timeout = 30\n transport = \"HTTP\"\n }\n startup_probe = {\n failure_count_threshold = 3\n interval_seconds = 60\n path = \"/\"\n port = 8000\n timeout = 30\n transport = \"HTTP\"\n }\n }\n ]\n min_replicas = 1\n max_replicas = 3\n }\n },\n {\n name = \"docapp\"\n revision_mode = \"Single\"\n ingress = {\n 
allow_insecure_connections = true\n external_enabled = true\n target_port = 8000\n transport = \"http\"\n traffic_weight = {\n label = \"default\"\n latest_revision = true\n revision_suffix = \"default\"\n percentage = 100\n }\n }\n template = {\n containers = [\n {\n name = \"doc\"\n image = \"doc:v1\"\n cpu = 0.5\n memory = \"1Gi\"\n env = [\n {\n name = \"TEMPERATURE\"\n value = 0.9\n },\n {\n name = \"AZURE_OPENAI_BASE\"\n value = \"https://blueopenai.openai.azure.com/\"\n },\n {\n name = \"AZURE_OPENAI_KEY\"\n value = \"\"\n },\n {\n name = \"AZURE_OPENAI_TYPE\"\n value = \"azure_ad\"\n },\n {\n name = \"AZURE_OPENAI_VERSION\"\n value = \"2023-06-01-preview\"\n },\n {\n name = \"AZURE_OPENAI_DEPLOYMENT\"\n value = \"gpt-35-turbo-16k\"\n },\n {\n name = \"AZURE_OPENAI_MODEL\"\n value = \"gpt-35-turbo-16k\"\n },\n {\n name = \"AZURE_OPENAI_ADA_DEPLOYMENT\"\n value = \"text-embedding-ada-002\"\n },\n {\n name = \"AZURE_OPENAI_SYSTEM_MESSAGE\"\n value = \"You are a helpful assistant.\"\n },\n {\n name = \"MAX_RETRIES\"\n value = 5\n },\n {\n name = \"CHAINLIT_MAX_FILES\"\n value = 10\n },\n {\n name = \"TEXT_SPLITTER_CHUNK_SIZE\"\n value = 1000\n },\n {\n name = \"TEXT_SPLITTER_CHUNK_OVERLAP\"\n value = 10\n },\n {\n name = \"EMBEDDINGS_CHUNK_SIZE\"\n value = 16\n },\n {\n name = \"BACKOFF_IN_SECONDS\"\n value = \"1\"\n },\n {\n name = \"CHAINLIT_MAX_SIZE_MB\"\n value = 100\n },\n {\n name = \"TOKEN_REFRESH_INTERVAL\"\n value = 2700\n }\n ]\n liveness_probe = {\n failure_count_threshold = 3\n initial_delay = 30\n interval_seconds = 60\n path = \"/\"\n port = 8000\n timeout = 30\n transport = \"HTTP\"\n }\n readiness_probe = {\n failure_count_threshold = 3\n interval_seconds = 60\n path = \"/\"\n port = 8000\n success_count_threshold = 3\n timeout = 30\n transport = \"HTTP\"\n }\n startup_probe = {\n failure_count_threshold = 3\n interval_seconds = 60\n path = \"/\"\n port = 8000\n timeout = 30\n transport = \"HTTP\"\n }\n }\n ]\n min_replicas = 1\n max_replicas = 
3\n }\n }]\n

 

\n

 

\n

This is the definition of each variable:

\n\n

 

\n

Container App Module

\n

The terraform/apps/modules/container_app/main.tf module is utilized to create the Azure Container Apps. The module defines and uses the following data source for the Azure Container Registry, Azure Container Apps Environment, and user-defined managed identity created when deploying the infrastructure. These data sources are used to access the properties of these Azure resources.

\n

 

\n

 

\ndata \"azurerm_container_app_environment\" \"container_app_environment\" {\n name = var.container_app_environment_name\n resource_group_name = var.resource_group_name\n}\n\ndata \"azurerm_container_registry\" \"container_registry\" {\n name = var.container_registry_name\n resource_group_name = var.resource_group_name\n}\n\ndata \"azurerm_user_assigned_identity\" \"workload_user_assigned_identity\" {\n name = var.workload_managed_identity_name\n resource_group_name = var.resource_group_name\n}\n

 

\n

 

\n

The module creates and utilizes the following local variables:

\n

 

\n

 

\nlocals {\n identity = {\n type = \"UserAssigned\"\n identity_ids = [data.azurerm_user_assigned_identity.workload_user_assigned_identity.id]\n }\n identity_env = {\n name = \"AZURE_CLIENT_ID\"\n secret_name = null\n value = data.azurerm_user_assigned_identity.workload_user_assigned_identity.client_id\n }\n registry = {\n server = data.azurerm_container_registry.container_registry.login_server\n identity = data.azurerm_user_assigned_identity.workload_user_assigned_identity.id\n }\n}\n

 

\n

 

\n

This is the explanation of each local variable:

\n\n

Here is the complete Terraform code of the module:

\n

 

\n

 

\ndata \"azurerm_container_app_environment\" \"container_app_environment\" {\n name = var.container_app_environment_name\n resource_group_name = var.resource_group_name\n}\n\ndata \"azurerm_container_registry\" \"container_registry\" {\n name = var.container_registry_name\n resource_group_name = var.resource_group_name\n}\n\ndata \"azurerm_user_assigned_identity\" \"workload_user_assigned_identity\" {\n name = var.workload_managed_identity_name\n resource_group_name = var.resource_group_name\n}\n\nlocals {\n identity = {\n type = \"UserAssigned\"\n identity_ids = [data.azurerm_user_assigned_identity.workload_user_assigned_identity.id]\n }\n identity_env = {\n name = \"AZURE_CLIENT_ID\"\n secret_name = null\n value = data.azurerm_user_assigned_identity.workload_user_assigned_identity.client_id\n }\n registry = {\n server = data.azurerm_container_registry.container_registry.login_server\n identity = data.azurerm_user_assigned_identity.workload_user_assigned_identity.id\n }\n}\n\nresource \"azurerm_container_app\" \"container_app\" {\n for_each = {for app in var.container_apps: app.name => app}\n\n container_app_environment_id = data.azurerm_container_app_environment.container_app_environment.id\n name = each.key\n resource_group_name = var.resource_group_name\n revision_mode = each.value.revision_mode\n tags = each.value.tags\n\n template {\n max_replicas = each.value.template.max_replicas\n min_replicas = each.value.template.min_replicas\n revision_suffix = each.value.template.revision_suffix\n\n dynamic \"container\" {\n for_each = each.value.template.containers\n\n content {\n cpu = container.value.cpu\n image = \"${data.azurerm_container_registry.container_registry.login_server}/${container.value.image}\"\n memory = container.value.memory\n name = container.value.name\n args = container.value.args\n command = container.value.command\n\n dynamic \"env\" {\n for_each = container.value.env == null ? 
[local.identity_env] : concat(container.value.env, [local.identity_env])\n\n content {\n name = env.value.name\n secret_name = env.value.secret_name\n value = env.value.value\n }\n }\n\n dynamic \"liveness_probe\" {\n for_each = container.value.liveness_probe == null ? [] : [container.value.liveness_probe]\n\n content {\n port = liveness_probe.value.port\n transport = liveness_probe.value.transport\n failure_count_threshold = liveness_probe.value.failure_count_threshold\n host = liveness_probe.value.host\n initial_delay = liveness_probe.value.initial_delay\n interval_seconds = liveness_probe.value.interval_seconds\n path = liveness_probe.value.path\n timeout = liveness_probe.value.timeout\n\n dynamic \"header\" {\n for_each = liveness_probe.value.header == null ? [] : [liveness_probe.value.header]\n\n content {\n name = header.value.name\n value = header.value.value\n }\n }\n }\n }\n\n dynamic \"readiness_probe\" {\n for_each = container.value.readiness_probe == null ? [] : [container.value.readiness_probe]\n\n content {\n port = readiness_probe.value.port\n transport = readiness_probe.value.transport\n failure_count_threshold = readiness_probe.value.failure_count_threshold\n host = readiness_probe.value.host\n interval_seconds = readiness_probe.value.interval_seconds\n path = readiness_probe.value.path\n success_count_threshold = readiness_probe.value.success_count_threshold\n timeout = readiness_probe.value.timeout\n\n dynamic \"header\" {\n for_each = readiness_probe.value.header == null ? [] : [readiness_probe.value.header]\n\n content {\n name = header.value.name\n value = header.value.value\n }\n }\n }\n }\n\n dynamic \"startup_probe\" {\n for_each = container.value.startup_probe == null ? 
[] : [container.value.startup_probe]\n\n content {\n port = startup_probe.value.port\n transport = startup_probe.value.transport\n failure_count_threshold = startup_probe.value.failure_count_threshold\n host = startup_probe.value.host\n interval_seconds = startup_probe.value.interval_seconds\n path = startup_probe.value.path\n timeout = startup_probe.value.timeout\n\n dynamic \"header\" {\n for_each = startup_probe.value.header == null ? [] : [startup_probe.value.header]\n\n content {\n name = header.value.name\n value = header.value.name\n }\n }\n }\n }\n\n dynamic \"volume_mounts\" {\n for_each = container.value.volume_mounts == null ? [] : [container.value.volume_mounts]\n\n content {\n name = volume_mounts.value.name\n path = volume_mounts.value.path\n }\n }\n }\n }\n\n dynamic \"volume\" {\n for_each = each.value.template.volume == null ? [] : each.value.template.volume\n\n content {\n name = volume.value.name\n storage_name = volume.value.storage_name\n storage_type = volume.value.storage_type\n }\n }\n }\n\n dynamic \"dapr\" {\n for_each = each.value.dapr == null ? [] : [each.value.dapr]\n\n content {\n app_id = dapr.value.app_id\n app_port = dapr.value.app_port\n app_protocol = dapr.value.app_protocol\n }\n }\n\n dynamic \"identity\" {\n for_each = each.value.identity == null ? [local.identity] : [each.value.identity]\n\n content {\n type = identity.value.type\n identity_ids = identity.value.identity_ids\n }\n }\n\n dynamic \"ingress\" {\n for_each = each.value.ingress == null ? [] : [each.value.ingress]\n\n content {\n target_port = ingress.value.target_port\n allow_insecure_connections = ingress.value.allow_insecure_connections\n external_enabled = ingress.value.external_enabled\n transport = ingress.value.transport\n\n dynamic \"traffic_weight\" {\n for_each = ingress.value.traffic_weight == null ? 
[] : [ingress.value.traffic_weight]\n\n content {\n percentage = traffic_weight.value.percentage\n label = traffic_weight.value.label\n latest_revision = traffic_weight.value.latest_revision\n revision_suffix = traffic_weight.value.revision_suffix\n }\n }\n }\n }\n\n dynamic \"registry\" {\n for_each = each.value.registry == null ? [local.registry] : concat(each.value.registry, [local.registry])\n\n content {\n server = registry.value.server\n identity = registry.value.identity\n }\n }\n\n dynamic \"secret\" {\n for_each = nonsensitive(toset([for pair in lookup(var.container_app_secrets, each.key, []) : pair.name]))\n\n content {\n name = secret.key\n value = local.container_app_secrets[each.key][secret.key]\n }\n }\n}\n\n

 

\n

 

\n

As you can notice, the module uses the login server of the Azure Container Registry to create the fully qualified name of the container image of the current container app.

\n

 

\n

Managed identities in Azure Container Apps

\n

Each chat application makes use of a DefaultAzureCredential object to acquire a security token from Azure Active Directory and authenticate and authorize with Azure OpenAI Service (AOAI) and Azure Container Registry (ACR) using the credentials of the user-defined managed identity associated with the container app.

\n

You can use a managed identity in a running container app to authenticate and authorize with any service that supports Azure AD authentication. With managed identities:

\n\n

For more information, see Managed identities in Azure Container Apps. The workloads running in a container app can use the Azure Identity client libraries to acquire a security token from the Azure Active Directory. You can choose one of the following approaches inside your code:

\n\n

The following table provides the minimum package version required for each language's client library.

\n

 

\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n
LanguageLibraryMinimum VersionExample
.NETAzure.Identity1.9.0Link
Goazidentity1.3.0Link
Javaazure-identity1.9.0Link
JavaScript@azure/identity3.2.0Link
Pythonazure-identity1.13.0Link
\n

 

\n

NOTE: When using Azure Identity client library with Azure Container Apps, the client ID of the managed identity must be specified. When using the DefaultAzureCredential, you can explicitly specify the client ID of the container app managed identity in the AZURE_CLIENT_ID environment variable.

\n

 

\n

Simple Chat Application

\n

The Simple Chat Application is a large language model-based chatbot that allows users to submit general-purpose questions to a GPT model, which generates and streams back human-like and engaging conversational responses. The following picture shows the welcome screen of the chat application.

\n

\n


\n

You can modify the welcome screen in markdown by editing the chainlit.md file at the project's root. If you do not want a welcome screen, leave the file empty. The following picture shows what happens when a user submits a new message in the chat.

\n

\n


\n

Chainlit can render messages in markdown format as shown by the following prompt:

\n

\n


\n

Chainlit also provides classes to support the following elements:

\n

 

\n\n

 

\n

You can click the user icon on the UI to access the chat settings and choose, for example, between the light and dark theme.

\n

\n


\n

The application is built in Python. Let's take a look at the individual parts of the application code. In the following section, the Python code starts by importing the necessary packages/modules.

\n

 

\n

 

\n# Import packages\nimport os\nimport sys\nfrom openai import AsyncAzureOpenAI\nimport logging\nimport chainlit as cl\nfrom azure.identity import DefaultAzureCredential, get_bearer_token_provider\nfrom dotenv import load_dotenv\nfrom dotenv import dotenv_values\n\n# Load environment variables from .env file\nif os.path.exists(\".env\"):\n load_dotenv(override=True)\n config = dotenv_values(\".env\")\n

 

\n

 

\n

These are the libraries used by the chat application:

\n

 

\n
    \n
  1. os: This module provides a way of interacting with the operating system, enabling the code to access environment variables, file paths, etc.
  2. \n
  3. sys: This module provides access to some variables used or maintained by the interpreter and functions that interact with the interpreter.
  4. \n
  5. openai: The OpenAI Python library provides convenient access to the OpenAI API from applications written in Python. It includes a pre-defined set of classes for API resources that initialize themselves dynamically from API responses which makes it compatible with a wide range of versions of the OpenAI API. You can find usage examples for the OpenAI Python library in our API reference and the OpenAI Cookbook.
  6. \n
  7. logging: This module provides flexible logging of messages.
  8. \n
  9. chainlit as cl: This imports the Chainlit library and aliases it as cl. Chainlit is used to create the UI of the application.
  10. \n
  11. from azure.identity import DefaultAzureCredential, get_bearer_token_provider: when the openai_type property value is azure_ad, a DefaultAzureCredential object from the Azure Identity client library for Python is used to acquire security token from the Microsoft Entra ID using the credentials of the user-defined managed identity federated with the service account.
  12. \n
  13. load_dotenv and dotenv_values from dotenv: Python-dotenv reads key-value pairs from a .env file and can set them as environment variables. It helps in the development of applications following the 12-factor principles.
  14. \n
\n

 

\n

The requirements.txt file under the src folder contains the list of packages used by the chat applications. You can restore these packages in your environment using the following command:

\n
\n
pip install -r requirements.txt --upgrade
\n
\n

Next, the code reads the value of the environment variables used to initialize Azure OpenAI objects. In addition, it creates a token provider for Azure OpenAI.

\n
\n
# Read environment variables\ntemperature = float(os.environ.get(\"TEMPERATURE\", 0.9))\napi_base = os.getenv(\"AZURE_OPENAI_BASE\")\napi_key = os.getenv(\"AZURE_OPENAI_KEY\")\napi_type = os.environ.get(\"AZURE_OPENAI_TYPE\", \"azure\")\napi_version = os.environ.get(\"AZURE_OPENAI_VERSION\", \"2023-12-01-preview\")\nengine = os.getenv(\"AZURE_OPENAI_DEPLOYMENT\")\nmodel = os.getenv(\"AZURE_OPENAI_MODEL\")\nsystem_content = os.getenv(\n \"AZURE_OPENAI_SYSTEM_MESSAGE\", \"You are a helpful assistant.\"\n)\nmax_retries = int(os.getenv(\"MAX_RETRIES\", 5))\ntimeout = int(os.getenv(\"TIMEOUT\", 30))\ndebug = os.getenv(\"DEBUG\", \"False\").lower() in (\"true\", \"1\", \"t\")\n\n# Create Token Provider\ntoken_provider = get_bearer_token_provider(\n DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n)
\n
\n

Here's a brief explanation of each variable and related environment variable:

\n

 

\n
    \n
  1. temperature: A float value representing the temperature for Create chat completion method of the OpenAI API. It is fetched from the environment variables with a default value of 0.9.
  2. \n
  3. api_base: The base URL for the OpenAI API.
  4. \n
  5. api_key: The API key for the OpenAI API. The value of this variable can be null when using a user-assigned managed identity to acquire a security token to access Azure OpenAI.
  6. \n
  7. api_type: A string representing the type of the OpenAI API.
  8. \n
  9. api_version: A string representing the version of the OpenAI API.
  10. \n
  11. engine: The engine used for OpenAI API calls.
  12. \n
  13. model: The model used for OpenAI API calls.
  14. \n
  15. system_content: The content of the system message used for OpenAI API calls.
  16. \n
  17. max_retries: The maximum number of retries for OpenAI API calls.
  18. \n
  19. timeout: The timeout in seconds.
  20. \n
  21. debug: When debug is equal to true, t, or 1, the logger writes the chat completion answers.
  22. \n
\n

 

\n

In the next section, the code creates the AsyncAzureOpenAI client object used by the application to communicate with the Azure OpenAI Service instance. When the api_type is equal to azure, the code initializes the object with the API key. Otherwise, it initializes the azure_ad_token_provider property to the token provider created earlier. Then the code creates a logger.

\n
\n
# Configure OpenAI\nif api_type == \"azure\":\n openai = AsyncAzureOpenAI(\n api_version=api_version,\n api_key=api_key,\n azure_endpoint=api_base,\n max_retries=max_retries,\n timeout=timeout,\n )\nelse:\n openai = AsyncAzureOpenAI(\n api_version=api_version,\n azure_endpoint=api_base,\n azure_ad_token_provider=token_provider,\n max_retries=max_retries,\n timeout=timeout\n )\n\n# Configure a logger\nlogging.basicConfig(\n stream=sys.stdout,\n format=\"[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s\",\n level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)
\n
\n

The backoff time is calculated using the backoff_in_seconds and attempt variables. It follows the formula backoff_in_seconds * 2 ** attempt + random.uniform(0, 1). This formula increases the backoff time exponentially with each attempt and adds a random value between 0 and 1 to avoid synchronized retries.

\n

Next, the code defines a function called start_chat that is used to initialize the UI when the user connects to the application or clicks the New Chat button.

\n

 

\n

 

\n@cl.on_chat_start\nasync def start_chat():\n await cl.Avatar(\n name=\"Chatbot\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"Error\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"User\",\n url=\"https://media.architecturaldigest.com/photos/5f241de2c850b2a36b415024/master/w_1600%2Cc_limit/Luke-logo.png\",\n ).send()\n cl.user_session.set(\n \"message_history\",\n [{\"role\": \"system\", \"content\": system_content}],\n )\n\n

 

\n

 

\n

Here is a brief explanation of the function steps:

\n

 

\n\n

Finally, the application defines the method called whenever the user sends a new message in the chat.

\n
\n
@cl.on_message\nasync def on_message(message: cl.Message):\n message_history = cl.user_session.get(\"message_history\")\n message_history.append({\"role\": \"user\", \"content\": message.content})\n logger.info(\"Question: [%s]\", message.content)\n\n # Create the Chainlit response message\n msg = cl.Message(content=\"\")\n\n async for stream_resp in await openai.chat.completions.create(\n model=model,\n messages=message_history,\n temperature=temperature,\n stream=True,\n ):\n if stream_resp and len(stream_resp.choices) > 0:\n token = stream_resp.choices[0].delta.content or \"\"\n await msg.stream_token(token)\n\n if debug:\n logger.info(\"Answer: [%s]\", msg.content)\n\n message_history.append({\"role\": \"assistant\", \"content\": msg.content})\n await msg.send()
\n
\n

Here is a detailed explanation of the function steps:

\n

 

\n\n

Below, you can read the complete code of the application.

\n
\n
# Import packages\nimport os\nimport sys\nfrom openai import AsyncAzureOpenAI\nimport logging\nimport chainlit as cl\nfrom azure.identity import DefaultAzureCredential, get_bearer_token_provider\nfrom dotenv import load_dotenv\nfrom dotenv import dotenv_values\n\n# Load environment variables from .env file\nif os.path.exists(\".env\"):\n load_dotenv(override=True)\n config = dotenv_values(\".env\")\n\n# Read environment variables\ntemperature = float(os.environ.get(\"TEMPERATURE\", 0.9))\napi_base = os.getenv(\"AZURE_OPENAI_BASE\")\napi_key = os.getenv(\"AZURE_OPENAI_KEY\")\napi_type = os.environ.get(\"AZURE_OPENAI_TYPE\", \"azure\")\napi_version = os.environ.get(\"AZURE_OPENAI_VERSION\", \"2023-12-01-preview\")\nengine = os.getenv(\"AZURE_OPENAI_DEPLOYMENT\")\nmodel = os.getenv(\"AZURE_OPENAI_MODEL\")\nsystem_content = os.getenv(\n \"AZURE_OPENAI_SYSTEM_MESSAGE\", \"You are a helpful assistant.\"\n)\nmax_retries = int(os.getenv(\"MAX_RETRIES\", 5))\ntimeout = int(os.getenv(\"TIMEOUT\", 30))\ndebug = os.getenv(\"DEBUG\", \"False\").lower() in (\"true\", \"1\", \"t\")\n\n# Create Token Provider\ntoken_provider = get_bearer_token_provider(\n DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n)\n\n# Configure OpenAI\nif api_type == \"azure\":\n openai = AsyncAzureOpenAI(\n api_version=api_version,\n api_key=api_key,\n azure_endpoint=api_base,\n max_retries=max_retries,\n timeout=timeout,\n )\nelse:\n openai = AsyncAzureOpenAI(\n api_version=api_version,\n azure_endpoint=api_base,\n azure_ad_token_provider=token_provider,\n max_retries=max_retries,\n timeout=timeout,\n )\n\n# Configure a logger\nlogging.basicConfig(\n stream=sys.stdout,\n format=\"[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s\",\n level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)\n\n\n@cl.on_chat_start\nasync def start_chat():\n await cl.Avatar(\n name=\"Chatbot\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n 
await cl.Avatar(\n name=\"Error\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"You\",\n url=\"https://media.architecturaldigest.com/photos/5f241de2c850b2a36b415024/master/w_1600%2Cc_limit/Luke-logo.png\",\n ).send()\n cl.user_session.set(\n \"message_history\",\n [{\"role\": \"system\", \"content\": system_content}],\n )\n\n\n@cl.on_message\nasync def on_message(message: cl.Message):\n message_history = cl.user_session.get(\"message_history\")\n message_history.append({\"role\": \"user\", \"content\": message.content})\n logger.info(\"Question: [%s]\", message.content)\n\n # Create the Chainlit response message\n msg = cl.Message(content=\"\")\n\n async for stream_resp in await openai.chat.completions.create(\n model=model,\n messages=message_history,\n temperature=temperature,\n stream=True,\n ):\n if stream_resp and len(stream_resp.choices) > 0:\n token = stream_resp.choices[0].delta.content or \"\"\n await msg.stream_token(token)\n\n if debug:\n logger.info(\"Answer: [%s]\", msg.content)\n\n message_history.append({\"role\": \"assistant\", \"content\": msg.content})\n await msg.send()
\n
\n

You can run the application locally using the following command. The `-w` flag indicates auto-reload whenever we make changes live in our application code.

\n
\n
chainlit run app.py -w
\n
\n

Documents QA Chat

\n

The Documents QA Chat application allows users to submit up to 10 .pdf and .docx documents. The application processes the uploaded documents to create vector embeddings. These embeddings are stored in ChromaDB vector database for efficient retrieval. Users can pose questions about the uploaded documents and view the Chain of Thought, enabling easy exploration of the reasoning process. The completion message contains links to the text chunks in the documents that were used as a source for the response. The following picture shows the chat application interface. As you can see, you can click the Browse button and choose up to 10 .pdf and .docx documents to upload. Alternatively, you can just drag and drop the files over the control area.

\n

\n

 

\n

After uploading the documents, the application creates and stores embeddings to ChromaDB vector database. During this phase, the UI shows a message Processing <file-1>, <file-2>..., as shown in the following picture:

\n

\n

 

\n

When the code finishes creating embeddings, the UI is ready to receive the user's questions:

\n

\n

 

\n

As your chat application grows in complexity, understanding the individual steps for generating a specific answer can become challenging. To solve this issue, Chainlit allows you to easily explore the reasoning process right from the user interface using the Chain of Thought. If you are using the LangChain integration, every intermediary step is automatically sent and displayed in the Chainlit UI just clicking and expanding the steps, as shown in the following picture:

\n

\n

 

\n

To see the text chunks that were used by the large language model to originate the response, you can click the sources links, as shown in the following picture:

\n

\n

 

\n

In the Chain of Thought, below the step used to invoke the OpenAI chat completion API, you can find an

\n

 Inspect in prompt playground  icon. Clicking on it opens the Prompt Playground dialog which allows you to modify and iterate on the prompt as needed.

\n

\n

 

\n

As shown in the following picture, you can click and edit the value of the highlighted variables in the user prompt:

\n

\n

 

\n

You can then click and edit the user question.

\n

\n

 

\n

Then, you can click the submit button to test the effect of your changes, as shown in the following picture.

\n

\n

 

\n

Let's take a look at the individual parts of the application code. In the following section, the Python code starts by importing the necessary packages/modules.

\n
\n
# Import packages\nimport os\nimport io\nimport sys\nimport logging\nimport chainlit as cl\nfrom chainlit.playground.config import AzureChatOpenAI\nfrom pypdf import PdfReader\nfrom docx import Document\nfrom azure.identity import DefaultAzureCredential, get_bearer_token_provider\nfrom dotenv import load_dotenv\nfrom dotenv import dotenv_values\nfrom langchain.embeddings import AzureOpenAIEmbeddings\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\nfrom langchain.vectorstores.chroma import Chroma\nfrom langchain.chains import RetrievalQAWithSourcesChain\nfrom langchain.chat_models import AzureChatOpenAI\nfrom langchain.prompts.chat import (\n ChatPromptTemplate,\n SystemMessagePromptTemplate,\n HumanMessagePromptTemplate,\n)\n\n# Load environment variables from .env file\nif os.path.exists(\".env\"):\n load_dotenv(override=True)\n config = dotenv_values(\".env\")
\n
\n

These are the libraries used by the chat application:

\n

 

\n
    \n
  1. os: This module provides a way of interacting with the operating system, enabling the code to access environment variables, file paths, etc.
  2. \n
  3. sys: This module provides access to some variables used or maintained by the interpreter and functions that interact with the interpreter.
  4. \n
  5. time: This module provides various time-related functions for time manipulation and measurement.
  6. \n
  7. openai: the OpenAI Python library provides convenient access to the OpenAI API from applications written in the Python language. It includes a pre-defined set of classes for API resources that initialize themselves dynamically from API responses, which makes it compatible with a wide range of versions of the OpenAI API. You can find usage examples for the OpenAI Python library in our API reference and the OpenAI Cookbook.
  8. \n
  9. logging: This module provides flexible logging of messages.
  10. \n
  11. chainlit as cl: This imports the Chainlit library and aliases it as cl. Chainlit is used to create the UI of the application.
  12. \n
  13. AzureChatOpenAI from chainlit.playground.config: you need to import AzureChatOpenAI from chainlit.playground.config to use the Chainlit Playground.
  14. \n
  15. DefaultAzureCredential from azure.identity: when the openai_type property value is azure_ad, a DefaultAzureCredential object from the Azure Identity client library for Python - version 1.13.0 is used to acquire security token from the Microsoft Entra ID using the credentials of the user-defined managed identity, whose client ID is defined in the AZURE_CLIENT_ID environment variable.
  16. \n
  17. load_dotenv and dotenv_values from dotenv: Python-dotenv reads key-value pairs from a .env file and can set them as environment variables. It helps in the development of applications following the 12-factor principles.
  18. \n
  19. langchain: Large language models (LLMs) are emerging as a transformative technology, enabling developers to build applications that they previously could not. However, using these LLMs in isolation is often insufficient for creating a truly powerful app - the real power comes when you can combine them with other sources of computation or knowledge. LangChain library aims to assist in the development of those types of applications.
  20. \n
\n

The requirements.txt file under the src folder contains the list of packages used by the chat applications. You can restore these packages in your environment using the following command:

\n
\n
pip install -r requirements.txt --upgrade
\n
\n

Next, the code reads environment variables and configures the OpenAI settings.

\n
\n
# Read environment variables\ntemperature = float(os.environ.get(\"TEMPERATURE\", 0.9))\napi_base = os.getenv(\"AZURE_OPENAI_BASE\")\napi_key = os.getenv(\"AZURE_OPENAI_KEY\")\napi_type = os.environ.get(\"AZURE_OPENAI_TYPE\", \"azure\")\napi_version = os.environ.get(\"AZURE_OPENAI_VERSION\", \"2023-12-01-preview\")\nchat_completion_deployment = os.getenv(\"AZURE_OPENAI_DEPLOYMENT\")\nembeddings_deployment = os.getenv(\"AZURE_OPENAI_ADA_DEPLOYMENT\")\nmodel = os.getenv(\"AZURE_OPENAI_MODEL\")\nmax_size_mb = int(os.getenv(\"CHAINLIT_MAX_SIZE_MB\", 100))\nmax_files = int(os.getenv(\"CHAINLIT_MAX_FILES\", 10))\ntext_splitter_chunk_size = int(os.getenv(\"TEXT_SPLITTER_CHUNK_SIZE\", 1000))\ntext_splitter_chunk_overlap = int(os.getenv(\"TEXT_SPLITTER_CHUNK_OVERLAP\", 10))\nembeddings_chunk_size = int(os.getenv(\"EMBEDDINGS_CHUNK_SIZE\", 16))\nmax_retries = int(os.getenv(\"MAX_RETRIES\", 5))\nretry_min_seconds = int(os.getenv(\"RETRY_MIN_SECONDS\", 1))\nretry_max_seconds = int(os.getenv(\"RETRY_MAX_SECONDS\", 5))\ntimeout = int(os.getenv(\"TIMEOUT\", 30))\ndebug = os.getenv(\"DEBUG\", \"False\").lower() in (\"true\", \"1\", \"t\")\n\n# Configure system prompt\nsystem_template = \"\"\"Use the following pieces of context to answer the users question.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\nALWAYS return a \"SOURCES\" part in your answer.\nThe \"SOURCES\" part should be a reference to the source of the document from which you got your answer.\n\nExample of your response should be:\n\n\\`\\`\\`\nThe answer is foo\nSOURCES: xyz\n\\`\\`\\`\n\nBegin!\n----------------\n{summaries}\"\"\"\nmessages = [\n SystemMessagePromptTemplate.from_template(system_template),\n HumanMessagePromptTemplate.from_template(\"{question}\"),\n]\nprompt = ChatPromptTemplate.from_messages(messages)\nchain_type_kwargs = {\"prompt\": prompt}\n\n# Configure a logger\nlogging.basicConfig(\n stream=sys.stdout,\n format=\"[%(asctime)s] 
{%(filename)s:%(lineno)d} %(levelname)s - %(message)s\",\n level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)\n\n# Create Token Provider\nif api_type == \"azure_ad\":\n token_provider = get_bearer_token_provider(\n DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n )\n\n# Setting the environment variables for the playground\nif api_type == \"azure\":\n os.environ[\"AZURE_OPENAI_API_KEY\"] = api_key\nos.environ[\"AZURE_OPENAI_API_VERSION\"] = api_version\nos.environ[\"AZURE_OPENAI_ENDPOINT\"] = api_base\nos.environ[\"AZURE_OPENAI_DEPLOYMENT_NAME\"] = chat_completion_deployment
\n
\n

Here's a brief explanation of each variable and related environment variable:

\n

 

\n
    \n
  1. temperature: A float value representing the temperature for Create chat completion method of the OpenAI API. It is fetched from the environment variables with a default value of 0.9.
  2. \n
  3. api_base: The base URL for the OpenAI API.
  4. \n
  5. api_key: The API key for the OpenAI API. The value of this variable can be null when using a user-assigned managed identity to acquire a security token to access Azure OpenAI.
  6. \n
  7. api_type: A string representing the type of the OpenAI API.
  8. \n
  9. api_version: A string representing the version of the OpenAI API.
  10. \n
  11. chat_completion_deployment: the name of the Azure OpenAI GPT model for chat completion.
  12. \n
  13. embeddings_deployment: the name of the Azure OpenAI deployment for embeddings.
  14. \n
  15. model: The model used for chat completion calls (e.g., gpt-35-turbo-16k).
  16. \n
  17. max_size_mb: the maximum size for the uploaded documents.
  18. \n
  19. max_files: the maximum number of documents that can be uploaded.
  20. \n
  21. text_splitter_chunk_size: the maximum chunk size used by the RecursiveCharacterTextSplitter object.
  22. \n
  23. text_splitter_chunk_overlap: the maximum chunk overlap used by the RecursiveCharacterTextSplitter object.
  24. \n
  25. embeddings_chunk_size: the maximum chunk size used by the OpenAIEmbeddings object.
  26. \n
  27. max_retries: The maximum number of retries for OpenAI API calls.
  28. \n
  29. retry_min_seconds: the minimum number of seconds before a retry.
  30. \n
  31. retry_max_seconds: the maximum number of seconds before a retry.
  32. \n
  33. timeout: The timeout in seconds.
  34. \n
  35. system_template: The content of the system message used for OpenAI API calls.
  36. \n
  37. debug: When debug is equal to true, t, or 1, the logger switches to verbose mode.
  38. \n
\n

 

\n

Next, the code defines a function called start_chat that is used to initialize the UI when the user connects to the application or clicks the New Chat button.

\n
\n
@cl.on_chat_start\nasync def start_chat():\n # Sending Avatars for Chat Participants\n await cl.Avatar(\n name=\"Chatbot\",\n url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"Error\",\n url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"You\",\n url=\"https://media.architecturaldigest.com/photos/5f241de2c850b2a36b415024/master/w_1600%2Cc_limit/Luke-logo.png\"\n ).send()
\n
\n

Here is a brief explanation of the function steps:

\n

 

\n\n

 

\n

The following code is used to initialize the large language model (LLM) chain used to reply to questions on the content of the uploaded documents.

\n
\n
# Initialize the file list to None\n files = None\n\n # Wait for the user to upload a file\n while files == None:\n files = await cl.AskFileMessage(\n content=f\"Please upload up to {max_files} `.pdf` or `.docx` files to begin.\",\n accept=[\n \"application/pdf\",\n \"application/vnd.openxmlformats-officedocument.wordprocessingml.document\",\n ],\n max_size_mb=max_size_mb,\n max_files=max_files,\n timeout=86400,\n raise_on_timeout=False,\n ).send()
\n
\n

The AskFileMessage API call prompts the user to upload up to a specified number of .pdf or .docx files. The uploaded files are stored in the files variable. The process continues until the user uploads files. For more information, see AskFileMessage.

\n

The following code processes each uploaded file by extracting its content.

\n

 

\n
    \n
  1. The text content of each file is stored in the list all_texts.
  2. \n
  3. This code performs text processing and chunking. It checks the file extension to read the file content accordingly, depending on if it's a .pdf or a .docx document.
  4. \n
  5. The text content is split into smaller chunks using the RecursiveCharacterTextSplitter LangChain object.
  6. \n
  7. Metadata is created for each chunk and stored in the metadatas list.
  8. \n
\n
\n
# Create a message to inform the user that the files are being processed\n content = \"\"\n if len(files) == 1:\n content = f\"Processing `{files[0].name}`...\"\n else:\n files_names = [f\"`{f.name}`\" for f in files]\n content = f\"Processing {', '.join(files_names)}...\"\n logger.info(content)\n msg = cl.Message(content=content, author=\"Chatbot\")\n await msg.send()\n\n # Create a list to store the texts of each file\n all_texts = []\n\n # Process each file uplodaded by the user\n for file in files:\n # Read file contents\n with open(file.path, \"rb\") as uploaded_file:\n file_contents = uploaded_file.read()\n\n logger.info(\"[%d] bytes were read from %s\", len(file_contents), file.path)\n\n # Create an in-memory buffer from the file content\n bytes = io.BytesIO(file_contents)\n\n # Get file extension\n extension = file.name.split(\".\")[-1]\n\n # Initialize the text variable\n text = \"\"\n\n # Read the file\n if extension == \"pdf\":\n reader = PdfReader(bytes)\n for i in range(len(reader.pages)):\n text += reader.pages[i].extract_text()\n if debug:\n logger.info(\"[%s] read from %s\", text, file.path)\n elif extension == \"docx\":\n doc = Document(bytes)\n paragraph_list = []\n for paragraph in doc.paragraphs:\n paragraph_list.append(paragraph.text)\n if debug:\n logger.info(\"[%s] read from %s\", paragraph.text, file.path)\n text = \"\\n\".join(paragraph_list)\n\n # Split the text into chunks\n text_splitter = RecursiveCharacterTextSplitter(\n chunk_size=text_splitter_chunk_size,\n chunk_overlap=text_splitter_chunk_overlap,\n )\n texts = text_splitter.split_text(text)\n\n # Add the chunks and metadata to the list\n all_texts.extend(texts)\n\n # Create a metadata for each chunk\n metadatas = [{\"source\": f\"{i}-pl\"} for i in range(len(all_texts))]
\n
\n

The next piece of code performs the following steps:

\n

 

\n
    \n
  1. It creates an AzureOpenAIEmbeddings configured to use the embeddings model in the Azure OpenAI Service to create embeddings from text chunks.
  2. \n
  3. It creates a ChromaDB vector database using the OpenAIEmbeddings object, the text chunks list, and the metadata list.
  4. \n
  5. It creates an AzureChatOpenAI LangChain object based on the GPT model hosted in Azure OpenAI Service.
  6. \n
  7. It creates a chain using the RetrievalQAWithSourcesChain.from_chain_type API call, which uses the previously created LLM and the Chroma vector store as a retriever.
  8. \n
  9. It stores the metadata and text chunks in the user session using the cl.user_session.set() API call.
  10. \n
  11. It creates a message to inform the user that the files are ready for queries, and finally returns the chain.
  12. \n
  13. The cl.user_session.set(\"chain\", chain) call stores the LLM chain in the user_session dictionary for later use.
  14. \n
\n

The next section creates the LangChain LLM chain.

\n
\n
# Create a Chroma vector store\n if api_type == \"azure\":\n embeddings = AzureOpenAIEmbeddings(\n openai_api_version=api_version,\n openai_api_type=api_type,\n openai_api_key=api_key,\n azure_endpoint=api_base,\n azure_deployment=embeddings_deployment,\n max_retries=max_retries,\n retry_min_seconds=retry_min_seconds,\n retry_max_seconds=retry_max_seconds,\n chunk_size=embeddings_chunk_size,\n timeout=timeout,\n )\n else:\n embeddings = AzureOpenAIEmbeddings(\n openai_api_version=api_version,\n openai_api_type=api_type,\n azure_endpoint=api_base,\n azure_ad_token_provider=token_provider,\n azure_deployment=embeddings_deployment,\n max_retries=max_retries,\n retry_min_seconds=retry_min_seconds,\n retry_max_seconds=retry_max_seconds,\n chunk_size=embeddings_chunk_size,\n timeout=timeout,\n )\n\n # Create a Chroma vector store\n db = await cl.make_async(Chroma.from_texts)(\n all_texts, embeddings, metadatas=metadatas\n )\n\n # Create an AzureChatOpenAI llm\n if api_type == \"azure\":\n llm = AzureChatOpenAI(\n openai_api_type=api_type,\n openai_api_version=api_version,\n openai_api_key=api_key,\n azure_endpoint=api_base,\n temperature=temperature,\n azure_deployment=chat_completion_deployment,\n streaming=True,\n max_retries=max_retries,\n timeout=timeout,\n )\n else:\n llm = AzureChatOpenAI(\n openai_api_type=api_type,\n openai_api_version=api_version,\n azure_endpoint=api_base,\n api_key=api_key,\n temperature=temperature,\n azure_deployment=chat_completion_deployment,\n azure_ad_token_provider=token_provider,\n streaming=True,\n max_retries=max_retries,\n timeout=timeout,\n )\n\n # Create a chain that uses the Chroma vector store\n chain = RetrievalQAWithSourcesChain.from_chain_type(\n llm=llm,\n chain_type=\"stuff\",\n retriever=db.as_retriever(),\n return_source_documents=True,\n chain_type_kwargs=chain_type_kwargs,\n )\n\n # Save the metadata and texts in the user session\n cl.user_session.set(\"metadatas\", metadatas)\n cl.user_session.set(\"texts\", 
all_texts)\n\n # Create a message to inform the user that the files are ready for queries\n content = \"\"\n if len(files) == 1:\n content = f\"`{files[0].name}` processed. You can now ask questions!\"\n logger.info(content)\n else:\n files_names = [f\"`{f.name}`\" for f in files]\n content = f\"{', '.join(files_names)} processed. You can now ask questions.\"\n logger.info(content)\n msg.content = content\n msg.author = \"Chatbot\"\n await msg.update()\n\n # Store the chain in the user session\n cl.user_session.set(\"chain\", chain)
\n
\n

The following code handles the communication with the OpenAI API and incorporates retrying logic in case the API calls fail due to specific errors.

\n

 

\n\n
\n
@cl.on_message\nasync def main(message: cl.Message):\n # Retrieve the chain from the user session\n chain = cl.user_session.get(\"chain\")\n\n # Create a callback handler\n cb = cl.AsyncLangchainCallbackHandler()\n\n # Get the response from the chain\n response = await chain.acall(message.content, callbacks=[cb])\n logger.info(\"Question: [%s]\", message.content)
\n
\n

The code below extracts the answers and sources from the API response and formats them to be sent as a message.

\n\n

 

\n

 

\n # Get the answer and sources from the response\n answer = response[\"answer\"]\n sources = response[\"sources\"].strip()\n source_elements = []\n\n if debug:\n logger.info(\"Answer: [%s]\", answer)\n\n # Get the metadata and texts from the user session\n metadatas = cl.user_session.get(\"metadatas\")\n all_sources = [m[\"source\"] for m in metadatas]\n texts = cl.user_session.get(\"texts\")\n\n if sources:\n found_sources = []\n\n # Add the sources to the message\n for source in sources.split(\",\"):\n source_name = source.strip().replace(\".\", \"\")\n # Get the index of the source\n try:\n index = all_sources.index(source_name)\n except ValueError:\n continue\n text = texts[index]\n found_sources.append(source_name)\n # Create the text element referenced in the message\n source_elements.append(cl.Text(content=text, name=source_name))\n\n if found_sources:\n answer += f\"\\nSources: {', '.join(found_sources)}\"\n else:\n answer += \"\\nNo sources found\"\n\n await cl.Message(content=answer, elements=source_elements).send()\n\n # Setting the AZURE_OPENAI_API_KEY environment variable for the playground\n if api_type == \"azure_ad\":\n os.environ[\"AZURE_OPENAI_API_KEY\"] = token_provider()\n\n

 

\n

 

\n

 

\n

 

\n

Below, you can read the complete code of the application.

\n
\n
# Import packages\nimport os\nimport io\nimport sys\nimport logging\nimport chainlit as cl\nfrom chainlit.playground.config import AzureChatOpenAI\nfrom pypdf import PdfReader\nfrom docx import Document\nfrom azure.identity import DefaultAzureCredential, get_bearer_token_provider\nfrom dotenv import load_dotenv\nfrom dotenv import dotenv_values\nfrom langchain.embeddings import AzureOpenAIEmbeddings\nfrom langchain.text_splitter import RecursiveCharacterTextSplitter\nfrom langchain.vectorstores.chroma import Chroma\nfrom langchain.chains import RetrievalQAWithSourcesChain\nfrom langchain.chat_models import AzureChatOpenAI\nfrom langchain.prompts.chat import (\n ChatPromptTemplate,\n SystemMessagePromptTemplate,\n HumanMessagePromptTemplate,\n)\n\n# Load environment variables from .env file\nif os.path.exists(\".env\"):\n load_dotenv(override=True)\n config = dotenv_values(\".env\")\n\n# Read environment variables\ntemperature = float(os.environ.get(\"TEMPERATURE\", 0.9))\napi_base = os.getenv(\"AZURE_OPENAI_BASE\")\napi_key = os.getenv(\"AZURE_OPENAI_KEY\")\napi_type = os.environ.get(\"AZURE_OPENAI_TYPE\", \"azure\")\napi_version = os.environ.get(\"AZURE_OPENAI_VERSION\", \"2023-12-01-preview\")\nchat_completion_deployment = os.getenv(\"AZURE_OPENAI_DEPLOYMENT\")\nembeddings_deployment = os.getenv(\"AZURE_OPENAI_ADA_DEPLOYMENT\")\nmodel = os.getenv(\"AZURE_OPENAI_MODEL\")\nmax_size_mb = int(os.getenv(\"CHAINLIT_MAX_SIZE_MB\", 100))\nmax_files = int(os.getenv(\"CHAINLIT_MAX_FILES\", 10))\nmax_files = int(os.getenv(\"CHAINLIT_MAX_FILES\", 10))\ntext_splitter_chunk_size = int(os.getenv(\"TEXT_SPLITTER_CHUNK_SIZE\", 1000))\ntext_splitter_chunk_overlap = int(os.getenv(\"TEXT_SPLITTER_CHUNK_OVERLAP\", 10))\nembeddings_chunk_size = int(os.getenv(\"EMBEDDINGS_CHUNK_SIZE\", 16))\nmax_retries = int(os.getenv(\"MAX_RETRIES\", 5))\nretry_min_seconds = int(os.getenv(\"RETRY_MIN_SECONDS\", 1))\nretry_max_seconds = int(os.getenv(\"RETRY_MAX_SECONDS\", 5))\ntimeout = 
int(os.getenv(\"TIMEOUT\", 30))\ndebug = os.getenv(\"DEBUG\", \"False\").lower() in (\"true\", \"1\", \"t\")\n\n# Configure system prompt\nsystem_template = \"\"\"Use the following pieces of context to answer the users question.\nIf you don't know the answer, just say that you don't know, don't try to make up an answer.\nALWAYS return a \"SOURCES\" part in your answer.\nThe \"SOURCES\" part should be a reference to the source of the document from which you got your answer.\n\nExample of your response should be:\n\n```\nThe answer is foo\nSOURCES: xyz\n```\n\nBegin!\n----------------\n{summaries}\"\"\"\nmessages = [\n SystemMessagePromptTemplate.from_template(system_template),\n HumanMessagePromptTemplate.from_template(\"{question}\"),\n]\nprompt = ChatPromptTemplate.from_messages(messages)\nchain_type_kwargs = {\"prompt\": prompt}\n\n# Configure a logger\nlogging.basicConfig(\n stream=sys.stdout,\n format=\"[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s\",\n level=logging.INFO,\n)\nlogger = logging.getLogger(__name__)\n\n# Create Token Provider\nif api_type == \"azure_ad\":\n token_provider = get_bearer_token_provider(\n DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\"\n )\n\n# Setting the environment variables for the playground\nif api_type == \"azure\":\n os.environ[\"AZURE_OPENAI_API_KEY\"] = api_key\nos.environ[\"AZURE_OPENAI_API_VERSION\"] = api_version\nos.environ[\"AZURE_OPENAI_ENDPOINT\"] = api_base\nos.environ[\"AZURE_OPENAI_DEPLOYMENT_NAME\"] = chat_completion_deployment\n\n\n@cl.on_chat_start\nasync def start():\n await cl.Avatar(\n name=\"Chatbot\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"Error\", url=\"https://cdn-icons-png.flaticon.com/512/8649/8649595.png\"\n ).send()\n await cl.Avatar(\n name=\"You\",\n url=\"https://media.architecturaldigest.com/photos/5f241de2c850b2a36b415024/master/w_1600%2Cc_limit/Luke-logo.png\",\n ).send()\n\n # 
Initialize the file list to None\n files = None\n\n # Wait for the user to upload a file\n while files == None:\n files = await cl.AskFileMessage(\n content=f\"Please upload up to {max_files} `.pdf` or `.docx` files to begin.\",\n accept=[\n \"application/pdf\",\n \"application/vnd.openxmlformats-officedocument.wordprocessingml.document\",\n ],\n max_size_mb=max_size_mb,\n max_files=max_files,\n timeout=86400,\n raise_on_timeout=False,\n ).send()\n\n # Create a message to inform the user that the files are being processed\n content = \"\"\n if len(files) == 1:\n content = f\"Processing `{files[0].name}`...\"\n else:\n files_names = [f\"`{f.name}`\" for f in files]\n content = f\"Processing {', '.join(files_names)}...\"\n logger.info(content)\n msg = cl.Message(content=content, author=\"Chatbot\")\n await msg.send()\n\n # Create a list to store the texts of each file\n all_texts = []\n\n # Process each file uplodaded by the user\n for file in files:\n # Read file contents\n with open(file.path, \"rb\") as uploaded_file:\n file_contents = uploaded_file.read()\n\n logger.info(\"[%d] bytes were read from %s\", len(file_contents), file.path)\n\n # Create an in-memory buffer from the file content\n bytes = io.BytesIO(file_contents)\n\n # Get file extension\n extension = file.name.split(\".\")[-1]\n\n # Initialize the text variable\n text = \"\"\n\n # Read the file\n if extension == \"pdf\":\n reader = PdfReader(bytes)\n for i in range(len(reader.pages)):\n text += reader.pages[i].extract_text()\n if debug:\n logger.info(\"[%s] read from %s\", text, file.path)\n elif extension == \"docx\":\n doc = Document(bytes)\n paragraph_list = []\n for paragraph in doc.paragraphs:\n paragraph_list.append(paragraph.text)\n if debug:\n logger.info(\"[%s] read from %s\", paragraph.text, file.path)\n text = \"\\n\".join(paragraph_list)\n\n # Split the text into chunks\n text_splitter = RecursiveCharacterTextSplitter(\n chunk_size=text_splitter_chunk_size,\n 
chunk_overlap=text_splitter_chunk_overlap,\n )\n texts = text_splitter.split_text(text)\n\n # Add the chunks and metadata to the list\n all_texts.extend(texts)\n\n # Create a metadata for each chunk\n metadatas = [{\"source\": f\"{i}-pl\"} for i in range(len(all_texts))]\n\n # Create a Chroma vector store\n if api_type == \"azure\":\n embeddings = AzureOpenAIEmbeddings(\n openai_api_version=api_version,\n openai_api_type=api_type,\n openai_api_key=api_key,\n azure_endpoint=api_base,\n azure_deployment=embeddings_deployment,\n max_retries=max_retries,\n retry_min_seconds=retry_min_seconds,\n retry_max_seconds=retry_max_seconds,\n chunk_size=embeddings_chunk_size,\n timeout=timeout,\n )\n else:\n embeddings = AzureOpenAIEmbeddings(\n openai_api_version=api_version,\n openai_api_type=api_type,\n azure_endpoint=api_base,\n azure_ad_token_provider=token_provider,\n azure_deployment=embeddings_deployment,\n max_retries=max_retries,\n retry_min_seconds=retry_min_seconds,\n retry_max_seconds=retry_max_seconds,\n chunk_size=embeddings_chunk_size,\n timeout=timeout,\n )\n\n # Create a Chroma vector store\n db = await cl.make_async(Chroma.from_texts)(\n all_texts, embeddings, metadatas=metadatas\n )\n\n # Create an AzureChatOpenAI llm\n if api_type == \"azure\":\n llm = AzureChatOpenAI(\n openai_api_type=api_type,\n openai_api_version=api_version,\n openai_api_key=api_key,\n azure_endpoint=api_base,\n temperature=temperature,\n azure_deployment=chat_completion_deployment,\n streaming=True,\n max_retries=max_retries,\n timeout=timeout,\n )\n else:\n llm = AzureChatOpenAI(\n openai_api_type=api_type,\n openai_api_version=api_version,\n azure_endpoint=api_base,\n api_key=api_key,\n temperature=temperature,\n azure_deployment=chat_completion_deployment,\n azure_ad_token_provider=token_provider,\n streaming=True,\n max_retries=max_retries,\n timeout=timeout,\n )\n\n # Create a chain that uses the Chroma vector store\n chain = RetrievalQAWithSourcesChain.from_chain_type(\n 
llm=llm,\n chain_type=\"stuff\",\n retriever=db.as_retriever(),\n return_source_documents=True,\n chain_type_kwargs=chain_type_kwargs,\n )\n\n # Save the metadata and texts in the user session\n cl.user_session.set(\"metadatas\", metadatas)\n cl.user_session.set(\"texts\", all_texts)\n\n # Create a message to inform the user that the files are ready for queries\n content = \"\"\n if len(files) == 1:\n content = f\"`{files[0].name}` processed. You can now ask questions!\"\n logger.info(content)\n else:\n files_names = [f\"`{f.name}`\" for f in files]\n content = f\"{', '.join(files_names)} processed. You can now ask questions.\"\n logger.info(content)\n msg.content = content\n msg.author = \"Chatbot\"\n await msg.update()\n\n # Store the chain in the user session\n cl.user_session.set(\"chain\", chain)\n\n\n@cl.on_message\nasync def main(message: cl.Message):\n # Retrieve the chain from the user session\n chain = cl.user_session.get(\"chain\")\n\n # Create a callback handler\n cb = cl.AsyncLangchainCallbackHandler()\n\n # Get the response from the chain\n response = await chain.acall(message.content, callbacks=[cb])\n logger.info(\"Question: [%s]\", message.content)\n\n # Get the answer and sources from the response\n answer = response[\"answer\"]\n sources = response[\"sources\"].strip()\n source_elements = []\n\n if debug:\n logger.info(\"Answer: [%s]\", answer)\n\n # Get the metadata and texts from the user session\n metadatas = cl.user_session.get(\"metadatas\")\n all_sources = [m[\"source\"] for m in metadatas]\n texts = cl.user_session.get(\"texts\")\n\n if sources:\n found_sources = []\n\n # Add the sources to the message\n for source in sources.split(\",\"):\n source_name = source.strip().replace(\".\", \"\")\n # Get the index of the source\n try:\n index = all_sources.index(source_name)\n except ValueError:\n continue\n text = texts[index]\n found_sources.append(source_name)\n # Create the text element referenced in the message\n 
source_elements.append(cl.Text(content=text, name=source_name))\n\n if found_sources:\n answer += f\"\\nSources: {', '.join(found_sources)}\"\n else:\n answer += \"\\nNo sources found\"\n\n await cl.Message(content=answer, elements=source_elements).send()\n\n # Setting the AZURE_OPENAI_API_KEY environment variable for the playground\n if api_type == \"azure_ad\":\n os.environ[\"AZURE_OPENAI_API_KEY\"] = token_provider()
\n
\n

You can run the application locally using the following command. The -w flag` indicates auto-reload whenever we make changes live in our application code.

\n
\n
chainlit run app.py -w
\n
\n

Build Docker Images

\n

You can use the  src/01-build-docker-images.sh Bash script to build the Docker container image for each container app.

\n

 

\n

 

\n#!/bin/bash\n\n# Variables\nsource ./00-variables.sh\n\n# Use a for loop to build the docker images using the array index\nfor index in ${!images[@]}; do\n # Build the docker image\n docker build -t ${images[$index]}:$tag -f Dockerfile --build-arg FILENAME=${filenames[$index]} --build-arg PORT=$port .\ndone\n

 

\n

 

\n

 

\n

 

\n

Before running any script in the src folder, make sure to customize the value of the variables inside the 00-variables.sh file located in the same folder. This file is embedded in all the scripts and contains the following variables:

\n

 

\n

 

\n# Variables\n\n# Azure Container Registry\nprefix=\"Blue\"\nacrName=\"${prefix}Registry\"\nacrResourceGrougName=\"${prefix}RG\"\nlocation=\"EastUS\"\n\n# Python Files\ndocAppFile=\"doc.py\"\nchatAppFile=\"chat.py\"\n\n# Docker Images\ndocImageName=\"doc\"\nchatImageName=\"chat\"\ntag=\"v1\"\nport=\"8000\"\n\n# Arrays\nimages=($docImageName $chatImageName)\nfilenames=($docAppFile $chatAppFile)\n

 

\n

 

\n

The Dockerfile under the src folder is parametric and can be used to build the container images for both chat applications.

\n
\n
# app/Dockerfile\n\n# # Stage 1 - Install build dependencies\n\n# A Dockerfile must start with a FROM instruction that sets the base image for the container.\n# The Python images come in many flavors, each designed for a specific use case.\n# The python:3.11-slim image is a good base image for most applications.\n# It is a minimal image built on top of Debian Linux and includes only the necessary packages to run Python.\n# The slim image is a good choice because it is small and contains only the packages needed to run Python.\n# For more information, see: \n# * https://hub.docker.com/_/python \n# * https://docs.streamlit.io/knowledge-base/tutorials/deploy/docker\nFROM python:3.11-slim AS builder\n\n# The WORKDIR instruction sets the working directory for any RUN, CMD, ENTRYPOINT, COPY and ADD instructions that follow it in the Dockerfile.\n# If the WORKDIR doesnโ€™t exist, it will be created even if itโ€™s not used in any subsequent Dockerfile instruction.\n# For more information, see: https://docs.docker.com/engine/reference/builder/#workdir\nWORKDIR /app\n\n# Set environment variables. \n# The ENV instruction sets the environment variable <key> to the value <value>.\n# This value will be in the environment of all โ€œdescendantโ€ Dockerfile commands and can be replaced inline in many as well.\n# For more information, see: https://docs.docker.com/engine/reference/builder/#env\nENV PYTHONDONTWRITEBYTECODE 1\nENV PYTHONUNBUFFERED 1\n\n# Install git so that we can clone the app code from a remote repo using the RUN instruction.\n# The RUN comand has 2 forms:\n# * RUN <command> (shell form, the command is run in a shell, which by default is /bin/sh -c on Linux or cmd /S /C on Windows)\n# * RUN [\"executable\", \"param1\", \"param2\"] (exec form)\n# The RUN instruction will execute any commands in a new layer on top of the current image and commit the results. 
\n# The resulting committed image will be used for the next step in the Dockerfile.\n# For more information, see: https://docs.docker.com/engine/reference/builder/#run\nRUN apt-get update && apt-get install -y \\\n build-essential \\\n curl \\\n software-properties-common \\\n git \\\n && rm -rf /var/lib/apt/lists/*\n\n# Create a virtualenv to keep dependencies together\nRUN python -m venv /opt/venv\nENV PATH=\"/opt/venv/bin:$PATH\"\n\n# Clone the requirements.txt which contains dependencies to WORKDIR\n# COPY has two forms:\n# * COPY <src> <dest> (this copies the files from the local machine to the container's own filesystem)\n# * COPY [\"<src>\",... \"<dest>\"] (this form is required for paths containing whitespace)\n# For more information, see: https://docs.docker.com/engine/reference/builder/#copy\nCOPY requirements.txt .\n\n# Install the Python dependencies\nRUN pip install --no-cache-dir --no-deps -r requirements.txt\n\n# Stage 2 - Copy only necessary files to the runner stage\n\n# The FROM instruction initializes a new build stage for the application\nFROM python:3.11-slim\n\n# Define the filename to copy as an argument\nARG FILENAME\n\n# Deefine the port to run the application on as an argument\nARG PORT=8000\n\n# Set an environment variable\nENV FILENAME=${FILENAME}\n\n# Sets the working directory to /app\nWORKDIR /app\n\n# Copy the virtual environment from the builder stage\nCOPY --from=builder /opt/venv /opt/venv\n\n# Set environment variables\nENV PATH=\"/opt/venv/bin:$PATH\"\n\n# Clone the $FILENAME containing the application code\nCOPY $FILENAME .\n\n# Copy the chainlit.md file to the working directory\nCOPY chainlit.md .\n\n# Copy the .chainlit folder to the working directory\nCOPY ./.chainlit ./.chainlit\n\n# The EXPOSE instruction informs Docker that the container listens on the specified network ports at runtime.\n# For more information, see: https://docs.docker.com/engine/reference/builder/#expose\nEXPOSE $PORT\n\n# The ENTRYPOINT instruction has 
two forms:\n# * ENTRYPOINT [\"executable\", \"param1\", \"param2\"] (exec form, preferred)\n# * ENTRYPOINT command param1 param2 (shell form)\n# The ENTRYPOINT instruction allows you to configure a container that will run as an executable.\n# For more information, see: https://docs.docker.com/engine/reference/builder/#entrypoint\nCMD chainlit run $FILENAME --port=$PORT
\n
\n

Test applications locally

\n

You can use the src/02-run-docker-container.sh Bash script to test the containers for the sender, processor, and receiver applications.

\n
\n
#!/bin/bash\n\n# Variables\nsource ./00-variables.sh\n\n# Print the menu\necho \"====================================\"\necho \"Run Docker Container (1-3): \"\necho \"====================================\"\noptions=(\n \"Doc\"\n \"Chat\"\n)\nname=\"\"\n# Select an option\nCOLUMNS=0\nselect option in \"${options[@]}\"; do\n case $option in\n \"Doc\")\n docker run -it \\\n --rm \\\n -p $port:$port \\\n -e AZURE_OPENAI_BASE=$AZURE_OPENAI_BASE \\\n -e AZURE_OPENAI_KEY=$AZURE_OPENAI_KEY \\\n -e AZURE_OPENAI_MODEL=$AZURE_OPENAI_MODEL \\\n -e AZURE_OPENAI_DEPLOYMENT=$AZURE_OPENAI_DEPLOYMENT \\\n -e AZURE_OPENAI_ADA_DEPLOYMENT=$AZURE_OPENAI_ADA_DEPLOYMENT \\\n -e AZURE_OPENAI_VERSION=$AZURE_OPENAI_VERSION \\\n -e AZURE_OPENAI_TYPE=$AZURE_OPENAI_TYPE \\\n -e TEMPERATURE=$TEMPERATURE \\\n --name $docImageName \\\n $docImageName:$tag\n break\n ;;\n \"Chat\")\n docker run -it \\\n --rm \\\n -p $port:$port \\\n -e AZURE_OPENAI_BASE=$AZURE_OPENAI_BASE \\\n -e AZURE_OPENAI_KEY=$AZURE_OPENAI_KEY \\\n -e AZURE_OPENAI_MODEL=$AZURE_OPENAI_MODEL \\\n -e AZURE_OPENAI_DEPLOYMENT=$AZURE_OPENAI_DEPLOYMENT \\\n -e AZURE_OPENAI_VERSION=$AZURE_OPENAI_VERSION \\\n -e AZURE_OPENAI_TYPE=$AZURE_OPENAI_TYPE \\\n -e TEMPERATURE=$TEMPERATURE \\\n --name $chatImageName \\\n $chatImageName:$tag\n break\n ;;\n \"Quit\")\n exit\n ;;\n *) echo \"invalid option $REPLY\" ;;\n esac\ndone
\n
\n

Push Docker containers to the Azure Container Registry

\n

You can use the src/03-push-docker-image.sh Bash script to push the Docker container images for the sender, processor, and receiver applications to the Azure Container Registry (ACR)

\n

 

\n

 

\n#!/bin/bash\n\n# Variables\nsource ./00-variables.sh\n\n# Login to ACR\necho \"Logging in to [${acrName,,}] container registry...\"\naz acr login --name ${acrName,,}\n\n# Retrieve ACR login server. Each container image needs to be tagged with the loginServer name of the registry. \necho \"Retrieving login server for the [${acrName,,}] container registry...\"\nloginServer=$(az acr show --name ${acrName,,} --query loginServer --output tsv)\n\n# Use a for loop to tag and push the local docker images to the Azure Container Registry\nfor index in ${!images[@]}; do\n # Tag the local sender image with the loginServer of ACR\n docker tag ${images[$index],,}:$tag $loginServer/${images[$index],,}:$tag\n\n # Push the container image to ACR\n docker push $loginServer/${images[$index],,}:$tag\ndone\n

 

\n

 

\n

Monitoring

\n

Azure Container Apps provides several built-in observability features that together give you a holistic view of your container appโ€™s health throughout its application lifecycle. These features help you monitor and diagnose the state of your app to improve performance and respond to trends and critical problems.

\n

You can use the Log Stream panel on the Azure Portal to see the logs generated by a container app, as shown in the following screenshot.

\n
 
\n

\n

 

\n

Alternatively, you can click open the Logs panel, as shown in the following screenshot, and use a Kusto Query Language (KQL) query to filter, project, and retrieve only the desired data.

\n

 

\n

\n

 

\n

Review deployed resources

\n

You can use the Azure portal to list the deployed resources in the resource group, as shown in the following picture:

\n

 

\n

\n

โ€ƒ

\n

You can also use Azure CLI to list the deployed resources in the resource group:

\n
\n
az resource list --resource-group <resource-group-name>
\n
\n

You can also use the following PowerShell cmdlet to list the deployed resources in the resource group:

\n
\n
Get-AzResource -ResourceGroupName <resource-group-name>
\n
\n

Clean up resources

\n

You can delete the resource group using the following Azure CLI command when you no longer need the resources you created. This will remove all the Azure resources.

\n
\n
az group delete --name <resource-group-name>
\n
\n

Alternatively, you can use the following PowerShell cmdlet to delete the resource group and all the Azure resources.

\n

 

\n

 

\n

 

\n

 

\nRemove-AzResourceGroup -Name <resource-group-name>\n

 

\n

 

\n

 

\n

 

\n

 

\n

 

\n

 

\n

 

","kudosSumWeight":5,"postTime":"2023-07-27T06:47:11.162-07:00","images":{"__typename":"AssociatedImageConnection","edges":[{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDE","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTQ5MjkyOGlEQTI4NEI0NjcwRTNENjQx?revision=8\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDI","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTQ5MjkyOWk4RTkzMjIxOUMzMEYyREIz?revision=8\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDM","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDExOWk0NzlFNThDNjI3QUNENzM2?revision=8\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDQ","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE4OGk1ODE3ODQxQTMzQzY0REMz?revision=8\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDU","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE5MWkxQ0I1QzI0Q0VFNTVBRjNC?revision=8\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDY","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEyM2k3NzU5RjAxNkVEODBEQjU4?revision=8\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDc","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEyNmkzM0FBNzE1RTQ3RDkwNUI4?revision=8\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDg","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MD
EyOGk1QUFBNDZBMjFCODc5RkIz?revision=8\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDk","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE5Mmk3RDJFMjQ5RTM0QzJGMjgz?revision=8\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDEw","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE5M2k5OUZFOUQ1RjM1N0MyOTAz?revision=8\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDEx","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE5NGkzOTUxODk3QThGQjc5N0E1?revision=8\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDEy","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEzM2kyRTg1QzY5REM0MEM2NzBE?revision=8\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDEz","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEzNGlFREU5MDY4REM0MjUzNzdG?revision=8\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDE0","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEzNWk4NjY5NzY3RDQ4QTYyOUI5?revision=8\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDE1","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEzNmkyQkZDRDkwRDY4ODZEQzEx?revision=8\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDE2","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTQ5Mjk1MWlGODlGNjg1Rjg0MjA4NDI3?revision=8\"}"}},{"__typename":"AssociatedImag
eEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDE3","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTQ5Mjk1MmkzNDA5RTNEMEE5Njg1MzRC?revision=8\"}"}},{"__typename":"AssociatedImageEdge","cursor":"MjUuMXwyLjF8b3wyNXxfTlZffDE4","node":{"__ref":"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTQ5Mjk1M2k5RkVCNEFBRjdEQzgzM0Ey?revision=8\"}"}}],"totalCount":18,"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}},"attachments":{"__typename":"AttachmentConnection","pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null},"edges":[]},"tags":{"__typename":"TagConnection","pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null},"edges":[{"__typename":"TagEdge","cursor":"MjUuMXwyLjF8b3wxMHxfTlZffDE","node":{"__typename":"Tag","id":"tag:App","text":"App","time":"2016-07-12T09:46:46.175-07:00","lastActivityTime":null,"messagesCount":null,"followersCount":null}},{"__typename":"TagEdge","cursor":"MjUuMXwyLjF8b3wxMHxfTlZffDI","node":{"__typename":"Tag","id":"tag:Cloud Native Apps","text":"Cloud Native Apps","time":"2021-01-08T08:35:18.760-08:00","lastActivityTime":null,"messagesCount":null,"followersCount":null}},{"__typename":"TagEdge","cursor":"MjUuMXwyLjF8b3wxMHxfTlZffDM","node":{"__typename":"Tag","id":"tag:data & ai","text":"data & ai","time":"2020-09-11T06:32:31.686-07:00","lastActivityTime":null,"messagesCount":null,"followersCount":null}},{"__typename":"TagEdge","cursor":"MjUuMXwyLjF8b3wxMHxfTlZffDQ","node":{"__typename":"Tag","id":"tag:Infra","text":"Infra","time":"2020-03-04T21:21:46.909-08:00","lastActivityTime":null,"messagesCount":null,"followersCount":null}},{"__typename":"TagEdge","cursor":"MjUuMXwyLjF8b3wxMHxfTlZffDU","node":{"__typename":"Tag","id":"tag:ISV 
1:Many","text":"ISV 1:Many","time":"2021-01-22T05:55:04.673-08:00","lastActivityTime":null,"messagesCount":null,"followersCount":null}},{"__typename":"TagEdge","cursor":"MjUuMXwyLjF8b3wxMHxfTlZffDY","node":{"__typename":"Tag","id":"tag:ISVs","text":"ISVs","time":"2020-09-16T12:09:53.388-07:00","lastActivityTime":null,"messagesCount":null,"followersCount":null}},{"__typename":"TagEdge","cursor":"MjUuMXwyLjF8b3wxMHxfTlZffDc","node":{"__typename":"Tag","id":"tag:Multitenant architecture","text":"Multitenant architecture","time":"2023-05-02T18:52:10.242-07:00","lastActivityTime":null,"messagesCount":null,"followersCount":null}},{"__typename":"TagEdge","cursor":"MjUuMXwyLjF8b3wxMHxfTlZffDg","node":{"__typename":"Tag","id":"tag:saas","text":"saas","time":"2018-09-20T07:53:04.840-07:00","lastActivityTime":null,"messagesCount":null,"followersCount":null}}]},"timeToRead":55,"rawTeaser":"
\n
This article and the companion sample show how to create two Azure Container Apps that use OpenAI, LangChain, ChromaDB, and Chainlit using Terraform.
\n
\n

โ€ƒ

\n
\n
","introduction":"","coverImage":null,"coverImageProperties":{"__typename":"CoverImageProperties","style":"STANDARD","titlePosition":"BOTTOM","altText":""},"currentRevision":{"__ref":"Revision:revision:3885602_8"},"latestVersion":{"__typename":"FriendlyVersion","major":"8","minor":"0"},"metrics":{"__typename":"MessageMetrics","views":80589},"visibilityScope":"PUBLIC","canonicalUrl":null,"seoTitle":null,"seoDescription":null,"placeholder":false,"originalMessageForPlaceholder":null,"contributors":{"__typename":"UserConnection","edges":[]},"nonCoAuthorContributors":{"__typename":"UserConnection","edges":[]},"coAuthors":{"__typename":"UserConnection","edges":[]},"blogMessagePolicies":{"__typename":"BlogMessagePolicies","canDoAuthoringActionsOnBlog":{"__typename":"PolicyResult","failureReason":{"__typename":"FailureReason","message":"error.lithium.policies.blog.action_can_do_authoring_action.accessDenied","key":"error.lithium.policies.blog.action_can_do_authoring_action.accessDenied","args":[]}}},"archivalData":null,"replies":{"__typename":"MessageConnection","edges":[{"__typename":"MessageEdge","cursor":"MjUuMXwyLjF8aXwxMHwxMzI6MHxpbnQsNDA4Mjk4Myw0MDgyOTgz","node":{"__ref":"BlogReplyMessage:message:4082983"}},{"__typename":"MessageEdge","cursor":"MjUuMXwyLjF8aXwxMHwxMzI6MHxpbnQsNDA4Mjk4Myw0MDgxNzMy","node":{"__ref":"BlogReplyMessage:message:4081732"}},{"__typename":"MessageEdge","cursor":"MjUuMXwyLjF8aXwxMHwxMzI6MHxpbnQsNDA4Mjk4Myw0MDgxNjg0","node":{"__ref":"BlogReplyMessage:message:4081684"}},{"__typename":"MessageEdge","cursor":"MjUuMXwyLjF8aXwxMHwxMzI6MHxpbnQsNDA4Mjk4MywzOTU2NDg2","node":{"__ref":"BlogReplyMessage:message:3956486"}},{"__typename":"MessageEdge","cursor":"MjUuMXwyLjF8aXwxMHwxMzI6MHxpbnQsNDA4Mjk4MywzOTEzNDAx","node":{"__ref":"BlogReplyMessage:message:3913401"}},{"__typename":"MessageEdge","cursor":"MjUuMXwyLjF8aXwxMHwxMzI6MHxpbnQsNDA4Mjk4MywzOTEzMDUy","node":{"__ref":"BlogReplyMessage:message:3913052"}},{"__typename":"MessageEdge","cursor":"MjUuMXwyLjF8
aXwxMHwxMzI6MHxpbnQsNDA4Mjk4MywzODk5ODE2","node":{"__ref":"BlogReplyMessage:message:3899816"}},{"__typename":"MessageEdge","cursor":"MjUuMXwyLjF8aXwxMHwxMzI6MHxpbnQsNDA4Mjk4MywzODk5Nzg0","node":{"__ref":"BlogReplyMessage:message:3899784"}},{"__typename":"MessageEdge","cursor":"MjUuMXwyLjF8aXwxMHwxMzI6MHxpbnQsNDA4Mjk4MywzODg1Nzc4","node":{"__ref":"BlogReplyMessage:message:3885778"}},{"__typename":"MessageEdge","cursor":"MjUuMXwyLjF8aXwxMHwxMzI6MHxpbnQsNDA4Mjk4MywzODg1NzUy","node":{"__ref":"BlogReplyMessage:message:3885752"}}],"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}},"customFields":[],"revisions({\"constraints\":{\"isPublished\":{\"eq\":true}},\"first\":1})":{"__typename":"RevisionConnection","totalCount":8}},"Conversation:conversation:3885602":{"__typename":"Conversation","id":"conversation:3885602","solved":false,"topic":{"__ref":"BlogTopicMessage:message:3885602"},"lastPostingActivityTime":"2024-05-07T05:35:33.028-07:00","lastPostTime":"2024-03-12T10:53:14.732-07:00","unreadReplyCount":10,"isSubscribed":false},"ModerationData:moderation_data:3885602":{"__typename":"ModerationData","id":"moderation_data:3885602","status":"APPROVED","rejectReason":null,"isReportedAbuse":false,"rejectUser":null,"rejectTime":null,"rejectActorType":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTQ5MjkyOGlEQTI4NEI0NjcwRTNENjQx?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTQ5MjkyOGlEQTI4NEI0NjcwRTNENjQx?revision=8","title":"architecture.png","associationType":"TEASER","width":900,"height":516,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTQ5MjkyOWk4RTkzMjIxOUMzMEYyREIz?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTQ5MjkyOWk4RTk
zMjIxOUMzMEYyREIz?revision=8","title":"architecture.png","associationType":"BODY","width":900,"height":516,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDExOWk0NzlFNThDNjI3QUNENzM2?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDExOWk0NzlFNThDNjI3QUNENzM2?revision=8","title":"chainlit-welcome-screen.png","associationType":"BODY","width":1399,"height":1072,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE4OGk1ODE3ODQxQTMzQzY0REMz?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE4OGk1ODE3ODQxQTMzQzY0REMz?revision=8","title":"chainlit-simple-chat.png","associationType":"BODY","width":1358,"height":1132,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE5MWkxQ0I1QzI0Q0VFNTVBRjNC?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE5MWkxQ0I1QzI0Q0VFNTVBRjNC?revision=8","title":"chainlit-format-result.png","associationType":"BODY","width":1399,"height":1154,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEyM2k3NzU5RjAxNkVEODBEQjU4?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEyM2k3NzU5RjAxNkVEODBEQjU4?revision=8","title":"chainlit-dark-mode.png","associationType":"BODY","width":1399,"height":1124,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEyNmkzM0FBNzE1RTQ3RDkwNUI4?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1N
jAyLTU0MDEyNmkzM0FBNzE1RTQ3RDkwNUI4?revision=8","title":"chainlit-before-upload.png","associationType":"BODY","width":1399,"height":1124,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEyOGk1QUFBNDZBMjFCODc5RkIz?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEyOGk1QUFBNDZBMjFCODc5RkIz?revision=8","title":"chainlit-processing-documents.png","associationType":"BODY","width":1399,"height":1124,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE5Mmk3RDJFMjQ5RTM0QzJGMjgz?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE5Mmk3RDJFMjQ5RTM0QzJGMjgz?revision=8","title":"chainlit-document-reply.png","associationType":"BODY","width":1358,"height":1132,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE5M2k5OUZFOUQ1RjM1N0MyOTAz?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE5M2k5OUZFOUQ1RjM1N0MyOTAz?revision=8","title":"chainlit-chain-of-thought.png","associationType":"BODY","width":1358,"height":3039,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE5NGkzOTUxODk3QThGQjc5N0E1?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDE5NGkzOTUxODk3QThGQjc5N0E1?revision=8","title":"chainlit-source.png","associationType":"BODY","width":1358,"height":1132,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEzM2kyRTg1QzY5REM0MEM2NzBE?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microso
ft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEzM2kyRTg1QzY5REM0MEM2NzBE?revision=8","title":"chainlit-prompt-playground.png","associationType":"BODY","width":1399,"height":1124,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEzNGlFREU5MDY4REM0MjUzNzdG?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEzNGlFREU5MDY4REM0MjUzNzdG?revision=8","title":"chainlit-prompt-playground-variable.png","associationType":"BODY","width":1399,"height":1124,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEzNWk4NjY5NzY3RDQ4QTYyOUI5?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEzNWk4NjY5NzY3RDQ4QTYyOUI5?revision=8","title":"chainlit-prompt-playground-question.png","associationType":"BODY","width":1399,"height":1124,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEzNmkyQkZDRDkwRDY4ODZEQzEx?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTU0MDEzNmkyQkZDRDkwRDY4ODZEQzEx?revision=8","title":"chainlit-prompt-playground-reply.png","associationType":"BODY","width":1399,"height":1124,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTQ5Mjk1MWlGODlGNjg1Rjg0MjA4NDI3?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTQ5Mjk1MWlGODlGNjg1Rjg0MjA4NDI3?revision=8","title":"log-stream.png","associationType":"BODY","width":1281,"height":1059,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTQ5Mjk1MmkzNDA5RTNEMEE5Njg1MzRC?revision=8\"}":{"__t
ypename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTQ5Mjk1MmkzNDA5RTNEMEE5Njg1MzRC?revision=8","title":"logs.png","associationType":"BODY","width":1281,"height":1059,"altText":null},"AssociatedImage:{\"url\":\"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTQ5Mjk1M2k5RkVCNEFBRjdEQzgzM0Ey?revision=8\"}":{"__typename":"AssociatedImage","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/images/bS0zODg1NjAyLTQ5Mjk1M2k5RkVCNEFBRjdEQzgzM0Ey?revision=8","title":"resources.png","associationType":"BODY","width":857,"height":1081,"altText":null},"Revision:revision:3885602_8":{"__typename":"Revision","id":"revision:3885602_8","lastEditTime":"2024-05-07T05:35:33.028-07:00"},"CachedAsset:theme:customTheme1-1743778046782":{"__typename":"CachedAsset","id":"theme:customTheme1-1743778046782","value":{"id":"customTheme1","animation":{"fast":"150ms","normal":"250ms","slow":"500ms","slowest":"750ms","function":"cubic-bezier(0.07, 0.91, 0.51, 
1)","__typename":"AnimationThemeSettings"},"avatar":{"borderRadius":"50%","collections":["default"],"__typename":"AvatarThemeSettings"},"basics":{"browserIcon":{"imageAssetName":"favicon-1730836283320.png","imageLastModified":"1730836286415","__typename":"ThemeAsset"},"customerLogo":{"imageAssetName":"favicon-1730836271365.png","imageLastModified":"1730836274203","__typename":"ThemeAsset"},"maximumWidthOfPageContent":"1300px","oneColumnNarrowWidth":"800px","gridGutterWidthMd":"30px","gridGutterWidthXs":"10px","pageWidthStyle":"WIDTH_OF_BROWSER","__typename":"BasicsThemeSettings"},"buttons":{"borderRadiusSm":"3px","borderRadius":"3px","borderRadiusLg":"5px","paddingY":"5px","paddingYLg":"7px","paddingYHero":"var(--lia-bs-btn-padding-y-lg)","paddingX":"12px","paddingXLg":"16px","paddingXHero":"60px","fontStyle":"NORMAL","fontWeight":"700","textTransform":"NONE","disabledOpacity":0.5,"primaryTextColor":"var(--lia-bs-white)","primaryTextHoverColor":"var(--lia-bs-white)","primaryTextActiveColor":"var(--lia-bs-white)","primaryBgColor":"var(--lia-bs-primary)","primaryBgHoverColor":"hsl(var(--lia-bs-primary-h), var(--lia-bs-primary-s), calc(var(--lia-bs-primary-l) * 0.85))","primaryBgActiveColor":"hsl(var(--lia-bs-primary-h), var(--lia-bs-primary-s), calc(var(--lia-bs-primary-l) * 0.7))","primaryBorder":"1px solid transparent","primaryBorderHover":"1px solid transparent","primaryBorderActive":"1px solid transparent","primaryBorderFocus":"1px solid var(--lia-bs-white)","primaryBoxShadowFocus":"0 0 0 1px var(--lia-bs-primary), 0 0 0 4px hsla(var(--lia-bs-primary-h), var(--lia-bs-primary-s), var(--lia-bs-primary-l), 0.2)","secondaryTextColor":"var(--lia-bs-gray-900)","secondaryTextHoverColor":"hsl(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), calc(var(--lia-bs-gray-900-l) * 0.95))","secondaryTextActiveColor":"hsl(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), calc(var(--lia-bs-gray-900-l) * 
0.9))","secondaryBgColor":"var(--lia-bs-gray-200)","secondaryBgHoverColor":"hsl(var(--lia-bs-gray-200-h), var(--lia-bs-gray-200-s), calc(var(--lia-bs-gray-200-l) * 0.96))","secondaryBgActiveColor":"hsl(var(--lia-bs-gray-200-h), var(--lia-bs-gray-200-s), calc(var(--lia-bs-gray-200-l) * 0.92))","secondaryBorder":"1px solid transparent","secondaryBorderHover":"1px solid transparent","secondaryBorderActive":"1px solid transparent","secondaryBorderFocus":"1px solid transparent","secondaryBoxShadowFocus":"0 0 0 1px var(--lia-bs-primary), 0 0 0 4px hsla(var(--lia-bs-primary-h), var(--lia-bs-primary-s), var(--lia-bs-primary-l), 0.2)","tertiaryTextColor":"var(--lia-bs-gray-900)","tertiaryTextHoverColor":"hsl(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), calc(var(--lia-bs-gray-900-l) * 0.95))","tertiaryTextActiveColor":"hsl(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), calc(var(--lia-bs-gray-900-l) * 0.9))","tertiaryBgColor":"transparent","tertiaryBgHoverColor":"transparent","tertiaryBgActiveColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.04)","tertiaryBorder":"1px solid transparent","tertiaryBorderHover":"1px solid hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.08)","tertiaryBorderActive":"1px solid transparent","tertiaryBorderFocus":"1px solid transparent","tertiaryBoxShadowFocus":"0 0 0 1px var(--lia-bs-primary), 0 0 0 4px hsla(var(--lia-bs-primary-h), var(--lia-bs-primary-s), var(--lia-bs-primary-l), 0.2)","destructiveTextColor":"var(--lia-bs-danger)","destructiveTextHoverColor":"hsl(var(--lia-bs-danger-h), var(--lia-bs-danger-s), calc(var(--lia-bs-danger-l) * 0.95))","destructiveTextActiveColor":"hsl(var(--lia-bs-danger-h), var(--lia-bs-danger-s), calc(var(--lia-bs-danger-l) * 0.9))","destructiveBgColor":"var(--lia-bs-gray-200)","destructiveBgHoverColor":"hsl(var(--lia-bs-gray-200-h), var(--lia-bs-gray-200-s), calc(var(--lia-bs-gray-200-l) * 
0.96))","destructiveBgActiveColor":"hsl(var(--lia-bs-gray-200-h), var(--lia-bs-gray-200-s), calc(var(--lia-bs-gray-200-l) * 0.92))","destructiveBorder":"1px solid transparent","destructiveBorderHover":"1px solid transparent","destructiveBorderActive":"1px solid transparent","destructiveBorderFocus":"1px solid transparent","destructiveBoxShadowFocus":"0 0 0 1px var(--lia-bs-primary), 0 0 0 4px hsla(var(--lia-bs-primary-h), var(--lia-bs-primary-s), var(--lia-bs-primary-l), 0.2)","__typename":"ButtonsThemeSettings"},"border":{"color":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.08)","mainContent":"NONE","sideContent":"LIGHT","radiusSm":"3px","radius":"5px","radiusLg":"9px","radius50":"100vw","__typename":"BorderThemeSettings"},"boxShadow":{"xs":"0 0 0 1px hsla(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), var(--lia-bs-gray-900-l), 0.08), 0 3px 0 -1px hsla(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), var(--lia-bs-gray-900-l), 0.16)","sm":"0 2px 4px hsla(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), var(--lia-bs-gray-900-l), 0.12)","md":"0 5px 15px hsla(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), var(--lia-bs-gray-900-l), 0.3)","lg":"0 10px 30px hsla(var(--lia-bs-gray-900-h), var(--lia-bs-gray-900-s), var(--lia-bs-gray-900-l), 
0.3)","__typename":"BoxShadowThemeSettings"},"cards":{"bgColor":"var(--lia-panel-bg-color)","borderRadius":"var(--lia-panel-border-radius)","boxShadow":"var(--lia-box-shadow-xs)","__typename":"CardsThemeSettings"},"chip":{"maxWidth":"300px","height":"30px","__typename":"ChipThemeSettings"},"coreTypes":{"defaultMessageLinkColor":"var(--lia-bs-link-color)","defaultMessageLinkDecoration":"none","defaultMessageLinkFontStyle":"NORMAL","defaultMessageLinkFontWeight":"400","defaultMessageFontStyle":"NORMAL","defaultMessageFontWeight":"400","forumColor":"#4099E2","forumFontFamily":"var(--lia-bs-font-family-base)","forumFontWeight":"var(--lia-default-message-font-weight)","forumLineHeight":"var(--lia-bs-line-height-base)","forumFontStyle":"var(--lia-default-message-font-style)","forumMessageLinkColor":"var(--lia-default-message-link-color)","forumMessageLinkDecoration":"var(--lia-default-message-link-decoration)","forumMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","forumMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","forumSolvedColor":"#148563","blogColor":"#1CBAA0","blogFontFamily":"var(--lia-bs-font-family-base)","blogFontWeight":"var(--lia-default-message-font-weight)","blogLineHeight":"1.75","blogFontStyle":"var(--lia-default-message-font-style)","blogMessageLinkColor":"var(--lia-default-message-link-color)","blogMessageLinkDecoration":"var(--lia-default-message-link-decoration)","blogMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","blogMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","tkbColor":"#4C6B90","tkbFontFamily":"var(--lia-bs-font-family-base)","tkbFontWeight":"var(--lia-default-message-font-weight)","tkbLineHeight":"1.75","tkbFontStyle":"var(--lia-default-message-font-style)","tkbMessageLinkColor":"var(--lia-default-message-link-color)","tkbMessageLinkDecoration":"var(--lia-default-message-link-decoration)","tkbMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","tk
bMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","qandaColor":"#4099E2","qandaFontFamily":"var(--lia-bs-font-family-base)","qandaFontWeight":"var(--lia-default-message-font-weight)","qandaLineHeight":"var(--lia-bs-line-height-base)","qandaFontStyle":"var(--lia-default-message-link-font-style)","qandaMessageLinkColor":"var(--lia-default-message-link-color)","qandaMessageLinkDecoration":"var(--lia-default-message-link-decoration)","qandaMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","qandaMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","qandaSolvedColor":"#3FA023","ideaColor":"#FF8000","ideaFontFamily":"var(--lia-bs-font-family-base)","ideaFontWeight":"var(--lia-default-message-font-weight)","ideaLineHeight":"var(--lia-bs-line-height-base)","ideaFontStyle":"var(--lia-default-message-font-style)","ideaMessageLinkColor":"var(--lia-default-message-link-color)","ideaMessageLinkDecoration":"var(--lia-default-message-link-decoration)","ideaMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","ideaMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","contestColor":"#FCC845","contestFontFamily":"var(--lia-bs-font-family-base)","contestFontWeight":"var(--lia-default-message-font-weight)","contestLineHeight":"var(--lia-bs-line-height-base)","contestFontStyle":"var(--lia-default-message-link-font-style)","contestMessageLinkColor":"var(--lia-default-message-link-color)","contestMessageLinkDecoration":"var(--lia-default-message-link-decoration)","contestMessageLinkFontStyle":"ITALIC","contestMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","occasionColor":"#D13A1F","occasionFontFamily":"var(--lia-bs-font-family-base)","occasionFontWeight":"var(--lia-default-message-font-weight)","occasionLineHeight":"var(--lia-bs-line-height-base)","occasionFontStyle":"var(--lia-default-message-font-style)","occasionMessageLinkColor":"var(--lia-default-message-link-color)","occasionMessa
geLinkDecoration":"var(--lia-default-message-link-decoration)","occasionMessageLinkFontStyle":"var(--lia-default-message-link-font-style)","occasionMessageLinkFontWeight":"var(--lia-default-message-link-font-weight)","grouphubColor":"#333333","categoryColor":"#949494","communityColor":"#FFFFFF","productColor":"#949494","__typename":"CoreTypesThemeSettings"},"colors":{"black":"#000000","white":"#FFFFFF","gray100":"#F7F7F7","gray200":"#F7F7F7","gray300":"#E8E8E8","gray400":"#D9D9D9","gray500":"#CCCCCC","gray600":"#717171","gray700":"#707070","gray800":"#545454","gray900":"#333333","dark":"#545454","light":"#F7F7F7","primary":"#0069D4","secondary":"#333333","bodyText":"#1E1E1E","bodyBg":"#FFFFFF","info":"#409AE2","success":"#41C5AE","warning":"#FCC844","danger":"#BC341B","alertSystem":"#FF6600","textMuted":"#707070","highlight":"#FFFCAD","outline":"var(--lia-bs-primary)","custom":["#D3F5A4","#243A5E"],"__typename":"ColorsThemeSettings"},"divider":{"size":"3px","marginLeft":"4px","marginRight":"4px","borderRadius":"50%","bgColor":"var(--lia-bs-gray-600)","bgColorActive":"var(--lia-bs-gray-600)","__typename":"DividerThemeSettings"},"dropdown":{"fontSize":"var(--lia-bs-font-size-sm)","borderColor":"var(--lia-bs-border-color)","borderRadius":"var(--lia-bs-border-radius-sm)","dividerBg":"var(--lia-bs-gray-300)","itemPaddingY":"5px","itemPaddingX":"20px","headerColor":"var(--lia-bs-gray-700)","__typename":"DropdownThemeSettings"},"email":{"link":{"color":"#0069D4","hoverColor":"#0061c2","decoration":"none","hoverDecoration":"underline","__typename":"EmailLinkSettings"},"border":{"color":"#e4e4e4","__typename":"EmailBorderSettings"},"buttons":{"borderRadiusLg":"5px","paddingXLg":"16px","paddingYLg":"7px","fontWeight":"700","primaryTextColor":"#ffffff","primaryTextHoverColor":"#ffffff","primaryBgColor":"#0069D4","primaryBgHoverColor":"#005cb8","primaryBorder":"1px solid transparent","primaryBorderHover":"1px solid 
transparent","__typename":"EmailButtonsSettings"},"panel":{"borderRadius":"5px","borderColor":"#e4e4e4","__typename":"EmailPanelSettings"},"__typename":"EmailThemeSettings"},"emoji":{"skinToneDefault":"#ffcd43","skinToneLight":"#fae3c5","skinToneMediumLight":"#e2cfa5","skinToneMedium":"#daa478","skinToneMediumDark":"#a78058","skinToneDark":"#5e4d43","__typename":"EmojiThemeSettings"},"heading":{"color":"var(--lia-bs-body-color)","fontFamily":"Segoe UI","fontStyle":"NORMAL","fontWeight":"400","h1FontSize":"34px","h2FontSize":"32px","h3FontSize":"28px","h4FontSize":"24px","h5FontSize":"20px","h6FontSize":"16px","lineHeight":"1.3","subHeaderFontSize":"11px","subHeaderFontWeight":"500","h1LetterSpacing":"normal","h2LetterSpacing":"normal","h3LetterSpacing":"normal","h4LetterSpacing":"normal","h5LetterSpacing":"normal","h6LetterSpacing":"normal","subHeaderLetterSpacing":"2px","h1FontWeight":"var(--lia-bs-headings-font-weight)","h2FontWeight":"var(--lia-bs-headings-font-weight)","h3FontWeight":"var(--lia-bs-headings-font-weight)","h4FontWeight":"var(--lia-bs-headings-font-weight)","h5FontWeight":"var(--lia-bs-headings-font-weight)","h6FontWeight":"var(--lia-bs-headings-font-weight)","__typename":"HeadingThemeSettings"},"icons":{"size10":"10px","size12":"12px","size14":"14px","size16":"16px","size20":"20px","size24":"24px","size30":"30px","size40":"40px","size50":"50px","size60":"60px","size80":"80px","size120":"120px","size160":"160px","__typename":"IconsThemeSettings"},"imagePreview":{"bgColor":"var(--lia-bs-gray-900)","titleColor":"var(--lia-bs-white)","controlColor":"var(--lia-bs-white)","controlBgColor":"var(--lia-bs-gray-800)","__typename":"ImagePreviewThemeSettings"},"input":{"borderColor":"var(--lia-bs-gray-600)","disabledColor":"var(--lia-bs-gray-600)","focusBorderColor":"var(--lia-bs-primary)","labelMarginBottom":"10px","btnFontSize":"var(--lia-bs-font-size-sm)","focusBoxShadow":"0 0 0 3px hsla(var(--lia-bs-primary-h), var(--lia-bs-primary-s), 
var(--lia-bs-primary-l), 0.2)","checkLabelMarginBottom":"2px","checkboxBorderRadius":"3px","borderRadiusSm":"var(--lia-bs-border-radius-sm)","borderRadius":"var(--lia-bs-border-radius)","borderRadiusLg":"var(--lia-bs-border-radius-lg)","formTextMarginTop":"4px","textAreaBorderRadius":"var(--lia-bs-border-radius)","activeFillColor":"var(--lia-bs-primary)","__typename":"InputThemeSettings"},"loading":{"dotDarkColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.2)","dotLightColor":"hsla(var(--lia-bs-white-h), var(--lia-bs-white-s), var(--lia-bs-white-l), 0.5)","barDarkColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.06)","barLightColor":"hsla(var(--lia-bs-white-h), var(--lia-bs-white-s), var(--lia-bs-white-l), 0.4)","__typename":"LoadingThemeSettings"},"link":{"color":"var(--lia-bs-primary)","hoverColor":"hsl(var(--lia-bs-primary-h), var(--lia-bs-primary-s), calc(var(--lia-bs-primary-l) - 10%))","decoration":"none","hoverDecoration":"underline","__typename":"LinkThemeSettings"},"listGroup":{"itemPaddingY":"15px","itemPaddingX":"15px","borderColor":"var(--lia-bs-gray-300)","__typename":"ListGroupThemeSettings"},"modal":{"contentTextColor":"var(--lia-bs-body-color)","contentBg":"var(--lia-bs-white)","backgroundBg":"var(--lia-bs-black)","smSize":"440px","mdSize":"760px","lgSize":"1080px","backdropOpacity":0.3,"contentBoxShadowXs":"var(--lia-bs-box-shadow-sm)","contentBoxShadow":"var(--lia-bs-box-shadow)","headerFontWeight":"700","__typename":"ModalThemeSettings"},"navbar":{"position":"FIXED","background":{"attachment":null,"clip":null,"color":"var(--lia-bs-white)","imageAssetName":"","imageLastModified":"0","origin":null,"position":"CENTER_CENTER","repeat":"NO_REPEAT","size":"COVER","__typename":"BackgroundProps"},"backgroundOpacity":0.8,"paddingTop":"15px","paddingBottom":"15px","borderBottom":"1px solid 
var(--lia-bs-border-color)","boxShadow":"var(--lia-bs-box-shadow-sm)","brandMarginRight":"30px","brandMarginRightSm":"10px","brandLogoHeight":"30px","linkGap":"10px","linkJustifyContent":"flex-start","linkPaddingY":"5px","linkPaddingX":"10px","linkDropdownPaddingY":"9px","linkDropdownPaddingX":"var(--lia-nav-link-px)","linkColor":"var(--lia-bs-body-color)","linkHoverColor":"var(--lia-bs-primary)","linkFontSize":"var(--lia-bs-font-size-sm)","linkFontStyle":"NORMAL","linkFontWeight":"400","linkTextTransform":"NONE","linkLetterSpacing":"normal","linkBorderRadius":"var(--lia-bs-border-radius-sm)","linkBgColor":"transparent","linkBgHoverColor":"transparent","linkBorder":"none","linkBorderHover":"none","linkBoxShadow":"none","linkBoxShadowHover":"none","linkTextBorderBottom":"none","linkTextBorderBottomHover":"none","dropdownPaddingTop":"10px","dropdownPaddingBottom":"15px","dropdownPaddingX":"10px","dropdownMenuOffset":"2px","dropdownDividerMarginTop":"10px","dropdownDividerMarginBottom":"10px","dropdownBorderColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.08)","controllerBgHoverColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.1)","controllerIconColor":"var(--lia-bs-body-color)","controllerIconHoverColor":"var(--lia-bs-body-color)","controllerTextColor":"var(--lia-nav-controller-icon-color)","controllerTextHoverColor":"var(--lia-nav-controller-icon-hover-color)","controllerHighlightColor":"hsla(30, 100%, 
50%)","controllerHighlightTextColor":"var(--lia-yiq-light)","controllerBorderRadius":"var(--lia-border-radius-50)","hamburgerColor":"var(--lia-nav-controller-icon-color)","hamburgerHoverColor":"var(--lia-nav-controller-icon-color)","hamburgerBgColor":"transparent","hamburgerBgHoverColor":"transparent","hamburgerBorder":"none","hamburgerBorderHover":"none","collapseMenuMarginLeft":"20px","collapseMenuDividerBg":"var(--lia-nav-link-color)","collapseMenuDividerOpacity":0.16,"__typename":"NavbarThemeSettings"},"pager":{"textColor":"var(--lia-bs-link-color)","textFontWeight":"var(--lia-font-weight-md)","textFontSize":"var(--lia-bs-font-size-sm)","__typename":"PagerThemeSettings"},"panel":{"bgColor":"var(--lia-bs-white)","borderRadius":"var(--lia-bs-border-radius)","borderColor":"var(--lia-bs-border-color)","boxShadow":"none","__typename":"PanelThemeSettings"},"popover":{"arrowHeight":"8px","arrowWidth":"16px","maxWidth":"300px","minWidth":"100px","headerBg":"var(--lia-bs-white)","borderColor":"var(--lia-bs-border-color)","borderRadius":"var(--lia-bs-border-radius)","boxShadow":"0 0.5rem 1rem hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.15)","__typename":"PopoverThemeSettings"},"prism":{"color":"#000000","bgColor":"#f5f2f0","fontFamily":"var(--font-family-monospace)","fontSize":"var(--lia-bs-font-size-base)","fontWeightBold":"var(--lia-bs-font-weight-bold)","fontStyleItalic":"italic","tabSize":2,"highlightColor":"#b3d4fc","commentColor":"#62707e","punctuationColor":"#6f6f6f","namespaceOpacity":"0.7","propColor":"#990055","selectorColor":"#517a00","operatorColor":"#906736","operatorBgColor":"hsla(0, 0%, 100%, 0.5)","keywordColor":"#0076a9","functionColor":"#d3284b","variableColor":"#c14700","__typename":"PrismThemeSettings"},"rte":{"bgColor":"var(--lia-bs-white)","borderRadius":"var(--lia-panel-border-radius)","boxShadow":" 
var(--lia-panel-box-shadow)","customColor1":"#bfedd2","customColor2":"#fbeeb8","customColor3":"#f8cac6","customColor4":"#eccafa","customColor5":"#c2e0f4","customColor6":"#2dc26b","customColor7":"#f1c40f","customColor8":"#e03e2d","customColor9":"#b96ad9","customColor10":"#3598db","customColor11":"#169179","customColor12":"#e67e23","customColor13":"#ba372a","customColor14":"#843fa1","customColor15":"#236fa1","customColor16":"#ecf0f1","customColor17":"#ced4d9","customColor18":"#95a5a6","customColor19":"#7e8c8d","customColor20":"#34495e","customColor21":"#000000","customColor22":"#ffffff","defaultMessageHeaderMarginTop":"40px","defaultMessageHeaderMarginBottom":"20px","defaultMessageItemMarginTop":"0","defaultMessageItemMarginBottom":"10px","diffAddedColor":"hsla(170, 53%, 51%, 0.4)","diffChangedColor":"hsla(43, 97%, 63%, 0.4)","diffNoneColor":"hsla(0, 0%, 80%, 0.4)","diffRemovedColor":"hsla(9, 74%, 47%, 0.4)","specialMessageHeaderMarginTop":"40px","specialMessageHeaderMarginBottom":"20px","specialMessageItemMarginTop":"0","specialMessageItemMarginBottom":"10px","__typename":"RteThemeSettings"},"tags":{"bgColor":"var(--lia-bs-gray-200)","bgHoverColor":"var(--lia-bs-gray-400)","borderRadius":"var(--lia-bs-border-radius-sm)","color":"var(--lia-bs-body-color)","hoverColor":"var(--lia-bs-body-color)","fontWeight":"var(--lia-font-weight-md)","fontSize":"var(--lia-font-size-xxs)","textTransform":"UPPERCASE","letterSpacing":"0.5px","__typename":"TagsThemeSettings"},"toasts":{"borderRadius":"var(--lia-bs-border-radius)","paddingX":"12px","__typename":"ToastsThemeSettings"},"typography":{"fontFamilyBase":"Segoe 
UI","fontStyleBase":"NORMAL","fontWeightBase":"400","fontWeightLight":"300","fontWeightNormal":"400","fontWeightMd":"500","fontWeightBold":"700","letterSpacingSm":"normal","letterSpacingXs":"normal","lineHeightBase":"1.5","fontSizeBase":"16px","fontSizeXxs":"11px","fontSizeXs":"12px","fontSizeSm":"14px","fontSizeLg":"20px","fontSizeXl":"24px","smallFontSize":"14px","customFonts":[{"source":"SERVER","name":"Segoe UI","styles":[{"style":"NORMAL","weight":"400","__typename":"FontStyleData"},{"style":"NORMAL","weight":"300","__typename":"FontStyleData"},{"style":"NORMAL","weight":"600","__typename":"FontStyleData"},{"style":"NORMAL","weight":"700","__typename":"FontStyleData"},{"style":"ITALIC","weight":"400","__typename":"FontStyleData"}],"assetNames":["SegoeUI-normal-400.woff2","SegoeUI-normal-300.woff2","SegoeUI-normal-600.woff2","SegoeUI-normal-700.woff2","SegoeUI-italic-400.woff2"],"__typename":"CustomFont"},{"source":"SERVER","name":"MWF Fluent Icons","styles":[{"style":"NORMAL","weight":"400","__typename":"FontStyleData"}],"assetNames":["MWFFluentIcons-normal-400.woff2"],"__typename":"CustomFont"}],"__typename":"TypographyThemeSettings"},"unstyledListItem":{"marginBottomSm":"5px","marginBottomMd":"10px","marginBottomLg":"15px","marginBottomXl":"20px","marginBottomXxl":"25px","__typename":"UnstyledListItemThemeSettings"},"yiq":{"light":"#ffffff","dark":"#000000","__typename":"YiqThemeSettings"},"colorLightness":{"primaryDark":0.36,"primaryLight":0.74,"primaryLighter":0.89,"primaryLightest":0.95,"infoDark":0.39,"infoLight":0.72,"infoLighter":0.85,"infoLightest":0.93,"successDark":0.24,"successLight":0.62,"successLighter":0.8,"successLightest":0.91,"warningDark":0.39,"warningLight":0.68,"warningLighter":0.84,"warningLightest":0.93,"dangerDark":0.41,"dangerLight":0.72,"dangerLighter":0.89,"dangerLightest":0.95,"__typename":"ColorLightnessThemeSettings"},"localOverride":false,"__typename":"Theme"},"localOverride":false},"CachedAsset:text:en_US-components/common/EmailV
erification-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/common/EmailVerification-1743095130000","value":{"email.verification.title":"Email Verification Required","email.verification.message.update.email":"To participate in the community, you must first verify your email address. The verification email was sent to {email}. To change your email, visit My Settings.","email.verification.message.resend.email":"To participate in the community, you must first verify your email address. The verification email was sent to {email}. Resend email."},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/common/Loading/LoadingDot-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/common/Loading/LoadingDot-1743095130000","value":{"title":"Loading..."},"localOverride":false},"CachedAsset:quilt:o365.prod:pages/blogs/BlogMessagePage:board:FastTrackforAzureBlog-1743770136065":{"__typename":"CachedAsset","id":"quilt:o365.prod:pages/blogs/BlogMessagePage:board:FastTrackforAzureBlog-1743770136065","value":{"id":"BlogMessagePage","container":{"id":"Common","headerProps":{"backgroundImageProps":null,"backgroundColor":null,"addComponents":null,"removeComponents":["community.widget.bannerWidget"],"componentOrder":null,"__typename":"QuiltContainerSectionProps"},"headerComponentProps":{"community.widget.breadcrumbWidget":{"disableLastCrumbForDesktop":false}},"footerProps":null,"footerComponentProps":null,"items":[{"id":"blog-article","layout":"ONE_COLUMN","bgColor":null,"showTitle":null,"showDescription":null,"textPosition":null,"textColor":null,"sectionEditLevel":"LOCKED","bgImage":null,"disableSpacing":null,"edgeToEdgeDisplay":null,"fullHeight":null,"showBorder":null,"__typename":"OneColumnQuiltSection","columnMap":{"main":[{"id":"blogs.widget.blogArticleWidget","className":"lia-blog-container","props":null,"__typename":"QuiltComponent"}],"__typename":"OneSectionColumns"}},{"id":"section-1729184836777","layout":"M
AIN_SIDE","bgColor":"transparent","showTitle":false,"showDescription":false,"textPosition":"CENTER","textColor":"var(--lia-bs-body-color)","sectionEditLevel":null,"bgImage":null,"disableSpacing":null,"edgeToEdgeDisplay":null,"fullHeight":null,"showBorder":null,"__typename":"MainSideQuiltSection","columnMap":{"main":[],"side":[{"id":"custom.widget.Social_Sharing","className":null,"props":{"widgetVisibility":"signedInOrAnonymous","useTitle":true,"useBackground":true,"title":"Share","lazyLoad":false},"__typename":"QuiltComponent"}],"__typename":"MainSideSectionColumns"}}],"__typename":"QuiltContainer"},"__typename":"Quilt","localOverride":false},"localOverride":false},"CachedAsset:text:en_US-pages/blogs/BlogMessagePage-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-pages/blogs/BlogMessagePage-1743095130000","value":{"title":"{contextMessageSubject} | {communityTitle}","errorMissing":"This blog post cannot be found","name":"Blog Message Page","section.blog-article.title":"Blog Post","archivedMessageTitle":"This Content Has Been Archived","section.section-1729184836777.title":"","section.section-1729184836777.description":"","section.CncIde.title":"Blog 
Post","section.tifEmD.description":"","section.tifEmD.title":""},"localOverride":false},"CachedAsset:quiltWrapper:o365.prod:Common:1743769928957":{"__typename":"CachedAsset","id":"quiltWrapper:o365.prod:Common:1743769928957","value":{"id":"Common","header":{"backgroundImageProps":{"assetName":null,"backgroundSize":"COVER","backgroundRepeat":"NO_REPEAT","backgroundPosition":"CENTER_CENTER","lastModified":null,"__typename":"BackgroundImageProps"},"backgroundColor":"transparent","items":[{"id":"community.widget.navbarWidget","props":{"showUserName":true,"showRegisterLink":true,"useIconLanguagePicker":true,"useLabelLanguagePicker":true,"className":"QuiltComponent_lia-component-edit-mode__0nCcm","links":{"sideLinks":[],"mainLinks":[{"children":[],"linkType":"INTERNAL","id":"gxcuf89792","params":{},"routeName":"CommunityPage"},{"children":[],"linkType":"EXTERNAL","id":"external-link","url":"/Directory","target":"SELF"},{"children":[{"linkType":"INTERNAL","id":"microsoft365","params":{"categoryId":"microsoft365"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"microsoft-teams","params":{"categoryId":"MicrosoftTeams"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"windows","params":{"categoryId":"Windows"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"microsoft-securityand-compliance","params":{"categoryId":"microsoft-security"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"outlook","params":{"categoryId":"Outlook"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"planner","params":{"categoryId":"Planner"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"windows-server","params":{"categoryId":"Windows-Server"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"azure","params":{"categoryId":"Azure"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"exchange","params":{"categoryId":"Exchange"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"microsoft-endpoint-manager","params":{"categoryId":"micros
oft-endpoint-manager"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"s-q-l-server","params":{"categoryId":"SQL-Server"},"routeName":"CategoryPage"},{"linkType":"EXTERNAL","id":"external-link-2","url":"/Directory","target":"SELF"}],"linkType":"EXTERNAL","id":"communities","url":"/","target":"BLANK"},{"children":[{"linkType":"INTERNAL","id":"education-sector","params":{"categoryId":"EducationSector"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"a-i","params":{"categoryId":"AI"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"i-t-ops-talk","params":{"categoryId":"ITOpsTalk"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"partner-community","params":{"categoryId":"PartnerCommunity"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"microsoft-mechanics","params":{"categoryId":"MicrosoftMechanics"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"healthcare-and-life-sciences","params":{"categoryId":"HealthcareAndLifeSciences"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"public-sector","params":{"categoryId":"PublicSector"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"io-t","params":{"categoryId":"IoT"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"driving-adoption","params":{"categoryId":"DrivingAdoption"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"s-m-b","params":{"categoryId":"SMB"},"routeName":"CategoryPage"},{"linkType":"INTERNAL","id":"startupsat-microsoft","params":{"categoryId":"StartupsatMicrosoft"},"routeName":"CategoryPage"},{"linkType":"EXTERNAL","id":"external-link-1","url":"/Directory","target":"SELF"}],"linkType":"EXTERNAL","id":"communities-1","url":"/","target":"SELF"},{"children":[],"linkType":"EXTERNAL","id":"external","url":"/Blogs","target":"SELF"},{"children":[],"linkType":"EXTERNAL","id":"external-1","url":"/Events","target":"SELF"},{"children":[{"linkType":"INTERNAL","id":"microsoft-learn-1","params":{"categoryId":"MicrosoftLearn"},"routeName":
"CategoryPage"},{"linkType":"INTERNAL","id":"microsoft-learn-blog","params":{"boardId":"MicrosoftLearnBlog","categoryId":"MicrosoftLearn"},"routeName":"BlogBoardPage"},{"linkType":"EXTERNAL","id":"external-10","url":"https://learningroomdirectory.microsoft.com/","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-3","url":"https://docs.microsoft.com/learn/dynamics365/?WT.mc_id=techcom_header-webpage-m365","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-4","url":"https://docs.microsoft.com/learn/m365/?wt.mc_id=techcom_header-webpage-m365","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-5","url":"https://docs.microsoft.com/learn/topics/sci/?wt.mc_id=techcom_header-webpage-m365","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-6","url":"https://docs.microsoft.com/learn/powerplatform/?wt.mc_id=techcom_header-webpage-powerplatform","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-7","url":"https://docs.microsoft.com/learn/github/?wt.mc_id=techcom_header-webpage-github","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-8","url":"https://docs.microsoft.com/learn/teams/?wt.mc_id=techcom_header-webpage-teams","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-9","url":"https://docs.microsoft.com/learn/dotnet/?wt.mc_id=techcom_header-webpage-dotnet","target":"BLANK"},{"linkType":"EXTERNAL","id":"external-2","url":"https://docs.microsoft.com/learn/azure/?WT.mc_id=techcom_header-webpage-m365","target":"BLANK"}],"linkType":"INTERNAL","id":"microsoft-learn","params":{"categoryId":"MicrosoftLearn"},"routeName":"CategoryPage"},{"children":[],"linkType":"INTERNAL","id":"community-info-center","params":{"categoryId":"Community-Info-Center"},"routeName":"CategoryPage"}]},"style":{"boxShadow":"var(--lia-bs-box-shadow-sm)","controllerHighlightColor":"hsla(30, 100%, 
50%)","linkFontWeight":"400","dropdownDividerMarginBottom":"10px","hamburgerBorderHover":"none","linkBoxShadowHover":"none","linkFontSize":"14px","backgroundOpacity":0.8,"controllerBorderRadius":"var(--lia-border-radius-50)","hamburgerBgColor":"transparent","hamburgerColor":"var(--lia-nav-controller-icon-color)","linkTextBorderBottom":"none","brandLogoHeight":"30px","linkBgHoverColor":"transparent","linkLetterSpacing":"normal","collapseMenuDividerOpacity":0.16,"dropdownPaddingBottom":"15px","paddingBottom":"15px","dropdownMenuOffset":"2px","hamburgerBgHoverColor":"transparent","borderBottom":"1px solid var(--lia-bs-border-color)","hamburgerBorder":"none","dropdownPaddingX":"10px","brandMarginRightSm":"10px","linkBoxShadow":"none","collapseMenuDividerBg":"var(--lia-nav-link-color)","linkColor":"var(--lia-bs-body-color)","linkJustifyContent":"flex-start","dropdownPaddingTop":"10px","controllerHighlightTextColor":"var(--lia-yiq-dark)","controllerTextColor":"var(--lia-nav-controller-icon-color)","background":{"imageAssetName":"","color":"var(--lia-bs-white)","size":"COVER","repeat":"NO_REPEAT","position":"CENTER_CENTER","imageLastModified":""},"linkBorderRadius":"var(--lia-bs-border-radius-sm)","linkHoverColor":"var(--lia-bs-body-color)","position":"FIXED","linkBorder":"none","linkTextBorderBottomHover":"2px solid var(--lia-bs-body-color)","brandMarginRight":"30px","hamburgerHoverColor":"var(--lia-nav-controller-icon-color)","linkBorderHover":"none","collapseMenuMarginLeft":"20px","linkFontStyle":"NORMAL","controllerTextHoverColor":"var(--lia-nav-controller-icon-hover-color)","linkPaddingX":"10px","linkPaddingY":"5px","paddingTop":"15px","linkTextTransform":"NONE","dropdownBorderColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 0.08)","controllerBgHoverColor":"hsla(var(--lia-bs-black-h), var(--lia-bs-black-s), var(--lia-bs-black-l), 
0.1)","linkBgColor":"transparent","linkDropdownPaddingX":"var(--lia-nav-link-px)","linkDropdownPaddingY":"9px","controllerIconColor":"var(--lia-bs-body-color)","dropdownDividerMarginTop":"10px","linkGap":"10px","controllerIconHoverColor":"var(--lia-bs-body-color)"},"showSearchIcon":false,"languagePickerStyle":"iconAndLabel"},"__typename":"QuiltComponent"},{"id":"community.widget.breadcrumbWidget","props":{"backgroundColor":"transparent","linkHighlightColor":"var(--lia-bs-primary)","visualEffects":{"showBottomBorder":true},"linkTextColor":"var(--lia-bs-gray-700)"},"__typename":"QuiltComponent"},{"id":"custom.widget.community_banner","props":{"widgetVisibility":"signedInOrAnonymous","useTitle":true,"usePageWidth":false,"useBackground":false,"title":"","lazyLoad":false},"__typename":"QuiltComponent"},{"id":"custom.widget.HeroBanner","props":{"widgetVisibility":"signedInOrAnonymous","usePageWidth":false,"useTitle":true,"cMax_items":3,"useBackground":false,"title":"","lazyLoad":false,"widgetChooser":"custom.widget.HeroBanner"},"__typename":"QuiltComponent"}],"__typename":"QuiltWrapperSection"},"footer":{"backgroundImageProps":{"assetName":null,"backgroundSize":"COVER","backgroundRepeat":"NO_REPEAT","backgroundPosition":"CENTER_CENTER","lastModified":null,"__typename":"BackgroundImageProps"},"backgroundColor":"transparent","items":[{"id":"custom.widget.MicrosoftFooter","props":{"widgetVisibility":"signedInOrAnonymous","useTitle":true,"useBackground":false,"title":"","lazyLoad":false},"__typename":"QuiltComponent"}],"__typename":"QuiltWrapperSection"},"__typename":"QuiltWrapper","localOverride":false},"localOverride":false},"CachedAsset:text:en_US-components/common/ActionFeedback-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/common/ActionFeedback-1743095130000","value":{"joinedGroupHub.title":"Welcome","joinedGroupHub.message":"You are now a member of this group and are subscribed to updates.","groupHubInviteNotFound.title":"Invitation Not 
Found","groupHubInviteNotFound.message":"Sorry, we could not find your invitation to the group. The owner may have canceled the invite.","groupHubNotFound.title":"Group Not Found","groupHubNotFound.message":"The grouphub you tried to join does not exist. It may have been deleted.","existingGroupHubMember.title":"Already Joined","existingGroupHubMember.message":"You are already a member of this group.","accountLocked.title":"Account Locked","accountLocked.message":"Your account has been locked due to multiple failed attempts. Try again in {lockoutTime} minutes.","editedGroupHub.title":"Changes Saved","editedGroupHub.message":"Your group has been updated.","leftGroupHub.title":"Goodbye","leftGroupHub.message":"You are no longer a member of this group and will not receive future updates.","deletedGroupHub.title":"Deleted","deletedGroupHub.message":"The group has been deleted.","groupHubCreated.title":"Group Created","groupHubCreated.message":"{groupHubName} is ready to use","accountClosed.title":"Account Closed","accountClosed.message":"The account has been closed and you will now be redirected to the homepage","resetTokenExpired.title":"Reset Password Link has Expired","resetTokenExpired.message":"Try resetting your password again","invalidUrl.title":"Invalid URL","invalidUrl.message":"The URL you're using is not recognized. Verify your URL and try again.","accountClosedForUser.title":"Account Closed","accountClosedForUser.message":"{userName}'s account is closed","inviteTokenInvalid.title":"Invitation Invalid","inviteTokenInvalid.message":"Your invitation to the community has been canceled or expired.","inviteTokenError.title":"Invitation Verification Failed","inviteTokenError.message":"The url you are utilizing is not recognized. 
Verify your URL and try again","pageNotFound.title":"Access Denied","pageNotFound.message":"You do not have access to this area of the community or it doesn't exist","eventAttending.title":"Responded as Attending","eventAttending.message":"You'll be notified when there's new activity and reminded as the event approaches","eventInterested.title":"Responded as Interested","eventInterested.message":"You'll be notified when there's new activity and reminded as the event approaches","eventNotFound.title":"Event Not Found","eventNotFound.message":"The event you tried to respond to does not exist.","redirectToRelatedPage.title":"Showing Related Content","redirectToRelatedPageForBaseUsers.title":"Showing Related Content","redirectToRelatedPageForBaseUsers.message":"The content you are trying to access is archived","redirectToRelatedPage.message":"The content you are trying to access is archived","relatedUrl.archivalLink.flyoutMessage":"The content you are trying to access is archived View Archived Content"},"localOverride":false},"CachedAsset:component:custom.widget.community_banner-en-1743770170699":{"__typename":"CachedAsset","id":"component:custom.widget.community_banner-en-1743770170699","value":{"component":{"id":"custom.widget.community_banner","template":{"id":"community_banner","markupLanguage":"HANDLEBARS","style":".community-banner {\n a.top-bar.btn {\n top: 0px;\n width: 100%;\n z-index: 999;\n text-align: center;\n left: 0px;\n background: #0068b8;\n color: white;\n padding: 10px 0px;\n display:block;\n box-shadow:none !important;\n border: none !important;\n border-radius: none !important;\n margin: 0px !important;\n font-size:14px;\n }\n}","texts":null,"defaults":{"config":{"applicablePages":[],"description":"community announcement 
text","fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[],"__typename":"ComponentProperties"},"components":[{"id":"custom.widget.community_banner","form":null,"config":null,"props":[],"__typename":"Component"}],"grouping":"CUSTOM","__typename":"ComponentTemplate"},"properties":{"config":{"applicablePages":[],"description":"community announcement text","fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[],"__typename":"ComponentProperties"},"form":null,"__typename":"Component","localOverride":false},"globalCss":{"css":".custom_widget_community_banner_community-banner_1a5zb_1 {\n a.custom_widget_community_banner_top-bar_1a5zb_2.custom_widget_community_banner_btn_1a5zb_2 {\n top: 0;\n width: 100%;\n z-index: 999;\n text-align: center;\n left: 0;\n background: #0068b8;\n color: white;\n padding: 0.625rem 0;\n display:block;\n box-shadow:none !important;\n border: none !important;\n border-radius: none !important;\n margin: 0 !important;\n font-size:0.875rem;\n }\n}","tokens":{"community-banner":"custom_widget_community_banner_community-banner_1a5zb_1","top-bar":"custom_widget_community_banner_top-bar_1a5zb_2","btn":"custom_widget_community_banner_btn_1a5zb_2"}},"form":null},"localOverride":false},"CachedAsset:component:custom.widget.HeroBanner-en-1743770170699":{"__typename":"CachedAsset","id":"component:custom.widget.HeroBanner-en-1743770170699","value":{"component":{"id":"custom.widget.HeroBanner","template":{"id":"HeroBanner","markupLanguage":"REACT","style":null,"texts":{"searchPlaceholderText":"Search this community","followActionText":"Follow","unfollowActionText":"Following","searchOnHoverText":"Please enter your search term(s) and then press return key to complete a search."},"defaults":{"config":{"applicablePages":[],"description":null,"fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[{"id":"max_items","dataType":"NUMBER","list":false,"defaultValue":"3","label":"Max Items","description":"The maximum 
number of items to display in the carousel","possibleValues":null,"control":"INPUT","__typename":"PropDefinition"}],"__typename":"ComponentProperties"},"components":[{"id":"custom.widget.HeroBanner","form":{"fields":[{"id":"widgetChooser","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"title","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useTitle","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useBackground","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"widgetVisibility","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"moreOptions","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"cMax_items","validation":null,"noValidation":null,"dataType":"NUMBER","list":false,"control":"INPUT","defaultValue":"3","label":"Max Items","description":"The maximum number of items to display in the 
carousel","possibleValues":null,"__typename":"FormField"}],"layout":{"rows":[{"id":"widgetChooserGroup","type":"fieldset","as":null,"items":[{"id":"widgetChooser","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"titleGroup","type":"fieldset","as":null,"items":[{"id":"title","className":null,"__typename":"FormFieldRef"},{"id":"useTitle","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"useBackground","type":"fieldset","as":null,"items":[{"id":"useBackground","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"widgetVisibility","type":"fieldset","as":null,"items":[{"id":"widgetVisibility","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"moreOptionsGroup","type":"fieldset","as":null,"items":[{"id":"moreOptions","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"componentPropsGroup","type":"fieldset","as":null,"items":[{"id":"cMax_items","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"}],"actionButtons":null,"className":"custom_widget_HeroBanner_form","formGroupFieldSeparator":"divider","__typename":"FormLayout"},"__typename":"Form"},"config":null,"props":[],"__typename":"Component"}],"grouping":"CUSTOM","__typename":"ComponentTemplate"},"properties":{"config":{"applicablePages":[],"descri
ption":null,"fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[{"id":"max_items","dataType":"NUMBER","list":false,"defaultValue":"3","label":"Max Items","description":"The maximum number of items to display in the carousel","possibleValues":null,"control":"INPUT","__typename":"PropDefinition"}],"__typename":"ComponentProperties"},"form":{"fields":[{"id":"widgetChooser","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"title","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useTitle","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useBackground","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"widgetVisibility","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"moreOptions","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"cMax_items","validation":null,"noValidation":null,"dataType":"NUMBER","list":false,"control":"INPUT","defaultValue":"3","label":"Max Items","description":"The maximum number of items to display in the 
carousel","possibleValues":null,"__typename":"FormField"}],"layout":{"rows":[{"id":"widgetChooserGroup","type":"fieldset","as":null,"items":[{"id":"widgetChooser","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"titleGroup","type":"fieldset","as":null,"items":[{"id":"title","className":null,"__typename":"FormFieldRef"},{"id":"useTitle","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"useBackground","type":"fieldset","as":null,"items":[{"id":"useBackground","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"widgetVisibility","type":"fieldset","as":null,"items":[{"id":"widgetVisibility","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"moreOptionsGroup","type":"fieldset","as":null,"items":[{"id":"moreOptions","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"componentPropsGroup","type":"fieldset","as":null,"items":[{"id":"cMax_items","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"}],"actionButtons":null,"className":"custom_widget_HeroBanner_form","formGroupFieldSeparator":"divider","__typename":"FormLayout"},"__typename":"Form"},"__typename":"Component","localOverride":false},"globalCss":null,"form":{"fields":[{"id":"widgetChooser","validation":null,"noValidation":null,"dataType":"STR
ING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"title","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useTitle","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"useBackground","validation":null,"noValidation":null,"dataType":"BOOLEAN","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"widgetVisibility","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"moreOptions","validation":null,"noValidation":null,"dataType":"STRING","list":null,"control":null,"defaultValue":null,"label":null,"description":null,"possibleValues":null,"__typename":"FormField"},{"id":"cMax_items","validation":null,"noValidation":null,"dataType":"NUMBER","list":false,"control":"INPUT","defaultValue":"3","label":"Max Items","description":"The maximum number of items to display in the 
carousel","possibleValues":null,"__typename":"FormField"}],"layout":{"rows":[{"id":"widgetChooserGroup","type":"fieldset","as":null,"items":[{"id":"widgetChooser","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"titleGroup","type":"fieldset","as":null,"items":[{"id":"title","className":null,"__typename":"FormFieldRef"},{"id":"useTitle","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"useBackground","type":"fieldset","as":null,"items":[{"id":"useBackground","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"widgetVisibility","type":"fieldset","as":null,"items":[{"id":"widgetVisibility","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"moreOptionsGroup","type":"fieldset","as":null,"items":[{"id":"moreOptions","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"},{"id":"componentPropsGroup","type":"fieldset","as":null,"items":[{"id":"cMax_items","className":null,"__typename":"FormFieldRef"}],"props":null,"legend":null,"description":null,"className":null,"viewVariant":null,"toggleState":null,"__typename":"FormFieldset"}],"actionButtons":null,"className":"custom_widget_HeroBanner_form","formGroupFieldSeparator":"divider","__typename":"FormLayout"},"__typename":"Form"}},"localOverride":false},"CachedAsset:component:custom.widget.Social_Sharing-en-1743770170699":{"__typename":"CachedAsset","id":"component:custom.widget.Social
_Sharing-en-1743770170699","value":{"component":{"id":"custom.widget.Social_Sharing","template":{"id":"Social_Sharing","markupLanguage":"HANDLEBARS","style":".social-share {\n .sharing-options {\n position: relative;\n margin: 0;\n padding: 0;\n line-height: 10px;\n display: flex;\n justify-content: left;\n gap: 5px;\n list-style-type: none;\n li {\n text-align: left;\n a {\n min-width: 30px;\n min-height: 30px;\n display: block;\n padding: 1px;\n .social-share-linkedin {\n img {\n background-color: rgb(0, 119, 181);\n }\n }\n .social-share-facebook {\n img {\n background-color: rgb(59, 89, 152);\n }\n }\n .social-share-x {\n img {\n background-color: rgb(0, 0, 0);\n }\n }\n .social-share-rss {\n img {\n background-color: rgb(0, 0, 0);\n }\n }\n .social-share-reddit {\n img {\n background-color: rgb(255, 69, 0);\n }\n }\n .social-share-email {\n img {\n background-color: rgb(132, 132, 132);\n }\n }\n }\n a {\n img {\n height: 2rem;\n }\n }\n }\n }\n}\n","texts":null,"defaults":{"config":{"applicablePages":[],"description":"Adds buttons to share to various social media websites","fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[],"__typename":"ComponentProperties"},"components":[{"id":"custom.widget.Social_Sharing","form":null,"config":null,"props":[],"__typename":"Component"}],"grouping":"CUSTOM","__typename":"ComponentTemplate"},"properties":{"config":{"applicablePages":[],"description":"Adds buttons to share to various social media websites","fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[],"__typename":"ComponentProperties"},"form":null,"__typename":"Component","localOverride":false},"globalCss":{"css":".custom_widget_Social_Sharing_social-share_c7xxz_1 {\n .custom_widget_Social_Sharing_sharing-options_c7xxz_2 {\n position: relative;\n margin: 0;\n padding: 0;\n line-height: 0.625rem;\n display: flex;\n justify-content: left;\n gap: 0.3125rem;\n list-style-type: none;\n li {\n text-align: left;\n a {\n min-width: 
1.875rem;\n min-height: 1.875rem;\n display: block;\n padding: 0.0625rem;\n .custom_widget_Social_Sharing_social-share-linkedin_c7xxz_18 {\n img {\n background-color: rgb(0, 119, 181);\n }\n }\n .custom_widget_Social_Sharing_social-share-facebook_c7xxz_23 {\n img {\n background-color: rgb(59, 89, 152);\n }\n }\n .custom_widget_Social_Sharing_social-share-x_c7xxz_28 {\n img {\n background-color: rgb(0, 0, 0);\n }\n }\n .custom_widget_Social_Sharing_social-share-rss_c7xxz_33 {\n img {\n background-color: rgb(0, 0, 0);\n }\n }\n .custom_widget_Social_Sharing_social-share-reddit_c7xxz_38 {\n img {\n background-color: rgb(255, 69, 0);\n }\n }\n .custom_widget_Social_Sharing_social-share-email_c7xxz_43 {\n img {\n background-color: rgb(132, 132, 132);\n }\n }\n }\n a {\n img {\n height: 2rem;\n }\n }\n }\n }\n}\n","tokens":{"social-share":"custom_widget_Social_Sharing_social-share_c7xxz_1","sharing-options":"custom_widget_Social_Sharing_sharing-options_c7xxz_2","social-share-linkedin":"custom_widget_Social_Sharing_social-share-linkedin_c7xxz_18","social-share-facebook":"custom_widget_Social_Sharing_social-share-facebook_c7xxz_23","social-share-x":"custom_widget_Social_Sharing_social-share-x_c7xxz_28","social-share-rss":"custom_widget_Social_Sharing_social-share-rss_c7xxz_33","social-share-reddit":"custom_widget_Social_Sharing_social-share-reddit_c7xxz_38","social-share-email":"custom_widget_Social_Sharing_social-share-email_c7xxz_43"}},"form":null},"localOverride":false},"CachedAsset:component:custom.widget.MicrosoftFooter-en-1743770170699":{"__typename":"CachedAsset","id":"component:custom.widget.MicrosoftFooter-en-1743770170699","value":{"component":{"id":"custom.widget.MicrosoftFooter","template":{"id":"MicrosoftFooter","markupLanguage":"HANDLEBARS","style":".context-uhf {\n min-width: 280px;\n font-size: 15px;\n box-sizing: border-box;\n -ms-text-size-adjust: 100%;\n -webkit-text-size-adjust: 100%;\n & *,\n & *:before,\n & *:after {\n box-sizing: inherit;\n }\n 
a.c-uhff-link {\n color: #616161;\n word-break: break-word;\n text-decoration: none;\n }\n &a:link,\n &a:focus,\n &a:hover,\n &a:active,\n &a:visited {\n text-decoration: none;\n color: inherit;\n }\n & div {\n font-family: 'Segoe UI', SegoeUI, 'Helvetica Neue', Helvetica, Arial, sans-serif;\n }\n}\n.c-uhff {\n background: #f2f2f2;\n margin: -1.5625;\n width: auto;\n height: auto;\n}\n.c-uhff-nav {\n margin: 0 auto;\n max-width: calc(1600px + 10%);\n padding: 0 5%;\n box-sizing: inherit;\n &:before,\n &:after {\n content: ' ';\n display: table;\n clear: left;\n }\n @media only screen and (max-width: 1083px) {\n padding-left: 12px;\n }\n .c-heading-4 {\n color: #616161;\n word-break: break-word;\n font-size: 15px;\n line-height: 20px;\n padding: 36px 0 4px;\n font-weight: 600;\n }\n .c-uhff-nav-row {\n .c-uhff-nav-group {\n display: block;\n float: left;\n min-height: 1px;\n vertical-align: text-top;\n padding: 0 12px;\n width: 100%;\n zoom: 1;\n &:first-child {\n padding-left: 0;\n @media only screen and (max-width: 1083px) {\n padding-left: 12px;\n }\n }\n @media only screen and (min-width: 540px) and (max-width: 1082px) {\n width: 33.33333%;\n }\n @media only screen and (min-width: 1083px) {\n width: 16.6666666667%;\n }\n ul.c-list.f-bare {\n font-size: 11px;\n line-height: 16px;\n margin-top: 0;\n margin-bottom: 0;\n padding-left: 0;\n list-style-type: none;\n li {\n word-break: break-word;\n padding: 8px 0;\n margin: 0;\n }\n }\n }\n }\n}\n.c-uhff-base {\n background: #f2f2f2;\n margin: 0 auto;\n max-width: calc(1600px + 10%);\n padding: 30px 5% 16px;\n &:before,\n &:after {\n content: ' ';\n display: table;\n }\n &:after {\n clear: both;\n }\n a.c-uhff-ccpa {\n font-size: 11px;\n line-height: 16px;\n float: left;\n margin: 3px 0;\n }\n a.c-uhff-ccpa:hover {\n text-decoration: underline;\n }\n ul.c-list {\n font-size: 11px;\n line-height: 16px;\n float: right;\n margin: 3px 0;\n color: #616161;\n li {\n padding: 0 24px 4px 0;\n display: inline-block;\n }\n }\n 
.c-list.f-bare {\n padding-left: 0;\n list-style-type: none;\n }\n @media only screen and (max-width: 1083px) {\n display: flex;\n flex-wrap: wrap;\n padding: 30px 24px 16px;\n }\n}\n","texts":{"New tab":"What's New","New 1":"Surface Laptop Studio 2","New 2":"Surface Laptop Go 3","New 3":"Surface Pro 9","New 4":"Surface Laptop 5","New 5":"Surface Studio 2+","New 6":"Copilot in Windows","New 7":"Microsoft 365","New 8":"Windows 11 apps","Store tab":"Microsoft Store","Store 1":"Account Profile","Store 2":"Download Center","Store 3":"Microsoft Store Support","Store 4":"Returns","Store 5":"Order tracking","Store 6":"Certified Refurbished","Store 7":"Microsoft Store Promise","Store 8":"Flexible Payments","Education tab":"Education","Edu 1":"Microsoft in education","Edu 2":"Devices for education","Edu 3":"Microsoft Teams for Education","Edu 4":"Microsoft 365 Education","Edu 5":"How to buy for your school","Edu 6":"Educator Training and development","Edu 7":"Deals for students and parents","Edu 8":"Azure for students","Business tab":"Business","Bus 1":"Microsoft Cloud","Bus 2":"Microsoft Security","Bus 3":"Dynamics 365","Bus 4":"Microsoft 365","Bus 5":"Microsoft Power Platform","Bus 6":"Microsoft Teams","Bus 7":"Microsoft Industry","Bus 8":"Small Business","Developer tab":"Developer & IT","Dev 1":"Azure","Dev 2":"Developer Center","Dev 3":"Documentation","Dev 4":"Microsoft Learn","Dev 5":"Microsoft Tech Community","Dev 6":"Azure Marketplace","Dev 7":"AppSource","Dev 8":"Visual Studio","Company tab":"Company","Com 1":"Careers","Com 2":"About Microsoft","Com 3":"Company News","Com 4":"Privacy at Microsoft","Com 5":"Investors","Com 6":"Diversity and inclusion","Com 7":"Accessiblity","Com 8":"Sustainibility"},"defaults":{"config":{"applicablePages":[],"description":"The Microsoft 
Footer","fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[],"__typename":"ComponentProperties"},"components":[{"id":"custom.widget.MicrosoftFooter","form":null,"config":null,"props":[],"__typename":"Component"}],"grouping":"CUSTOM","__typename":"ComponentTemplate"},"properties":{"config":{"applicablePages":[],"description":"The Microsoft Footer","fetchedContent":null,"__typename":"ComponentConfiguration"},"props":[],"__typename":"ComponentProperties"},"form":null,"__typename":"Component","localOverride":false},"globalCss":{"css":".custom_widget_MicrosoftFooter_context-uhf_f95yq_1 {\n min-width: 17.5rem;\n font-size: 0.9375rem;\n box-sizing: border-box;\n -ms-text-size-adjust: 100%;\n -webkit-text-size-adjust: 100%;\n & *,\n & *:before,\n & *:after {\n box-sizing: inherit;\n }\n a.custom_widget_MicrosoftFooter_c-uhff-link_f95yq_12 {\n color: #616161;\n word-break: break-word;\n text-decoration: none;\n }\n &a:link,\n &a:focus,\n &a:hover,\n &a:active,\n &a:visited {\n text-decoration: none;\n color: inherit;\n }\n & div {\n font-family: 'Segoe UI', SegoeUI, 'Helvetica Neue', Helvetica, Arial, sans-serif;\n }\n}\n.custom_widget_MicrosoftFooter_c-uhff_f95yq_12 {\n background: #f2f2f2;\n margin: -1.5625;\n width: auto;\n height: auto;\n}\n.custom_widget_MicrosoftFooter_c-uhff-nav_f95yq_35 {\n margin: 0 auto;\n max-width: calc(100rem + 10%);\n padding: 0 5%;\n box-sizing: inherit;\n &:before,\n &:after {\n content: ' ';\n display: table;\n clear: left;\n }\n @media only screen and (max-width: 1083px) {\n padding-left: 0.75rem;\n }\n .custom_widget_MicrosoftFooter_c-heading-4_f95yq_49 {\n color: #616161;\n word-break: break-word;\n font-size: 0.9375rem;\n line-height: 1.25rem;\n padding: 2.25rem 0 0.25rem;\n font-weight: 600;\n }\n .custom_widget_MicrosoftFooter_c-uhff-nav-row_f95yq_57 {\n .custom_widget_MicrosoftFooter_c-uhff-nav-group_f95yq_58 {\n display: block;\n float: left;\n min-height: 0.0625rem;\n vertical-align: text-top;\n padding: 0 
0.75rem;\n width: 100%;\n zoom: 1;\n &:first-child {\n padding-left: 0;\n @media only screen and (max-width: 1083px) {\n padding-left: 0.75rem;\n }\n }\n @media only screen and (min-width: 540px) and (max-width: 1082px) {\n width: 33.33333%;\n }\n @media only screen and (min-width: 1083px) {\n width: 16.6666666667%;\n }\n ul.custom_widget_MicrosoftFooter_c-list_f95yq_78.custom_widget_MicrosoftFooter_f-bare_f95yq_78 {\n font-size: 0.6875rem;\n line-height: 1rem;\n margin-top: 0;\n margin-bottom: 0;\n padding-left: 0;\n list-style-type: none;\n li {\n word-break: break-word;\n padding: 0.5rem 0;\n margin: 0;\n }\n }\n }\n }\n}\n.custom_widget_MicrosoftFooter_c-uhff-base_f95yq_94 {\n background: #f2f2f2;\n margin: 0 auto;\n max-width: calc(100rem + 10%);\n padding: 1.875rem 5% 1rem;\n &:before,\n &:after {\n content: ' ';\n display: table;\n }\n &:after {\n clear: both;\n }\n a.custom_widget_MicrosoftFooter_c-uhff-ccpa_f95yq_107 {\n font-size: 0.6875rem;\n line-height: 1rem;\n float: left;\n margin: 0.1875rem 0;\n }\n a.custom_widget_MicrosoftFooter_c-uhff-ccpa_f95yq_107:hover {\n text-decoration: underline;\n }\n ul.custom_widget_MicrosoftFooter_c-list_f95yq_78 {\n font-size: 0.6875rem;\n line-height: 1rem;\n float: right;\n margin: 0.1875rem 0;\n color: #616161;\n li {\n padding: 0 1.5rem 0.25rem 0;\n display: inline-block;\n }\n }\n .custom_widget_MicrosoftFooter_c-list_f95yq_78.custom_widget_MicrosoftFooter_f-bare_f95yq_78 {\n padding-left: 0;\n list-style-type: none;\n }\n @media only screen and (max-width: 1083px) {\n display: flex;\n flex-wrap: wrap;\n padding: 1.875rem 1.5rem 1rem;\n 
}\n}\n","tokens":{"context-uhf":"custom_widget_MicrosoftFooter_context-uhf_f95yq_1","c-uhff-link":"custom_widget_MicrosoftFooter_c-uhff-link_f95yq_12","c-uhff":"custom_widget_MicrosoftFooter_c-uhff_f95yq_12","c-uhff-nav":"custom_widget_MicrosoftFooter_c-uhff-nav_f95yq_35","c-heading-4":"custom_widget_MicrosoftFooter_c-heading-4_f95yq_49","c-uhff-nav-row":"custom_widget_MicrosoftFooter_c-uhff-nav-row_f95yq_57","c-uhff-nav-group":"custom_widget_MicrosoftFooter_c-uhff-nav-group_f95yq_58","c-list":"custom_widget_MicrosoftFooter_c-list_f95yq_78","f-bare":"custom_widget_MicrosoftFooter_f-bare_f95yq_78","c-uhff-base":"custom_widget_MicrosoftFooter_c-uhff-base_f95yq_94","c-uhff-ccpa":"custom_widget_MicrosoftFooter_c-uhff-ccpa_f95yq_107"}},"form":null},"localOverride":false},"CachedAsset:text:en_US-components/community/Breadcrumb-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/community/Breadcrumb-1743095130000","value":{"navLabel":"Breadcrumbs","dropdown":"Additional parent page navigation"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageBanner-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageBanner-1743095130000","value":{"messageMarkedAsSpam":"This post has been marked as spam","messageMarkedAsSpam@board:TKB":"This article has been marked as spam","messageMarkedAsSpam@board:BLOG":"This post has been marked as spam","messageMarkedAsSpam@board:FORUM":"This discussion has been marked as spam","messageMarkedAsSpam@board:OCCASION":"This event has been marked as spam","messageMarkedAsSpam@board:IDEA":"This idea has been marked as spam","manageSpam":"Manage Spam","messageMarkedAsAbuse":"This post has been marked as abuse","messageMarkedAsAbuse@board:TKB":"This article has been marked as abuse","messageMarkedAsAbuse@board:BLOG":"This post has been marked as abuse","messageMarkedAsAbuse@board:FORUM":"This discussion has been marked as abuse","messageMarkedAsAbuse@board:OCCASION":"This event 
has been marked as abuse","messageMarkedAsAbuse@board:IDEA":"This idea has been marked as abuse","preModCommentAuthorText":"This comment will be published as soon as it is approved","preModCommentModeratorText":"This comment is awaiting moderation","messageMarkedAsOther":"This post has been rejected due to other reasons","messageMarkedAsOther@board:TKB":"This article has been rejected due to other reasons","messageMarkedAsOther@board:BLOG":"This post has been rejected due to other reasons","messageMarkedAsOther@board:FORUM":"This discussion has been rejected due to other reasons","messageMarkedAsOther@board:OCCASION":"This event has been rejected due to other reasons","messageMarkedAsOther@board:IDEA":"This idea has been rejected due to other reasons","messageArchived":"This post was archived on {date}","relatedUrl":"View Related Content","relatedContentText":"Showing related content","archivedContentLink":"View Archived Content"},"localOverride":false},"Category:category:Exchange":{"__typename":"Category","id":"category:Exchange","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Planner":{"__typename":"Category","id":"category:Planner","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Outlook":{"__typename":"Category","id":"category:Outlook","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Community-Info-Center":{"__typename":"Category","id":"category:Community-Info-Center","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:EducationSector":{"__typename":"Category","id":"category:EducationSector","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Categ
ory:category:DrivingAdoption":{"__typename":"Category","id":"category:DrivingAdoption","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Azure":{"__typename":"Category","id":"category:Azure","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Windows-Server":{"__typename":"Category","id":"category:Windows-Server","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:SQL-Server":{"__typename":"Category","id":"category:SQL-Server","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:MicrosoftTeams":{"__typename":"Category","id":"category:MicrosoftTeams","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:PublicSector":{"__typename":"Category","id":"category:PublicSector","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:microsoft365":{"__typename":"Category","id":"category:microsoft365","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:IoT":{"__typename":"Category","id":"category:IoT","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:HealthcareAndLifeSciences":{"__typename":"Category","id":"category:HealthcareAndLifeSciences","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:SMB":{"__typename":"Category","id":"category:SMB","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename"
:"PolicyResult","failureReason":null}}},"Category:category:ITOpsTalk":{"__typename":"Category","id":"category:ITOpsTalk","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:microsoft-endpoint-manager":{"__typename":"Category","id":"category:microsoft-endpoint-manager","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:MicrosoftLearn":{"__typename":"Category","id":"category:MicrosoftLearn","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Blog:board:MicrosoftLearnBlog":{"__typename":"Blog","id":"board:MicrosoftLearnBlog","blogPolicies":{"__typename":"BlogPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}},"boardPolicies":{"__typename":"BoardPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:AI":{"__typename":"Category","id":"category:AI","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:MicrosoftMechanics":{"__typename":"Category","id":"category:MicrosoftMechanics","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:StartupsatMicrosoft":{"__typename":"Category","id":"category:StartupsatMicrosoft","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:PartnerCommunity":{"__typename":"Category","id":"category:PartnerCommunity","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"Category:category:Windows":{"__typename":"Category","id":"category:Windows","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult
","failureReason":null}}},"Category:category:microsoft-security":{"__typename":"Category","id":"category:microsoft-security","categoryPolicies":{"__typename":"CategoryPolicies","canReadNode":{"__typename":"PolicyResult","failureReason":null}}},"QueryVariables:TopicReplyList:message:3885602:8":{"__typename":"QueryVariables","id":"TopicReplyList:message:3885602:8","value":{"id":"message:3885602","first":10,"sorts":{"postTime":{"direction":"DESC"}},"repliesFirst":3,"repliesFirstDepthThree":1,"repliesSorts":{"postTime":{"direction":"DESC"}},"useAvatar":true,"useAuthorLogin":true,"useAuthorRank":true,"useBody":true,"useKudosCount":true,"useTimeToRead":false,"useMedia":false,"useReadOnlyIcon":false,"useRepliesCount":true,"useSearchSnippet":false,"useAcceptedSolutionButton":false,"useSolvedBadge":false,"useAttachments":false,"attachmentsFirst":5,"useTags":true,"useNodeAncestors":false,"useUserHoverCard":false,"useNodeHoverCard":false,"useModerationStatus":true,"usePreviewSubjectModal":false,"useMessageStatus":true}},"ROOT_MUTATION":{"__typename":"Mutation"},"CachedAsset:text:en_US-components/community/Navbar-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/community/Navbar-1743095130000","value":{"community":"Community Home","inbox":"Inbox","manageContent":"Manage Content","tos":"Terms of Service","forgotPassword":"Forgot Password","themeEditor":"Theme Editor","edit":"Edit Navigation Bar","skipContent":"Skip to content","gxcuf89792":"Tech Community","external-1":"Events","s-m-b":"Small and Medium Businesses","windows-server":"Windows Server","education-sector":"Education Sector","driving-adoption":"Driving Adoption","microsoft-learn":"Microsoft Learn","s-q-l-server":"SQL Server","partner-community":"Microsoft Partner Community","microsoft365":"Microsoft 365","external-9":".NET","external-8":"Teams","external-7":"Github","products-services":"Products","external-6":"Power Platform","communities-1":"Topics","external-5":"Microsoft 
Security","planner":"Planner","external-4":"Microsoft 365","external-3":"Dynamics 365","azure":"Azure","healthcare-and-life-sciences":"Healthcare and Life Sciences","external-2":"Azure","microsoft-mechanics":"Microsoft Mechanics","microsoft-learn-1":"Community","external-10":"Learning Room Directory","microsoft-learn-blog":"Blog","windows":"Windows","i-t-ops-talk":"ITOps Talk","external-link-1":"View All","microsoft-securityand-compliance":"Microsoft Security","public-sector":"Public Sector","community-info-center":"Lounge","external-link-2":"View All","microsoft-teams":"Microsoft Teams","external":"Blogs","microsoft-endpoint-manager":"Microsoft Intune and Configuration Manager","startupsat-microsoft":"Startups at Microsoft","exchange":"Exchange","a-i":"AI and Machine Learning","io-t":"Internet of Things (IoT)","outlook":"Outlook","external-link":"Community Hubs","communities":"Products"},"localOverride":false},"CachedAsset:text:en_US-components/community/NavbarHamburgerDropdown-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/community/NavbarHamburgerDropdown-1743095130000","value":{"hamburgerLabel":"Side Menu"},"localOverride":false},"CachedAsset:text:en_US-components/community/BrandLogo-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/community/BrandLogo-1743095130000","value":{"logoAlt":"Khoros","themeLogoAlt":"Brand Logo"},"localOverride":false},"CachedAsset:text:en_US-components/community/NavbarTextLinks-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/community/NavbarTextLinks-1743095130000","value":{"more":"More"},"localOverride":false},"CachedAsset:text:en_US-components/authentication/AuthenticationLink-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/authentication/AuthenticationLink-1743095130000","value":{"title.login":"Sign In","title.registration":"Register","title.forgotPassword":"Forgot Password","title.multiAuthLogin":"Sign 
In"},"localOverride":false},"CachedAsset:text:en_US-components/nodes/NodeLink-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/nodes/NodeLink-1743095130000","value":{"place":"Place {name}"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageView/MessageViewStandard-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageView/MessageViewStandard-1743095130000","value":{"anonymous":"Anonymous","author":"{messageAuthorLogin}","authorBy":"{messageAuthorLogin}","board":"{messageBoardTitle}","replyToUser":" to {parentAuthor}","showMoreReplies":"Show More","replyText":"Reply","repliesText":"Replies","markedAsSolved":"Marked as Solved","movedMessagePlaceholder.BLOG":"{count, plural, =0 {This comment has been} other {These comments have been} }","movedMessagePlaceholder.TKB":"{count, plural, =0 {This comment has been} other {These comments have been} }","movedMessagePlaceholder.FORUM":"{count, plural, =0 {This reply has been} other {These replies have been} }","movedMessagePlaceholder.IDEA":"{count, plural, =0 {This comment has been} other {These comments have been} }","movedMessagePlaceholder.OCCASION":"{count, plural, =0 {This comment has been} other {These comments have been} }","movedMessagePlaceholderUrlText":"moved.","messageStatus":"Status: ","statusChanged":"Status changed: {previousStatus} to {currentStatus}","statusAdded":"Status added: {status}","statusRemoved":"Status removed: {status}","labelExpand":"expand replies","labelCollapse":"collapse replies","unhelpfulReason.reason1":"Content is outdated","unhelpfulReason.reason2":"Article is missing information","unhelpfulReason.reason3":"Content is for a different Product","unhelpfulReason.reason4":"Doesn't match what I was searching 
for"},"localOverride":false},"CachedAsset:text:en_US-components/messages/ThreadedReplyList-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/messages/ThreadedReplyList-1743095130000","value":{"title":"{count, plural, one{# Reply} other{# Replies}}","title@board:BLOG":"{count, plural, one{# Comment} other{# Comments}}","title@board:TKB":"{count, plural, one{# Comment} other{# Comments}}","title@board:IDEA":"{count, plural, one{# Comment} other{# Comments}}","title@board:OCCASION":"{count, plural, one{# Comment} other{# Comments}}","noRepliesTitle":"No Replies","noRepliesTitle@board:BLOG":"No Comments","noRepliesTitle@board:TKB":"No Comments","noRepliesTitle@board:IDEA":"No Comments","noRepliesTitle@board:OCCASION":"No Comments","noRepliesDescription":"Be the first to reply","noRepliesDescription@board:BLOG":"Be the first to comment","noRepliesDescription@board:TKB":"Be the first to comment","noRepliesDescription@board:IDEA":"Be the first to comment","noRepliesDescription@board:OCCASION":"Be the first to comment","messageReadOnlyAlert:BLOG":"Comments have been turned off for this post","messageReadOnlyAlert:TKB":"Comments have been turned off for this article","messageReadOnlyAlert:IDEA":"Comments have been turned off for this idea","messageReadOnlyAlert:FORUM":"Replies have been turned off for this discussion","messageReadOnlyAlert:OCCASION":"Comments have been turned off for this event"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageReplyCallToAction-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageReplyCallToAction-1743095130000","value":{"leaveReply":"Leave a reply...","leaveReply@board:BLOG@message:root":"Leave a comment...","leaveReply@board:TKB@message:root":"Leave a comment...","leaveReply@board:IDEA@message:root":"Leave a comment...","leaveReply@board:OCCASION@message:root":"Leave a comment...","repliesTurnedOff.FORUM":"Replies are turned off for this 
topic","repliesTurnedOff.BLOG":"Comments are turned off for this topic","repliesTurnedOff.TKB":"Comments are turned off for this topic","repliesTurnedOff.IDEA":"Comments are turned off for this topic","repliesTurnedOff.OCCASION":"Comments are turned off for this topic","infoText":"Stop poking me!"},"localOverride":false},"Rank:rank:37":{"__typename":"Rank","id":"rank:37","position":18,"name":"Copper Contributor","color":"333333","icon":null,"rankStyle":"TEXT"},"User:user:1460574":{"__typename":"User","id":"user:1460574","uid":1460574,"login":"markojotic","biography":null,"registrationData":{"__typename":"RegistrationData","status":null,"registrationTime":"2022-07-19T12:45:38.199-07:00"},"deleted":false,"email":"","avatar":{"__typename":"UserAvatar","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/m_assets/avatars/default/avatar-9.svg?time=0"},"rank":{"__ref":"Rank:rank:37"},"entityType":"USER","eventPath":"community:gxcuf89792/user:1460574"},"ModerationData:moderation_data:4082983":{"__typename":"ModerationData","id":"moderation_data:4082983","status":"APPROVED","rejectReason":null,"isReportedAbuse":false,"rejectUser":null,"rejectTime":null,"rejectActorType":null},"BlogReplyMessage:message:4082983":{"__typename":"BlogReplyMessage","author":{"__ref":"User:user:1460574"},"id":"message:4082983","revisionNum":1,"uid":4082983,"depth":1,"hasGivenKudo":false,"subscribed":false,"board":{"__ref":"Blog:board:FastTrackforAzureBlog"},"parent":{"__ref":"BlogTopicMessage:message:3885602"},"conversation":{"__ref":"Conversation:conversation:3885602"},"subject":"Re: Create an Azure OpenAI, LangChain, ChromaDB, and Chainlit Chat App in Container Apps using Terra","moderationData":{"__ref":"ModerationData:moderation_data:4082983"},"body":"

Sounds good, let me work out the steps and I'll create that PR. 

","body@stripHtml({\"removeProcessingText\":false,\"removeSpoilerMarkup\":false,\"removeTocMarkup\":false,\"truncateLength\":200})@stringLength":"71","kudosSumWeight":0,"repliesCount":0,"postTime":"2024-03-12T10:53:14.732-07:00","lastPublishTime":"2024-03-12T10:53:14.732-07:00","metrics":{"__typename":"MessageMetrics","views":14698},"visibilityScope":"PUBLIC","placeholder":false,"originalMessageForPlaceholder":null,"entityType":"BLOG_REPLY","eventPath":"category:FastTrack/category:products-services/category:communities/community:gxcuf89792board:FastTrackforAzureBlog/message:3885602/message:4082983","replies":{"__typename":"MessageConnection","pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null},"edges":[]},"attachments":{"__typename":"AttachmentConnection","edges":[],"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}},"customFields":[]},"ModerationData:moderation_data:4081732":{"__typename":"ModerationData","id":"moderation_data:4081732","status":"APPROVED","rejectReason":null,"isReportedAbuse":false,"rejectUser":null,"rejectTime":null,"rejectActorType":null},"BlogReplyMessage:message:4081732":{"__typename":"BlogReplyMessage","author":{"__ref":"User:user:988334"},"id":"message:4081732","revisionNum":1,"uid":4081732,"depth":1,"hasGivenKudo":false,"subscribed":false,"board":{"__ref":"Blog:board:FastTrackforAzureBlog"},"parent":{"__ref":"BlogTopicMessage:message:3885602"},"conversation":{"__ref":"Conversation:conversation:3885602"},"subject":"Re: Create an Azure OpenAI, LangChain, ChromaDB, and Chainlit Chat App in Container Apps using Terra","moderationData":{"__ref":"ModerationData:moderation_data:4081732"},"body":"

Thanks markojotic, I created the project a few months ago, and in the meantime, some breaking changes may have been introduced. Could you please submit a PR with the suggested changes to the repo? Thanks! You need to manually push the container image to ACR. See the steps documented in the article.

","body@stripHtml({\"removeProcessingText\":false,\"removeSpoilerMarkup\":false,\"removeTocMarkup\":false,\"truncateLength\":200})@stringLength":"208","kudosSumWeight":0,"repliesCount":0,"postTime":"2024-03-11T09:05:32.328-07:00","lastPublishTime":"2024-03-11T09:05:32.328-07:00","metrics":{"__typename":"MessageMetrics","views":14834},"visibilityScope":"PUBLIC","placeholder":false,"originalMessageForPlaceholder":null,"entityType":"BLOG_REPLY","eventPath":"category:FastTrack/category:products-services/category:communities/community:gxcuf89792board:FastTrackforAzureBlog/message:3885602/message:4081732","replies":{"__typename":"MessageConnection","pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null},"edges":[]},"attachments":{"__typename":"AttachmentConnection","edges":[],"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}},"customFields":[]},"ModerationData:moderation_data:4081684":{"__typename":"ModerationData","id":"moderation_data:4081684","status":"APPROVED","rejectReason":null,"isReportedAbuse":false,"rejectUser":null,"rejectTime":null,"rejectActorType":null},"BlogReplyMessage:message:4081684":{"__typename":"BlogReplyMessage","author":{"__ref":"User:user:1460574"},"id":"message:4081684","revisionNum":2,"uid":4081684,"depth":1,"hasGivenKudo":false,"subscribed":false,"board":{"__ref":"Blog:board:FastTrackforAzureBlog"},"parent":{"__ref":"BlogTopicMessage:message:3885602"},"conversation":{"__ref":"Conversation:conversation:3885602"},"subject":"Re: Create an Azure OpenAI, LangChain, ChromaDB, and Chainlit Chat App in Container Apps using Terra","moderationData":{"__ref":"ModerationData:moderation_data:4081684"},"body":"

Hi paolosalvatori - Thanks for the write up. 

I wanted to document some of the issues when deploying the code above:

 

1) on the infra layer - Error: creating Monitor Diagnostics Setting \"DiagnosticsSettings\" - Diagnostic settings does not support retention for new diagnostic settings.\"}

2) on the app layer - revision_suffix must not be set during deployment

 

3) app layer,  Error: creating Container App
โ”‚ Container App Name: \"chatapp\"): performing CreateOrUpdate: unexpected status 400 with error: InvalidParameterValueInContainerTemplate: The following field(s)
are either invalid or missing. Field 'template.containers.chat.image' is invalid with details: 'Invalid value: \"mjauregistry.azurecr.io/chat:v1\": GET https:: MANIFEST_UNKNOWN: manifest tagged by \"v1\" is not found; map[Tag:v1]';.


I understand that this means the image is missing from the ACR, but I don't understand if I'm missing a step or where is the image populated from? 

 

Any tips on how to address the 3rd error? 

","body@stripHtml({\"removeProcessingText\":false,\"removeSpoilerMarkup\":false,\"removeTocMarkup\":false,\"truncateLength\":200})@stringLength":"223","kudosSumWeight":0,"repliesCount":0,"postTime":"2024-03-11T08:28:58.353-07:00","lastPublishTime":"2024-03-11T08:36:11.555-07:00","metrics":{"__typename":"MessageMetrics","views":14814},"visibilityScope":"PUBLIC","placeholder":false,"originalMessageForPlaceholder":null,"entityType":"BLOG_REPLY","eventPath":"category:FastTrack/category:products-services/category:communities/community:gxcuf89792board:FastTrackforAzureBlog/message:3885602/message:4081684","replies":{"__typename":"MessageConnection","pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null},"edges":[]},"attachments":{"__typename":"AttachmentConnection","edges":[],"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}},"customFields":[]},"User:user:2086409":{"__typename":"User","id":"user:2086409","uid":2086409,"login":"yugandhar_Venkatesh","biography":null,"registrationData":{"__typename":"RegistrationData","status":null,"registrationTime":"2023-10-16T22:19:03.150-07:00"},"deleted":false,"email":"","avatar":{"__typename":"UserAvatar","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/m_assets/avatars/default/avatar-12.svg?time=0"},"rank":{"__ref":"Rank:rank:37"},"entityType":"USER","eventPath":"community:gxcuf89792/user:2086409"},"ModerationData:moderation_data:3956486":{"__typename":"ModerationData","id":"moderation_data:3956486","status":"APPROVED","rejectReason":null,"isReportedAbuse":false,"rejectUser":null,"rejectTime":null,"rejectActorType":null},"BlogReplyMessage:message:3956486":{"__typename":"BlogReplyMessage","author":{"__ref":"User:user:2086409"},"id":"message:3956486","revisionNum":1,"uid":3956486,"depth":1,"hasGivenKudo":false,"subscribed":false,"board":{"__ref":"Blog:board:FastTrackforAzureBlog"},"parent":{"__ref":"B
logTopicMessage:message:3885602"},"conversation":{"__ref":"Conversation:conversation:3885602"},"subject":"Re: Create an Azure OpenAI, LangChain, ChromaDB, and Chainlit Chat App in Container Apps using Terra","moderationData":{"__ref":"ModerationData:moderation_data:3956486"},"body":"

Hi @paolosalvatori , Thanks for sharing this very detailed article. 

 

After running the application defaultly i am getting the chainlit logo in the UI. Can we change this logo using the custom logo? Could you please help me that?

","body@stripHtml({\"removeProcessingText\":false,\"removeSpoilerMarkup\":false,\"removeTocMarkup\":false,\"truncateLength\":200})@stringLength":"223","kudosSumWeight":1,"repliesCount":0,"postTime":"2023-10-16T22:22:42.959-07:00","lastPublishTime":"2023-10-16T22:22:42.959-07:00","metrics":{"__typename":"MessageMetrics","views":31479},"visibilityScope":"PUBLIC","placeholder":false,"originalMessageForPlaceholder":null,"entityType":"BLOG_REPLY","eventPath":"category:FastTrack/category:products-services/category:communities/community:gxcuf89792board:FastTrackforAzureBlog/message:3885602/message:3956486","replies":{"__typename":"MessageConnection","pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null},"edges":[]},"attachments":{"__typename":"AttachmentConnection","edges":[],"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}},"customFields":[]},"ModerationData:moderation_data:3913401":{"__typename":"ModerationData","id":"moderation_data:3913401","status":"APPROVED","rejectReason":null,"isReportedAbuse":false,"rejectUser":null,"rejectTime":null,"rejectActorType":null},"BlogReplyMessage:message:3913401":{"__typename":"BlogReplyMessage","author":{"__ref":"User:user:988334"},"id":"message:3913401","revisionNum":1,"uid":3913401,"depth":1,"hasGivenKudo":false,"subscribed":false,"board":{"__ref":"Blog:board:FastTrackforAzureBlog"},"parent":{"__ref":"BlogTopicMessage:message:3885602"},"conversation":{"__ref":"Conversation:conversation:3885602"},"subject":"Re: Create an Azure OpenAI, LangChain, ChromaDB, and Chainlit Chat App in Container Apps using Terra","moderationData":{"__ref":"ModerationData:moderation_data:3913401"},"body":"

Thanks Heman_k 

\n
\n

I'm not an Azure Cognitive Search subject-matter expert, but I can surely affirm that Azure Cognitive Search has strong vector search capabilities that allow you to search and retrieve similar vectors based on their similarity scores. While I do not have direct comparisons to other similar products or other vector databases, such as Chroma or FAISS, Azure Cognitive Search's vector search capabilities are designed to provide efficient and accurate results.

\n
\n
\n
\n
\n
\n

Regarding the HNSW algorithm, Azure Search does support it. The HNSW algorithm is a commonly used algorithm for approximate nearest neighbor search, and it is available in Azure Cognitive Search for vector search scenarios. Please check Add vector search - Azure Cognitive Search | Microsoft Learn.

\n

As for the specific issue you mentioned regarding importing the HnswVectorSearchAlgorithmConfiguration module in Python 3.9, it's possible that the module may not be available or accessible in that specific version. It is recommended to check the documentation or reach out to Microsoft Azure support for further assistance or alternative approaches.

\n

Overall, Azure Search offers robust vector search capabilities, including support for the HNSW algorithm, which can be valuable for creating efficient and accurate search experiences.

\n
\n
\n
\n
\n


P.S. If you found my article and sample interesting and helpful, please like the article and star the GitHub project, thanks ๐Ÿ™‚

","body@stripHtml({\"removeProcessingText\":false,\"removeSpoilerMarkup\":false,\"removeTocMarkup\":false,\"truncateLength\":200})@stringLength":"213","kudosSumWeight":1,"repliesCount":0,"postTime":"2023-08-29T23:59:02.083-07:00","lastPublishTime":"2023-08-29T23:59:02.083-07:00","metrics":{"__typename":"MessageMetrics","views":38318},"visibilityScope":"PUBLIC","placeholder":false,"originalMessageForPlaceholder":null,"entityType":"BLOG_REPLY","eventPath":"category:FastTrack/category:products-services/category:communities/community:gxcuf89792board:FastTrackforAzureBlog/message:3885602/message:3913401","replies":{"__typename":"MessageConnection","pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null},"edges":[]},"attachments":{"__typename":"AttachmentConnection","edges":[],"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}},"customFields":[]},"User:user:2005042":{"__typename":"User","id":"user:2005042","uid":2005042,"login":"Heman_k","biography":null,"registrationData":{"__typename":"RegistrationData","status":null,"registrationTime":"2023-08-29T12:42:16.315-07:00"},"deleted":false,"email":"","avatar":{"__typename":"UserAvatar","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/m_assets/avatars/default/avatar-3.svg?time=0"},"rank":{"__ref":"Rank:rank:37"},"entityType":"USER","eventPath":"community:gxcuf89792/user:2005042"},"ModerationData:moderation_data:3913052":{"__typename":"ModerationData","id":"moderation_data:3913052","status":"APPROVED","rejectReason":null,"isReportedAbuse":false,"rejectUser":null,"rejectTime":null,"rejectActorType":null},"BlogReplyMessage:message:3913052":{"__typename":"BlogReplyMessage","author":{"__ref":"User:user:2005042"},"id":"message:3913052","revisionNum":1,"uid":3913052,"depth":1,"hasGivenKudo":false,"subscribed":false,"board":{"__ref":"Blog:board:FastTrackforAzureBlog"},"parent":{"__ref":"BlogTopicMessa
ge:message:3885602"},"conversation":{"__ref":"Conversation:conversation:3885602"},"subject":"Re: Create an Azure OpenAI, LangChain, ChromaDB, and Chainlit Chat App in Container Apps using Terra","moderationData":{"__ref":"ModerationData:moderation_data:3913052"},"body":"

Hi paolosalvatori , Thanks for sharing this very detailed article. Covers a lot of ground!!

 

On a related topic, can you please comment on the vector search capabilities in Azure Search? How does it compare it with vector search in other similar products?

 

Also, I am specifically interested in the support for HNSW algorithm in in Azure Search. Does Azure Search support HNSW? I have seen sample code where there is a module called HnswVectorSearchAlgorithmConfiguration in the azure.search.documents.indexes.models package, but I am having problems importing this module, at least in Python 3.9.  Any info or suggestions?

 

","body@stripHtml({\"removeProcessingText\":false,\"removeSpoilerMarkup\":false,\"removeTocMarkup\":false,\"truncateLength\":200})@stringLength":"218","kudosSumWeight":0,"repliesCount":0,"postTime":"2023-08-29T13:02:09.628-07:00","lastPublishTime":"2023-08-29T13:02:09.628-07:00","metrics":{"__typename":"MessageMetrics","views":38403},"visibilityScope":"PUBLIC","placeholder":false,"originalMessageForPlaceholder":null,"entityType":"BLOG_REPLY","eventPath":"category:FastTrack/category:products-services/category:communities/community:gxcuf89792board:FastTrackforAzureBlog/message:3885602/message:3913052","replies":{"__typename":"MessageConnection","pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null},"edges":[]},"customFields":[],"attachments":{"__typename":"AttachmentConnection","edges":[],"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}}},"ModerationData:moderation_data:3899816":{"__typename":"ModerationData","id":"moderation_data:3899816","status":"APPROVED","rejectReason":null,"isReportedAbuse":false,"rejectUser":null,"rejectTime":null,"rejectActorType":null},"BlogReplyMessage:message:3899816":{"__typename":"BlogReplyMessage","author":{"__ref":"User:user:988334"},"id":"message:3899816","revisionNum":1,"uid":3899816,"depth":1,"hasGivenKudo":false,"subscribed":false,"board":{"__ref":"Blog:board:FastTrackforAzureBlog"},"parent":{"__ref":"BlogTopicMessage:message:3885602"},"conversation":{"__ref":"Conversation:conversation:3885602"},"subject":"Re: Create an Azure OpenAI, LangChain, ChromaDB, and Chainlit Chat App in Container Apps using Terra","moderationData":{"__ref":"ModerationData:moderation_data:3899816"},"body":"

Hi merveguel, all the pre-requisites are free of charge. I think you can use a free Azure account to test the entire architecture, but I'm not sure about Azure OpenAI Service. I should check, but I'm OOF now. If you don't have an Azure account already, the best way is to open one. Alternatively, check the pricing page for the various services and the Azure Free Account FAQ: https://azure.microsoft.com/en-us/free/free-account-faq

","body@stripHtml({\"removeProcessingText\":false,\"removeSpoilerMarkup\":false,\"removeTocMarkup\":false,\"truncateLength\":200})@stringLength":"203","kudosSumWeight":1,"repliesCount":0,"postTime":"2023-08-15T03:17:40.427-07:00","lastPublishTime":"2023-08-15T03:17:40.427-07:00","metrics":{"__typename":"MessageMetrics","views":40726},"visibilityScope":"PUBLIC","placeholder":false,"originalMessageForPlaceholder":null,"entityType":"BLOG_REPLY","eventPath":"category:FastTrack/category:products-services/category:communities/community:gxcuf89792board:FastTrackforAzureBlog/message:3885602/message:3899816","replies":{"__typename":"MessageConnection","pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null},"edges":[]},"customFields":[],"attachments":{"__typename":"AttachmentConnection","edges":[],"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}}},"User:user:1981490":{"__typename":"User","id":"user:1981490","uid":1981490,"login":"merveguel","biography":null,"registrationData":{"__typename":"RegistrationData","status":null,"registrationTime":"2023-08-15T02:28:07.214-07:00"},"deleted":false,"email":"","avatar":{"__typename":"UserAvatar","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/m_assets/avatars/default/avatar-3.svg?time=0"},"rank":{"__ref":"Rank:rank:37"},"entityType":"USER","eventPath":"community:gxcuf89792/user:1981490"},"ModerationData:moderation_data:3899784":{"__typename":"ModerationData","id":"moderation_data:3899784","status":"APPROVED","rejectReason":null,"isReportedAbuse":false,"rejectUser":null,"rejectTime":null,"rejectActorType":null},"BlogReplyMessage:message:3899784":{"__typename":"BlogReplyMessage","author":{"__ref":"User:user:1981490"},"id":"message:3899784","revisionNum":1,"uid":3899784,"depth":1,"hasGivenKudo":false,"subscribed":false,"board":{"__ref":"Blog:board:FastTrackforAzureBlog"},"parent":{"__ref":"BlogTopicMes
sage:message:3885602"},"conversation":{"__ref":"Conversation:conversation:3885602"},"subject":"Re: Create an Azure OpenAI, LangChain, ChromaDB, and Chainlit Chat App in Container Apps using Terra","moderationData":{"__ref":"ModerationData:moderation_data:3899784"},"body":"

Hi paolosalvatori, thanks for such detailed post, I was wondering if this article can be implemented using the free-tier version of the prerequisite tools?

Best, Merve

","body@stripHtml({\"removeProcessingText\":false,\"removeSpoilerMarkup\":false,\"removeTocMarkup\":false,\"truncateLength\":200})@stringLength":"180","kudosSumWeight":0,"repliesCount":0,"postTime":"2023-08-15T02:32:14.786-07:00","lastPublishTime":"2023-08-15T02:32:14.786-07:00","metrics":{"__typename":"MessageMetrics","views":40736},"visibilityScope":"PUBLIC","placeholder":false,"originalMessageForPlaceholder":null,"entityType":"BLOG_REPLY","eventPath":"category:FastTrack/category:products-services/category:communities/community:gxcuf89792board:FastTrackforAzureBlog/message:3885602/message:3899784","replies":{"__typename":"MessageConnection","pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null},"edges":[]},"customFields":[],"attachments":{"__typename":"AttachmentConnection","edges":[],"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}}},"ModerationData:moderation_data:3885778":{"__typename":"ModerationData","id":"moderation_data:3885778","status":"APPROVED","rejectReason":null,"isReportedAbuse":false,"rejectUser":null,"rejectTime":null,"rejectActorType":null},"BlogReplyMessage:message:3885778":{"__typename":"BlogReplyMessage","author":{"__ref":"User:user:988334"},"id":"message:3885778","revisionNum":1,"uid":3885778,"depth":1,"hasGivenKudo":false,"subscribed":false,"board":{"__ref":"Blog:board:FastTrackforAzureBlog"},"parent":{"__ref":"BlogTopicMessage:message:3885602"},"conversation":{"__ref":"Conversation:conversation:3885602"},"subject":"Re: Create an Azure OpenAI, LangChain, ChromaDB, and Chainlit Chat App in Container Apps using Terra","moderationData":{"__ref":"ModerationData:moderation_data:3885778"},"body":"

Thanks cicorias, I thought to create an Azure Container Apps + Azure OpenAI sample after my articles on AKS + Azure OpenAI. 

","body@stripHtml({\"removeProcessingText\":false,\"removeSpoilerMarkup\":false,\"removeTocMarkup\":false,\"truncateLength\":200})@stringLength":"136","kudosSumWeight":1,"repliesCount":0,"postTime":"2023-07-27T09:01:55.748-07:00","lastPublishTime":"2023-07-27T09:01:55.748-07:00","metrics":{"__typename":"MessageMetrics","views":45876},"visibilityScope":"PUBLIC","placeholder":false,"originalMessageForPlaceholder":null,"entityType":"BLOG_REPLY","eventPath":"category:FastTrack/category:products-services/category:communities/community:gxcuf89792board:FastTrackforAzureBlog/message:3885602/message:3885778","replies":{"__typename":"MessageConnection","pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null},"edges":[]},"customFields":[],"attachments":{"__typename":"AttachmentConnection","edges":[],"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}}},"User:user:374754":{"__typename":"User","id":"user:374754","uid":374754,"login":"cicorias","biography":null,"registrationData":{"__typename":"RegistrationData","status":null,"registrationTime":"2019-07-11T05:41:26.274-07:00"},"deleted":false,"email":"","avatar":{"__typename":"UserAvatar","url":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/m_assets/avatars/default/avatar-8.svg?time=0"},"rank":{"__ref":"Rank:rank:4"},"entityType":"USER","eventPath":"community:gxcuf89792/user:374754"},"ModerationData:moderation_data:3885752":{"__typename":"ModerationData","id":"moderation_data:3885752","status":"APPROVED","rejectReason":null,"isReportedAbuse":false,"rejectUser":null,"rejectTime":null,"rejectActorType":null},"BlogReplyMessage:message:3885752":{"__typename":"BlogReplyMessage","author":{"__ref":"User:user:374754"},"id":"message:3885752","revisionNum":1,"uid":3885752,"depth":1,"hasGivenKudo":false,"subscribed":false,"board":{"__ref":"Blog:board:FastTrackforAzureBlog"},"parent":{"__ref":"BlogTopicMessage:me
ssage:3885602"},"conversation":{"__ref":"Conversation:conversation:3885602"},"subject":"Re: Create an Azure OpenAI, LangChain, ChromaDB, and Chainlit Chat App in Container Apps using Terra","moderationData":{"__ref":"ModerationData:moderation_data:3885752"},"body":"

Thanks again for such relevant and timely guidance Paolo!!

","body@stripHtml({\"removeProcessingText\":false,\"removeSpoilerMarkup\":false,\"removeTocMarkup\":false,\"truncateLength\":200})@stringLength":"60","kudosSumWeight":1,"repliesCount":0,"postTime":"2023-07-27T08:40:25.437-07:00","lastPublishTime":"2023-07-27T08:40:25.437-07:00","metrics":{"__typename":"MessageMetrics","views":45887},"visibilityScope":"PUBLIC","placeholder":false,"originalMessageForPlaceholder":null,"entityType":"BLOG_REPLY","eventPath":"category:FastTrack/category:products-services/category:communities/community:gxcuf89792board:FastTrackforAzureBlog/message:3885602/message:3885752","replies":{"__typename":"MessageConnection","pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null},"edges":[]},"customFields":[],"attachments":{"__typename":"AttachmentConnection","edges":[],"pageInfo":{"__typename":"PageInfo","hasNextPage":false,"endCursor":null,"hasPreviousPage":false,"startCursor":null}}},"CachedAsset:text:en_US-components/community/NavbarDropdownToggle-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/community/NavbarDropdownToggle-1743095130000","value":{"ariaLabelClosed":"Press the down arrow to open the menu"},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/common/QueryHandler-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/common/QueryHandler-1743095130000","value":{"title":"Query Handler"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageCoverImage-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageCoverImage-1743095130000","value":{"coverImageTitle":"Cover Image"},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/nodes/NodeTitle-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/nodes/NodeTitle-1743095130000","value":{"nodeTitle":"{nodeTitle, select, community {Community} other 
{{nodeTitle}}} "},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageTimeToRead-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageTimeToRead-1743095130000","value":{"minReadText":"{min} MIN READ"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageSubject-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageSubject-1743095130000","value":{"noSubject":"(no subject)"},"localOverride":false},"CachedAsset:text:en_US-components/users/UserLink-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/users/UserLink-1743095130000","value":{"authorName":"View Profile: {author}","anonymous":"Anonymous"},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/users/UserRank-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/users/UserRank-1743095130000","value":{"rankName":"{rankName}","userRank":"Author rank {rankName}"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageTime-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageTime-1743095130000","value":{"postTime":"Published: {time}","lastPublishTime":"Last Update: {time}","conversation.lastPostingActivityTime":"Last posting activity time: {time}","conversation.lastPostTime":"Last post time: {time}","moderationData.rejectTime":"Rejected time: {time}"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageBody-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageBody-1743095130000","value":{"showMessageBody":"Show More","mentionsErrorTitle":"{mentionsType, select, board {Board} user {User} message {Message} other {}} No Longer Available","mentionsErrorMessage":"The {mentionsType} you are trying to view has been removed from the community.","videoProcessing":"Video is being processed. 
Please try again in a few minutes.","bannerTitle":"Video provider requires cookies to play the video. Accept to continue or {url} it directly on the provider's site.","buttonTitle":"Accept","urlText":"watch"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageCustomFields-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageCustomFields-1743095130000","value":{"CustomField.default.label":"Value of {name}"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageRevision-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageRevision-1743095130000","value":{"lastUpdatedDatePublished":"{publishCount, plural, one{Published} other{Updated}} {date}","lastUpdatedDateDraft":"Created {date}","version":"Version {major}.{minor}"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageReplyButton-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageReplyButton-1743095130000","value":{"repliesCount":"{count}","title":"Reply","title@board:BLOG@message:root":"Comment","title@board:TKB@message:root":"Comment","title@board:IDEA@message:root":"Comment","title@board:OCCASION@message:root":"Comment"},"localOverride":false},"CachedAsset:text:en_US-components/messages/MessageAuthorBio-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/messages/MessageAuthorBio-1743095130000","value":{"sendMessage":"Send Message","actionMessage":"Follow this blog board to get notified when there's new activity","coAuthor":"CO-PUBLISHER","contributor":"CONTRIBUTOR","userProfile":"View Profile","iconlink":"Go to {name} {type}"},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/users/UserAvatar-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/users/UserAvatar-1743095130000","value":{"altText":"{login}'s avatar","altTextGeneric":"User's 
avatar"},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/ranks/UserRankLabel-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/ranks/UserRankLabel-1743095130000","value":{"altTitle":"Icon for {rankName} rank"},"localOverride":false},"CachedAsset:text:en_US-components/users/UserRegistrationDate-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/users/UserRegistrationDate-1743095130000","value":{"noPrefix":"{date}","withPrefix":"Joined {date}"},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/nodes/NodeAvatar-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/nodes/NodeAvatar-1743095130000","value":{"altTitle":"Node avatar for {nodeTitle}"},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/nodes/NodeDescription-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/nodes/NodeDescription-1743095130000","value":{"description":"{description}"},"localOverride":false},"CachedAsset:text:en_US-components/tags/TagView/TagViewChip-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-components/tags/TagView/TagViewChip-1743095130000","value":{"tagLabelName":"Tag name {tagName}"},"localOverride":false},"CachedAsset:text:en_US-shared/client/components/nodes/NodeIcon-1743095130000":{"__typename":"CachedAsset","id":"text:en_US-shared/client/components/nodes/NodeIcon-1743095130000","value":{"contentType":"Content Type {style, select, FORUM {Forum} BLOG {Blog} TKB {Knowledge Base} IDEA {Ideas} OCCASION {Events} other {}} 
icon"},"localOverride":false}}}},"page":"/blogs/BlogMessagePage/BlogMessagePage","query":{"boardId":"fasttrackforazureblog","messageSubject":"create-an-azure-openai-langchain-chromadb-and-chainlit-chat-app-in-container-app","messageId":"3885602"},"buildId":"HEhyUrv5OXNBIbfCLaOrw","runtimeConfig":{"buildInformationVisible":false,"logLevelApp":"info","logLevelMetrics":"info","openTelemetryClientEnabled":false,"openTelemetryConfigName":"o365","openTelemetryServiceVersion":"25.1.0","openTelemetryUniverse":"prod","openTelemetryCollector":"http://localhost:4318","openTelemetryRouteChangeAllowedTime":"5000","apolloDevToolsEnabled":false,"inboxMuteWipFeatureEnabled":false},"isFallback":false,"isExperimentalCompile":false,"dynamicIds":["./components/community/Navbar/NavbarWidget.tsx","./components/community/Breadcrumb/BreadcrumbWidget.tsx","./components/customComponent/CustomComponent/CustomComponent.tsx","./components/blogs/BlogArticleWidget/BlogArticleWidget.tsx","./components/external/components/ExternalComponent.tsx","./components/messages/MessageView/MessageViewStandard/MessageViewStandard.tsx","./components/messages/ThreadedReplyList/ThreadedReplyList.tsx","../shared/client/components/common/List/UnstyledList/UnstyledList.tsx","./components/messages/MessageView/MessageView.tsx","../shared/client/components/common/List/UnwrappedList/UnwrappedList.tsx","./components/tags/TagView/TagView.tsx","./components/tags/TagView/TagViewChip/TagViewChip.tsx"],"appGip":true,"scriptLoader":[{"id":"analytics","src":"https://techcommunity.microsoft.com/t5/s/gxcuf89792/pagescripts/1730819800000/analytics.js?page.id=BlogMessagePage&entity.id=board%3Afasttrackforazureblog&entity.id=message%3A3885602","strategy":"afterInteractive"}]}