Compare commits


1 Commit

Author: sunghyunkang1111 | SHA1: 9bd905403b | Message: Update README.md | Date: 2025-02-20 14:34:17 -06:00
84 changed files with 1256 additions and 4377 deletions

View File

@@ -2,6 +2,7 @@
UI for Azure Cosmos DB. Powers the [Azure Portal](https://portal.azure.com/), https://cosmos.azure.com/, and the [Cosmos DB Emulator](https://docs.microsoft.com/en-us/azure/cosmos-db/local-emulator)
![](https://sdkctlstore.blob.core.windows.net/exe/dataexplorer.gif)
## Getting Started

package-lock.json (generated, 6 changed lines)
View File

@@ -51,7 +51,6 @@
"@types/mkdirp": "1.0.1",
"@types/node-fetch": "2.5.7",
"@xmldom/xmldom": "0.7.13",
"@xterm/xterm": "5.5.0",
"allotment": "1.20.2",
"applicationinsights": "1.8.0",
"bootstrap": "3.4.1",
@@ -13239,11 +13238,6 @@
"node": ">=10.0.0"
}
},
"node_modules/@xterm/xterm": {
"version": "5.5.0",
"resolved": "https://registry.npmjs.org/@xterm/xterm/-/xterm-5.5.0.tgz",
"integrity": "sha512-hqJHYaQb5OptNunnyAnkHyM8aCjZ1MEIDTQu1iIbbTD/xops91NB5yq1ZK/dC2JDbVWtF23zUtl9JE2NqwT87A=="
},
"node_modules/@xtuc/ieee754": {
"version": "1.2.0",
"license": "BSD-3-Clause"

View File

@@ -13,8 +13,8 @@
"@babel/plugin-proposal-decorators": "7.12.12",
"@fluentui/react": "8.119.0",
"@fluentui/react-components": "9.54.2",
"@jupyterlab/terminal": "3.0.3",
"@jupyterlab/services": "6.0.2",
"@jupyterlab/terminal": "3.0.3",
"@microsoft/applicationinsights-web": "2.6.1",
"@nteract/commutable": "7.5.1",
"@nteract/connected-components": "6.8.2",
@@ -46,7 +46,6 @@
"@types/mkdirp": "1.0.1",
"@types/node-fetch": "2.5.7",
"@xmldom/xmldom": "0.7.13",
"@xterm/xterm": "5.5.0",
"allotment": "1.20.2",
"applicationinsights": "1.8.0",
"bootstrap": "3.4.1",

View File

@@ -3,8 +3,9 @@ import { getAuthorizationTokenUsingResourceTokens } from "Common/getAuthorizatio
import { AuthorizationToken } from "Contracts/FabricMessageTypes";
import { checkDatabaseResourceTokensValidity } from "Platform/Fabric/FabricUtil";
import { LocalStorageUtility, StorageKey } from "Shared/StorageUtility";
import { useNewPortalBackendEndpoint } from "Utils/EndpointUtils";
import { AuthType } from "../AuthType";
import { PriorityLevel } from "../Common/Constants";
import { BackendApi, PriorityLevel } from "../Common/Constants";
import * as Logger from "../Common/Logger";
import { Platform, configContext } from "../ConfigContext";
import { updateUserContext, userContext } from "../UserContext";
@@ -124,6 +125,10 @@ export async function getTokenFromAuthService(
resourceType: string,
resourceId?: string,
): Promise<AuthorizationToken> {
if (!useNewPortalBackendEndpoint(BackendApi.RuntimeProxy)) {
return getTokenFromAuthService_ToBeDeprecated(verb, resourceType, resourceId);
}
try {
const host: string = configContext.PORTAL_BACKEND_ENDPOINT;
const response: Response = await _global.fetch(host + "/api/connectionstring/runtimeproxy/authorizationtokens", {
@@ -146,6 +151,34 @@ export async function getTokenFromAuthService(
}
}
export async function getTokenFromAuthService_ToBeDeprecated(
verb: string,
resourceType: string,
resourceId?: string,
): Promise<AuthorizationToken> {
try {
const host = configContext.BACKEND_ENDPOINT;
const response = await _global.fetch(host + "/api/guest/runtimeproxy/authorizationTokens", {
method: "POST",
headers: {
"content-type": "application/json",
"x-ms-encrypted-auth-token": userContext.accessToken,
},
body: JSON.stringify({
verb,
resourceType,
resourceId,
}),
});
//TODO I am not sure why we have to parse the JSON again here. fetch should do it for us when we call .json()
const result = JSON.parse(await response.json());
return result;
} catch (error) {
logConsoleError(`Failed to get authorization headers for ${resourceType}: ${getErrorMessage(error)}`);
return Promise.reject(error);
}
}
// The Capability is a bitmap, which cosmosdb backend decodes as per the below enum
enum SDKSupportedCapabilities {
None = 0,
@@ -170,10 +203,8 @@ export function client(): Cosmos.CosmosClient {
}
let _defaultHeaders: Cosmos.CosmosHeaders = {};
_defaultHeaders["x-ms-cosmos-sdk-supportedcapabilities"] =
SDKSupportedCapabilities.None | SDKSupportedCapabilities.PartitionMerge;
_defaultHeaders["x-ms-cosmos-throughput-bucket"] = 1;
if (
userContext.authType === AuthType.ConnectionString ||

View File

@@ -4,8 +4,16 @@ import { configContext, resetConfigContext, updateConfigContext } from "../Confi
import { DatabaseAccount } from "../Contracts/DataModels";
import { Collection } from "../Contracts/ViewModels";
import DocumentId from "../Explorer/Tree/DocumentId";
import { extractFeatures } from "../Platform/Hosted/extractFeatures";
import { updateUserContext } from "../UserContext";
import { deleteDocuments, getEndpoint, queryDocuments, readDocument, updateDocument } from "./MongoProxyClient";
import {
deleteDocument,
getEndpoint,
getFeatureEndpointOrDefault,
queryDocuments,
readDocument,
updateDocument,
} from "./MongoProxyClient";
const databaseId = "testDB";
@@ -188,8 +196,20 @@ describe("MongoProxyClient", () => {
expect.any(Object),
);
});
it("builds the correct proxy URL in development", () => {
updateConfigContext({
MONGO_BACKEND_ENDPOINT: "https://localhost:1234",
globallyEnabledMongoAPIs: [],
});
updateDocument(databaseId, collection, documentId, "{}");
expect(window.fetch).toHaveBeenCalledWith(
`${configContext.MONGO_PROXY_ENDPOINT}/api/mongo/explorer`,
expect.any(Object),
);
});
});
describe("deleteDocuments", () => {
describe("deleteDocument", () => {
beforeEach(() => {
resetConfigContext();
updateUserContext({
@@ -206,9 +226,9 @@ describe("MongoProxyClient", () => {
});
it("builds the correct URL", () => {
deleteDocuments(databaseId, collection, [documentId]);
deleteDocument(databaseId, collection, documentId);
expect(window.fetch).toHaveBeenCalledWith(
`${configContext.MONGO_PROXY_ENDPOINT}/api/mongo/explorer/bulkdelete`,
`${configContext.MONGO_PROXY_ENDPOINT}/api/mongo/explorer`,
expect.any(Object),
);
});
@@ -218,9 +238,9 @@ describe("MongoProxyClient", () => {
MONGO_PROXY_ENDPOINT: "https://localhost:1234",
globallyEnabledMongoAPIs: [],
});
deleteDocuments(databaseId, collection, [documentId]);
deleteDocument(databaseId, collection, documentId);
expect(window.fetch).toHaveBeenCalledWith(
`${configContext.MONGO_PROXY_ENDPOINT}/api/mongo/explorer/bulkdelete`,
`${configContext.MONGO_PROXY_ENDPOINT}/api/mongo/explorer`,
expect.any(Object),
);
});
@@ -255,4 +275,33 @@ describe("MongoProxyClient", () => {
expect(endpoint).toEqual(`${configContext.MONGO_PROXY_ENDPOINT}/api/connectionstring/mongo/explorer`);
});
});
describe("getFeatureEndpointOrDefault", () => {
beforeEach(() => {
resetConfigContext();
updateConfigContext({
MONGO_PROXY_ENDPOINT: MongoProxyEndpoints.Prod,
globallyEnabledMongoAPIs: [],
});
const params = new URLSearchParams({
"feature.mongoProxyEndpoint": MongoProxyEndpoints.Prod,
"feature.mongoProxyAPIs": "readDocument|createDocument",
});
const features = extractFeatures(params);
updateUserContext({
authType: AuthType.AAD,
features: features,
});
});
it("returns a local endpoint", () => {
const endpoint = getFeatureEndpointOrDefault("readDocument");
expect(endpoint).toEqual(`${configContext.MONGO_PROXY_ENDPOINT}/api/mongo/explorer`);
});
it("returns a production endpoint", () => {
const endpoint = getFeatureEndpointOrDefault("DeleteDocument");
expect(endpoint).toEqual(`${configContext.MONGO_PROXY_ENDPOINT}/api/mongo/explorer`);
});
});
});

View File

@@ -1,13 +1,20 @@
import { Constants as CosmosSDKConstants } from "@azure/cosmos";
import {
allowedMongoProxyEndpoints_ToBeDeprecated,
defaultAllowedMongoProxyEndpoints,
validateEndpoint,
} from "Utils/EndpointUtils";
import queryString from "querystring";
import { AuthType } from "../AuthType";
import { configContext } from "../ConfigContext";
import * as DataModels from "../Contracts/DataModels";
import { MessageTypes } from "../Contracts/ExplorerContracts";
import { Collection } from "../Contracts/ViewModels";
import DocumentId from "../Explorer/Tree/DocumentId";
import { hasFlag } from "../Platform/Hosted/extractFeatures";
import { userContext } from "../UserContext";
import { logConsoleError } from "../Utils/NotificationConsoleUtils";
import { ApiType, ContentType, HttpHeaders, HttpStatusCodes } from "./Constants";
import { ApiType, ContentType, HttpHeaders, HttpStatusCodes, MongoProxyApi, MongoProxyEndpoints } from "./Constants";
import { MinimalQueryIterator } from "./IteratorUtilities";
import { sendMessage } from "./MessageHandler";
@@ -60,6 +67,10 @@ export function queryDocuments(
query: string,
continuationToken?: string,
): Promise<QueryResponse> {
if (!useMongoProxyEndpoint(MongoProxyApi.ResourceList) || !useMongoProxyEndpoint(MongoProxyApi.QueryDocuments)) {
return queryDocuments_ToBeDeprecated(databaseId, collection, isResourceList, query, continuationToken);
}
const { databaseAccount } = userContext;
const resourceEndpoint = databaseAccount.properties.mongoEndpoint || databaseAccount.properties.documentEndpoint;
const params = {
@@ -78,7 +89,7 @@ export function queryDocuments(
query,
};
const endpoint = getEndpoint(configContext.MONGO_PROXY_ENDPOINT) || "";
const endpoint = getFeatureEndpointOrDefault(MongoProxyApi.ResourceList) || "";
const headers = {
...defaultHeaders,
@@ -116,11 +127,76 @@ export function queryDocuments(
});
}
function queryDocuments_ToBeDeprecated(
databaseId: string,
collection: Collection,
isResourceList: boolean,
query: string,
continuationToken?: string,
): Promise<QueryResponse> {
const { databaseAccount } = userContext;
const resourceEndpoint = databaseAccount.properties.mongoEndpoint || databaseAccount.properties.documentEndpoint;
const params = {
db: databaseId,
coll: collection.id(),
resourceUrl: `${resourceEndpoint}dbs/${databaseId}/colls/${collection.id()}/docs/`,
rid: collection.rid,
rtype: "docs",
sid: userContext.subscriptionId,
rg: userContext.resourceGroup,
dba: databaseAccount.name,
pk:
collection && collection.partitionKey && !collection.partitionKey.systemKey
? collection.partitionKeyProperties?.[0]
: "",
};
const endpoint = getFeatureEndpointOrDefault("resourcelist") || "";
const headers = {
...defaultHeaders,
...authHeaders(),
[CosmosSDKConstants.HttpHeaders.IsQuery]: "true",
[CosmosSDKConstants.HttpHeaders.PopulateQueryMetrics]: "true",
[CosmosSDKConstants.HttpHeaders.EnableScanInQuery]: "true",
[CosmosSDKConstants.HttpHeaders.EnableCrossPartitionQuery]: "true",
[CosmosSDKConstants.HttpHeaders.ParallelizeCrossPartitionQuery]: "true",
[HttpHeaders.contentType]: "application/query+json",
};
if (continuationToken) {
headers[CosmosSDKConstants.HttpHeaders.Continuation] = continuationToken;
}
const path = isResourceList ? "/resourcelist" : "";
return window
.fetch(`${endpoint}${path}?${queryString.stringify(params)}`, {
method: "POST",
body: JSON.stringify({ query }),
headers,
})
.then(async (response) => {
if (response.ok) {
return {
continuationToken: response.headers.get(CosmosSDKConstants.HttpHeaders.Continuation),
documents: (await response.json()).Documents as DataModels.DocumentId[],
headers: response.headers,
};
}
await errorHandling(response, "querying documents", params);
return undefined;
});
}
export function readDocument(
databaseId: string,
collection: Collection,
documentId: DocumentId,
): Promise<DataModels.DocumentId> {
if (!useMongoProxyEndpoint(MongoProxyApi.ReadDocument)) {
return readDocument_ToBeDeprecated(databaseId, collection, documentId);
}
const { databaseAccount } = userContext;
const resourceEndpoint = databaseAccount.properties.mongoEndpoint || databaseAccount.properties.documentEndpoint;
const idComponents = documentId.self.split("/");
@@ -141,7 +217,7 @@ export function readDocument(
: "",
};
const endpoint = getEndpoint(configContext.MONGO_PROXY_ENDPOINT);
const endpoint = getFeatureEndpointOrDefault(MongoProxyApi.ReadDocument);
return window
.fetch(endpoint, {
@@ -161,12 +237,61 @@ export function readDocument(
});
}
export function readDocument_ToBeDeprecated(
databaseId: string,
collection: Collection,
documentId: DocumentId,
): Promise<DataModels.DocumentId> {
const { databaseAccount } = userContext;
const resourceEndpoint = databaseAccount.properties.mongoEndpoint || databaseAccount.properties.documentEndpoint;
const idComponents = documentId.self.split("/");
const path = idComponents.slice(0, 4).join("/");
const rid = encodeURIComponent(idComponents[5]);
const params = {
db: databaseId,
coll: collection.id(),
resourceUrl: `${resourceEndpoint}${path}/${rid}`,
rid,
rtype: "docs",
sid: userContext.subscriptionId,
rg: userContext.resourceGroup,
dba: databaseAccount.name,
pk:
documentId && documentId.partitionKey && !documentId.partitionKey.systemKey
? documentId.partitionKeyProperties?.[0]
: "",
};
const endpoint = getFeatureEndpointOrDefault("readDocument");
return window
.fetch(`${endpoint}?${queryString.stringify(params)}`, {
method: "GET",
headers: {
...defaultHeaders,
...authHeaders(),
[CosmosSDKConstants.HttpHeaders.PartitionKey]: encodeURIComponent(
JSON.stringify(documentId.partitionKeyHeader()),
),
},
})
.then(async (response) => {
if (response.ok) {
return response.json();
}
return await errorHandling(response, "reading document", params);
});
}
export function createDocument(
databaseId: string,
collection: Collection,
partitionKeyProperty: string,
documentContent: unknown,
): Promise<DataModels.DocumentId> {
if (!useMongoProxyEndpoint(MongoProxyApi.CreateDocument)) {
return createDocument_ToBeDeprecated(databaseId, collection, partitionKeyProperty, documentContent);
}
const { databaseAccount } = userContext;
const resourceEndpoint = databaseAccount.properties.mongoEndpoint || databaseAccount.properties.documentEndpoint;
const params = {
@@ -183,7 +308,7 @@ export function createDocument(
documentContent: JSON.stringify(documentContent),
};
const endpoint = getEndpoint(configContext.MONGO_PROXY_ENDPOINT);
const endpoint = getFeatureEndpointOrDefault(MongoProxyApi.CreateDocument);
return window
.fetch(`${endpoint}/createDocument`, {
@@ -203,12 +328,54 @@ export function createDocument(
});
}
export function createDocument_ToBeDeprecated(
databaseId: string,
collection: Collection,
partitionKeyProperty: string,
documentContent: unknown,
): Promise<DataModels.DocumentId> {
const { databaseAccount } = userContext;
const resourceEndpoint = databaseAccount.properties.mongoEndpoint || databaseAccount.properties.documentEndpoint;
const params = {
db: databaseId,
coll: collection.id(),
resourceUrl: `${resourceEndpoint}dbs/${databaseId}/colls/${collection.id()}/docs/`,
rid: collection.rid,
rtype: "docs",
sid: userContext.subscriptionId,
rg: userContext.resourceGroup,
dba: databaseAccount.name,
pk: collection && collection.partitionKey && !collection.partitionKey.systemKey ? partitionKeyProperty : "",
};
const endpoint = getFeatureEndpointOrDefault("createDocument");
return window
.fetch(`${endpoint}/resourcelist?${queryString.stringify(params)}`, {
method: "POST",
body: JSON.stringify(documentContent),
headers: {
...defaultHeaders,
...authHeaders(),
},
})
.then(async (response) => {
if (response.ok) {
return response.json();
}
return await errorHandling(response, "creating document", params);
});
}
export function updateDocument(
databaseId: string,
collection: Collection,
documentId: DocumentId,
documentContent: string,
): Promise<DataModels.DocumentId> {
if (!useMongoProxyEndpoint(MongoProxyApi.UpdateDocument)) {
return updateDocument_ToBeDeprecated(databaseId, collection, documentId, documentContent);
}
const { databaseAccount } = userContext;
const resourceEndpoint = databaseAccount.properties.mongoEndpoint || databaseAccount.properties.documentEndpoint;
const idComponents = documentId.self.split("/");
@@ -229,7 +396,7 @@ export function updateDocument(
: "",
documentContent,
};
const endpoint = getEndpoint(configContext.MONGO_PROXY_ENDPOINT);
const endpoint = getFeatureEndpointOrDefault(MongoProxyApi.UpdateDocument);
return window
.fetch(endpoint, {
@@ -250,6 +417,139 @@ export function updateDocument(
});
}
export function updateDocument_ToBeDeprecated(
databaseId: string,
collection: Collection,
documentId: DocumentId,
documentContent: string,
): Promise<DataModels.DocumentId> {
const { databaseAccount } = userContext;
const resourceEndpoint = databaseAccount.properties.mongoEndpoint || databaseAccount.properties.documentEndpoint;
const idComponents = documentId.self.split("/");
const path = idComponents.slice(0, 5).join("/");
const rid = encodeURIComponent(idComponents[5]);
const params = {
db: databaseId,
coll: collection.id(),
resourceUrl: `${resourceEndpoint}${path}/${rid}`,
rid,
rtype: "docs",
sid: userContext.subscriptionId,
rg: userContext.resourceGroup,
dba: databaseAccount.name,
pk:
documentId && documentId.partitionKey && !documentId.partitionKey.systemKey
? documentId.partitionKeyProperties?.[0]
: "",
};
const endpoint = getFeatureEndpointOrDefault("updateDocument");
return window
.fetch(`${endpoint}?${queryString.stringify(params)}`, {
method: "PUT",
body: documentContent,
headers: {
...defaultHeaders,
...authHeaders(),
[HttpHeaders.contentType]: ContentType.applicationJson,
[CosmosSDKConstants.HttpHeaders.PartitionKey]: JSON.stringify(documentId.partitionKeyHeader()),
},
})
.then(async (response) => {
if (response.ok) {
return response.json();
}
return await errorHandling(response, "updating document", params);
});
}
export function deleteDocument(databaseId: string, collection: Collection, documentId: DocumentId): Promise<void> {
if (!useMongoProxyEndpoint(MongoProxyApi.DeleteDocument)) {
return deleteDocument_ToBeDeprecated(databaseId, collection, documentId);
}
const { databaseAccount } = userContext;
const resourceEndpoint = databaseAccount.properties.mongoEndpoint || databaseAccount.properties.documentEndpoint;
const idComponents = documentId.self.split("/");
const path = idComponents.slice(0, 5).join("/");
const rid = encodeURIComponent(idComponents[5]);
const params = {
databaseID: databaseId,
collectionID: collection.id(),
resourceUrl: `${resourceEndpoint}${path}/${rid}`,
resourceID: rid,
resourceType: "docs",
subscriptionID: userContext.subscriptionId,
resourceGroup: userContext.resourceGroup,
databaseAccountName: databaseAccount.name,
partitionKey:
documentId && documentId.partitionKey && !documentId.partitionKey.systemKey
? documentId.partitionKeyProperties?.[0]
: "",
};
const endpoint = getFeatureEndpointOrDefault(MongoProxyApi.DeleteDocument);
return window
.fetch(endpoint, {
method: "DELETE",
body: JSON.stringify(params),
headers: {
...defaultHeaders,
...authHeaders(),
[HttpHeaders.contentType]: ContentType.applicationJson,
},
})
.then(async (response) => {
if (response.ok) {
return undefined;
}
return await errorHandling(response, "deleting document", params);
});
}
export function deleteDocument_ToBeDeprecated(
databaseId: string,
collection: Collection,
documentId: DocumentId,
): Promise<void> {
const { databaseAccount } = userContext;
const resourceEndpoint = databaseAccount.properties.mongoEndpoint || databaseAccount.properties.documentEndpoint;
const idComponents = documentId.self.split("/");
const path = idComponents.slice(0, 5).join("/");
const rid = encodeURIComponent(idComponents[5]);
const params = {
db: databaseId,
coll: collection.id(),
resourceUrl: `${resourceEndpoint}${path}/${rid}`,
rid,
rtype: "docs",
sid: userContext.subscriptionId,
rg: userContext.resourceGroup,
dba: databaseAccount.name,
pk:
documentId && documentId.partitionKey && !documentId.partitionKey.systemKey
? documentId.partitionKeyProperties?.[0]
: "",
};
const endpoint = getFeatureEndpointOrDefault("deleteDocument");
return window
.fetch(`${endpoint}?${queryString.stringify(params)}`, {
method: "DELETE",
headers: {
...defaultHeaders,
...authHeaders(),
[HttpHeaders.contentType]: ContentType.applicationJson,
[CosmosSDKConstants.HttpHeaders.PartitionKey]: JSON.stringify(documentId.partitionKeyHeader()),
},
})
.then(async (response) => {
if (response.ok) {
return undefined;
}
return await errorHandling(response, "deleting document", params);
});
}
export function deleteDocuments(
databaseId: string,
collection: Collection,
@@ -275,7 +575,7 @@ export function deleteDocuments(
resourceGroup: userContext.resourceGroup,
databaseAccountName: databaseAccount.name,
};
const endpoint = getEndpoint(configContext.MONGO_PROXY_ENDPOINT);
const endpoint = getFeatureEndpointOrDefault(MongoProxyApi.BulkDelete);
return window
.fetch(`${endpoint}/bulkdelete`, {
@@ -299,6 +599,9 @@ export function deleteDocuments(
export function createMongoCollectionWithProxy(
params: DataModels.CreateCollectionParams,
): Promise<DataModels.Collection> {
if (!useMongoProxyEndpoint(MongoProxyApi.CreateCollectionWithProxy)) {
return createMongoCollectionWithProxy_ToBeDeprecated(params);
}
const { databaseAccount } = userContext;
const shardKey: string = params.partitionKey?.paths[0];
@@ -319,7 +622,7 @@ export function createMongoCollectionWithProxy(
isSharded: !!shardKey,
};
const endpoint = getEndpoint(configContext.MONGO_PROXY_ENDPOINT);
const endpoint = getFeatureEndpointOrDefault(MongoProxyApi.CreateCollectionWithProxy);
return window
.fetch(`${endpoint}/createCollection`, {
@@ -339,6 +642,70 @@ export function createMongoCollectionWithProxy(
});
}
export function createMongoCollectionWithProxy_ToBeDeprecated(
params: DataModels.CreateCollectionParams,
): Promise<DataModels.Collection> {
const { databaseAccount } = userContext;
const shardKey: string = params.partitionKey?.paths[0];
const mongoParams: DataModels.MongoParameters = {
resourceUrl: databaseAccount.properties.mongoEndpoint || databaseAccount.properties.documentEndpoint,
db: params.databaseId,
coll: params.collectionId,
pk: shardKey,
offerThroughput: params.autoPilotMaxThroughput || params.offerThroughput,
cd: params.createNewDatabase,
st: params.databaseLevelThroughput,
is: !!shardKey,
rid: "",
rtype: "colls",
sid: userContext.subscriptionId,
rg: userContext.resourceGroup,
dba: databaseAccount.name,
isAutoPilot: !!params.autoPilotMaxThroughput,
};
const endpoint = getFeatureEndpointOrDefault("createCollectionWithProxy");
return window
.fetch(
`${endpoint}/createCollection?${queryString.stringify(
mongoParams as unknown as queryString.ParsedUrlQueryInput,
)}`,
{
method: "POST",
headers: {
...defaultHeaders,
...authHeaders(),
[HttpHeaders.contentType]: "application/json",
},
},
)
.then(async (response) => {
if (response.ok) {
return response.json();
}
return await errorHandling(response, "creating collection", mongoParams);
});
}
export function getFeatureEndpointOrDefault(feature: string): string {
let endpoint;
if (useMongoProxyEndpoint(feature)) {
endpoint = configContext.MONGO_PROXY_ENDPOINT;
} else {
const allowedMongoProxyEndpoints = configContext.allowedMongoProxyEndpoints || [
...defaultAllowedMongoProxyEndpoints,
...allowedMongoProxyEndpoints_ToBeDeprecated,
];
endpoint =
hasFlag(userContext.features.mongoProxyAPIs, feature) &&
validateEndpoint(userContext.features.mongoProxyEndpoint, allowedMongoProxyEndpoints)
? userContext.features.mongoProxyEndpoint
: configContext.MONGO_BACKEND_ENDPOINT || configContext.BACKEND_ENDPOINT;
}
return getEndpoint(endpoint);
}
export function getEndpoint(endpoint: string): string {
let url = endpoint + "/api/mongo/explorer";
@@ -352,6 +719,84 @@ export function getEndpoint(endpoint: string): string {
return url;
}
export function useMongoProxyEndpoint(mongoProxyApi: string): boolean {
const mongoProxyEnvironmentMap: { [key: string]: string[] } = {
[MongoProxyApi.ResourceList]: [
MongoProxyEndpoints.Development,
MongoProxyEndpoints.Mpac,
MongoProxyEndpoints.Prod,
MongoProxyEndpoints.Fairfax,
MongoProxyEndpoints.Mooncake,
],
[MongoProxyApi.QueryDocuments]: [
MongoProxyEndpoints.Development,
MongoProxyEndpoints.Mpac,
MongoProxyEndpoints.Prod,
MongoProxyEndpoints.Fairfax,
MongoProxyEndpoints.Mooncake,
],
[MongoProxyApi.CreateDocument]: [
MongoProxyEndpoints.Development,
MongoProxyEndpoints.Mpac,
MongoProxyEndpoints.Prod,
MongoProxyEndpoints.Fairfax,
MongoProxyEndpoints.Mooncake,
],
[MongoProxyApi.ReadDocument]: [
MongoProxyEndpoints.Development,
MongoProxyEndpoints.Mpac,
MongoProxyEndpoints.Prod,
MongoProxyEndpoints.Fairfax,
MongoProxyEndpoints.Mooncake,
],
[MongoProxyApi.UpdateDocument]: [
MongoProxyEndpoints.Development,
MongoProxyEndpoints.Mpac,
MongoProxyEndpoints.Prod,
MongoProxyEndpoints.Fairfax,
MongoProxyEndpoints.Mooncake,
],
[MongoProxyApi.DeleteDocument]: [
MongoProxyEndpoints.Development,
MongoProxyEndpoints.Mpac,
MongoProxyEndpoints.Prod,
MongoProxyEndpoints.Fairfax,
MongoProxyEndpoints.Mooncake,
],
[MongoProxyApi.CreateCollectionWithProxy]: [
MongoProxyEndpoints.Development,
MongoProxyEndpoints.Mpac,
MongoProxyEndpoints.Prod,
MongoProxyEndpoints.Fairfax,
MongoProxyEndpoints.Mooncake,
],
[MongoProxyApi.LegacyMongoShell]: [
MongoProxyEndpoints.Development,
MongoProxyEndpoints.Mpac,
MongoProxyEndpoints.Prod,
MongoProxyEndpoints.Fairfax,
MongoProxyEndpoints.Mooncake,
],
[MongoProxyApi.BulkDelete]: [
MongoProxyEndpoints.Development,
MongoProxyEndpoints.Mpac,
MongoProxyEndpoints.Prod,
MongoProxyEndpoints.Fairfax,
MongoProxyEndpoints.Mooncake,
],
};
if (!mongoProxyEnvironmentMap[mongoProxyApi] || !configContext.MONGO_PROXY_ENDPOINT) {
return false;
}
if (configContext.globallyEnabledMongoAPIs.includes(mongoProxyApi)) {
return true;
}
return mongoProxyEnvironmentMap[mongoProxyApi].includes(configContext.MONGO_PROXY_ENDPOINT);
}
export class ThrottlingError extends Error {
constructor(message: string) {
super(message);

View File

@@ -105,8 +105,6 @@ const readCollectionOfferWithARM = async (databaseId: string, collectionId: stri
? parseInt(resource.softAllowedMaximumThroughput)
: resource.softAllowedMaximumThroughput;
const throughputBuckets = resource?.throughputBuckets;
if (autoscaleSettings) {
return {
id: offerId,
@@ -116,7 +114,6 @@ const readCollectionOfferWithARM = async (databaseId: string, collectionId: stri
offerReplacePending: resource.offerReplacePending === "true",
instantMaximumThroughput,
softAllowedMaximumThroughput,
throughputBuckets,
};
}
@@ -128,7 +125,6 @@ const readCollectionOfferWithARM = async (databaseId: string, collectionId: stri
offerReplacePending: resource.offerReplacePending === "true",
instantMaximumThroughput,
softAllowedMaximumThroughput,
throughputBuckets,
};
}

View File

@@ -1,6 +1,6 @@
import { OfferDefinition, RequestOptions } from "@azure/cosmos";
import { AuthType } from "../../AuthType";
import { Offer, SDKOfferDefinition, ThroughputBucket, UpdateOfferParams } from "../../Contracts/DataModels";
import { Offer, SDKOfferDefinition, UpdateOfferParams } from "../../Contracts/DataModels";
import { userContext } from "../../UserContext";
import {
migrateCassandraKeyspaceToAutoscale,
@@ -359,13 +359,6 @@ const createUpdateOfferBody = (params: UpdateOfferParams): ThroughputSettingsUpd
body.properties.resource.throughput = params.manualThroughput;
}
if (params.throughputBuckets) {
const throughputBuckets = params.throughputBuckets.filter(
(bucket: ThroughputBucket) => bucket.maxThroughputPercentage !== 100,
);
body.properties.resource.throughputBuckets = throughputBuckets;
}
return body;
};

View File

@@ -12,6 +12,7 @@ import {
allowedGraphEndpoints,
allowedHostedExplorerEndpoints,
allowedJunoOrigins,
allowedMongoBackendEndpoints,
allowedMsalRedirectEndpoints,
defaultAllowedArmEndpoints,
defaultAllowedBackendEndpoints,
@@ -49,8 +50,10 @@ export interface ConfigContext {
CATALOG_API_KEY: string;
ARCADIA_ENDPOINT: string;
ARCADIA_LIVY_ENDPOINT_DNS_ZONE: string;
BACKEND_ENDPOINT?: string;
PORTAL_BACKEND_ENDPOINT: string;
NEW_BACKEND_APIS?: BackendApi[];
MONGO_BACKEND_ENDPOINT?: string;
MONGO_PROXY_ENDPOINT: string;
CASSANDRA_PROXY_ENDPOINT: string;
NEW_CASSANDRA_APIS?: string[];
@@ -106,6 +109,7 @@ let configContext: Readonly<ConfigContext> = {
GITHUB_CLIENT_ID: "6cb2f63cf6f7b5cbdeca", // Registered OAuth app: https://github.com/organizations/AzureCosmosDBNotebooks/settings/applications/1189306
GITHUB_TEST_ENV_CLIENT_ID: "b63fc8cbf87fd3c6e2eb", // Registered OAuth app: https://github.com/organizations/AzureCosmosDBNotebooks/settings/applications/1777772
JUNO_ENDPOINT: JunoEndpoints.Prod,
BACKEND_ENDPOINT: "https://main.documentdb.ext.azure.com",
PORTAL_BACKEND_ENDPOINT: PortalBackendEndpoints.Prod,
MONGO_PROXY_ENDPOINT: MongoProxyEndpoints.Prod,
CASSANDRA_PROXY_ENDPOINT: CassandraProxyEndpoints.Prod,
@@ -148,6 +152,15 @@ export function updateConfigContext(newContext: Partial<ConfigContext>): void {
delete newContext.ARCADIA_ENDPOINT;
}
if (
!validateEndpoint(
newContext.BACKEND_ENDPOINT,
configContext.allowedBackendEndpoints || defaultAllowedBackendEndpoints,
)
) {
delete newContext.BACKEND_ENDPOINT;
}
if (
!validateEndpoint(
newContext.MONGO_PROXY_ENDPOINT,
@@ -157,6 +170,10 @@ export function updateConfigContext(newContext: Partial<ConfigContext>): void {
delete newContext.MONGO_PROXY_ENDPOINT;
}
if (!validateEndpoint(newContext.MONGO_BACKEND_ENDPOINT, allowedMongoBackendEndpoints)) {
delete newContext.MONGO_BACKEND_ENDPOINT;
}
if (
!validateEndpoint(
newContext.CASSANDRA_PROXY_ENDPOINT,

View File

@@ -42,11 +42,6 @@ export interface DatabaseAccountExtendedProperties {
publicNetworkAccess?: string;
enablePriorityBasedExecution?: boolean;
vcoreMongoEndpoint?: string;
virtualNetworkRules?: VNetRule[];
}
export interface VNetRule {
id: string;
}
export interface DatabaseAccountResponseLocation {
@@ -280,12 +275,6 @@ export interface Offer {
offerReplacePending: boolean;
instantMaximumThroughput?: number;
softAllowedMaximumThroughput?: number;
throughputBuckets?: ThroughputBucket[];
}
export interface ThroughputBucket {
id: number;
maxThroughputPercentage: number;
}
export interface SDKOfferDefinition extends Resource {
@@ -408,7 +397,6 @@ export interface UpdateOfferParams {
collectionId?: string;
migrateToAutoPilot?: boolean;
migrateToManual?: boolean;
throughputBuckets?: ThroughputBucket[];
}
export interface Notification {

View File

@@ -406,6 +406,7 @@ export interface DataExplorerInputsFrame {
csmEndpoint?: string;
dnsSuffix?: string;
serverId?: string;
extensionEndpoint?: string;
portalBackendEndpoint?: string;
mongoProxyEndpoint?: string;
cassandraProxyEndpoint?: string;

View File

@@ -96,17 +96,17 @@ export const createCollectionContextMenuButton = (
iconSrc: HostedTerminalIcon,
onClick: () => {
const selectedCollection: ViewModels.Collection = useSelectedNode.getState().findSelectedCollection();
if (useNotebook.getState().isShellEnabled || userContext.features.enableCloudShell) {
if (useNotebook.getState().isShellEnabled) {
container.openNotebookTerminal(ViewModels.TerminalKind.Mongo);
} else {
selectedCollection && selectedCollection.onNewMongoShellClick();
}
},
label: (useNotebook.getState().isShellEnabled || userContext.features.enableCloudShell) ? "Open Mongo Shell" : "New Shell",
label: useNotebook.getState().isShellEnabled ? "Open Mongo Shell" : "New Shell",
});
}
if ((useNotebook.getState().isShellEnabled || userContext.features.enableCloudShell) && userContext.apiType === "Cassandra") {
if (useNotebook.getState().isShellEnabled && userContext.apiType === "Cassandra") {
items.push({
iconSrc: HostedTerminalIcon,
onClick: () => {

View File

@@ -1,7 +1,5 @@
import { AuthType } from "AuthType";
import { shallow } from "enzyme";
import ko from "knockout";
import { Features } from "Platform/Hosted/extractFeatures";
import React from "react";
import { updateCollection } from "../../../Common/dataAccess/updateCollection";
import { updateOffer } from "../../../Common/dataAccess/updateOffer";
@@ -249,42 +247,4 @@ describe("SettingsComponent", () => {
expect(conflictResolutionPolicy.mode).toEqual(DataModels.ConflictResolutionMode.Custom);
expect(conflictResolutionPolicy.conflictResolutionProcedure).toEqual(expectSprocPath);
});
it("should save throughput bucket changes when Save button is clicked", async () => {
updateUserContext({
apiType: "SQL",
features: { enableThroughputBuckets: true } as Features,
authType: AuthType.AAD,
});
const wrapper = shallow(<SettingsComponent {...baseProps} />);
const settingsComponentInstance = wrapper.instance() as SettingsComponent;
const isEnabled = settingsComponentInstance["throughputBucketsEnabled"];
expect(isEnabled).toBe(true);
wrapper.setState({
isThroughputBucketsSaveable: true,
throughputBuckets: [
{ id: 1, maxThroughputPercentage: 70 },
{ id: 2, maxThroughputPercentage: 60 },
],
});
await settingsComponentInstance.onSaveClick();
expect(updateOffer).toHaveBeenCalledWith({
databaseId: collection.databaseId,
collectionId: collection.id(),
currentOffer: expect.any(Object),
autopilotThroughput: collection.offer().autoscaleMaxThroughput,
manualThroughput: collection.offer().manualThroughput,
throughputBuckets: [
{ id: 1, maxThroughputPercentage: 70 },
{ id: 2, maxThroughputPercentage: 60 },
],
});
expect(wrapper.state("isThroughputBucketsSaveable")).toBe(false);
});
});

View File

@@ -7,10 +7,6 @@ import {
ContainerPolicyComponent,
ContainerPolicyComponentProps,
} from "Explorer/Controls/Settings/SettingsSubComponents/ContainerPolicyComponent";
import {
ThroughputBucketsComponent,
ThroughputBucketsComponentProps,
} from "Explorer/Controls/Settings/SettingsSubComponents/ThroughputInputComponents/ThroughputBucketsComponent";
import { useDatabases } from "Explorer/useDatabases";
import { isFullTextSearchEnabled, isVectorSearchEnabled } from "Utils/CapabilityUtils";
import { isRunningOnPublicCloud } from "Utils/CloudUtils";
@@ -90,8 +86,6 @@ export interface SettingsComponentState {
wasAutopilotOriginallySet: boolean;
isScaleSaveable: boolean;
isScaleDiscardable: boolean;
throughputBuckets: DataModels.ThroughputBucket[];
throughputBucketsBaseline: DataModels.ThroughputBucket[];
throughputError: string;
timeToLive: TtlType;
@@ -110,7 +104,6 @@ export interface SettingsComponentState {
changeFeedPolicyBaseline: ChangeFeedPolicyState;
isSubSettingsSaveable: boolean;
isSubSettingsDiscardable: boolean;
isThroughputBucketsSaveable: boolean;
vectorEmbeddingPolicy: DataModels.VectorEmbeddingPolicy;
vectorEmbeddingPolicyBaseline: DataModels.VectorEmbeddingPolicy;
@@ -165,7 +158,6 @@ export class SettingsComponent extends React.Component<SettingsComponentProps, S
private isVectorSearchEnabled: boolean;
private isFullTextSearchEnabled: boolean;
private totalThroughputUsed: number;
private throughputBucketsEnabled: boolean;
public mongoDBCollectionResource: MongoDBCollectionResource;
constructor(props: SettingsComponentProps) {
@@ -183,10 +175,6 @@ export class SettingsComponent extends React.Component<SettingsComponentProps, S
this.isFullTextSearchEnabled = isFullTextSearchEnabled() && !hasDatabaseSharedThroughput(this.collection);
this.changeFeedPolicyVisible = userContext.features.enableChangeFeedPolicy;
this.throughputBucketsEnabled =
userContext.apiType === "SQL" &&
userContext.features.enableThroughputBuckets &&
userContext.authType === AuthType.AAD;
// Mongo container with system partition key still treat as "Fixed"
this.isFixedContainer =
@@ -205,8 +193,6 @@ export class SettingsComponent extends React.Component<SettingsComponentProps, S
wasAutopilotOriginallySet: false,
isScaleSaveable: false,
isScaleDiscardable: false,
throughputBuckets: undefined,
throughputBucketsBaseline: undefined,
throughputError: undefined,
timeToLive: undefined,
@@ -225,7 +211,6 @@ export class SettingsComponent extends React.Component<SettingsComponentProps, S
changeFeedPolicyBaseline: undefined,
isSubSettingsSaveable: false,
isSubSettingsDiscardable: false,
isThroughputBucketsSaveable: false,
vectorEmbeddingPolicy: undefined,
vectorEmbeddingPolicyBaseline: undefined,
@@ -342,8 +327,7 @@ export class SettingsComponent extends React.Component<SettingsComponentProps, S
this.state.isIndexingPolicyDirty ||
this.state.isConflictResolutionDirty ||
this.state.isComputedPropertiesDirty ||
(!!this.state.currentMongoIndexes && this.state.isMongoIndexingPolicySaveable) ||
this.state.isThroughputBucketsSaveable
(!!this.state.currentMongoIndexes && this.state.isMongoIndexingPolicySaveable)
);
};
@@ -355,8 +339,7 @@ export class SettingsComponent extends React.Component<SettingsComponentProps, S
this.state.isIndexingPolicyDirty ||
this.state.isConflictResolutionDirty ||
this.state.isComputedPropertiesDirty ||
(!!this.state.currentMongoIndexes && this.state.isMongoIndexingPolicyDiscardable) ||
this.state.isThroughputBucketsSaveable
(!!this.state.currentMongoIndexes && this.state.isMongoIndexingPolicyDiscardable)
);
};
@@ -436,8 +419,6 @@ export class SettingsComponent extends React.Component<SettingsComponentProps, S
this.setState({
throughput: this.state.throughputBaseline,
throughputBuckets: this.state.throughputBucketsBaseline,
throughputBucketsBaseline: this.state.throughputBucketsBaseline,
timeToLive: this.state.timeToLiveBaseline,
timeToLiveSeconds: this.state.timeToLiveSecondsBaseline,
displayedTtlSeconds: this.state.displayedTtlSecondsBaseline,
@@ -460,7 +441,6 @@ export class SettingsComponent extends React.Component<SettingsComponentProps, S
isScaleSaveable: false,
isScaleDiscardable: false,
isSubSettingsSaveable: false,
isThroughputBucketsSaveable: false,
isSubSettingsDiscardable: false,
isContainerPolicyDirty: false,
isIndexingPolicyDirty: false,
@@ -499,10 +479,6 @@ export class SettingsComponent extends React.Component<SettingsComponentProps, S
private onIndexingPolicyContentChange = (newIndexingPolicy: DataModels.IndexingPolicy): void =>
this.setState({ indexingPolicyContent: newIndexingPolicy });
private onThroughputBucketsSaveableChange = (isSaveable: boolean): void => {
this.setState({ isThroughputBucketsSaveable: isSaveable });
};
private resetShouldDiscardContainerPolicies = (): void => this.setState({ shouldDiscardContainerPolicies: false });
private resetShouldDiscardIndexingPolicy = (): void => this.setState({ shouldDiscardIndexingPolicy: false });
@@ -773,13 +749,9 @@ export class SettingsComponent extends React.Component<SettingsComponentProps, S
] as DataModels.ComputedProperties;
}
const throughputBuckets = this.offer?.throughputBuckets;
return {
throughput: offerThroughput,
throughputBaseline: offerThroughput,
throughputBuckets,
throughputBucketsBaseline: throughputBuckets,
changeFeedPolicy: changeFeedPolicy,
changeFeedPolicyBaseline: changeFeedPolicy,
timeToLive: timeToLive,
@@ -867,10 +839,6 @@ export class SettingsComponent extends React.Component<SettingsComponentProps, S
this.setState({ throughput: newThroughput, throughputError });
};
private onThroughputBucketChange = (throughputBuckets: DataModels.ThroughputBucket[]): void => {
this.setState({ throughputBuckets });
};
private onAutoPilotSelected = (isAutoPilotSelected: boolean): void =>
this.setState({ isAutoPilotSelected: isAutoPilotSelected });
@@ -1061,24 +1029,6 @@ export class SettingsComponent extends React.Component<SettingsComponentProps, S
}
}
if (this.throughputBucketsEnabled && this.state.isThroughputBucketsSaveable) {
const updatedOffer: DataModels.Offer = await updateOffer({
databaseId: this.collection.databaseId,
collectionId: this.collection.id(),
currentOffer: this.collection.offer(),
autopilotThroughput: this.collection.offer().autoscaleMaxThroughput
? this.collection.offer().autoscaleMaxThroughput
: undefined,
manualThroughput: this.collection.offer().manualThroughput
? this.collection.offer().manualThroughput
: undefined,
throughputBuckets: this.state.throughputBuckets,
});
this.collection.offer(updatedOffer);
this.offer = updatedOffer;
this.setState({ isThroughputBucketsSaveable: false });
}
if (this.state.isScaleSaveable) {
const updateOfferParams: DataModels.UpdateOfferParams = {
databaseId: this.collection.databaseId,
@@ -1259,13 +1209,6 @@ export class SettingsComponent extends React.Component<SettingsComponentProps, S
onConflictResolutionDirtyChange: this.onConflictResolutionDirtyChange,
};
const throughputBucketsComponentProps: ThroughputBucketsComponentProps = {
currentBuckets: this.state.throughputBuckets,
throughputBucketsBaseline: this.state.throughputBucketsBaseline,
onBucketsChange: this.onThroughputBucketChange,
onSaveableChange: this.onThroughputBucketsSaveableChange,
};
const partitionKeyComponentProps: PartitionKeyComponentProps = {
database: useDatabases.getState().findDatabaseWithId(this.collection.databaseId),
collection: this.collection,
@@ -1328,13 +1271,6 @@ export class SettingsComponent extends React.Component<SettingsComponentProps, S
});
}
if (this.throughputBucketsEnabled) {
tabs.push({
tab: SettingsV2TabTypes.ThroughputBucketsTab,
content: <ThroughputBucketsComponent {...throughputBucketsComponentProps} />,
});
}
const pivotProps: IPivotProps = {
onLinkClick: this.onPivotChange,
selectedKey: SettingsV2TabTypes[this.state.selectedTab],

View File

@@ -1,177 +0,0 @@
import "@testing-library/jest-dom";
import { fireEvent, render, screen } from "@testing-library/react";
import React from "react";
import { ThroughputBucketsComponent } from "./ThroughputBucketsComponent";
describe("ThroughputBucketsComponent", () => {
const mockOnBucketsChange = jest.fn();
const mockOnSaveableChange = jest.fn();
const defaultProps = {
currentBuckets: [
{ id: 1, maxThroughputPercentage: 50 },
{ id: 2, maxThroughputPercentage: 60 },
],
throughputBucketsBaseline: [
{ id: 1, maxThroughputPercentage: 40 },
{ id: 2, maxThroughputPercentage: 50 },
],
onBucketsChange: mockOnBucketsChange,
onSaveableChange: mockOnSaveableChange,
};
beforeEach(() => {
jest.clearAllMocks();
});
it("renders the correct number of buckets", () => {
render(<ThroughputBucketsComponent {...defaultProps} />);
expect(screen.getAllByText(/Group \d+/)).toHaveLength(5);
});
it("renders buckets in the correct order even if input is unordered", () => {
const unorderedBuckets = [
{ id: 2, maxThroughputPercentage: 60 },
{ id: 1, maxThroughputPercentage: 50 },
];
render(<ThroughputBucketsComponent {...defaultProps} currentBuckets={unorderedBuckets} />);
const bucketLabels = screen.getAllByText(/Group \d+/).map((el) => el.textContent);
expect(bucketLabels).toEqual(["Group 1 (Data Explorer Query Bucket)", "Group 2", "Group 3", "Group 4", "Group 5"]);
});
it("renders all provided buckets even if they exceed the max default bucket count", () => {
const oversizedBuckets = [
{ id: 1, maxThroughputPercentage: 50 },
{ id: 2, maxThroughputPercentage: 60 },
{ id: 3, maxThroughputPercentage: 70 },
{ id: 4, maxThroughputPercentage: 80 },
{ id: 5, maxThroughputPercentage: 90 },
{ id: 6, maxThroughputPercentage: 100 },
{ id: 7, maxThroughputPercentage: 40 },
];
render(<ThroughputBucketsComponent {...defaultProps} currentBuckets={oversizedBuckets} />);
expect(screen.getAllByText(/Group \d+/)).toHaveLength(7);
expect(screen.getByDisplayValue("50")).toBeInTheDocument();
expect(screen.getByDisplayValue("60")).toBeInTheDocument();
expect(screen.getByDisplayValue("70")).toBeInTheDocument();
expect(screen.getByDisplayValue("80")).toBeInTheDocument();
expect(screen.getByDisplayValue("90")).toBeInTheDocument();
expect(screen.getByDisplayValue("100")).toBeInTheDocument();
expect(screen.getByDisplayValue("40")).toBeInTheDocument();
});
it("calls onBucketsChange when a bucket value changes", () => {
render(<ThroughputBucketsComponent {...defaultProps} />);
const input = screen.getByDisplayValue("50");
fireEvent.change(input, { target: { value: "70" } });
expect(mockOnBucketsChange).toHaveBeenCalledWith([
{ id: 1, maxThroughputPercentage: 70 },
{ id: 2, maxThroughputPercentage: 60 },
{ id: 3, maxThroughputPercentage: 100 },
{ id: 4, maxThroughputPercentage: 100 },
{ id: 5, maxThroughputPercentage: 100 },
]);
});
it("triggers onSaveableChange when values change", () => {
render(<ThroughputBucketsComponent {...defaultProps} />);
const input = screen.getByDisplayValue("50");
fireEvent.change(input, { target: { value: "80" } });
expect(mockOnSaveableChange).toHaveBeenCalledWith(true);
});
it("updates state consistently after multiple changes to different buckets", () => {
render(<ThroughputBucketsComponent {...defaultProps} />);
const input1 = screen.getByDisplayValue("50");
fireEvent.change(input1, { target: { value: "70" } });
const input2 = screen.getByDisplayValue("60");
fireEvent.change(input2, { target: { value: "80" } });
expect(mockOnBucketsChange).toHaveBeenCalledWith([
{ id: 1, maxThroughputPercentage: 70 },
{ id: 2, maxThroughputPercentage: 80 },
{ id: 3, maxThroughputPercentage: 100 },
{ id: 4, maxThroughputPercentage: 100 },
{ id: 5, maxThroughputPercentage: 100 },
]);
});
it("resets to baseline when currentBuckets are reset", () => {
const { rerender } = render(<ThroughputBucketsComponent {...defaultProps} />);
const input1 = screen.getByDisplayValue("50");
fireEvent.change(input1, { target: { value: "70" } });
rerender(<ThroughputBucketsComponent {...defaultProps} currentBuckets={defaultProps.throughputBucketsBaseline} />);
expect(screen.getByDisplayValue("40")).toBeInTheDocument();
expect(screen.getByDisplayValue("50")).toBeInTheDocument();
});
it("does not call onBucketsChange when value remains unchanged", () => {
render(<ThroughputBucketsComponent {...defaultProps} />);
const input = screen.getByDisplayValue("50");
fireEvent.change(input, { target: { value: "50" } });
expect(mockOnBucketsChange).not.toHaveBeenCalled();
});
it("disables input and slider when maxThroughputPercentage is 100", () => {
render(
<ThroughputBucketsComponent
{...defaultProps}
currentBuckets={[
{ id: 1, maxThroughputPercentage: 100 },
{ id: 2, maxThroughputPercentage: 50 },
]}
/>,
);
const disabledInputs = screen.getAllByDisplayValue("100");
expect(disabledInputs.length).toBeGreaterThan(0);
expect(disabledInputs[0]).toBeDisabled();
const sliders = screen.getAllByRole("slider");
expect(sliders.length).toBeGreaterThan(0);
expect(sliders[0]).toHaveAttribute("aria-disabled", "true");
expect(sliders[1]).toHaveAttribute("aria-disabled", "false");
});
it("toggles bucket value between 50 and 100 with switch", () => {
render(<ThroughputBucketsComponent {...defaultProps} />);
const toggles = screen.getAllByRole("switch");
fireEvent.click(toggles[0]);
expect(mockOnBucketsChange).toHaveBeenCalledWith([
{ id: 1, maxThroughputPercentage: 100 },
{ id: 2, maxThroughputPercentage: 60 },
{ id: 3, maxThroughputPercentage: 100 },
{ id: 4, maxThroughputPercentage: 100 },
{ id: 5, maxThroughputPercentage: 100 },
]);
fireEvent.click(toggles[0]);
expect(mockOnBucketsChange).toHaveBeenCalledWith([
{ id: 1, maxThroughputPercentage: 50 },
{ id: 2, maxThroughputPercentage: 60 },
{ id: 3, maxThroughputPercentage: 100 },
{ id: 4, maxThroughputPercentage: 100 },
{ id: 5, maxThroughputPercentage: 100 },
]);
});
it("ensures default buckets are used when no buckets are provided", () => {
render(<ThroughputBucketsComponent {...defaultProps} currentBuckets={[]} />);
expect(screen.getAllByText(/Group \d+/)).toHaveLength(5);
expect(screen.getAllByDisplayValue("100")).toHaveLength(5);
});
});

View File

@@ -1,105 +0,0 @@
import { Label, Slider, Stack, TextField, Toggle } from "@fluentui/react";
import { ThroughputBucket } from "Contracts/DataModels";
import React, { FC, useEffect, useState } from "react";
import { isDirty } from "../../SettingsUtils";
const MAX_BUCKET_SIZES = 5;
const DEFAULT_BUCKETS = Array.from({ length: MAX_BUCKET_SIZES }, (_, i) => ({
id: i + 1,
maxThroughputPercentage: 100,
}));
export interface ThroughputBucketsComponentProps {
currentBuckets: ThroughputBucket[];
throughputBucketsBaseline: ThroughputBucket[];
onBucketsChange: (updatedBuckets: ThroughputBucket[]) => void;
onSaveableChange: (isSaveable: boolean) => void;
}
export const ThroughputBucketsComponent: FC<ThroughputBucketsComponentProps> = ({
currentBuckets,
throughputBucketsBaseline,
onBucketsChange,
onSaveableChange,
}) => {
const getThroughputBuckets = (buckets: ThroughputBucket[]): ThroughputBucket[] => {
if (!buckets || buckets.length === 0) {
return DEFAULT_BUCKETS;
}
const maxBuckets = Math.max(DEFAULT_BUCKETS.length, buckets.length);
const adjustedDefaultBuckets = Array.from({ length: maxBuckets }, (_, i) => ({
id: i + 1,
maxThroughputPercentage: 100,
}));
return adjustedDefaultBuckets.map(
(defaultBucket) => buckets?.find((bucket) => bucket.id === defaultBucket.id) || defaultBucket,
);
};
const [throughputBuckets, setThroughputBuckets] = useState<ThroughputBucket[]>(getThroughputBuckets(currentBuckets));
useEffect(() => {
setThroughputBuckets(getThroughputBuckets(currentBuckets));
onSaveableChange(false);
}, [currentBuckets]);
useEffect(() => {
const isChanged = isDirty(throughputBuckets, getThroughputBuckets(throughputBucketsBaseline));
onSaveableChange(isChanged);
}, [throughputBuckets]);
const handleBucketChange = (id: number, newValue: number) => {
const updatedBuckets = throughputBuckets.map((bucket) =>
bucket.id === id ? { ...bucket, maxThroughputPercentage: newValue } : bucket,
);
setThroughputBuckets(updatedBuckets);
const settingsChanged = isDirty(updatedBuckets, throughputBuckets);
settingsChanged && onBucketsChange(updatedBuckets);
};
const onToggle = (id: number, checked: boolean) => {
handleBucketChange(id, checked ? 50 : 100);
};
return (
<Stack tokens={{ childrenGap: "m" }} styles={{ root: { width: "70%", maxWidth: 700 } }}>
<Label>Throughput Buckets</Label>
<Stack>
{throughputBuckets?.map((bucket) => (
<Stack key={bucket.id} horizontal tokens={{ childrenGap: 8 }} verticalAlign="center">
<Slider
min={1}
max={100}
step={1}
value={bucket.maxThroughputPercentage}
onChange={(newValue) => handleBucketChange(bucket.id, newValue)}
showValue={false}
label={`Group ${bucket.id}${bucket.id === 1 ? " (Data Explorer Query Bucket)" : ""}`}
styles={{ root: { flex: 2, maxWidth: 400 } }}
disabled={bucket.maxThroughputPercentage === 100}
/>
<TextField
value={bucket.maxThroughputPercentage.toString()}
onChange={(event, newValue) => handleBucketChange(bucket.id, parseInt(newValue || "0", 10))}
type="number"
suffix="%"
styles={{
fieldGroup: { width: 80 },
}}
disabled={bucket.maxThroughputPercentage === 100}
/>
<Toggle
onText="Active"
offText="Inactive"
checked={bucket.maxThroughputPercentage !== 100}
onChange={(event, checked) => onToggle(bucket.id, checked)}
styles={{ root: { marginBottom: 0 }, text: { fontSize: 12 } }}
></Toggle>
</Stack>
))}
</Stack>
</Stack>
);
};

View File

@@ -11,8 +11,7 @@ export type isDirtyTypes =
| DataModels.IndexingPolicy
| DataModels.ComputedProperties
| DataModels.VectorEmbedding[]
| DataModels.FullTextPolicy
| DataModels.ThroughputBucket[];
| DataModels.FullTextPolicy;
export const TtlOff = "off";
export const TtlOn = "on";
export const TtlOnNoDefault = "on-nodefault";
@@ -56,7 +55,6 @@ export enum SettingsV2TabTypes {
PartitionKeyTab,
ComputedPropertiesTab,
ContainerVectorPolicyTab,
ThroughputBucketsTab,
}
export enum ContainerPolicyTabTypes {
@@ -169,8 +167,6 @@ export const getTabTitle = (tab: SettingsV2TabTypes): string => {
return "Computed Properties";
case SettingsV2TabTypes.ContainerVectorPolicyTab:
return "Container Policies";
case SettingsV2TabTypes.ThroughputBucketsTab:
return "Throughput Buckets";
default:
throw new Error(`Unknown tab ${tab}`);
}

View File

@@ -906,28 +906,25 @@ export default class Explorer {
}
public async openNotebookTerminal(kind: ViewModels.TerminalKind): Promise<void> {
if (userContext.features.enableCloudShell || !useNotebook.getState().isPhoenixFeatures) {
this.connectToTerminal(kind);
return;
}
await this.allocateContainer(PoolIdType.DefaultPoolId);
const notebookServerInfo = useNotebook.getState().notebookServerInfo;
if (notebookServerInfo?.notebookServerEndpoint) {
this.connectToTerminal(kind);
if (useNotebook.getState().isPhoenixFeatures) {
await this.allocateContainer(PoolIdType.DefaultPoolId);
const notebookServerInfo = useNotebook.getState().notebookServerInfo;
if (notebookServerInfo && notebookServerInfo.notebookServerEndpoint !== undefined) {
this.connectToNotebookTerminal(kind);
} else {
useDialog
.getState()
.showOkModalDialog(
"Failed to connect",
"Failed to connect to temporary workspace. This could happen because of network issues. Please refresh the page and try again.",
);
}
} else {
useDialog
.getState()
.showOkModalDialog(
"Failed to connect",
"Failed to connect to temporary workspace. This could happen because of network issues. Please refresh the page and try again."
);
this.connectToNotebookTerminal(kind);
}
}
private connectToTerminal(kind: ViewModels.TerminalKind): void {
private connectToNotebookTerminal(kind: ViewModels.TerminalKind): void {
let title: string;
switch (kind) {
@@ -1130,7 +1127,7 @@ export default class Explorer {
await this.initNotebooks(userContext.databaseAccount);
}
this.refreshSampleData();
await this.refreshSampleData();
}
public async configureCopilot(): Promise<void> {
@@ -1155,27 +1152,26 @@ export default class Explorer {
.setCopilotSampleDBEnabled(copilotEnabled && copilotUserDBEnabled && copilotSampleDBEnabled);
}
public refreshSampleData(): void {
if (!userContext.sampleDataConnectionInfo) {
public async refreshSampleData(): Promise<void> {
try {
if (!userContext.sampleDataConnectionInfo) {
return;
}
const collection: DataModels.Collection = await readSampleCollection();
if (!collection) {
return;
}
const databaseId = userContext.sampleDataConnectionInfo?.databaseId;
if (!databaseId) {
return;
}
const sampleDataResourceTokenCollection = new ResourceTokenCollection(this, databaseId, collection, true);
useDatabases.setState({ sampleDataResourceTokenCollection });
} catch (error) {
Logger.logError(getErrorMessage(error), "Explorer");
return;
}
const databaseId = userContext.sampleDataConnectionInfo?.databaseId;
if (!databaseId) {
return;
}
readSampleCollection()
.then((collection: DataModels.Collection) => {
if (!collection) {
return;
}
const sampleDataResourceTokenCollection = new ResourceTokenCollection(this, databaseId, collection, true);
useDatabases.setState({ sampleDataResourceTokenCollection });
})
.catch((error) => {
Logger.logError(getErrorMessage(error), "Explorer/refreshSampleData");
});
}
}

View File

@@ -125,13 +125,13 @@ export function createContextCommandBarButtons(
const buttons: CommandButtonComponentProps[] = [];
if (!selectedNodeState.isDatabaseNodeOrNoneSelected() && userContext.apiType === "Mongo") {
const label = (useNotebook.getState().isShellEnabled || userContext.features.enableCloudShell) ? "Open Mongo Shell" : "New Shell";
const label = useNotebook.getState().isShellEnabled ? "Open Mongo Shell" : "New Shell";
const newMongoShellBtn: CommandButtonComponentProps = {
iconSrc: HostedTerminalIcon,
iconAlt: label,
onCommandClick: () => {
const selectedCollection: ViewModels.Collection = selectedNodeState.findSelectedCollection();
if (useNotebook.getState().isShellEnabled || userContext.features.enableCloudShell) {
if (useNotebook.getState().isShellEnabled) {
container.openNotebookTerminal(ViewModels.TerminalKind.Mongo);
} else {
selectedCollection && selectedCollection.onNewMongoShellClick();
@@ -145,7 +145,7 @@ export function createContextCommandBarButtons(
}
if (
(useNotebook.getState().isShellEnabled || userContext.features.enableCloudShell) &&
useNotebook.getState().isShellEnabled &&
!selectedNodeState.isDatabaseNodeOrNoneSelected() &&
userContext.apiType === "Cassandra"
) {

View File

@@ -1,5 +1,6 @@
import { isPublicInternetAccessAllowed } from "Common/DatabaseAccountUtility";
import { PhoenixClient } from "Phoenix/PhoenixClient";
import { useNewPortalBackendEndpoint } from "Utils/EndpointUtils";
import { cloneDeep } from "lodash";
import create, { UseStore } from "zustand";
import { AuthType } from "../../AuthType";
@@ -127,7 +128,9 @@ export const useNotebook: UseStore<NotebookState> = create((set, get) => ({
userContext.apiType === "Postgres" || userContext.apiType === "VCoreMongo"
? databaseAccount?.location
: databaseAccount?.properties?.writeLocations?.[0]?.locationName.toLowerCase();
const disallowedLocationsUri: string = `${configContext.PORTAL_BACKEND_ENDPOINT}/api/disallowedlocations`;
const disallowedLocationsUri: string = useNewPortalBackendEndpoint(Constants.BackendApi.DisallowedLocations)
? `${configContext.PORTAL_BACKEND_ENDPOINT}/api/disallowedlocations`
: `${configContext.BACKEND_ENDPOINT}/api/disallowedLocations`;
const authorizationHeader = getAuthorizationHeader();
try {
const response = await fetch(disallowedLocationsUri, {

View File

@@ -1,6 +1,7 @@
import { FeedOptions } from "@azure/cosmos";
import {
Areas,
BackendApi,
ConnectionStatusType,
ContainerStatusType,
HttpStatusCodes,
@@ -31,6 +32,7 @@ import { Action } from "Shared/Telemetry/TelemetryConstants";
import { traceFailure, traceStart, traceSuccess } from "Shared/Telemetry/TelemetryProcessor";
import { userContext } from "UserContext";
import { getAuthorizationHeader } from "Utils/AuthorizationUtils";
import { useNewPortalBackendEndpoint } from "Utils/EndpointUtils";
import { queryPagesUntilContentPresent } from "Utils/QueryUtils";
import { QueryCopilotState, useQueryCopilot } from "hooks/useQueryCopilot";
import { useTabs } from "hooks/useTabs";
@@ -80,7 +82,9 @@ export const isCopilotFeatureRegistered = async (subscriptionId: string): Promis
};
export const getCopilotEnabled = async (): Promise<boolean> => {
const backendEndpoint: string = configContext.PORTAL_BACKEND_ENDPOINT;
const backendEndpoint: string = useNewPortalBackendEndpoint(BackendApi.PortalSettings)
? configContext.PORTAL_BACKEND_ENDPOINT
: configContext.BACKEND_ENDPOINT;
const url = `${backendEndpoint}/api/portalsettings/querycopilot`;
const authorizationHeader: AuthorizationTokenHeaderMetadata = getAuthorizationHeader();

View File

@@ -3,7 +3,7 @@ import * as ko from "knockout";
import Q from "q";
import { AuthType } from "../../AuthType";
import * as Constants from "../../Common/Constants";
import { CassandraProxyAPIs } from "../../Common/Constants";
import { CassandraProxyAPIs, CassandraProxyEndpoints } from "../../Common/Constants";
import { handleError } from "../../Common/ErrorHandlingUtils";
import * as HeadersUtility from "../../Common/HeadersUtility";
import { createDocument } from "../../Common/dataAccess/createDocument";
@@ -264,6 +264,9 @@ export class CassandraAPIDataClient extends TableDataClient {
shouldNotify?: boolean,
paginationToken?: string,
): Promise<Entities.IListTableEntitiesResult> {
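// Route through the new Cassandra proxy only when it is enabled for this API; otherwise fall back to the legacy backend endpoint.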
if (!this.useCassandraProxyEndpoint("postQuery")) {
return this.queryDocuments_ToBeDeprecated(collection, query, shouldNotify, paginationToken);
}
const clearMessage =
shouldNotify && NotificationConsoleUtils.logConsoleProgress(`Querying rows for table ${collection.id()}`);
try {
@@ -306,6 +309,55 @@ export class CassandraAPIDataClient extends TableDataClient {
}
}
public async queryDocuments_ToBeDeprecated(
collection: ViewModels.Collection,
query: string,
shouldNotify?: boolean,
paginationToken?: string,
): Promise<Entities.IListTableEntitiesResult> {
const clearMessage =
shouldNotify && NotificationConsoleUtils.logConsoleProgress(`Querying rows for table ${collection.id()}`);
try {
const { authType, databaseAccount } = userContext;
const apiEndpoint: string =
authType === AuthType.EncryptedToken
? Constants.CassandraBackend.guestQueryApi
: Constants.CassandraBackend.queryApi;
const data: any = await $.ajax(`${configContext.BACKEND_ENDPOINT}/${apiEndpoint}`, {
type: "POST",
data: {
accountName: databaseAccount?.name,
cassandraEndpoint: this.trimCassandraEndpoint(databaseAccount?.properties.cassandraEndpoint),
resourceId: databaseAccount?.id,
keyspaceId: collection.databaseId,
tableId: collection.id(),
query,
paginationToken,
},
beforeSend: this.setAuthorizationHeader as any,
cache: false,
});
shouldNotify &&
NotificationConsoleUtils.logConsoleInfo(
`Successfully fetched ${data.result.length} rows for table ${collection.id()}`,
);
return {
Results: data.result,
ContinuationToken: data.paginationToken,
};
} catch (error) {
shouldNotify &&
handleError(
error,
"QueryDocuments_ToBeDeprecated_Cassandra",
`Failed to query rows for table ${collection.id()}`,
);
throw error;
} finally {
clearMessage?.();
}
}
public async deleteDocuments(
collection: ViewModels.Collection,
entitiesToDelete: Entities.ITableEntity[],
@@ -419,6 +471,10 @@ export class CassandraAPIDataClient extends TableDataClient {
}
public getTableKeys(collection: ViewModels.Collection): Q.Promise<CassandraTableKeys> {
if (!this.useCassandraProxyEndpoint("getKeys")) {
return this.getTableKeys_ToBeDeprecated(collection);
}
if (!!collection.cassandraKeys) {
return Q.resolve(collection.cassandraKeys);
}
@@ -459,7 +515,52 @@ export class CassandraAPIDataClient extends TableDataClient {
return deferred.promise;
}
public getTableKeys_ToBeDeprecated(collection: ViewModels.Collection): Q.Promise<CassandraTableKeys> {
if (!!collection.cassandraKeys) {
return Q.resolve(collection.cassandraKeys);
}
const clearInProgressMessage = logConsoleProgress(`Fetching keys for table ${collection.id()}`);
const { authType, databaseAccount } = userContext;
const apiEndpoint: string =
authType === AuthType.EncryptedToken
? Constants.CassandraBackend.guestKeysApi
: Constants.CassandraBackend.keysApi;
let endpoint = `${configContext.BACKEND_ENDPOINT}/${apiEndpoint}`;
const deferred = Q.defer<CassandraTableKeys>();
$.ajax(endpoint, {
type: "POST",
data: {
accountName: databaseAccount?.name,
cassandraEndpoint: this.trimCassandraEndpoint(databaseAccount?.properties.cassandraEndpoint),
resourceId: databaseAccount?.id,
keyspaceId: collection.databaseId,
tableId: collection.id(),
},
beforeSend: this.setAuthorizationHeader as any,
cache: false,
})
.then(
(data: CassandraTableKeys) => {
collection.cassandraKeys = data;
logConsoleInfo(`Successfully fetched keys for table ${collection.id()}`);
deferred.resolve(data);
},
(error: any) => {
const errorText = error.responseJSON?.message ?? JSON.stringify(error);
handleError(errorText, "FetchKeysCassandra", `Error fetching keys for table ${collection.id()}`);
deferred.reject(errorText);
},
)
.done(clearInProgressMessage);
return deferred.promise;
}
public getTableSchema(collection: ViewModels.Collection): Q.Promise<CassandraTableKey[]> {
if (!this.useCassandraProxyEndpoint("getSchema")) {
return this.getTableSchema_ToBeDeprecated(collection);
}
if (!!collection.cassandraSchema) {
return Q.resolve(collection.cassandraSchema);
}
@@ -501,7 +602,52 @@ export class CassandraAPIDataClient extends TableDataClient {
return deferred.promise;
}
public getTableSchema_ToBeDeprecated(collection: ViewModels.Collection): Q.Promise<CassandraTableKey[]> {
if (!!collection.cassandraSchema) {
return Q.resolve(collection.cassandraSchema);
}
const clearInProgressMessage = logConsoleProgress(`Fetching schema for table ${collection.id()}`);
const { databaseAccount, authType } = userContext;
const apiEndpoint: string =
authType === AuthType.EncryptedToken
? Constants.CassandraBackend.guestSchemaApi
: Constants.CassandraBackend.schemaApi;
let endpoint = `${configContext.BACKEND_ENDPOINT}/${apiEndpoint}`;
const deferred = Q.defer<CassandraTableKey[]>();
$.ajax(endpoint, {
type: "POST",
data: {
accountName: databaseAccount?.name,
cassandraEndpoint: this.trimCassandraEndpoint(databaseAccount?.properties.cassandraEndpoint),
resourceId: databaseAccount?.id,
keyspaceId: collection.databaseId,
tableId: collection.id(),
},
beforeSend: this.setAuthorizationHeader as any,
cache: false,
})
.then(
(data: any) => {
collection.cassandraSchema = data.columns;
logConsoleInfo(`Successfully fetched schema for table ${collection.id()}`);
deferred.resolve(data.columns);
},
(error: any) => {
const errorText = error.responseJSON?.message ?? JSON.stringify(error);
handleError(errorText, "FetchSchemaCassandra", `Error fetching schema for table ${collection.id()}`);
deferred.reject(errorText);
},
)
.done(clearInProgressMessage);
return deferred.promise;
}
private createOrDeleteQuery(cassandraEndpoint: string, resourceId: string, query: string): Q.Promise<any> {
if (!this.useCassandraProxyEndpoint("createOrDelete")) {
return this.createOrDeleteQuery_ToBeDeprecated(cassandraEndpoint, resourceId, query);
}
const deferred = Q.defer();
const { authType, databaseAccount } = userContext;
const apiEndpoint: string =
@@ -531,6 +677,38 @@ export class CassandraAPIDataClient extends TableDataClient {
return deferred.promise;
}
private createOrDeleteQuery_ToBeDeprecated(
cassandraEndpoint: string,
resourceId: string,
query: string,
): Q.Promise<any> {
const deferred = Q.defer();
const { authType, databaseAccount } = userContext;
const apiEndpoint: string =
authType === AuthType.EncryptedToken
? Constants.CassandraBackend.guestCreateOrDeleteApi
: Constants.CassandraBackend.createOrDeleteApi;
$.ajax(`${configContext.BACKEND_ENDPOINT}/${apiEndpoint}`, {
type: "POST",
data: {
accountName: databaseAccount?.name,
cassandraEndpoint: this.trimCassandraEndpoint(cassandraEndpoint),
resourceId: resourceId,
query: query,
},
beforeSend: this.setAuthorizationHeader as any,
cache: false,
}).then(
(data: any) => {
deferred.resolve();
},
(reason) => {
deferred.reject(reason);
},
);
return deferred.promise;
}
private trimCassandraEndpoint(cassandraEndpoint: string): string {
if (!cassandraEndpoint) {
return cassandraEndpoint;
@@ -569,4 +747,23 @@ export class CassandraAPIDataClient extends TableDataClient {
private getCassandraPartitionKeyProperty(collection: ViewModels.Collection): string {
return collection.cassandraKeys.partitionKeys[0].property;
}
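// Decides whether a Cassandra operation may use the new Cassandra proxy: the API must be globally enabled,
// or allow-listed in NEW_CASSANDRA_APIS while the configured proxy endpoint is one of the active environments.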
private useCassandraProxyEndpoint(api: string): boolean {
const activeCassandraProxyEndpoints: string[] = [
CassandraProxyEndpoints.Development,
CassandraProxyEndpoints.Mpac,
CassandraProxyEndpoints.Prod,
CassandraProxyEndpoints.Fairfax,
CassandraProxyEndpoints.Mooncake,
];
if (configContext.globallyEnabledCassandraAPIs.includes(api)) {
return true;
}
return (
configContext.NEW_CASSANDRA_APIS?.includes(api) &&
activeCassandraProxyEndpoints.includes(configContext.CASSANDRA_PROXY_ENDPOINT)
);
}
}

View File

@@ -1,126 +0,0 @@
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
*/
import { IDisposable, ITerminalAddon, Terminal } from 'xterm';
interface IAttachOptions {
bidirectional?: boolean;
}
export class AttachAddon implements ITerminalAddon {
private _socket: WebSocket;
private _bidirectional: boolean;
private _disposables: IDisposable[] = [];
private _socketData: string;
constructor(socket: WebSocket, options?: IAttachOptions) {
this._socket = socket;
// always set binary type to arraybuffer, we do not handle blobs
this._socket.binaryType = 'arraybuffer';
this._bidirectional = !(options && options.bidirectional === false);
this._socketData = '';
}
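// Pipes socket messages into the terminal while filtering out status payloads delimited by the ie_us/ie_ue markers,
// buffering partial payloads that arrive across multiple messages.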
public activate(terminal: Terminal): void {
this._disposables.push(
addSocketListener(this._socket, 'message', ev => {
let data: ArrayBuffer | string = ev.data;
const startStatusJson = 'ie_us';
const endStatusJson = 'ie_ue';
if (typeof data === 'object') {
const enc = new TextDecoder("utf-8");
data = enc.decode(ev.data as any);
}
// for example of json object look in TerminalHelper in the socket.onMessage
if (data.includes(startStatusJson) && data.includes(endStatusJson)) {
// process as one line
const statusData = data.split(startStatusJson)[1].split(endStatusJson)[0];
data = data.replace(statusData, '');
data = data.replace(startStatusJson, '');
data = data.replace(endStatusJson, '');
} else if (data.includes(startStatusJson)) {
// check for start
const partialStatusData = data.split(startStatusJson)[1];
this._socketData += partialStatusData;
data = data.replace(partialStatusData, '');
data = data.replace(startStatusJson, '');
} else if (data.includes(endStatusJson)) {
// check for end and process the command
const partialStatusData = data.split(endStatusJson)[0];
this._socketData += partialStatusData;
data = data.replace(partialStatusData, '');
data = data.replace(endStatusJson, '');
this._socketData = '';
} else if (this._socketData.length > 0) {
// check if the line is all data then just concatenate
this._socketData += data;
data = '';
}
terminal.write(data);
})
);
if (this._bidirectional) {
this._disposables.push(terminal.onData(data => this._sendData(data)));
this._disposables.push(terminal.onBinary(data => this._sendBinary(data)));
}
this._disposables.push(addSocketListener(this._socket, 'close', () => this.dispose()));
this._disposables.push(addSocketListener(this._socket, 'error', () => this.dispose()));
}
public dispose(): void {
for (const d of this._disposables) {
d.dispose();
}
}
private _sendData(data: string): void {
if (!this._checkOpenSocket()) {
return;
}
this._socket.send(data);
}
private _sendBinary(data: string): void {
if (!this._checkOpenSocket()) {
return;
}
const buffer = new Uint8Array(data.length);
for (let i = 0; i < data.length; ++i) {
buffer[i] = data.charCodeAt(i) & 255;
}
this._socket.send(buffer);
}
private _checkOpenSocket(): boolean {
switch (this._socket.readyState) {
case WebSocket.OPEN:
return true;
case WebSocket.CONNECTING:
throw new Error('Attach addon was loaded before socket was open');
case WebSocket.CLOSING:
return false;
case WebSocket.CLOSED:
throw new Error('Attach addon socket is closed');
default:
throw new Error('Unexpected socket state');
}
}
}
function addSocketListener<K extends keyof WebSocketEventMap>(socket: WebSocket, type: K, handler: (this: WebSocket, ev: WebSocketEventMap[K]) => any): IDisposable {
socket.addEventListener(type, handler);
return {
dispose: () => {
if (!handler) {
// Already disposed
return;
}
socket.removeEventListener(type, handler);
}
};
}

View File

@@ -1,76 +0,0 @@
import React, { useEffect, useRef } from "react";
import { Terminal } from "xterm";
import { FitAddon } from 'xterm-addon-fit';
import "xterm/css/xterm.css";
import { TerminalKind } from "../../../Contracts/ViewModels";
import { startCloudShellTerminal } from "./Core/CloudShellTerminalCore";
export interface CloudShellTerminalProps {
shellType: TerminalKind;
}
export const CloudShellTerminalComponent: React.FC<CloudShellTerminalProps> = ({
shellType
}: CloudShellTerminalProps) => {
const terminalRef = useRef(null); // Reference for terminal container
const xtermRef = useRef(null); // Reference for XTerm instance
const socketRef = useRef(null); // Reference for WebSocket
const fitAddon = new FitAddon();
useEffect(() => {
// Initialize XTerm instance
const term = new Terminal({
cursorBlink: true,
cursorStyle: 'bar',
fontFamily: 'monospace',
fontSize: 14,
theme: {
background: "#1e1e1e",
foreground: "#d4d4d4",
cursor: "#ffcc00"
},
scrollback: 1000
});
term.loadAddon(fitAddon);
// Attach terminal to the DOM
if (terminalRef.current) {
term.open(terminalRef.current);
xtermRef.current = term;
}
if (fitAddon) {
fitAddon.fit();
}
// Adjust terminal size on window resize
const handleResize = () => fitAddon.fit();
window.addEventListener('resize', handleResize);
try {
socketRef.current = startCloudShellTerminal(term, shellType);
term.onData((data) => {
if (socketRef.current && socketRef.current.readyState === WebSocket.OPEN) {
socketRef.current.send(data);
}
});
} catch (error) {
console.error("Failed to initialize CloudShell terminal:", error);
term.writeln(`\x1B[31mError initializing terminal: ${error.message}\x1B[0m`);
}
// Cleanup function to close WebSocket and dispose terminal
return () => {
if (socketRef.current) {
socketRef.current.close(); // Close WebSocket connection
}
window.removeEventListener('resize', handleResize);
term.dispose(); // Clean up XTerm instance
};
}, [shellType]);
return <div ref={terminalRef} style={{ width: "100%", height: "500px"}} />;
};

View File

@@ -1,152 +0,0 @@
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
*/
import { TerminalKind } from "../../../Contracts/ViewModels";
import { userContext } from "../../../UserContext";
export const getCommands = (terminalKind: TerminalKind, key: string) => {
let dbAccount = userContext.databaseAccount;
let endpoint;
switch (terminalKind) {
case TerminalKind.Postgres:
endpoint = dbAccount.properties.postgresqlEndpoint;
break;
case TerminalKind.Mongo:
endpoint = dbAccount.properties.mongoEndpoint;
break;
case TerminalKind.VCoreMongo:
endpoint = dbAccount.properties.vcoreMongoEndpoint;
break;
case TerminalKind.Cassandra:
endpoint = dbAccount.properties.cassandraEndpoint;
break;
default:
throw new Error("Unknown Terminal Kind");
}
let config = {
host: getHostFromUrl(endpoint),
name: dbAccount.name,
password: key,
endpoint: endpoint
};
return commands(terminalKind, config).join("\n").concat("\n");
};
export interface CommandConfig {
host: string,
name: string,
password: string,
endpoint: string
}
export const commands = (terminalKind: TerminalKind, config?: CommandConfig): string[] => {
switch (terminalKind) {
case TerminalKind.Postgres:
return [
// 1. Fetch and display location details in a readable format
"curl -s https://ipinfo.io | jq -r '\"Region: \" + .region + \" Country: \" + .country + \" City: \" + .city + \" IP Addr: \" + .ip'",
// 2. Check if psql is installed; if not, proceed with installation
"if ! command -v psql &> /dev/null; then echo '⚠️ psql not found. Installing...'; fi",
// 3. Download PostgreSQL if not installed
"if ! command -v psql &> /dev/null; then curl -LO https://ftp.postgresql.org/pub/source/v15.2/postgresql-15.2.tar.bz2; fi",
// 4. Extract PostgreSQL package if not installed
"if ! command -v psql &> /dev/null; then tar -xvjf postgresql-15.2.tar.bz2; fi",
// 5. Create a directory for PostgreSQL installation if not installed
"if ! command -v psql &> /dev/null; then mkdir -p ~/pgsql; fi",
// 6. Download readline (dependency for PostgreSQL) if not installed
"if ! command -v psql &> /dev/null; then curl -LO https://ftp.gnu.org/gnu/readline/readline-8.1.tar.gz; fi",
// 7. Extract readline package if not installed
"if ! command -v psql &> /dev/null; then tar -xvzf readline-8.1.tar.gz; fi",
// 8. Configure readline if not installed
"if ! command -v psql &> /dev/null; then cd readline-8.1 && ./configure --prefix=$HOME/pgsql; fi",
// 9. Add PostgreSQL to PATH if not installed
"if ! command -v psql &> /dev/null; then echo 'export PATH=$HOME/pgsql/bin:$PATH' >> ~/.bashrc; fi",
// 10. Source .bashrc to update PATH (even if psql was already installed)
"source ~/.bashrc",
// 11. Verify PostgreSQL installation
"psql --version",
`psql 'read -p "Enter Database Name: " dbname && read -p "Enter Username: " username && host=${config.endpoint} port=5432 dbname=$dbname user=$username sslmode=require'`
];
case TerminalKind.Mongo:
return [
// 1. Fetch and display location details in a readable format
"curl -s https://ipinfo.io | jq -r '\"Region: \" + .region + \" Country: \" + .country + \" City: \" + .city + \" IP Addr: \" + .ip'",
// 2. Check if mongosh is installed; if not, proceed with installation
"if ! command -v mongosh &> /dev/null; then echo '⚠️ mongosh not found. Installing...'; fi",
// 3. Download mongosh if not installed
"if ! command -v mongosh &> /dev/null; then curl -LO https://downloads.mongodb.com/compass/mongosh-2.3.8-linux-x64.tgz; fi",
// 4. Extract mongosh package if not installed
"if ! command -v mongosh &> /dev/null; then tar -xvzf mongosh-2.3.8-linux-x64.tgz; fi",
// 5. Move mongosh binaries if not installed
"if ! command -v mongosh &> /dev/null; then mkdir -p ~/mongosh && mv mongosh-2.3.8-linux-x64/* ~/mongosh/; fi",
// 6. Add mongosh to PATH if not installed
"if ! command -v mongosh &> /dev/null; then echo 'export PATH=$HOME/mongosh/bin:$PATH' >> ~/.bashrc; fi",
// 7. Source .bashrc to update PATH (even if mongosh was already installed)
"source ~/.bashrc",
// 8. Verify mongosh installation
"mongosh --version",
// 9. Login to MongoDB
`mongosh --host ${config.host} --port 10255 --username ${config.name} --password ${config.password} --tls --tlsAllowInvalidCertificates`
];
case TerminalKind.VCoreMongo:
return [
// 1. Fetch and display location details in a readable format
"curl -s https://ipinfo.io | jq -r '\"Region: \" + .region + \" Country: \" + .country + \" City: \" + .city + \" IP Addr: \" + .ip'",
// 2. Check if mongosh is installed; if not, proceed with installation
"if ! command -v mongosh &> /dev/null; then echo '⚠️ mongosh not found. Installing...'; fi",
// 3. Download mongosh if not installed
"if ! command -v mongosh &> /dev/null; then curl -LO https://downloads.mongodb.com/compass/mongosh-2.3.8-linux-x64.tgz; fi",
// 4. Extract mongosh package if not installed
"if ! command -v mongosh &> /dev/null; then tar -xvzf mongosh-2.3.8-linux-x64.tgz; fi",
// 5. Move mongosh binaries if not installed
"if ! command -v mongosh &> /dev/null; then mkdir -p ~/mongosh && mv mongosh-2.3.8-linux-x64/* ~/mongosh/; fi",
// 6. Add mongosh to PATH if not installed
"if ! command -v mongosh &> /dev/null; then echo 'export PATH=$HOME/mongosh/bin:$PATH' >> ~/.bashrc; fi",
// 7. Source .bashrc to update PATH (even if mongosh was already installed)
"source ~/.bashrc",
// 8. Verify mongosh installation
"mongosh --version",
// 9. Login to MongoDB vCore
`read -p "Enter username: " username && mongosh "mongodb+srv://$username:@${config.endpoint}/?authMechanism=SCRAM-SHA-256&retrywrites=false&maxIdleTimeMS=120000" --tls --tlsAllowInvalidCertificates`
];
case TerminalKind.Cassandra:
return [
// 1. Fetch and display location details in a readable format
"curl -s https://ipinfo.io | jq -r '\"Region: \" + .region + \" Country: \" + .country + \" City: \" + .city + \" IP Addr: \" + .ip'",
// 2. Check if cqlsh is installed; if not, proceed with installation
"if ! command -v cqlsh &> /dev/null; then echo '⚠️ cqlsh not found. Installing...'; fi",
// 3. Download Cassandra if not installed
"if ! command -v cqlsh &> /dev/null; then curl -LO https://archive.apache.org/dist/cassandra/5.0.3/apache-cassandra-5.0.3-bin.tar.gz; fi",
// 4. Extract Cassandra package if not installed
"if ! command -v cqlsh &> /dev/null; then tar -xvzf apache-cassandra-5.0.3-bin.tar.gz; fi",
// 5. Move Cassandra binaries if not installed
"if ! command -v cqlsh &> /dev/null; then mkdir -p ~/cassandra && mv apache-cassandra-5.0.3/* ~/cassandra/; fi",
// 6. Add Cassandra to PATH if not installed
"if ! command -v cqlsh &> /dev/null; then echo 'export PATH=$HOME/cassandra/bin:$PATH' >> ~/.bashrc; fi",
// 7. Set environment variables for SSL
"if ! command -v cqlsh &> /dev/null; then echo 'export SSL_VERSION=TLSv1_2' >> ~/.bashrc; fi",
"if ! command -v cqlsh &> /dev/null; then echo 'export SSL_VALIDATE=false' >> ~/.bashrc; fi",
// 8. Source .bashrc to update PATH (even if cqlsh was already installed)
"source ~/.bashrc",
// 9. Verify cqlsh installation
"cqlsh --version",
// 10. Login to Cassandra
`cqlsh ${config.host} 10350 -u ${config.name} -p ${config.password} --ssl --protocol-version=4`
];
default:
return ["echo Unknown Shell"];
}
}
const getHostFromUrl = (mongoEndpoint: string): string => {
try {
const url = new URL(mongoEndpoint);
return url.hostname;
} catch (error) {
console.error("Invalid Mongo Endpoint URL:", error);
return "";
}
};

View File

@@ -1,393 +0,0 @@
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Core functionality for CloudShell terminal management
*/
import { Terminal } from "xterm";
import { TerminalKind } from "../../../../Contracts/ViewModels";
import { userContext } from "../../../../UserContext";
import {
authorizeSession,
connectTerminal,
provisionConsole,
putEphemeralUserSettings,
registerCloudShellProvider,
verifyCloudShellProviderRegistration
} from "../Data/CloudShellApiClient";
import { getNormalizedRegion } from "../Data/RegionUtils";
import { ShellTypeHandler } from "../ShellTypes/ShellTypeFactory";
import { AttachAddon } from "../Utils/AttachAddOn";
import { wait } from "../Utils/CommonUtils";
import { terminalLog } from "../Utils/LogFormatter";
// Constants
const DEFAULT_CLOUDSHELL_REGION = "westus";
const POLLING_INTERVAL_MS = 5000;
const MAX_RETRY_COUNT = 10;
const MAX_PING_COUNT = 20 * 60; // 20 minutes (60 seconds/minute)
/**
* Main function to start a CloudShell terminal
*/
export const startCloudShellTerminal = async (terminal: Terminal, shellType: TerminalKind) => {
// Get the shell handler for this type
const shellHandler = ShellTypeHandler.getHandler(shellType);
terminal.writeln(terminalLog.header("Initializing Azure CloudShell"));
await ensureCloudShellProviderRegistered(terminal);
const { resolvedRegion, defaultCloudShellRegion } = determineCloudShellRegion(terminal);
// Ask for user consent for region
const consentGranted = await askForRegionConsent(terminal, resolvedRegion);
if (!consentGranted) {
return {}; // Exit if user declined
}
// Check network requirements for this shell type
const networkConfig = await shellHandler.configureNetworkAccess(terminal, resolvedRegion);
terminal.writeln("");
// Provision CloudShell session
terminal.writeln(terminalLog.cloudshell(`Provisioning Started....`));
let sessionDetails: {
socketUri?: string;
provisionConsoleResponse?: any;
targetUri?: string;
};
try {
sessionDetails = await provisionCloudShellSession(resolvedRegion, terminal, networkConfig.vNetSettings, networkConfig.isAllPublicAccessEnabled);
} catch (err) {
terminal.writeln(terminalLog.error(err));
terminal.writeln(terminalLog.error("Failed to provision in primary region"));
terminal.writeln(terminalLog.warning(`Attempting with fallback region: ${defaultCloudShellRegion}`));
sessionDetails = await provisionCloudShellSession(defaultCloudShellRegion, terminal, networkConfig.vNetSettings, networkConfig.isAllPublicAccessEnabled);
}
if (!sessionDetails.socketUri) {
terminal.writeln(terminalLog.error('Unable to provision console. Please try again later.'));
return {};
}
// Configure WebSocket connection with shell-specific commands
const socket = await establishTerminalConnection(
terminal,
shellHandler,
sessionDetails.socketUri,
sessionDetails.provisionConsoleResponse,
sessionDetails.targetUri
);
return socket;
};
/**
* Ensures that the CloudShell provider is registered for the current subscription
*/
export const ensureCloudShellProviderRegistered = async (terminal: Terminal): Promise<void> => {
try {
terminal.writeln(terminalLog.info("Verifying provider registration..."));
const response: any = await verifyCloudShellProviderRegistration(userContext.subscriptionId);
if (response.registrationState !== "Registered") {
terminal.writeln(terminalLog.warning("Registering CloudShell provider..."));
await registerCloudShellProvider(userContext.subscriptionId);
terminal.writeln(terminalLog.success("Provider registration successful"));
}
} catch (err) {
terminal.writeln(terminalLog.error("Unable to verify provider registration"));
throw err;
}
};
/**
* Determines the appropriate CloudShell region
*/
export const determineCloudShellRegion = (terminal: Terminal): { resolvedRegion: string; defaultCloudShellRegion: string } => {
const region = userContext.databaseAccount?.location;
const resolvedRegion = getNormalizedRegion(region, DEFAULT_CLOUDSHELL_REGION);
return { resolvedRegion, defaultCloudShellRegion: DEFAULT_CLOUDSHELL_REGION };
};
/**
* Asks the user for consent to use the specified CloudShell region
*/
export const askForRegionConsent = async (terminal: Terminal, resolvedRegion: string): Promise<boolean> => {
terminal.writeln(terminalLog.header("CloudShell Region Confirmation"));
terminal.writeln(terminalLog.info("The CloudShell container will be provisioned in a specific Azure region."));
// Data residency and compliance information
terminal.writeln(terminalLog.subheader("Important Information"));
const dbRegion = userContext.databaseAccount?.location || "unknown";
terminal.writeln(terminalLog.item("Database Region", dbRegion));
terminal.writeln(terminalLog.item("CloudShell Container Region", resolvedRegion));
terminal.writeln(terminalLog.subheader("What this means to you?"));
terminal.writeln(terminalLog.item("Data Residency", "Commands and query results will be processed in this region"));
terminal.writeln(terminalLog.item("Network", "Database connections will originate from this region"));
// Consent question
terminal.writeln("");
terminal.writeln(terminalLog.prompt("Would you like to provision Azure CloudShell in the '" + resolvedRegion + "' region?"));
terminal.writeln(terminalLog.prompt("Press 'Y' to continue or 'N' to cancel (Y/N)"));
return new Promise<boolean>((resolve) => {
const keyListener = terminal.onKey(({ key }: { key: string }) => {
keyListener.dispose();
terminal.writeln("");
if (key.toLowerCase() === 'y') {
terminal.writeln(terminalLog.success("Proceeding with CloudShell in " + resolvedRegion));
terminal.writeln(terminalLog.separator());
resolve(true);
} else {
terminal.writeln(terminalLog.error("CloudShell provisioning canceled"));
setTimeout(() => terminal.dispose(), 2000);
resolve(false);
}
});
});
};
/**
* Provisions a CloudShell session
*/
export const provisionCloudShellSession = async (
resolvedRegion: string,
terminal: Terminal,
vNetSettings: object,
isAllPublicAccessEnabled: boolean
): Promise<{ socketUri?: string; provisionConsoleResponse?: any; targetUri?: string }> => {
return new Promise( async (resolve, reject) => {
try {
terminal.writeln(terminalLog.header("Configuring CloudShell Session"));
// Check if vNetSettings is available and not empty
const hasVNetSettings = vNetSettings && Object.keys(vNetSettings).length > 0;
if (hasVNetSettings) {
terminal.writeln(terminalLog.vnet("Enabling private network configuration"));
displayNetworkSettings(terminal, vNetSettings, resolvedRegion);
}
else {
terminal.writeln(terminalLog.warning("No VNet configuration provided"));
terminal.writeln(terminalLog.warning("CloudShell will be provisioned with public network access"));
if (!isAllPublicAccessEnabled) {
terminal.writeln(terminalLog.error("Warning: Your database has network restrictions"));
terminal.writeln(terminalLog.error("CloudShell may not be able to connect without proper VNet configuration"));
}
}
terminal.writeln(terminalLog.warning("Any previous VNet settings will be overridden"));
// Apply user settings
await putEphemeralUserSettings(userContext.subscriptionId, resolvedRegion, vNetSettings);
terminal.writeln(terminalLog.success("Session settings applied"));
// Provision console
let provisionConsoleResponse;
let attemptCounter = 0;
do {
provisionConsoleResponse = await provisionConsole(userContext.subscriptionId, resolvedRegion);
terminal.writeln(terminalLog.progress("Provisioning", provisionConsoleResponse.properties.provisioningState));
attemptCounter++;
if (provisionConsoleResponse.properties.provisioningState !== "Succeeded") {
await wait(POLLING_INTERVAL_MS);
}
} while (provisionConsoleResponse.properties.provisioningState !== "Succeeded" && attemptCounter < 10);
if (provisionConsoleResponse.properties.provisioningState !== "Succeeded") {
const errorMessage = `Provisioning failed: ${provisionConsoleResponse.properties.provisioningState}`;
terminal.writeln(terminalLog.error(errorMessage));
return reject(new Error(errorMessage));
}
// Connect terminal
const connectTerminalResponse = await connectTerminal(
provisionConsoleResponse.properties.uri,
{ rows: terminal.rows, cols: terminal.cols }
);
const targetUri = `${provisionConsoleResponse.properties.uri}/terminals?cols=${terminal.cols}&rows=${terminal.rows}&version=2019-01-01&shell=bash`;
const termId = connectTerminalResponse.id;
// Determine socket URI
let socketUri = connectTerminalResponse.socketUri.replace(":443/", "");
const targetUriBody = targetUri.replace('https://', '').split('?')[0];
if (socketUri.indexOf(targetUriBody) === -1) {
socketUri = `wss://${targetUriBody}/${termId}`;
}
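// A Service Bus host indicates an Azure Relay hybrid connection, which expects the $hc WebSocket path.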
if (targetUriBody.includes('servicebus')) {
const targetUriBodyArr = targetUriBody.split('/');
socketUri = `wss://${targetUriBodyArr[0]}/$hc/${targetUriBodyArr[1]}/terminals/${termId}`;
}
return resolve({ socketUri, provisionConsoleResponse, targetUri });
} catch (err) {
terminal.writeln(terminalLog.error(`Provisioning failed: ${err.message}`));
return reject(err);
}
});
};
/**
* Display VNet settings in the terminal
*/
const displayNetworkSettings = (terminal: Terminal, vNetSettings: any, resolvedRegion: string): void => {
if (vNetSettings.networkProfileResourceId) {
const profileName = vNetSettings.networkProfileResourceId.split('/').pop();
terminal.writeln(terminalLog.item("Network Profile", profileName));
if (vNetSettings.relayNamespaceResourceId) {
const relayName = vNetSettings.relayNamespaceResourceId.split('/').pop();
terminal.writeln(terminalLog.item("Relay Namespace", relayName));
}
terminal.writeln(terminalLog.item("Region", resolvedRegion));
terminal.writeln(terminalLog.success("CloudShell will use this VNet to connect to your database"));
}
};
/**
* Establishes a terminal connection via WebSocket
*/
export const establishTerminalConnection = async (
terminal: Terminal,
shellHandler: any,
socketUri: string,
provisionConsoleResponse: any,
targetUri: string
): Promise<WebSocket> => {
let socket = new WebSocket(socketUri);
// Get shell-specific initial commands
const initCommands = await shellHandler.getInitialCommands();
// Configure the socket
socket = configureSocketConnection(socket, socketUri, terminal, initCommands, 0);
// Attach the terminal addon
const attachAddon = new AttachAddon(socket);
terminal.loadAddon(attachAddon);
terminal.writeln(terminalLog.success("Connection established"));
// Authorize the session
try {
const authorizeResponse = await authorizeSession(provisionConsoleResponse.properties.uri);
const cookieToken = authorizeResponse.token;
// Load auth token with a hidden image
const img = document.createElement("img");
img.src = `${targetUri}&token=${encodeURIComponent(cookieToken)}`;
terminal.focus();
} catch (err) {
terminal.writeln(terminalLog.error("Authorization failed"));
socket.close();
throw err;
}
return socket;
};
/**
* Configures a WebSocket connection for the terminal
*/
export const configureSocketConnection = (
socket: WebSocket,
uri: string,
terminal: Terminal,
initCommands: string,
socketRetryCount: number
): WebSocket => {
let jsonData = '';
let keepAliveID: NodeJS.Timeout = null;
let pingCount = 0;
sendTerminalStartupCommands(socket, initCommands);
socket.onclose = () => {
if (keepAliveID) {
clearTimeout(keepAliveID);
pingCount = 0;
}
terminal.writeln(terminalLog.warning("Session terminated. Refresh the page to start a new session."));
};
socket.onerror = () => {
if (socketRetryCount < MAX_RETRY_COUNT && socket.readyState !== WebSocket.CLOSED) {
configureSocketConnection(socket, uri, terminal, initCommands, socketRetryCount + 1);
} else {
socket.close();
}
};
socket.onmessage = (event: MessageEvent<string>) => {
pingCount = 0; // Reset ping count on message receipt
let eventData = '';
if (typeof event.data === "object") {
try {
const enc = new TextDecoder("utf-8");
eventData = enc.decode(event.data as any);
} catch (e) {
// Not an array buffer, ignore
}
}
if (typeof event.data === 'string') {
eventData = event.data;
}
// Process event data
if (eventData.includes("ie_us") && eventData.includes("ie_ue")) {
const statusData = eventData.split('ie_us')[1].split('ie_ue')[0];
console.log(statusData);
} else if (eventData.includes("ie_us")) {
jsonData += eventData.split('ie_us')[1];
} else if (eventData.includes("ie_ue")) {
jsonData += eventData.split('ie_ue')[0];
console.log(jsonData);
jsonData = '';
} else if (jsonData.length > 0) {
jsonData += eventData;
}
};
return socket;
};
/**
* Sends startup commands to the terminal
*/
export const sendTerminalStartupCommands = (socket: WebSocket, initCommands: string): void => {
let keepAliveID: NodeJS.Timeout = null;
let pingCount = 0;
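// Send the startup commands once the socket is open, then keep the session alive with an empty ping every second until MAX_PING_COUNT is reached.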
if (socket && socket.readyState === WebSocket.OPEN) {
socket.send(initCommands);
} else {
socket.onopen = () => {
socket.send(initCommands);
const keepSocketAlive = (socket: WebSocket) => {
if (socket.readyState === WebSocket.OPEN) {
if (pingCount >= MAX_PING_COUNT) {
socket.close();
} else {
socket.send('');
pingCount++;
keepAliveID = setTimeout(() => keepSocketAlive(socket), 1000);
}
}
};
keepSocketAlive(socket);
};
}
};

View File

@@ -1,320 +0,0 @@
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
*/
import { ApiVersionsConfig, ResourceType } from "Explorer/Tabs/CloudShellTab/DataModels";
import { v4 as uuidv4 } from 'uuid';
import { configContext } from "../../../ConfigContext";
import { TerminalKind } from "../../../Contracts/ViewModels";
import { userContext } from '../../../UserContext';
import { armRequest } from "../../../Utils/arm/request";
import { Authorization, ConnectTerminalResponse, NetworkType, OsType, ProvisionConsoleResponse, SessionType, Settings, ShellType } from "./DataModels";
/**
* API version configuration by terminal type and resource type
*/
const API_VERSIONS : ApiVersionsConfig = {
// Default version for fallback
DEFAULT: "2024-07-01",
// Resource type specific defaults
RESOURCE_DEFAULTS: {
[ResourceType.NETWORK]: "2023-05-01",
[ResourceType.DATABASE]: "2024-07-01",
[ResourceType.VNET]: "2023-05-01",
[ResourceType.SUBNET]: "2023-05-01",
[ResourceType.RELAY]: "2024-01-01",
[ResourceType.ROLE]: "2022-04-01"
},
// Shell-type specific versions with resource overrides
SHELL_TYPES: {
[TerminalKind.Mongo]: {
[ResourceType.DATABASE]: "2024-11-15"
},
[TerminalKind.VCoreMongo]: {
[ResourceType.DATABASE]: "2024-07-01"
},
[TerminalKind.Cassandra]: {
[ResourceType.DATABASE]: "2024-11-15"
}
}
};
export const validateUserSettings = (userSettings: Settings) => {
if (userSettings.sessionType !== SessionType.Ephemeral && userSettings.osType !== OsType.Linux) {
return false;
} else {
return true;
}
}
// Current shell type context
let currentShellType: TerminalKind | null = null;
/**
* Set the active shell type to determine API version
*/
export const setShellType = (shellType: TerminalKind): void => {
currentShellType = shellType;
};
/**
* Get the appropriate API version based on shell type and resource type
* Uses a cascading fallback approach for maximum flexibility
*/
export const getApiVersion = (resourceType?: ResourceType): string => {
// If no shell type is set, fallback to resource default or global default
if (!currentShellType) {
return resourceType ?
(API_VERSIONS.RESOURCE_DEFAULTS[resourceType] || API_VERSIONS.DEFAULT) :
API_VERSIONS.DEFAULT;
}
// Shell type is set, try to get specific version in this priority:
// 1. Shell-specific + resource-specific
if (resourceType &&
API_VERSIONS.SHELL_TYPES[currentShellType]) {
const shellTypeConfig = API_VERSIONS.SHELL_TYPES[currentShellType];
if (resourceType in shellTypeConfig) {
return shellTypeConfig[resourceType] as string;
}
}
// 2. Resource-specific default
if (resourceType && resourceType in API_VERSIONS.RESOURCE_DEFAULTS) {
return API_VERSIONS.RESOURCE_DEFAULTS[resourceType];
}
// 3. Global default
return API_VERSIONS.DEFAULT;
};
export const getUserRegion = async (subscriptionId: string, resourceGroup: string, accountName: string) => {
return await armRequest({
host: configContext.ARM_ENDPOINT,
path: `/subscriptions/${subscriptionId}/resourceGroups/${resourceGroup}/providers/Microsoft.DocumentDB/databaseAccounts/${accountName}`,
method: "GET",
apiVersion: "2022-12-01"
});
};
export const deleteUserSettings = async (): Promise<void> => {
await armRequest<void>({
host: configContext.ARM_ENDPOINT,
path: `/providers/Microsoft.Portal/userSettings/cloudconsole`,
method: "DELETE",
apiVersion: "2023-02-01-preview"
});
};
export const getUserSettings = async (): Promise<Settings> => {
const resp = await armRequest<any>({
host: configContext.ARM_ENDPOINT,
path: `/providers/Microsoft.Portal/userSettings/cloudconsole`,
method: "GET",
apiVersion: "2023-02-01-preview"
});
return resp;
};
export const putEphemeralUserSettings = async (userSubscriptionId: string, userRegion: string, vNetSettings?: object) => {
const ephemeralSettings = {
properties: {
preferredOsType: OsType.Linux,
preferredShellType: ShellType.Bash,
preferredLocation: userRegion,
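// Request an isolated (VNet) session only when explicit VNet settings are supplied; otherwise use the default public network.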
networkType: (!vNetSettings || Object.keys(vNetSettings).length === 0) ? NetworkType.Default : (vNetSettings ? NetworkType.Isolated : NetworkType.Default),
sessionType: SessionType.Ephemeral,
userSubscription: userSubscriptionId,
vnetSettings: vNetSettings ?? {}
}
};
return await armRequest({
host: configContext.ARM_ENDPOINT,
path: `/providers/Microsoft.Portal/userSettings/cloudconsole`,
method: "PUT",
apiVersion: "2023-02-01-preview",
body: ephemeralSettings
});
};
export const verifyCloudShellProviderRegistration = async(subscriptionId: string) => {
return await armRequest({
host: configContext.ARM_ENDPOINT,
path: `/subscriptions/${subscriptionId}/providers/Microsoft.CloudShell`,
method: "GET",
apiVersion: "2022-12-01"
});
};
export const registerCloudShellProvider = async (subscriptionId: string) => {
return await armRequest({
host: configContext.ARM_ENDPOINT,
path: `/subscriptions/${subscriptionId}/providers/Microsoft.CloudShell/register`,
method: "POST",
apiVersion: "2022-12-01"
});
};
export const provisionConsole = async (subscriptionId: string, location: string): Promise<ProvisionConsoleResponse> => {
const data = {
properties: {
osType: OsType.Linux
}
};
return await armRequest<any>({
host: configContext.ARM_ENDPOINT,
path: `providers/Microsoft.Portal/consoles/default`,
method: "PUT",
apiVersion: "2023-02-01-preview",
customHeaders: {
'x-ms-console-preferred-location': location
},
body: data,
});
};
export const connectTerminal = async (consoleUri: string, size: { rows: number, cols: number }): Promise<ConnectTerminalResponse> => {
const targetUri = consoleUri + `/terminals?cols=${size.cols}&rows=${size.rows}&version=2019-01-01&shell=bash`;
const resp = await fetch(targetUri, {
method: "POST",
headers: {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Content-Length': '2',
'Authorization': userContext.authorizationToken,
'x-ms-client-request-id': uuidv4(),
'Accept-Language': getLocale(),
},
body: "{}" // empty body is necessary
});
return resp.json();
};
export const authorizeSession = async (consoleUri: string): Promise<Authorization> => {
const targetUri = consoleUri + "/authorize";
const resp = await fetch(targetUri, {
method: "POST",
headers: {
'Accept': 'application/json',
'Authorization': userContext.authorizationToken,
'Accept-Language': getLocale(),
"Content-Type": 'application/json'
},
body: "{}" // empty body is necessary
});
return resp.json();
};
export const getLocale = () => {
const langLocale = navigator.language;
return (langLocale && langLocale.length === 2 ? langLocale[1] : 'en-us');
};
const validCloudShellRegions = new Set(["westus", "southcentralus", "eastus", "northeurope", "westeurope", "centralindia", "southeastasia", "westcentralus"]);
export const getNormalizedRegion = (region: string, defaultCloudshellRegion: string) => {
if (!region) return defaultCloudshellRegion;
const regionMap: Record<string, string> = {
"centralus": "westcentralus",
"eastus2": "eastus"
};
const normalizedRegion = regionMap[region.toLowerCase()] || region;
return validCloudShellRegions.has(normalizedRegion.toLowerCase()) ? normalizedRegion : defaultCloudshellRegion;
};
export async function getNetworkProfileInfo<T>(networkProfileResourceId: string, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.NETWORK);
return await GetARMCall<T>(networkProfileResourceId, apiVersion);
}
export async function getAccountDetails<T>(databaseAccountId: string, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.DATABASE);
return await GetARMCall<T>(databaseAccountId, apiVersion);
}
export async function getVnetInformation<T>(vnetId: string, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.VNET);
return await GetARMCall<T>(vnetId, apiVersion);
}
export async function getSubnetInformation<T>(subnetId: string, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.SUBNET);
return await GetARMCall<T>(subnetId, apiVersion);
}
export async function updateSubnetInformation<T>(subnetId: string, request: object, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.SUBNET);
return await PutARMCall(subnetId, request, apiVersion);
}
export async function updateDatabaseAccount<T>(accountId: string, request: object, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.DATABASE);
return await PutARMCall(accountId, request, apiVersion);
}
export async function getDatabaseOperations<T>(accountId: string, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.DATABASE);
return await GetARMCall<T>(`${accountId}/operations`, apiVersion);
}
export async function updateVnet<T>(vnetId: string, request: object, apiVersionOverride?: string) {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.VNET);
return await PutARMCall<T>(vnetId, request, apiVersion);
}
export async function getVnet<T>(vnetId: string, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.VNET);
return await GetARMCall<T>(vnetId, apiVersion);
}
export async function createNetworkProfile<T>(networkProfileId: string, request: object, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.NETWORK);
return await PutARMCall<T>(networkProfileId, request, apiVersion);
}
export async function createRelay<T>(relayId: string, request: object, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.RELAY);
return await PutARMCall<T>(relayId, request, apiVersion);
}
export async function getRelay<T>(relayId: string, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.RELAY);
return await GetARMCall<T>(relayId, apiVersion);
}
export async function createRoleOnNetworkProfile<T>(roleid: string, request: object, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.ROLE);
return await PutARMCall<T>(roleid, request, apiVersion);
}
export async function createRoleOnRelay<T>(roleid: string, request: object, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.ROLE);
return await PutARMCall<T>(roleid, request, apiVersion);
}
export async function GetARMCall<T>(path: string, apiVersion: string = API_VERSIONS.DEFAULT): Promise<T> {
return await armRequest<T>({
host: configContext.ARM_ENDPOINT,
path: path,
method: "GET",
apiVersion: apiVersion
});
}
export async function PutARMCall<T>(path: string, request: object, apiVersion: string = API_VERSIONS.DEFAULT): Promise<T> {
return await armRequest<T>({
host: configContext.ARM_ENDPOINT,
path: path,
method: "PUT",
apiVersion: apiVersion,
body: request
});
}

View File

@@ -1,263 +0,0 @@
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* CloudShell API client for various operations
*/
import { v4 as uuidv4 } from 'uuid';
import { configContext } from "../../../../ConfigContext";
import { TerminalKind } from "../../../../Contracts/ViewModels";
import { userContext } from '../../../../UserContext';
import { armRequest } from "../../../../Utils/arm/request";
import { ApiVersionsConfig, DEFAULT_API_VERSIONS } from "../Models/ApiVersions";
import { Authorization, ConnectTerminalResponse, NetworkType, OsType, ProvisionConsoleResponse, ResourceType, SessionType, Settings, ShellType } from "../Models/DataModels";
import { getLocale } from '../Data/LocalizationUtils';
// Current shell type context
let currentShellType: TerminalKind | null = null;
/**
* Set the active shell type to determine API version
*/
export const setShellType = (shellType: TerminalKind): void => {
currentShellType = shellType;
};
/**
* Get the appropriate API version based on shell type and resource type
*/
export const getApiVersion = (resourceType?: ResourceType, apiVersions?: ApiVersionsConfig): string => {
if (!apiVersions) {
apiVersions = DEFAULT_API_VERSIONS; // Default fallback
}
// Shell type is set, try to get specific version in this priority:
// 1. Shell-specific + resource-specific
if (resourceType &&
apiVersions.SHELL_TYPES[currentShellType]) {
const shellTypeConfig = apiVersions.SHELL_TYPES[currentShellType];
if (resourceType in shellTypeConfig) {
return shellTypeConfig[resourceType] as string;
}
}
// 2. Resource-specific default
if (resourceType && resourceType in apiVersions.RESOURCE_DEFAULTS) {
return apiVersions.RESOURCE_DEFAULTS[resourceType];
}
// 3. Global default
return apiVersions.DEFAULT;
};
export const getUserRegion = async (subscriptionId: string, resourceGroup: string, accountName: string) => {
return await armRequest({
host: configContext.ARM_ENDPOINT,
path: `/subscriptions/${subscriptionId}/resourceGroups/${resourceGroup}/providers/Microsoft.DocumentDB/databaseAccounts/${accountName}`,
method: "GET",
apiVersion: "2022-12-01"
});
};
export const deleteUserSettings = async (): Promise<void> => {
await armRequest<void>({
host: configContext.ARM_ENDPOINT,
path: `/providers/Microsoft.Portal/userSettings/cloudconsole`,
method: "DELETE",
apiVersion: "2023-02-01-preview"
});
};
export const getUserSettings = async (): Promise<Settings> => {
const resp = await armRequest<any>({
host: configContext.ARM_ENDPOINT,
path: `/providers/Microsoft.Portal/userSettings/cloudconsole`,
method: "GET",
apiVersion: "2023-02-01-preview"
});
return resp;
};
export const putEphemeralUserSettings = async (userSubscriptionId: string, userRegion: string, vNetSettings?: object) => {
const ephemeralSettings = {
properties: {
preferredOsType: OsType.Linux,
preferredShellType: ShellType.Bash,
preferredLocation: userRegion,
networkType: (!vNetSettings || Object.keys(vNetSettings).length === 0) ? NetworkType.Default : (vNetSettings ? NetworkType.Isolated : NetworkType.Default),
sessionType: SessionType.Ephemeral,
userSubscription: userSubscriptionId,
vnetSettings: vNetSettings ?? {}
}
};
return await armRequest({
host: configContext.ARM_ENDPOINT,
path: `/providers/Microsoft.Portal/userSettings/cloudconsole`,
method: "PUT",
apiVersion: "2023-02-01-preview",
body: ephemeralSettings
});
};
export const verifyCloudShellProviderRegistration = async(subscriptionId: string) => {
return await armRequest({
host: configContext.ARM_ENDPOINT,
path: `/subscriptions/${subscriptionId}/providers/Microsoft.CloudShell`,
method: "GET",
apiVersion: "2022-12-01"
});
};
export const registerCloudShellProvider = async (subscriptionId: string) => {
return await armRequest({
host: configContext.ARM_ENDPOINT,
path: `/subscriptions/${subscriptionId}/providers/Microsoft.CloudShell/register`,
method: "POST",
apiVersion: "2022-12-01"
});
};
export const provisionConsole = async (subscriptionId: string, location: string): Promise<ProvisionConsoleResponse> => {
const data = {
properties: {
osType: OsType.Linux
}
};
return await armRequest<any>({
host: configContext.ARM_ENDPOINT,
path: `providers/Microsoft.Portal/consoles/default`,
method: "PUT",
apiVersion: "2023-02-01-preview",
customHeaders: {
'x-ms-console-preferred-location': location
},
body: data,
});
};
export const connectTerminal = async (consoleUri: string, size: { rows: number, cols: number }): Promise<ConnectTerminalResponse> => {
const targetUri = consoleUri + `/terminals?cols=${size.cols}&rows=${size.rows}&version=2019-01-01&shell=bash`;
const resp = await fetch(targetUri, {
method: "POST",
headers: {
'Accept': 'application/json',
'Content-Type': 'application/json',
'Content-Length': '2',
'Authorization': userContext.authorizationToken,
'x-ms-client-request-id': uuidv4(),
'Accept-Language': getLocale(),
},
body: "{}" // empty body is necessary
});
return resp.json();
};
export const authorizeSession = async (consoleUri: string): Promise<Authorization> => {
const targetUri = consoleUri + "/authorize";
const resp = await fetch(targetUri, {
method: "POST",
headers: {
'Accept': 'application/json',
'Authorization': userContext.authorizationToken,
'Accept-Language': getLocale(),
"Content-Type": 'application/json'
},
body: "{}" // empty body is necessary
});
return resp.json();
};
export async function getNetworkProfileInfo<T>(networkProfileResourceId: string, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.NETWORK);
return await GetARMCall<T>(networkProfileResourceId, apiVersion);
}
export async function getAccountDetails<T>(databaseAccountId: string, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.DATABASE);
return await GetARMCall<T>(databaseAccountId, apiVersion);
}
export async function getVnetInformation<T>(vnetId: string, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.VNET);
return await GetARMCall<T>(vnetId, apiVersion);
}
export async function getSubnetInformation<T>(subnetId: string, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.SUBNET);
return await GetARMCall<T>(subnetId, apiVersion);
}
export async function updateSubnetInformation<T>(subnetId: string, request: object, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.SUBNET);
return await PutARMCall<T>(subnetId, request, apiVersion);
}
export async function updateDatabaseAccount<T>(accountId: string, request: object, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.DATABASE);
return await PutARMCall<T>(accountId, request, apiVersion);
}
export async function getDatabaseOperations<T>(accountId: string, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.DATABASE);
return await GetARMCall<T>(`${accountId}/operations`, apiVersion);
}
export async function updateVnet<T>(vnetId: string, request: object, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.VNET);
return await PutARMCall<T>(vnetId, request, apiVersion);
}
export async function getVnet<T>(vnetId: string, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.VNET);
return await GetARMCall<T>(vnetId, apiVersion);
}
export async function createNetworkProfile<T>(networkProfileId: string, request: object, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.NETWORK);
return await PutARMCall<T>(networkProfileId, request, apiVersion);
}
export async function createRelay<T>(relayId: string, request: object, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.RELAY);
return await PutARMCall<T>(relayId, request, apiVersion);
}
export async function getRelay<T>(relayId: string, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.RELAY);
return await GetARMCall<T>(relayId, apiVersion);
}
export async function createRoleOnNetworkProfile<T>(roleId: string, request: object, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.ROLE);
return await PutARMCall<T>(roleId, request, apiVersion);
}
export async function createRoleOnRelay<T>(roleId: string, request: object, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.ROLE);
return await PutARMCall<T>(roleId, request, apiVersion);
}
export async function createPrivateEndpoint<T>(privateEndpointId: string, request: object, apiVersionOverride?: string): Promise<T> {
const apiVersion = apiVersionOverride || getApiVersion(ResourceType.NETWORK);
return await PutARMCall<T>(privateEndpointId, request, apiVersion);
}
export async function GetARMCall<T>(path: string, apiVersion: string = '2024-07-01'): Promise<T> {
return await armRequest<T>({
host: configContext.ARM_ENDPOINT,
path: path,
method: "GET",
apiVersion: apiVersion
});
}
export async function PutARMCall<T>(path: string, request: object, apiVersion: string = '2024-07-01'): Promise<T> {
return await armRequest<T>({
host: configContext.ARM_ENDPOINT,
path: path,
method: "PUT",
apiVersion: apiVersion,
body: request
});
}

View File

@@ -1,12 +0,0 @@
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Localization utilities for CloudShell
*/
/**
* Gets the current locale for API requests
*/
export const getLocale = (): string => {
const langLocale = navigator.language;
return (langLocale && langLocale.length > 2 ? langLocale : 'en-us');
};

View File

@@ -1,37 +0,0 @@
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Region utilities for CloudShell
*/
export const getLocale = () => {
const langLocale = navigator.language;
return (langLocale && langLocale.length === 2 ? langLocale[1] : 'en-us');
};
const validCloudShellRegions = new Set([
"westus",
"southcentralus",
"eastus",
"northeurope",
"westeurope",
"centralindia",
"southeastasia",
"westcentralus"
]);
/**
* Normalizes a region name to a valid CloudShell region
* @param region The region to normalize
* @param defaultCloudshellRegion Default region to use if the provided region is not supported
*/
export const getNormalizedRegion = (region: string, defaultCloudshellRegion: string) => {
if (!region) return defaultCloudshellRegion;
const regionMap: Record<string, string> = {
"centralus": "westcentralus",
"eastus2": "eastus"
};
const normalizedRegion = regionMap[region.toLowerCase()] || region;
return validCloudShellRegions.has(normalizedRegion.toLowerCase()) ? normalizedRegion : defaultCloudshellRegion;
};

View File

@@ -1,185 +0,0 @@
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
*/
import { TerminalKind } from "../../../Contracts/ViewModels";
export const enum OsType {
Linux = "linux",
Windows = "windows"
}
export const enum ShellType {
Bash = "bash",
PowerShellCore = "pwsh"
}
export const enum NetworkType {
Default = "Default",
Isolated = "Isolated"
}
export const enum SessionType {
Mounted = "Mounted",
Ephemeral = "Ephemeral"
}
export const enum UserInputs {
NoReset = "1",
ConfigureVNet = "2",
ResetVNet = "3"
};
export type Settings = {
properties: UserSettingProperties
};
export type UserSettingProperties = {
networkType: string;
preferredLocation: string;
preferredOsType: OsType;
preferredShellType: ShellType;
userSubscription: string;
sessionType: SessionType;
vnetSettings: VnetSettings;
}
export type VnetSettings = {
networkProfileResourceId?: string;
relayNamespaceResourceId?: string;
location?: string;
}
export type ProvisionConsoleResponse = {
properties: {
osType: OsType;
provisioningState: string;
uri: string;
};
};
export type Authorization = {
token: string;
};
export type ConnectTerminalResponse = {
id: string;
idleTimeout: string;
rootDirectory: string;
socketUri: string;
tokenUpdated: boolean;
};
export type VnetModel = {
name: string;
id: string;
etag: string;
type: string;
location: string;
tags: Record<string, string>;
properties: {
provisioningState: string;
resourceGuid: string;
addressSpace: {
addressPrefixes: string[];
};
encryption: {
enabled: boolean;
enforcement: string;
};
privateEndpointVNetPolicies: string;
subnets: Array<{
name: string;
id: string;
etag: string;
type: string;
properties: {
provisioningState: string;
addressPrefixes?: string[];
addressPrefix?: string;
networkSecurityGroup?: { id: string };
ipConfigurations?: { id: string }[];
ipConfigurationProfiles?: { id: string }[];
privateEndpoints?: { id: string }[];
serviceEndpoints?: Array<{
provisioningState: string;
service: string;
locations: string[];
}>;
delegations?: Array<{
name: string;
id: string;
etag: string;
type: string;
properties: {
provisioningState: string;
serviceName: string;
actions: string[];
};
}>;
purpose?: string;
privateEndpointNetworkPolicies?: string;
privateLinkServiceNetworkPolicies?: string;
};
}>;
virtualNetworkPeerings: any[];
enableDdosProtection: boolean;
};
};
export type RelayNamespace = {
id: string;
name: string;
type: string;
location: string;
tags: Record<string, string>;
properties: {
metricId: string;
serviceBusEndpoint: string;
provisioningState: string;
status: string;
createdAt: string;
updatedAt: string;
};
sku: {
name: string;
tier: string;
};
};
export type RelayNamespaceResponse = {
value: RelayNamespace[];
};
/**
* Resource types for API versioning
*/
export enum ResourceType {
NETWORK = "NETWORK",
DATABASE = "DATABASE",
VNET = "VNET",
SUBNET = "SUBNET",
RELAY = "RELAY",
ROLE = "ROLE"
}
// Type definition for API_VERSIONS configuration
export type ApiVersionsConfig = {
// Global default API version
DEFAULT: string;
// Resource-specific default API versions
RESOURCE_DEFAULTS: {
[key in ResourceType]: string;
};
// Shell-type specific configurations
SHELL_TYPES: {
[key in TerminalKind]?: {
// Resource-specific overrides for this shell type
[key in ResourceType]?: string;
};
};
};

View File

@@ -1,29 +0,0 @@
/**
* Standardized terminal logging functions for consistent formatting
*/
export const terminalLog = {
// Section headers
header: (message: string) => `\n\x1B[1;34m┌─ ${message} ${"─".repeat(Math.max(45 - message.length, 0))}\x1B[0m`,
subheader: (message: string) => `\x1B[1;36m├ ${message}\x1B[0m`,
sectionEnd: () => `\x1B[1;34m└${"─".repeat(50)}\x1B[0m\n`,
// Status messages
success: (message: string) => `\x1B[32m✓ ${message}\x1B[0m`,
warning: (message: string) => `\x1B[33m⚠ ${message}\x1B[0m`,
error: (message: string) => `\x1B[31m✗ ${message}\x1B[0m`,
info: (message: string) => `\x1B[34m${message}\x1B[0m`,
// Resource information
database: (message: string) => `\x1B[35m🔶 Database: ${message}\x1B[0m`,
vnet: (message: string) => `\x1B[36m🔷 Network: ${message}\x1B[0m`,
cloudshell: (message: string) => `\x1B[32m🔷 CloudShell: ${message}\x1B[0m`,
// Data formatting
item: (label: string, value: string) => `${label}: \x1B[32m${value}\x1B[0m`,
progress: (operation: string, status: string) => `\x1B[34m${operation}: \x1B[36m${status}\x1B[0m`,
// User interaction
prompt: (message: string) => `\x1B[1;37m${message}\x1B[0m`,
separator: () => `\x1B[30;1m${"─".repeat(50)}\x1B[0m`
};
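A short usage sketch, assuming an xterm Terminal instance; the messages mirror the ones written by the handlers elsewhere in this change (the import path is assumed):

import { Terminal } from "xterm";
import { terminalLog } from "./LogFormatter"; // assumed module path

const terminal = new Terminal();
terminal.writeln(terminalLog.header("Network Resource Configuration"));
terminal.writeln(terminalLog.item("VNet", "cloudshell-vnet-12345"));
terminal.writeln(terminalLog.progress("VNet deployment", "Updating"));
terminal.writeln(terminalLog.success("VNet created successfully"));
terminal.writeln(terminalLog.sectionEnd());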

View File

@@ -1,74 +0,0 @@
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* API versions configuration for CloudShell
*/
import { TerminalKind } from "../../../../Contracts/ViewModels";
import { ResourceType } from "./DataModels";
/**
* Configuration for API versions used by the CloudShell
*/
export type ApiVersionsConfig = {
DEFAULT: string;
RESOURCE_DEFAULTS: Record<ResourceType, string>;
SHELL_TYPES: Record<TerminalKind, Record<ResourceType, string>>;
}
/**
* Default API versions configuration
*/
export const DEFAULT_API_VERSIONS: ApiVersionsConfig = {
DEFAULT: '2024-07-01',
RESOURCE_DEFAULTS: {
[ResourceType.DATABASE]: '2024-11-15',
[ResourceType.NETWORK]: '2024-07-01',
[ResourceType.VNET]: '2024-07-01',
[ResourceType.SUBNET]: '2024-07-01',
[ResourceType.RELAY]: '2022-10-01',
[ResourceType.ROLE]: '2022-04-01',
},
SHELL_TYPES: {
[TerminalKind.Mongo]: {
[ResourceType.DATABASE]: '2024-11-15',
[ResourceType.NETWORK]: '2024-07-01',
[ResourceType.VNET]: '2024-07-01',
[ResourceType.SUBNET]: '2024-07-01',
[ResourceType.RELAY]: '2024-01-01',
[ResourceType.ROLE]: '2022-04-01',
},
[TerminalKind.VCoreMongo]: {
[ResourceType.DATABASE]: '2024-07-01',
[ResourceType.NETWORK]: '2024-07-01',
[ResourceType.VNET]: '2024-07-01',
[ResourceType.SUBNET]: '2024-07-01',
[ResourceType.RELAY]: '2024-01-01',
[ResourceType.ROLE]: '2022-04-01',
},
[TerminalKind.Postgres]: {
[ResourceType.DATABASE]: '2024-11-15',
[ResourceType.NETWORK]: '2024-07-01',
[ResourceType.VNET]: '2024-07-01',
[ResourceType.SUBNET]: '2024-07-01',
[ResourceType.RELAY]: '2024-01-01',
[ResourceType.ROLE]: '2022-04-01',
},
[TerminalKind.Cassandra]: {
[ResourceType.DATABASE]: '2024-11-15',
[ResourceType.NETWORK]: '2024-07-01',
[ResourceType.VNET]: '2024-07-01',
[ResourceType.SUBNET]: '2024-07-01',
[ResourceType.RELAY]: '2024-01-01',
[ResourceType.ROLE]: '2022-04-01',
},
[TerminalKind.Default]: {
[ResourceType.DATABASE]: undefined,
[ResourceType.NETWORK]: undefined,
[ResourceType.VNET]: undefined,
[ResourceType.SUBNET]: undefined,
[ResourceType.RELAY]: undefined,
[ResourceType.ROLE]: undefined,
},
},
};
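The ARM helpers earlier in this change call getApiVersion(ResourceType.X), which is not part of this diff. A minimal sketch of how such a resolver could consume this configuration, assuming the lookup order shell-type override, then resource default, then global default (the function name, parameter list, and import paths are assumptions for illustration):

import { TerminalKind } from "../../../../Contracts/ViewModels";
import { DEFAULT_API_VERSIONS } from "./ApiVersions"; // assumed module path
import { ResourceType } from "./DataModels";

// Hypothetical resolver: shell-type override -> resource default -> global default.
export function resolveApiVersion(resourceType: ResourceType, shellType?: TerminalKind): string {
  const shellOverride = shellType !== undefined
    ? DEFAULT_API_VERSIONS.SHELL_TYPES[shellType]?.[resourceType]
    : undefined;
  return shellOverride
    ?? DEFAULT_API_VERSIONS.RESOURCE_DEFAULTS[resourceType]
    ?? DEFAULT_API_VERSIONS.DEFAULT;
}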

View File

@@ -1,163 +0,0 @@
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Data models for CloudShell
*/
export const enum OsType {
Linux = "linux",
Windows = "windows"
}
export const enum ShellType {
Bash = "bash",
PowerShellCore = "pwsh"
}
export const enum NetworkType {
Default = "Default",
Isolated = "Isolated"
}
export const enum SessionType {
Mounted = "Mounted",
Ephemeral = "Ephemeral"
}
export const enum UserInputs {
NoReset = "1",
ConfigureVNet = "2",
ResetVNet = "3"
};
export type Settings = {
properties: UserSettingProperties
};
export type UserSettingProperties = {
networkType: string;
preferredLocation: string;
preferredOsType: OsType;
preferredShellType: ShellType;
userSubscription: string;
sessionType: SessionType;
vnetSettings: VnetSettings;
}
export type VnetSettings = {
networkProfileResourceId?: string;
relayNamespaceResourceId?: string;
location?: string;
}
export type ProvisionConsoleResponse = {
properties: {
osType: OsType;
provisioningState: string;
uri: string;
};
};
export type Authorization = {
token: string;
};
export type ConnectTerminalResponse = {
id: string;
idleTimeout: string;
rootDirectory: string;
socketUri: string;
tokenUpdated: boolean;
};
export type VnetModel = {
name: string;
id: string;
etag: string;
type: string;
location: string;
tags: Record<string, string>;
properties: {
provisioningState: string;
resourceGuid: string;
addressSpace: {
addressPrefixes: string[];
};
encryption: {
enabled: boolean;
enforcement: string;
};
privateEndpointVNetPolicies: string;
subnets: Array<{
name: string;
id: string;
etag: string;
type: string;
properties: {
provisioningState: string;
addressPrefixes?: string[];
addressPrefix?: string;
networkSecurityGroup?: { id: string };
ipConfigurations?: { id: string }[];
ipConfigurationProfiles?: { id: string }[];
privateEndpoints?: { id: string }[];
serviceEndpoints?: Array<{
provisioningState: string;
service: string;
locations: string[];
}>;
delegations?: Array<{
name: string;
id: string;
etag: string;
type: string;
properties: {
provisioningState: string;
serviceName: string;
actions: string[];
};
}>;
purpose?: string;
privateEndpointNetworkPolicies?: string;
privateLinkServiceNetworkPolicies?: string;
};
}>;
virtualNetworkPeerings: any[];
enableDdosProtection: boolean;
};
};
export type RelayNamespace = {
id: string;
name: string;
type: string;
location: string;
tags: Record<string, string>;
properties: {
metricId: string;
serviceBusEndpoint: string;
provisioningState: string;
status: string;
createdAt: string;
updatedAt: string;
};
sku: {
name: string;
tier: string;
};
};
export type RelayNamespaceResponse = {
value: RelayNamespace[];
};
/**
* Resource types for API versioning
*/
export enum ResourceType {
NETWORK = "NETWORK",
DATABASE = "DATABASE",
VNET = "VNET",
SUBNET = "SUBNET",
RELAY = "RELAY",
ROLE = "ROLE"
}

View File

@@ -1,94 +0,0 @@
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Firewall handling functionality for CloudShell
*/
import { Terminal } from "xterm";
import { userContext } from "../../../../UserContext";
import { hasFirewallRestrictions } from "../../Shared/CheckFirewallRules";
import { getAccountDetails, updateDatabaseAccount } from "../Data/CloudShellApiClient";
import { askConfirmation } from "../Utils/CommonUtils";
import { terminalLog } from "../Utils/LogFormatter";
export class FirewallHandler {
/**
* Checks if firewall configuration is needed for CloudShell
*/
public static async checkFirewallConfiguration(terminal: Terminal): Promise<boolean> {
if (!hasFirewallRestrictions()) {
return false; // No firewall rules to configure
}
terminal.writeln(terminalLog.header("Database Firewall Configuration"));
terminal.writeln(terminalLog.warning("Your database has firewall restrictions enabled"));
terminal.writeln(terminalLog.warning("CloudShell might need access through these restrictions"));
const shouldConfigureFirewall = await askConfirmation(
terminal,
"Would you like to check and configure firewall settings?"
);
if (!shouldConfigureFirewall) {
terminal.writeln(terminalLog.info("Skipping firewall configuration"));
return false;
}
return await this.configureFirewallForCloudShell(terminal);
}
/**
* Configures firewall for CloudShell access
*/
private static async configureFirewallForCloudShell(terminal: Terminal): Promise<boolean> {
try {
// Get current database account details
terminal.writeln(terminalLog.database("Retrieving current firewall configuration..."));
const dbAccount = userContext.databaseAccount;
const currentDbAccount = await getAccountDetails(dbAccount.id);
// Check if "Allow Azure Services" is already enabled
const ipRules = currentDbAccount.properties.ipRules || [];
const azureServicesEnabled = currentDbAccount.properties.publicNetworkAccess === "Enabled";
if (azureServicesEnabled) {
terminal.writeln(terminalLog.success("Azure services access is already enabled"));
return true;
}
// Ask user to enable Azure services access
terminal.writeln(terminalLog.warning("Azure services access is not enabled"));
terminal.writeln(terminalLog.info("CloudShell requires 'Allow Azure Services' to be enabled"));
const enableAzureServices = await askConfirmation(
terminal,
"Enable 'Allow Azure Services' for this database?"
);
if (!enableAzureServices) {
terminal.writeln(terminalLog.warning("CloudShell may not be able to connect without enabling Azure services access"));
return false;
}
// Update database account to enable Azure services access
terminal.writeln(terminalLog.info("Updating database firewall configuration..."));
// Create update payload - only modify firewall-related properties
const updatePayload = {
...currentDbAccount,
properties: {
...currentDbAccount.properties,
publicNetworkAccess: "Enabled"
}
};
await updateDatabaseAccount(dbAccount.id, updatePayload);
terminal.writeln(terminalLog.success("Database firewall updated successfully"));
terminal.writeln(terminalLog.success("Azure services access is now enabled"));
return true;
} catch (error) {
terminal.writeln(terminalLog.error(`Error configuring firewall: ${error.message}`));
return false;
}
}
}

View File

@@ -1,99 +0,0 @@
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Network access configuration handler for CloudShell
*/
import { Terminal } from "xterm";
import { TerminalKind } from "../../../../Contracts/ViewModels";
import { IsPublicAccessAvailable } from "../../Shared/CheckFirewallRules";
import { getUserSettings } from "../Data/CloudShellApiClient";
import { VnetSettings } from "../Models/DataModels";
import { terminalLog } from "../Utils/LogFormatter";
import { VNetHandler } from "./VNetHandler";
export class NetworkAccessHandler {
/**
* Configures network access for the CloudShell based on shell type and network restrictions
*/
public static async configureNetworkAccess(
terminal: Terminal,
region: string,
shellType: TerminalKind
): Promise<{
vNetSettings: any;
isAllPublicAccessEnabled: boolean;
}> {
// Check if public access is available for this shell type
const isAllPublicAccessEnabled = await IsPublicAccessAvailable(shellType);
// If public access is enabled, no need for VNet configuration
if (isAllPublicAccessEnabled) {
terminal.writeln(terminalLog.database("Public access enabled. Skipping VNet configuration."));
return {
vNetSettings: {},
isAllPublicAccessEnabled: true
};
}
// Public access is restricted, we need to configure a VNet or use existing one
terminal.writeln(terminalLog.database("Network restrictions detected"));
terminal.writeln(terminalLog.info("Loading CloudShell configuration..."));
// Get existing settings if available
const settings = await getUserSettings();
if (!settings) {
terminal.writeln(terminalLog.warning("No existing user settings found."));
}
// Retrieve CloudShell VNet settings if available
let cloudShellVnetSettings: VnetSettings | undefined;
if (settings) {
cloudShellVnetSettings = await VNetHandler.retrieveCloudShellVnetSettings(settings, terminal);
}
// If CloudShell has VNet settings, check with database config
let finalVNetSettings = {};
if (cloudShellVnetSettings && cloudShellVnetSettings.networkProfileResourceId) {
// Check if we should use existing VNet settings
const isContinueWithSameVnet = await VNetHandler.askForVNetConfigConsent(terminal, shellType);
if (isContinueWithSameVnet) {
// Check if the VNet is already configured in the database
const isVNetInDatabaseConfig = await VNetHandler.isCloudShellVNetInDatabaseConfig(cloudShellVnetSettings, terminal);
if (!isVNetInDatabaseConfig) {
terminal.writeln(terminalLog.warning("CloudShell VNet is not configured in database access list"));
const addToDatabase = await VNetHandler.askToAddVNetToDatabase(terminal, cloudShellVnetSettings);
if (addToDatabase) {
await VNetHandler.addCloudShellVNetToDatabase(cloudShellVnetSettings, terminal);
finalVNetSettings = cloudShellVnetSettings;
} else {
// User declined to add VNet to database, need to recreate
terminal.writeln(terminalLog.warning("Will configure new VNet..."));
cloudShellVnetSettings = undefined;
}
} else {
terminal.writeln(terminalLog.success("CloudShell VNet is already in database configuration"));
finalVNetSettings = cloudShellVnetSettings;
}
} else {
cloudShellVnetSettings = undefined; // User declined to use existing VNet settings
}
}
// If we don't have valid VNet settings, create new ones
if (!cloudShellVnetSettings || !cloudShellVnetSettings.networkProfileResourceId) {
terminal.writeln(terminalLog.subheader("Configuring network infrastructure"));
finalVNetSettings = await VNetHandler.configureCloudShellVNet(terminal, region);
// Add the new VNet to the database
await VNetHandler.addCloudShellVNetToDatabase(finalVNetSettings as VnetSettings, terminal);
}
return {
vNetSettings: finalVNetSettings,
isAllPublicAccessEnabled: false
};
}
}

View File

@@ -1,894 +0,0 @@
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* VNet handling functionality for CloudShell
*/
import { v4 as uuidv4 } from 'uuid';
import { Terminal } from "xterm";
import { TerminalKind } from "../../../../Contracts/ViewModels";
import { userContext } from "../../../../UserContext";
import { hasPrivateEndpointsRestrictions } from "../../Shared/CheckFirewallRules";
import {
createNetworkProfile,
createPrivateEndpoint,
createRelay,
createRoleOnNetworkProfile,
createRoleOnRelay,
getAccountDetails,
getDatabaseOperations,
getNetworkProfileInfo,
getRelay,
getSubnetInformation,
getVnet,
getVnetInformation,
updateDatabaseAccount,
updateSubnetInformation,
updateVnet
} from "../Data/CloudShellApiClient";
import { Settings, VnetSettings } from "../Models/DataModels";
import { askConfirmation, askQuestion, wait } from "../Utils/CommonUtils";
import { terminalLog } from "../Utils/LogFormatter";
// Constants for VNet configuration
const POLLING_INTERVAL_MS = 5000;
const MAX_RETRY_COUNT = 10;
const STANDARD_SKU = "Standard";
const DEFAULT_VNET_ADDRESS_PREFIX = "10.0.0.0/16";
const DEFAULT_SUBNET_ADDRESS_PREFIX = "10.0.1.0/24";
const DEFAULT_CONTAINER_INSTANCE_OID = "88536fb9-d60a-4aee-8195-041425d6e927";
export class VNetHandler {
/**
* Retrieves CloudShell VNet settings from user settings
*/
public static async retrieveCloudShellVnetSettings(settings: Settings, terminal: Terminal): Promise<VnetSettings> {
if (settings?.properties?.vnetSettings && Object.keys(settings.properties.vnetSettings).length > 0) {
try {
const netProfileInfo = await getNetworkProfileInfo<any>(settings.properties.vnetSettings.networkProfileResourceId);
terminal.writeln(terminalLog.header("Existing Network Configuration"));
const subnetId = netProfileInfo.properties.containerNetworkInterfaceConfigurations[0]
.properties.ipConfigurations[0].properties.subnet.id;
const vnetResourceId = subnetId.replace(/\/subnets\/[^/]+$/, '');
terminal.writeln(terminalLog.item("VNet", vnetResourceId));
terminal.writeln(terminalLog.item("Subnet", subnetId));
terminal.writeln(terminalLog.item("Location", settings.properties.vnetSettings.location));
terminal.writeln(terminalLog.item("Network Profile", settings.properties.vnetSettings.networkProfileResourceId));
terminal.writeln(terminalLog.item("Relay Namespace", settings.properties.vnetSettings.relayNamespaceResourceId));
return {
networkProfileResourceId: settings.properties.vnetSettings.networkProfileResourceId,
relayNamespaceResourceId: settings.properties.vnetSettings.relayNamespaceResourceId,
location: settings.properties.vnetSettings.location
};
} catch (err) {
terminal.writeln(terminalLog.warning("Error retrieving network profile. Will configure new network."));
return undefined;
}
}
return undefined;
}
/**
* Asks the user if they want to use existing network configuration (VNet or private endpoint)
*/
public static async askForVNetConfigConsent(terminal: Terminal, shellType: TerminalKind = null): Promise<boolean> {
// Check if this shell type supports only private endpoints
const isPrivateEndpointOnlyShell = shellType === TerminalKind.VCoreMongo;
// Check if the database has private endpoints configured
const hasPrivateEndpoints = hasPrivateEndpointsRestrictions();
// Determine which network type to mention based on shell type and database configuration
const networkType = isPrivateEndpointOnlyShell || hasPrivateEndpoints ? "private endpoint" : "network";
// Ask for consent
terminal.writeln("");
terminal.writeln(terminalLog.prompt(`Use this existing ${networkType} configuration?`));
terminal.writeln(terminalLog.info(`Answering 'N' will configure a new ${networkType} for CloudShell`));
return await askConfirmation(terminal, `Press Y/N to continue...`);
}
/**
* Checks if the CloudShell VNet is already in the database configuration
*/
public static async isCloudShellVNetInDatabaseConfig(vNetSettings: VnetSettings, terminal: Terminal): Promise<boolean> {
try {
terminal.writeln(terminalLog.subheader("Verifying if CloudShell VNet is configured in database"));
// Get the subnet ID from the CloudShell Network Profile
const netProfileInfo = await getNetworkProfileInfo<any>(vNetSettings.networkProfileResourceId);
if (!netProfileInfo?.properties?.containerNetworkInterfaceConfigurations?.[0]
?.properties?.ipConfigurations?.[0]?.properties?.subnet?.id) {
terminal.writeln(terminalLog.warning("Could not retrieve subnet ID from CloudShell VNet"));
return false;
}
const cloudShellSubnetId = netProfileInfo.properties.containerNetworkInterfaceConfigurations[0]
.properties.ipConfigurations[0].properties.subnet.id;
terminal.writeln(terminalLog.item("CloudShell Subnet", cloudShellSubnetId.split('/').pop() || ""));
// Check if this subnet ID is in the database VNet rules
const dbAccount = userContext.databaseAccount;
if (!dbAccount?.properties?.virtualNetworkRules) {
return false;
}
const vnetRules = dbAccount.properties.virtualNetworkRules;
// Check if the CloudShell subnet is already in the rules
return vnetRules.some(rule => rule.id === cloudShellSubnetId);
} catch (err) {
terminal.writeln(terminalLog.error("Error checking database VNet configuration"));
return false;
}
}
/**
* Asks the user if they want to add the CloudShell VNet to the database configuration
*/
public static async askToAddVNetToDatabase(terminal: Terminal, vNetSettings: VnetSettings): Promise<boolean> {
terminal.writeln("");
terminal.writeln(terminalLog.header("Network Configuration Mismatch"));
terminal.writeln(terminalLog.warning("Your CloudShell VNet is not in your database's allowed networks"));
terminal.writeln(terminalLog.warning("To connect from CloudShell, this VNet must be added to your database"));
return await askConfirmation(terminal, "Add CloudShell VNet to database configuration?");
}
/**
* Adds the CloudShell VNet to the database configuration
* Now supports both VNet rules and private endpoints
*/
public static async addCloudShellVNetToDatabase(vNetSettings: VnetSettings, terminal: Terminal): Promise<void> {
try {
terminal.writeln(terminalLog.header("Updating database network configuration"));
// Step 1: Get the subnet ID from CloudShell Network Profile
const { cloudShellSubnetId, cloudShellVnetId } = await this.getCloudShellNetworkIds(vNetSettings, terminal);
// Step 2: Get current database account details
const { currentDbAccount } = await this.getDatabaseAccountDetails(terminal);
// Step 3: Determine if database uses private endpoints
const usesPrivateEndpoints = hasPrivateEndpointsRestrictions() ||
(currentDbAccount.properties.privateEndpointConnections?.length > 0);
// Log which networking mode we're using
if (usesPrivateEndpoints) {
terminal.writeln(terminalLog.info("Database is configured with private endpoints"));
} else {
terminal.writeln(terminalLog.info("Database is configured with VNet rules"));
}
// Step 4: Check if connection is already configured
if (usesPrivateEndpoints) {
if (await this.isPrivateEndpointAlreadyConfigured(cloudShellVnetId, currentDbAccount, terminal)) {
return;
}
} else {
if (await this.isVNetAlreadyConfigured(cloudShellSubnetId, currentDbAccount, terminal)) {
return;
}
}
// Step 5: Check network resource statuses and ongoing operations
const { vnetInfo, subnetInfo, operationInProgress } =
await this.checkNetworkResourceStatuses(cloudShellSubnetId, cloudShellVnetId, currentDbAccount.id, terminal);
// Step 6: If no operation in progress, update the configuration
if (!operationInProgress) {
if (usesPrivateEndpoints) {
// Create or update private endpoint configuration
await this.configurePrivateEndpoint(
cloudShellSubnetId,
vnetInfo.location,
currentDbAccount.id,
terminal
);
} else {
// Enable CosmosDB service endpoint on subnet if needed (for VNet rules)
await this.enableCosmosDBServiceEndpoint(cloudShellSubnetId, subnetInfo, terminal);
// Update database account with VNet rule
await this.updateDatabaseWithVNetRule(currentDbAccount, cloudShellSubnetId, currentDbAccount.id, terminal);
}
} else {
terminal.writeln(terminalLog.info("Monitoring existing network operation..."));
// Step 7: Monitor the update progress
await this.monitorVNetAdditionProgress(cloudShellSubnetId, currentDbAccount.id, terminal);
}
} catch (err) {
terminal.writeln(terminalLog.error(`Error updating database network configuration: ${err.message}`));
throw err;
}
}
/**
* Checks if a private endpoint is already configured for the CloudShell VNet
*/
private static async isPrivateEndpointAlreadyConfigured(
cloudShellVnetId: string,
currentDbAccount: any,
terminal: Terminal
): Promise<boolean> {
// Check if private endpoints exist and are properly configured for this VNet
const hasConfiguredEndpoint = currentDbAccount.properties.privateEndpointConnections?.some(
(connection: any) => {
const isApproved = connection.properties.privateLinkServiceConnectionState.status === 'Approved';
// We would need to check if the endpoint is in the CloudShell VNet
// For simplicity, we're assuming connection.properties.networkInterface contains this info
const endpointVNetId = connection.properties.networkInterface?.id?.split('/subnets/')[0];
return isApproved && endpointVNetId === cloudShellVnetId;
}
);
if (hasConfiguredEndpoint) {
terminal.writeln(terminalLog.success("CloudShell private endpoint is already configured"));
return true;
}
return false;
}
/**
* Configures a private endpoint for the CloudShell VNet to connect to the database
*/
private static async configurePrivateEndpoint(
cloudShellSubnetId: string,
vnetLocation: any,
dbAccountId: string,
terminal: Terminal
): Promise<void> {
// Extract necessary information from IDs
const subnetIdParts = cloudShellSubnetId.split('/');
const subnetIndex = subnetIdParts.indexOf('subnets');
const subnetName = subnetIdParts[subnetIndex + 1];
const resourceGroup = subnetIdParts[4];
const subscriptionId = subnetIdParts[2];
// Generate a unique name for the private endpoint
const privateEndpointName = `pe-cloudshell-cosmos-${Math.floor(10000 + Math.random() * 90000)}`;
terminal.writeln(terminalLog.subheader("Creating private endpoint for CloudShell"));
terminal.writeln(terminalLog.item("Private Endpoint Name", privateEndpointName));
terminal.writeln(terminalLog.item("Target Subnet", subnetName));
// Construct the private endpoint creation payload
const privateEndpointPayload = {
location: vnetLocation,
properties: {
privateLinkServiceConnections: [
{
name: privateEndpointName,
properties: {
privateLinkServiceId: dbAccountId,
groupIds: [
"MongoDB"
],
requestMessage: "CloudShell connectivity request"
},
type: "Microsoft.Network/privateEndpoints/privateLinkServiceConnections"
}
],
subnet: {
id: cloudShellSubnetId
}
}
};
// Send the request to create the private endpoint via the ARM helper (createPrivateEndpoint)
terminal.writeln(terminalLog.info("Submitting private endpoint creation request"));
try {
const privateEndpointUrl = `/subscriptions/${subscriptionId}/resourceGroups/${resourceGroup}/providers/Microsoft.Network/privateEndpoints/${privateEndpointName}`;
await createPrivateEndpoint(privateEndpointUrl, privateEndpointPayload, "2024-05-01");
terminal.writeln(terminalLog.success("Private endpoint creation request submitted"));
terminal.writeln(terminalLog.warning("Please approve the private endpoint connection in the Azure portal"));
terminal.writeln(terminalLog.info("Note: Private endpoint operations may take several minutes to complete"));
} catch (err) {
terminal.writeln(terminalLog.error(`Failed to create private endpoint: ${err.message}`));
throw err;
}
}
/**
* Gets the subnet and VNet IDs from CloudShell Network Profile
*/
private static async getCloudShellNetworkIds(vNetSettings: VnetSettings, terminal: Terminal): Promise<{ cloudShellSubnetId: string; cloudShellVnetId: string }> {
const netProfileInfo = await getNetworkProfileInfo<any>(vNetSettings.networkProfileResourceId);
if (!netProfileInfo?.properties?.containerNetworkInterfaceConfigurations?.[0]
?.properties?.ipConfigurations?.[0]?.properties?.subnet?.id) {
throw new Error("Could not retrieve subnet ID from CloudShell VNet");
}
const cloudShellSubnetId = netProfileInfo.properties.containerNetworkInterfaceConfigurations[0]
.properties.ipConfigurations[0].properties.subnet.id;
// Extract VNet ID from subnet ID
const cloudShellVnetId = cloudShellSubnetId.substring(0, cloudShellSubnetId.indexOf('/subnets/'));
terminal.writeln(terminalLog.subheader("Identified CloudShell network resources"));
terminal.writeln(terminalLog.item("Subnet", cloudShellSubnetId.split('/').pop() || ""));
terminal.writeln(terminalLog.item("VNet", cloudShellVnetId.split('/').pop() || ""));
return { cloudShellSubnetId, cloudShellVnetId };
}
/**
* Gets the database account details
*/
private static async getDatabaseAccountDetails(terminal: Terminal): Promise<{ currentDbAccount: any }> {
const dbAccount = userContext.databaseAccount;
terminal.writeln(terminalLog.database("Verifying current configuration"));
const currentDbAccount = await getAccountDetails(dbAccount.id);
return { currentDbAccount };
}
/**
* Checks if the VNet is already configured in the database
*/
private static async isVNetAlreadyConfigured(cloudShellSubnetId: string, currentDbAccount: any, terminal: Terminal): Promise<boolean> {
const vnetAlreadyConfigured = currentDbAccount.properties.virtualNetworkRules &&
currentDbAccount.properties.virtualNetworkRules.some(
(rule: any) => rule.id === cloudShellSubnetId
);
if (vnetAlreadyConfigured) {
terminal.writeln(terminalLog.success("CloudShell VNet is already in database configuration"));
return true;
}
return false;
}
/**
* Checks the status of network resources and ongoing operations
*/
private static async checkNetworkResourceStatuses(
cloudShellSubnetId: string,
cloudShellVnetId: string,
dbAccountId: string,
terminal: Terminal
): Promise<{ vnetInfo: any; subnetInfo: any; operationInProgress: boolean }> {
terminal.writeln(terminalLog.subheader("Checking network resource status"));
let operationInProgress = false;
let vnetInfo: any = null;
let subnetInfo: any = null;
if (cloudShellVnetId && cloudShellSubnetId) {
// Get VNet and subnet resource status
vnetInfo = await getVnetInformation<any>(cloudShellVnetId);
subnetInfo = await getSubnetInformation<any>(cloudShellSubnetId);
// Check if there's an ongoing operation on the VNet or subnet
const vnetProvisioningState = vnetInfo?.properties?.provisioningState;
const subnetProvisioningState = subnetInfo?.properties?.provisioningState;
if (vnetProvisioningState !== 'Succeeded' && vnetProvisioningState !== 'Failed') {
terminal.writeln(terminalLog.warning(`VNet operation in progress: ${vnetProvisioningState}`));
operationInProgress = true;
}
if (subnetProvisioningState !== 'Succeeded' && subnetProvisioningState !== 'Failed') {
terminal.writeln(terminalLog.warning(`Subnet operation in progress: ${subnetProvisioningState}`));
operationInProgress = true;
}
// Also check database operations
const latestDbAccount = await getAccountDetails<any>(dbAccountId);
if (latestDbAccount.properties.virtualNetworkRules) {
const isPendingAdd = latestDbAccount.properties.virtualNetworkRules.some(
(rule: any) => rule.id === cloudShellSubnetId && rule.status === 'Updating'
);
if (isPendingAdd) {
terminal.writeln(terminalLog.warning("CloudShell VNet addition to database is already in progress"));
operationInProgress = true;
}
}
}
return { vnetInfo, subnetInfo, operationInProgress };
}
/**
* Enables the CosmosDB service endpoint on a subnet if needed
*/
private static async enableCosmosDBServiceEndpoint(cloudShellSubnetId: string, subnetInfo: any, terminal: Terminal): Promise<void> {
if (!subnetInfo) {
terminal.writeln(terminalLog.warning("Unable to check subnet endpoint configuration"));
return;
}
terminal.writeln(terminalLog.subheader("Checking and configuring CosmosDB service endpoint"));
// Parse the subnet ID to get resource information
const subnetIdParts = cloudShellSubnetId.split('/');
const subnetIndex = subnetIdParts.indexOf('subnets');
if (subnetIndex > 0) {
const subnetName = subnetIdParts[subnetIndex + 1];
const vnetName = subnetIdParts[subnetIndex - 1];
const resourceGroup = subnetIdParts[4];
const subscriptionId = subnetIdParts[2];
// Get the subnet URL
const subnetUrl = `/subscriptions/${subscriptionId}/resourceGroups/${resourceGroup}/providers/Microsoft.Network/virtualNetworks/${vnetName}/subnets/${subnetName}`;
// Check if CosmosDB service endpoint is already enabled
const hasCosmosDBEndpoint = subnetInfo.properties.serviceEndpoints &&
subnetInfo.properties.serviceEndpoints.some(
(endpoint: any) => endpoint.service === 'Microsoft.AzureCosmosDB'
);
if (!hasCosmosDBEndpoint) {
terminal.writeln(terminalLog.warning("Enabling CosmosDB service endpoint on subnet..."));
// Create update payload with CosmosDB service endpoint
const serviceEndpoints = [
...(subnetInfo.properties.serviceEndpoints || []),
{ service: 'Microsoft.AzureCosmosDB' }
];
// Update the subnet configuration while preserving existing properties
const subnetUpdatePayload = {
...subnetInfo,
properties: {
...subnetInfo.properties,
serviceEndpoints: serviceEndpoints
}
};
// Apply the subnet update
await updateSubnetInformation(subnetUrl, subnetUpdatePayload);
// Wait for the subnet update to complete
let subnetUpdateComplete = false;
let subnetRetryCount = 0;
while (!subnetUpdateComplete && subnetRetryCount < MAX_RETRY_COUNT) {
const updatedSubnet = await getSubnetInformation<any>(subnetUrl);
const endpointEnabled = updatedSubnet.properties.serviceEndpoints &&
updatedSubnet.properties.serviceEndpoints.some(
(endpoint: any) => endpoint.service === 'Microsoft.AzureCosmosDB'
);
if (endpointEnabled && updatedSubnet.properties.provisioningState === 'Succeeded') {
subnetUpdateComplete = true;
terminal.writeln(terminalLog.success("CosmosDB service endpoint enabled successfully"));
} else {
subnetRetryCount++;
terminal.writeln(terminalLog.progress("Subnet update", `Waiting (${subnetRetryCount}/${MAX_RETRY_COUNT})`));
await wait(POLLING_INTERVAL_MS);
}
}
if (!subnetUpdateComplete) {
throw new Error("Failed to enable CosmosDB service endpoint on subnet");
}
} else {
terminal.writeln(terminalLog.success("CosmosDB service endpoint is already enabled"));
}
}
}
/**
* Updates the database account with a new VNet rule
*/
private static async updateDatabaseWithVNetRule(currentDbAccount: any, cloudShellSubnetId: string, dbAccountId: string, terminal: Terminal): Promise<void> {
// Create a deep copy of the current database account
const updatePayload = JSON.parse(JSON.stringify(currentDbAccount));
// Update only the network-related properties
updatePayload.properties.virtualNetworkRules = [
...(currentDbAccount.properties.virtualNetworkRules || []),
{ id: cloudShellSubnetId, ignoreMissingVNetServiceEndpoint: false }
];
updatePayload.properties.isVirtualNetworkFilterEnabled = true;
// Update the database account
terminal.writeln(terminalLog.subheader("Submitting VNet update request to database"));
await updateDatabaseAccount(dbAccountId, updatePayload);
terminal.writeln(terminalLog.success("Updated Database account with Cloud Shell Vnet"));
}
/**
* Monitors the progress of adding a VNet to the database account
*/
private static async monitorVNetAdditionProgress(cloudShellSubnetId: string, dbAccountId: string, terminal: Terminal): Promise<void> {
let updateComplete = false;
let retryCount = 0;
let lastStatus = "";
let lastProgress = 0;
let lastOpId = "";
terminal.writeln(terminalLog.subheader("Monitoring database update progress"));
while (!updateComplete && retryCount < MAX_RETRY_COUNT) {
// Check if the VNet is now in the database account
const updatedDbAccount = await getAccountDetails<any>(dbAccountId);
const isVNetAdded = updatedDbAccount.properties.virtualNetworkRules?.some(
(rule: any) => rule.id === cloudShellSubnetId && (!rule.status || rule.status === 'Succeeded')
);
if (isVNetAdded) {
updateComplete = true;
terminal.writeln(terminalLog.success("CloudShell VNet successfully added to database configuration"));
break;
}
// If not yet added, check for operation progress
const operations = await getDatabaseOperations<any>(dbAccountId);
// Find network-related operations
const networkOps = operations.value?.filter(
(op: any) =>
(op.properties.description?.toLowerCase().includes('network') ||
op.properties.description?.toLowerCase().includes('vnet'))
) || [];
// Find active operations
const activeOp = networkOps.find((op: any) => op.properties.status === 'InProgress');
if (activeOp) {
// Show progress details if available
const currentStatus = activeOp.properties.status;
const progress = activeOp.properties.percentComplete || 0;
const opId = activeOp.name;
// Only update the terminal if something has changed
if (currentStatus !== lastStatus || progress !== lastProgress || opId !== lastOpId) {
// Create a progress bar
const progressBarLength = 20;
const filledLength = Math.floor(progress / 100 * progressBarLength);
const progressBar = "█".repeat(filledLength) + "░".repeat(progressBarLength - filledLength);
terminal.writeln(`\x1B[34m [${progressBar}] ${progress}% - ${currentStatus}\x1B[0m`);
lastStatus = currentStatus;
lastProgress = progress;
lastOpId = opId;
}
} else if (networkOps.length > 0) {
// If there are completed operations, show their status
const lastCompletedOp = networkOps[0];
if (lastCompletedOp.properties.status !== lastStatus) {
terminal.writeln(terminalLog.progress("Operation status", lastCompletedOp.properties.status));
lastStatus = lastCompletedOp.properties.status;
}
}
retryCount++;
await wait(POLLING_INTERVAL_MS);
}
if (!updateComplete) {
terminal.writeln(terminalLog.warning("Database update timed out. Please check the Azure portal."));
}
}
/**
* Configures a new VNet for CloudShell
*/
public static async configureCloudShellVNet(terminal: Terminal, resolvedRegion: string): Promise<VnetSettings> {
// Use short, consistent names for the generated resources
const randomSuffix = Math.floor(10000 + Math.random() * 90000);
const subnetName = `cloudshell-subnet-${randomSuffix}`;
const vnetName = `cloudshell-vnet-${randomSuffix}`;
const networkProfileName = `cloudshell-network-profile-${randomSuffix}`;
const relayName = `cloudshell-relay-${randomSuffix}`;
terminal.writeln(terminalLog.header("Network Resource Configuration"));
const azureContainerInstanceOID = await askQuestion(
terminal,
"Enter Azure Container Instance OID (Refer. https://learn.microsoft.com/en-us/azure/cloud-shell/vnet/deployment#get-the-azure-container-instance-id)",
DEFAULT_CONTAINER_INSTANCE_OID
);
const vNetSubscriptionId = await askQuestion(
terminal,
"Enter Virtual Network Subscription ID",
userContext.subscriptionId
);
const vNetResourceGroup = await askQuestion(
terminal,
"Enter Virtual Network Resource Group",
userContext.resourceGroup
);
// Step 1: Create VNet with Subnet
terminal.writeln(terminalLog.header("Deploying Network Resources"));
const vNetConfigPayload = await this.createCloudShellVnet(
resolvedRegion,
subnetName,
terminal,
vnetName,
vNetSubscriptionId,
vNetResourceGroup
);
// Step 2: Create Network Profile
await this.createNetworkProfileWithVnet(
vNetSubscriptionId,
vNetResourceGroup,
vnetName,
subnetName,
resolvedRegion,
terminal,
networkProfileName
);
// Step 3: Create Network Relay
await this.createNetworkRelay(
resolvedRegion,
terminal,
relayName,
vNetSubscriptionId,
vNetResourceGroup
);
// Step 4: Assign Roles
terminal.writeln(terminalLog.header("Configuring Security Permissions"));
await this.assignRoleToNetworkProfile(
azureContainerInstanceOID,
vNetSubscriptionId,
terminal,
networkProfileName,
vNetResourceGroup
);
await this.assignRoleToRelay(
azureContainerInstanceOID,
vNetSubscriptionId,
terminal,
relayName,
vNetResourceGroup
);
// Step 5: Create and return VNet settings
const networkProfileResourceId = `/subscriptions/${vNetSubscriptionId}/resourceGroups/${vNetResourceGroup}/providers/Microsoft.Network/networkProfiles/${networkProfileName.replace(/[\n\r]/g, "")}`;
const relayResourceId = `/subscriptions/${vNetSubscriptionId}/resourceGroups/${vNetResourceGroup}/providers/Microsoft.Relay/namespaces/${relayName.replace(/[\n\r]/g, "")}`;
terminal.writeln(terminalLog.success("Network configuration complete"));
return {
networkProfileResourceId,
relayNamespaceResourceId: relayResourceId,
location: vNetConfigPayload.location
};
}
/**
* Creates a VNet for CloudShell
*/
private static async createCloudShellVnet(
resolvedRegion: string,
subnetName: string,
terminal: Terminal,
vnetName: string,
vNetSubscriptionId: string,
vNetResourceGroup: string
): Promise<any> {
const vNetConfigPayload = {
location: resolvedRegion,
properties: {
addressSpace: {
addressPrefixes: [DEFAULT_VNET_ADDRESS_PREFIX],
},
subnets: [
{
name: subnetName,
properties: {
addressPrefix: DEFAULT_SUBNET_ADDRESS_PREFIX,
delegations: [
{
name: "CloudShellDelegation",
properties: {
serviceName: "Microsoft.ContainerInstance/containerGroups"
}
}
],
},
},
],
},
};
terminal.writeln(terminalLog.vnet(`Creating VNet: ${vnetName}`));
let vNetResponse = await updateVnet<any>(
`/subscriptions/${vNetSubscriptionId}/resourceGroups/${vNetResourceGroup}/providers/Microsoft.Network/virtualNetworks/${vnetName}`,
vNetConfigPayload
);
while (vNetResponse?.properties?.provisioningState !== "Succeeded") {
vNetResponse = await getVnet<any>(
`/subscriptions/${vNetSubscriptionId}/resourceGroups/${vNetResourceGroup}/providers/Microsoft.Network/virtualNetworks/${vnetName}`
);
const vNetState = vNetResponse?.properties?.provisioningState;
if (vNetState !== "Succeeded" && vNetState !== "Failed") {
await wait(POLLING_INTERVAL_MS);
terminal.writeln(terminalLog.progress("VNet deployment", vNetState));
} else {
break;
}
}
terminal.writeln(terminalLog.success("VNet created successfully"));
return vNetConfigPayload;
}
/**
* Creates a Network Profile for CloudShell
*/
private static async createNetworkProfileWithVnet(
vNetSubscriptionId: string,
vNetResourceGroup: string,
vnetName: string,
subnetName: string,
resolvedRegion: string,
terminal: Terminal,
networkProfileName: string
): Promise<void> {
const subnetId = `/subscriptions/${vNetSubscriptionId}/resourceGroups/${vNetResourceGroup}/providers/Microsoft.Network/virtualNetworks/${vnetName}/subnets/${subnetName}`;
const createNetworkProfilePayload = {
location: resolvedRegion,
properties: {
containerNetworkInterfaceConfigurations: [
{
name: 'defaultContainerNicConfig',
properties: {
ipConfigurations: [
{
name: 'defaultContainerIpConfig',
properties: {
subnet: {
id: subnetId,
}
}
}
]
}
}
]
}
};
terminal.writeln(terminalLog.vnet("Creating Network Profile"));
let networkProfileResponse = await createNetworkProfile<any>(
`/subscriptions/${vNetSubscriptionId}/resourceGroups/${vNetResourceGroup}/providers/Microsoft.Network/networkProfiles/${networkProfileName}`,
createNetworkProfilePayload
);
while (networkProfileResponse?.properties?.provisioningState !== "Succeeded") {
networkProfileResponse = await getNetworkProfileInfo<any>(
`/subscriptions/${vNetSubscriptionId}/resourceGroups/${vNetResourceGroup}/providers/Microsoft.Network/networkProfiles/${networkProfileName}`
);
const networkProfileState = networkProfileResponse?.properties?.provisioningState;
if (networkProfileState !== "Succeeded" && networkProfileState !== "Failed") {
await wait(POLLING_INTERVAL_MS);
terminal.writeln(terminalLog.progress("Network Profile", networkProfileState));
} else {
break;
}
}
terminal.writeln(terminalLog.success("Network Profile created successfully"));
}
/**
* Creates a Network Relay for CloudShell
*/
private static async createNetworkRelay(
resolvedRegion: string,
terminal: Terminal,
relayName: string,
vNetSubscriptionId: string,
vNetResourceGroup: string
): Promise<void> {
const relayPayload = {
location: resolvedRegion,
sku: {
name: STANDARD_SKU,
tier: STANDARD_SKU,
}
};
terminal.writeln(terminalLog.vnet("Creating Relay Namespace"));
let relayResponse = await createRelay<any>(
`/subscriptions/${vNetSubscriptionId}/resourceGroups/${vNetResourceGroup}/providers/Microsoft.Relay/namespaces/${relayName}`,
relayPayload
);
while (relayResponse?.properties?.provisioningState !== "Succeeded") {
relayResponse = await getRelay<any>(
`/subscriptions/${vNetSubscriptionId}/resourceGroups/${vNetResourceGroup}/providers/Microsoft.Relay/namespaces/${relayName}`
);
const relayState = relayResponse?.properties?.provisioningState;
if (relayState !== "Succeeded" && relayState !== "Failed") {
await wait(POLLING_INTERVAL_MS);
terminal.writeln(terminalLog.progress("Relay Namespace", relayState));
} else {
break;
}
}
terminal.writeln(terminalLog.success("Relay Namespace created successfully"));
}
/**
* Assigns a role to a Network Profile
*/
private static async assignRoleToNetworkProfile(
azureContainerInstanceOID: string,
vNetSubscriptionId: string,
terminal: Terminal,
networkProfileName: string,
vNetResourceGroup: string
): Promise<void> {
const nfRoleName = uuidv4();
const networkProfileRoleAssignmentPayload = {
properties: {
principalId: azureContainerInstanceOID,
roleDefinitionId: `/subscriptions/${vNetSubscriptionId}/providers/Microsoft.Authorization/roleDefinitions/4d97b98b-1d4f-4787-a291-c67834d212e7`
}
};
terminal.writeln(terminalLog.info("Assigning permissions to Network Profile"));
await createRoleOnNetworkProfile<any>(
`/subscriptions/${vNetSubscriptionId}/resourceGroups/${vNetResourceGroup}/providers/Microsoft.Network/networkProfiles/${networkProfileName}/providers/Microsoft.Authorization/roleAssignments/${nfRoleName}`,
networkProfileRoleAssignmentPayload
);
terminal.writeln(terminalLog.success("Network Profile permissions assigned"));
}
/**
* Assigns a role to a Network Relay
*/
private static async assignRoleToRelay(
azureContainerInstanceOID: string,
vNetSubscriptionId: string,
terminal: Terminal,
relayName: string,
vNetResourceGroup: string
): Promise<void> {
const relayRoleName = uuidv4();
const relayRoleAssignmentPayload = {
properties: {
principalId: azureContainerInstanceOID,
roleDefinitionId: `/subscriptions/${vNetSubscriptionId}/providers/Microsoft.Authorization/roleDefinitions/b24988ac-6180-42a0-ab88-20f7382dd24c`,
}
};
terminal.writeln(terminalLog.info("Assigning permissions to Relay Namespace"));
await createRoleOnRelay<any>(
`/subscriptions/${vNetSubscriptionId}/resourceGroups/${vNetResourceGroup}/providers/Microsoft.Relay/namespaces/${relayName}/providers/Microsoft.Authorization/roleAssignments/${relayRoleName}`,
relayRoleAssignmentPayload
);
terminal.writeln(terminalLog.success("Relay Namespace permissions assigned"));
}
}
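Several helpers above parse ARM resource IDs positionally (subnetIdParts[2], subnetIdParts[4], indexOf('subnets')). A worked example of that layout, with an illustrative subscription ID and resource group:

// An ARM subnet ID splits into fixed positions when divided on "/":
const exampleSubnetId =
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/my-rg" +
"/providers/Microsoft.Network/virtualNetworks/cloudshell-vnet-12345/subnets/cloudshell-subnet-12345";
const parts = exampleSubnetId.split("/");
// parts[0] is "" (leading slash), parts[1] is "subscriptions"
const subscriptionId = parts[2];                        // subscription GUID
const resourceGroup = parts[4];                         // "my-rg"
const subnetName = parts[parts.indexOf("subnets") + 1]; // "cloudshell-subnet-12345"
// Everything before "/subnets/" is the parent VNet resource ID.
const vnetId = exampleSubnetId.substring(0, exampleSubnetId.indexOf("/subnets/"));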

View File

@@ -1,80 +0,0 @@
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Cassandra shell type handler
*/
import { Terminal } from "xterm";
import { TerminalKind } from "../../../../Contracts/ViewModels";
import { userContext } from "../../../../UserContext";
import { listKeys } from "../../../../Utils/arm/generatedClients/cosmos/databaseAccounts";
import { setShellType } from "../Data/CloudShellApiClient";
import { NetworkAccessHandler } from "../Network/NetworkAccessHandler";
import { getHostFromUrl } from "../Utils/CommonUtils";
import { ShellTypeConfig } from "./ShellTypeFactory";
export class CassandraShellHandler implements ShellTypeConfig {
private shellType: TerminalKind = TerminalKind.Cassandra;
constructor() {
setShellType(this.shellType);
}
public getShellName(): string {
return "Cassandra";
}
public async getInitialCommands(): Promise<string> {
const dbAccount = userContext.databaseAccount;
const endpoint = dbAccount.properties.cassandraEndpoint;
// Get database key
const dbName = dbAccount.name;
let key = "";
if (dbName) {
const keys = await listKeys(userContext.subscriptionId, userContext.resourceGroup, dbName);
key = keys?.primaryMasterKey || "";
}
const config = {
host: getHostFromUrl(endpoint),
name: dbAccount.name,
password: key,
endpoint: endpoint
};
return this.getCommands(config).join("\n").concat("\n");
}
public async configureNetworkAccess(terminal: Terminal, region: string): Promise<{
vNetSettings: any;
isAllPublicAccessEnabled: boolean;
}> {
return await NetworkAccessHandler.configureNetworkAccess(terminal, region, this.shellType);
}
private getCommands(config: any): string[] {
return [
// 1. Fetch and display location details in a readable format
"curl -s https://ipinfo.io | jq -r '\"Region: \" + .region + \" Country: \" + .country + \" City: \" + .city + \" IP Addr: \" + .ip'",
// 2. Check if cqlsh is installed; if not, proceed with installation
"if ! command -v cqlsh &> /dev/null; then echo '⚠️ cqlsh not found. Installing...'; fi",
// 3. Download Cassandra if not installed
"if ! command -v cqlsh &> /dev/null; then curl -LO https://archive.apache.org/dist/cassandra/5.0.3/apache-cassandra-5.0.3-bin.tar.gz; fi",
// 4. Extract Cassandra package if not installed
"if ! command -v cqlsh &> /dev/null; then tar -xvzf apache-cassandra-5.0.3-bin.tar.gz; fi",
// 5. Move Cassandra binaries if not installed
"if ! command -v cqlsh &> /dev/null; then mkdir -p ~/cassandra && mv apache-cassandra-5.0.3/* ~/cassandra/; fi",
// 6. Add Cassandra to PATH if not installed
"if ! command -v cqlsh &> /dev/null; then echo 'export PATH=$HOME/cassandra/bin:$PATH' >> ~/.bashrc; fi",
// 7. Set environment variables for SSL
"if ! command -v cqlsh &> /dev/null; then echo 'export SSL_VERSION=TLSv1_2' >> ~/.bashrc; fi",
"if ! command -v cqlsh &> /dev/null; then echo 'export SSL_VALIDATE=false' >> ~/.bashrc; fi",
// 8. Source .bashrc to update PATH (even if cqlsh was already installed)
"source ~/.bashrc",
// 9. Verify cqlsh installation
"cqlsh --version",
// 10. Login to Cassandra
`cqlsh ${config.host} 10350 -u ${config.name} -p ${config.password} --ssl --protocol-version=4`
];
}
}

View File

@@ -1,77 +0,0 @@
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Mongo shell type handler
*/
import { Terminal } from "xterm";
import { TerminalKind } from "../../../../Contracts/ViewModels";
import { userContext } from "../../../../UserContext";
import { listKeys } from "../../../../Utils/arm/generatedClients/cosmos/databaseAccounts";
import { setShellType } from "../Data/CloudShellApiClient";
import { NetworkAccessHandler } from "../Network/NetworkAccessHandler";
import { getHostFromUrl } from "../Utils/CommonUtils";
import { ShellTypeConfig } from "./ShellTypeFactory";
export class MongoShellHandler implements ShellTypeConfig {
private shellType: TerminalKind = TerminalKind.Mongo;
constructor() {
setShellType(this.shellType);
}
public getShellName(): string {
return "MongoDB";
}
public async getInitialCommands(): Promise<string> {
const dbAccount = userContext.databaseAccount;
const endpoint = dbAccount.properties.mongoEndpoint;
// Get database key
const dbName = dbAccount.name;
let key = "";
if (dbName) {
const keys = await listKeys(userContext.subscriptionId, userContext.resourceGroup, dbName);
key = keys?.primaryMasterKey || "";
}
const config = {
host: getHostFromUrl(endpoint),
name: dbAccount.name,
password: key,
endpoint: endpoint
};
return this.getCommands(config).join("\n").concat("\n");
}
public async configureNetworkAccess(terminal: Terminal, region: string): Promise<{
vNetSettings: any;
isAllPublicAccessEnabled: boolean;
}> {
return await NetworkAccessHandler.configureNetworkAccess(terminal, region, this.shellType);
}
private getCommands(config: any): string[] {
return [
// 1. Fetch and display location details in a readable format
"curl -s https://ipinfo.io | jq -r '\"Region: \" + .region + \" Country: \" + .country + \" City: \" + .city + \" IP Addr: \" + .ip'",
// 2. Check if mongosh is installed; if not, proceed with installation
"if ! command -v mongosh &> /dev/null; then echo '⚠️ mongosh not found. Installing...'; fi",
// 3. Download mongosh if not installed
"if ! command -v mongosh &> /dev/null; then curl -LO https://downloads.mongodb.com/compass/mongosh-2.3.8-linux-x64.tgz; fi",
// 4. Extract mongosh package if not installed
"if ! command -v mongosh &> /dev/null; then tar -xvzf mongosh-2.3.8-linux-x64.tgz; fi",
// 5. Move mongosh binaries if not installed
"if ! command -v mongosh &> /dev/null; then mkdir -p ~/mongosh && mv mongosh-2.3.8-linux-x64/* ~/mongosh/; fi",
// 6. Add mongosh to PATH if not installed
"if ! command -v mongosh &> /dev/null; then echo 'export PATH=$HOME/mongosh/bin:$PATH' >> ~/.bashrc; fi",
// 7. Source .bashrc to update PATH (even if mongosh was already installed)
"source ~/.bashrc",
// 8. Verify mongosh installation
"mongosh --version",
// 9. Login to MongoDB
`mongosh --host ${config.host} --port 10255 --username ${config.name} --password ${config.password} --tls --tlsAllowInvalidCertificates`
];
}
}

View File

@@ -1,82 +0,0 @@
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* PostgreSQL shell type handler
*/
import { Terminal } from "xterm";
import { TerminalKind } from "../../../../Contracts/ViewModels";
import { userContext } from "../../../../UserContext";
import { listKeys } from "../../../../Utils/arm/generatedClients/cosmos/databaseAccounts";
import { setShellType } from "../Data/CloudShellApiClient";
import { NetworkAccessHandler } from "../Network/NetworkAccessHandler";
import { getHostFromUrl } from "../Utils/CommonUtils";
import { ShellTypeConfig } from "./ShellTypeFactory";
export class PostgresShellHandler implements ShellTypeConfig {
private shellType: TerminalKind = TerminalKind.Postgres;
constructor() {
setShellType(this.shellType);
}
public getShellName(): string {
return "PostgreSQL";
}
public async getInitialCommands(): Promise<string> {
const dbAccount = userContext.databaseAccount;
const endpoint = dbAccount.properties.postgresqlEndpoint;
// Get database key
const dbName = dbAccount.name;
let key = "";
if (dbName) {
const keys = await listKeys(userContext.subscriptionId, userContext.resourceGroup, dbName);
key = keys?.primaryMasterKey || "";
}
const config = {
host: getHostFromUrl(endpoint),
name: dbAccount.name,
password: key,
endpoint: endpoint
};
return this.getCommands(config).join("\n").concat("\n");
}
public async configureNetworkAccess(terminal: Terminal, region: string): Promise<{
vNetSettings: any;
isAllPublicAccessEnabled: boolean;
}> {
return await NetworkAccessHandler.configureNetworkAccess(terminal, region, this.shellType);
}
private getCommands(config: any): string[] {
return [
// 1. Fetch and display location details in a readable format
"curl -s https://ipinfo.io | jq -r '\"Region: \" + .region + \" Country: \" + .country + \" City: \" + .city + \" IP Addr: \" + .ip'",
// 2. Check if psql is installed; if not, proceed with installation
"if ! command -v psql &> /dev/null; then echo '⚠️ psql not found. Installing...'; fi",
// 3. Download PostgreSQL if not installed
"if ! command -v psql &> /dev/null; then curl -LO https://ftp.postgresql.org/pub/source/v15.2/postgresql-15.2.tar.bz2; fi",
// 4. Extract PostgreSQL package if not installed
"if ! command -v psql &> /dev/null; then tar -xvjf postgresql-15.2.tar.bz2; fi",
// 5. Create a directory for PostgreSQL installation if not installed
"if ! command -v psql &> /dev/null; then mkdir -p ~/pgsql; fi",
// 6. Download readline (dependency for PostgreSQL) if not installed
"if ! command -v psql &> /dev/null; then curl -LO https://ftp.gnu.org/gnu/readline/readline-8.1.tar.gz; fi",
// 7. Extract readline package if not installed
"if ! command -v psql &> /dev/null; then tar -xvzf readline-8.1.tar.gz; fi",
// 8. Configure readline if not installed
"if ! command -v psql &> /dev/null; then cd readline-8.1 && ./configure --prefix=$HOME/pgsql; fi",
// 9. Add PostgreSQL to PATH if not installed
"if ! command -v psql &> /dev/null; then echo 'export PATH=$HOME/pgsql/bin:$PATH' >> ~/.bashrc; fi",
// 10. Source .bashrc to update PATH (even if psql was already installed)
"source ~/.bashrc",
// 11. Verify PostgreSQL installation
"psql --version",
// 12. Prompt for the database name and user, then connect over SSL
`read -p "Enter Database Name: " dbname && read -p "Enter Username: " username && psql "host=${config.endpoint} port=5432 dbname=$dbname user=$username sslmode=require"`
];
}
}

View File

@@ -1,57 +0,0 @@
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Factory for creating shell type handlers
*/
import { Terminal } from "xterm";
import { TerminalKind } from "../../../../Contracts/ViewModels";
import { CassandraShellHandler } from "./CassandraShellHandler";
import { MongoShellHandler } from "./MongoShellHandler";
import { PostgresShellHandler } from "./PostgresShellHandler";
import { VCoreMongoShellHandler } from "./VCoreMongoShellHandler";
export interface ShellTypeConfig {
getShellName(): string;
getInitialCommands(): Promise<string>;
configureNetworkAccess(terminal: Terminal, region: string): Promise<{
vNetSettings: any;
isAllPublicAccessEnabled: boolean;
}>;
}
export class ShellTypeHandler {
/**
* Gets the appropriate handler for the given shell type
*/
public static getHandler(shellType: TerminalKind): ShellTypeConfig {
switch (shellType) {
case TerminalKind.Postgres:
return new PostgresShellHandler();
case TerminalKind.Mongo:
return new MongoShellHandler();
case TerminalKind.VCoreMongo:
return new VCoreMongoShellHandler();
case TerminalKind.Cassandra:
return new CassandraShellHandler();
default:
throw new Error(`Unsupported shell type: ${shellType}`);
}
}
/**
* Gets the display name for a shell type
*/
public static getShellNameForDisplay(terminalKind: TerminalKind): string {
switch (terminalKind) {
case TerminalKind.Postgres:
return "PostgreSQL";
case TerminalKind.Mongo:
case TerminalKind.VCoreMongo:
return "MongoDB";
case TerminalKind.Cassandra:
return "Cassandra";
default:
return "";
}
}
}
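A short usage sketch for the factory, assuming an xterm Terminal instance is in scope (the helper function name is illustrative; import paths are taken from the file above):
import { Terminal } from "xterm";
import { TerminalKind } from "../../../../Contracts/ViewModels";
import { ShellTypeConfig, ShellTypeHandler } from "./ShellTypeFactory";

async function prepareShell(kind: TerminalKind, terminal: Terminal): Promise<string> {
  const handler: ShellTypeConfig = ShellTypeHandler.getHandler(kind); // throws for unsupported kinds
  terminal.writeln(`Starting ${handler.getShellName()} shell...`);
  return handler.getInitialCommands(); // newline-separated bootstrap script for the CloudShell socket
}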

View File

@@ -1,78 +0,0 @@
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* VCore MongoDB shell type handler
*/
import { Terminal } from "xterm";
import { TerminalKind } from "../../../../Contracts/ViewModels";
import { userContext } from "../../../../UserContext";
import { listKeys } from "../../../../Utils/arm/generatedClients/cosmos/databaseAccounts";
import { setShellType } from "../Data/CloudShellApiClient";
import { NetworkAccessHandler } from "../Network/NetworkAccessHandler";
import { getHostFromUrl } from "../Utils/CommonUtils";
import { ShellTypeConfig } from "./ShellTypeFactory";
export class VCoreMongoShellHandler implements ShellTypeConfig {
private shellType: TerminalKind = TerminalKind.VCoreMongo;
constructor() {
setShellType(this.shellType);
}
public getShellName(): string {
return "MongoDB VCore";
}
public async getInitialCommands(): Promise<string> {
const dbAccount = userContext.databaseAccount;
const endpoint = dbAccount.properties.vcoreMongoEndpoint;
// Get database key
const dbName = dbAccount.name;
let key = "";
if (dbName) {
const keys = await listKeys(userContext.subscriptionId, userContext.resourceGroup, dbName);
key = keys?.primaryMasterKey || "";
}
const config = {
host: getHostFromUrl(endpoint),
name: dbAccount.name,
password: key,
endpoint: endpoint
};
return this.getCommands(config).join("\n").concat("\n");
}
public async configureNetworkAccess(terminal: Terminal, region: string): Promise<{
vNetSettings: any;
isAllPublicAccessEnabled: boolean;
}> {
// VCore MongoDB uses private endpoints
return await NetworkAccessHandler.configureNetworkAccess(terminal, region, this.shellType);
}
private getCommands(config: any): string[] {
return [
// 1. Fetch and display location details in a readable format
"curl -s https://ipinfo.io | jq -r '\"Region: \" + .region + \" Country: \" + .country + \" City: \" + .city + \" IP Addr: \" + .ip'",
// 2. Check if mongosh is installed; if not, proceed with installation
"if ! command -v mongosh &> /dev/null; then echo '⚠️ mongosh not found. Installing...'; fi",
// 3. Download mongosh if not installed
"if ! command -v mongosh &> /dev/null; then curl -LO https://downloads.mongodb.com/compass/mongosh-2.3.8-linux-x64.tgz; fi",
// 4. Extract mongosh package if not installed
"if ! command -v mongosh &> /dev/null; then tar -xvzf mongosh-2.3.8-linux-x64.tgz; fi",
// 5. Move mongosh binaries if not installed
"if ! command -v mongosh &> /dev/null; then mkdir -p ~/mongosh && mv mongosh-2.3.8-linux-x64/* ~/mongosh/; fi",
// 6. Add mongosh to PATH if not installed
"if ! command -v mongosh &> /dev/null; then echo 'export PATH=$HOME/mongosh/bin:$PATH' >> ~/.bashrc; fi",
// 7. Source .bashrc to update PATH (even if mongosh was already installed)
"source ~/.bashrc",
// 8. Verify mongosh installation
"mongosh --version",
// 9. Login to MongoDB
`read -p "Enter username: " username && mongosh "mongodb+srv://$username:@${config.endpoint}/?authMechanism=SCRAM-SHA-256&retrywrites=false&maxIdleTimeMS=120000" --tls --tlsAllowInvalidCertificates`
];
}
}
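For reference, a hedged illustration of the final login line this handler emits once the endpoint is substituted (the endpoint value is hypothetical):
const endpoint = "myaccount.mongocluster.cosmos.azure.com"; // hypothetical vCore endpoint
const loginLine =
  `read -p "Enter username: " username && mongosh "mongodb+srv://$username:@${endpoint}/?authMechanism=SCRAM-SHA-256&retrywrites=false&maxIdleTimeMS=120000" --tls --tlsAllowInvalidCertificates`;
console.log(loginLine); // prompts for a username, then connects over TLS with SCRAM-SHA-256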

View File

@@ -1,123 +0,0 @@
import { IDisposable, ITerminalAddon, Terminal } from 'xterm';
interface IAttachOptions {
bidirectional?: boolean;
}
export class AttachAddon implements ITerminalAddon {
private _socket: WebSocket;
private _bidirectional: boolean;
private _disposables: IDisposable[] = [];
private _socketData: string;
constructor(socket: WebSocket, options?: IAttachOptions) {
this._socket = socket;
// always set binary type to arraybuffer, we do not handle blobs
this._socket.binaryType = 'arraybuffer';
this._bidirectional = !(options && options.bidirectional === false);
this._socketData = '';
}
public activate(terminal: Terminal): void {
this._disposables.push(
addSocketListener(this._socket, 'message', ev => {
let data: ArrayBuffer | string = ev.data;
const startStatusJson = 'ie_us';
const endStatusJson = 'ie_ue';
if (typeof data === 'object') {
const enc = new TextDecoder("utf-8");
data = enc.decode(ev.data as any);
}
// for example of json object look in TerminalHelper in the socket.onMessage
if (data.includes(startStatusJson) && data.includes(endStatusJson)) {
// process as one line
const statusData = data.split(startStatusJson)[1].split(endStatusJson)[0];
data = data.replace(statusData, '');
data = data.replace(startStatusJson, '');
data = data.replace(endStatusJson, '');
} else if (data.includes(startStatusJson)) {
// check for start
const partialStatusData = data.split(startStatusJson)[1];
this._socketData += partialStatusData;
data = data.replace(partialStatusData, '');
data = data.replace(startStatusJson, '');
} else if (data.includes(endStatusJson)) {
// check for end and process the command
const partialStatusData = data.split(endStatusJson)[0];
this._socketData += partialStatusData;
data = data.replace(partialStatusData, '');
data = data.replace(endStatusJson, '');
this._socketData = '';
} else if (this._socketData.length > 0) {
// check if the line is all data then just concatenate
this._socketData += data;
data = '';
}
terminal.write(data);
})
);
if (this._bidirectional) {
this._disposables.push(terminal.onData(data => this._sendData(data)));
this._disposables.push(terminal.onBinary(data => this._sendBinary(data)));
}
this._disposables.push(addSocketListener(this._socket, 'close', () => this.dispose()));
this._disposables.push(addSocketListener(this._socket, 'error', () => this.dispose()));
}
public dispose(): void {
for (const d of this._disposables) {
d.dispose();
}
}
private _sendData(data: string): void {
if (!this._checkOpenSocket()) {
return;
}
this._socket.send(data);
}
private _sendBinary(data: string): void {
if (!this._checkOpenSocket()) {
return;
}
const buffer = new Uint8Array(data.length);
for (let i = 0; i < data.length; ++i) {
buffer[i] = data.charCodeAt(i) & 255;
}
this._socket.send(buffer);
}
private _checkOpenSocket(): boolean {
switch (this._socket.readyState) {
case WebSocket.OPEN:
return true;
case WebSocket.CONNECTING:
throw new Error('Attach addon was loaded before socket was open');
case WebSocket.CLOSING:
return false;
case WebSocket.CLOSED:
throw new Error('Attach addon socket is closed');
default:
throw new Error('Unexpected socket state');
}
}
}
function addSocketListener<K extends keyof WebSocketEventMap>(socket: WebSocket, type: K, handler: (this: WebSocket, ev: WebSocketEventMap[K]) => any): IDisposable {
socket.addEventListener(type, handler);
return {
dispose: () => {
if (!handler) {
// Already disposed
return;
}
socket.removeEventListener(type, handler);
}
};
}
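A hedged usage sketch for the addon: attach an xterm Terminal to a CloudShell web socket. The socket URL, container element id, and the addon's import path are placeholders/assumptions.
import { Terminal } from "xterm";
import { AttachAddon } from "./AttachAddon"; // import path is an assumption

const terminal = new Terminal();
const socket = new WebSocket("wss://example.invalid/cloudshell/terminals/1"); // placeholder endpoint
terminal.loadAddon(new AttachAddon(socket)); // pipes socket <-> terminal, stripping ie_us/ie_ue status frames
terminal.open(document.getElementById("terminal-root") as HTMLElement);      // placeholder container element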

View File

@@ -1,84 +0,0 @@
/**
* Copyright (c) Microsoft Corporation. All rights reserved.
* Common utility functions for CloudShell
*/
import { Terminal } from "xterm";
import { terminalLog } from "./LogFormatter";
/**
* Utility function to wait for a specified duration
*/
export const wait = (ms: number): Promise<void> => new Promise(resolve => setTimeout(resolve, ms));
/**
* Utility function to ask a question in the terminal
*/
export const askQuestion = (terminal: Terminal, question: string, defaultAnswer: string = ""): Promise<string> => {
return new Promise<string>((resolve) => {
const prompt = terminalLog.prompt(`${question} (${defaultAnswer}): `);
terminal.writeln(prompt);
terminal.focus();
let response = "";
const dataListener = terminal.onData((data: string) => {
if (data === "\r") { // Enter key pressed
terminal.writeln(""); // Move to a new line
dataListener.dispose();
if (response.trim() === "") {
response = defaultAnswer; // Use default answer if no input
}
return resolve(response.trim());
} else if (data === "\u007F" || data === "\b") { // Handle backspace
if (response.length > 0) {
response = response.slice(0, -1);
terminal.write("\x1B[D \x1B[D"); // Move cursor back, clear character
}
} else if (data.charCodeAt(0) >= 32) { // Ignore control characters
response += data;
terminal.write(data); // Display typed characters
}
});
// Prevent cursor movement beyond the prompt
terminal.onKey(({ domEvent }: { domEvent: KeyboardEvent }) => {
if (domEvent.key === "ArrowLeft" && response.length === 0) {
domEvent.preventDefault(); // Stop moving left beyond the question
}
});
});
};
/**
* Utility function to ask for yes/no confirmation
*/
export const askConfirmation = async (terminal: Terminal, question: string): Promise<boolean> => {
terminal.writeln("");
terminal.writeln(terminalLog.prompt(`${question} (Y/N)`));
terminal.focus();
return new Promise<boolean>((resolve) => {
const keyListener = terminal.onKey(({ key }: { key: string }) => {
keyListener.dispose();
terminal.writeln("");
if (key.toLowerCase() === 'y') {
resolve(true);
} else {
resolve(false);
}
});
});
};
/**
* Extract host from a URL
*/
export const getHostFromUrl = (url: string): string => {
try {
const urlObj = new URL(url);
return urlObj.hostname;
} catch (error) {
console.error("Invalid URL:", error);
return "";
}
};
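A brief sketch of how these prompts compose in a provisioning flow (the question text, default value, and helper name are illustrative):
import { Terminal } from "xterm";
import { askConfirmation, askQuestion } from "./CommonUtils"; // path assumed to be this file

async function confirmRegion(terminal: Terminal): Promise<string | undefined> {
  const region = await askQuestion(terminal, "Which region should CloudShell use?", "westus"); // default on empty input
  const proceed = await askConfirmation(terminal, `Provision CloudShell in ${region}?`);       // true only on 'y'/'Y'
  return proceed ? region : undefined;
}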

View File

@@ -1,28 +0,0 @@
/**
* Standardized terminal logging functions for consistent formatting
*/
export const terminalLog = {
// Section headers
header: (message: string) => `\n\x1B[1;34m┌─ ${message} ${"─".repeat(Math.max(45 - message.length, 0))}\x1B[0m`,
subheader: (message: string) => `\x1B[1;36m├ ${message}\x1B[0m`,
sectionEnd: () => `\x1B[1;34m└${"─".repeat(50)}\x1B[0m\n`,
// Status messages
success: (message: string) => `\x1B[32m✓ ${message}\x1B[0m`,
warning: (message: string) => `\x1B[33m⚠ ${message}\x1B[0m`,
error: (message: string) => `\x1B[31m✗ ${message}\x1B[0m`,
info: (message: string) => `\x1B[34m${message}\x1B[0m`,
// Resource information
database: (message: string) => `\x1B[35m🔶 Database: ${message}\x1B[0m`,
vnet: (message: string) => `\x1B[36m🔷 Network: ${message}\x1B[0m`,
cloudshell: (message: string) => `\x1B[32m🔷 CloudShell: ${message}\x1B[0m`,
// Data formatting
item: (label: string, value: string) => `${label}: \x1B[32m${value}\x1B[0m`,
progress: (operation: string, status: string) => `\x1B[34m${operation}: \x1B[36m${status}\x1B[0m`,
// User interaction
prompt: (message: string) => `\x1B[1;37m${message}\x1B[0m`,
separator: () => `\x1B[30;1m${"─".repeat(50)}\x1B[0m`
};
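An illustrative composition of these formatters, assuming an xterm Terminal named terminal is in scope (the values shown are placeholders):
terminal.writeln(terminalLog.header("Network configuration"));
terminal.writeln(terminalLog.item("Account", "my-cosmos-account"));      // placeholder value
terminal.writeln(terminalLog.progress("Firewall check", "in progress"));
terminal.writeln(terminalLog.success("Public access verified"));
terminal.writeln(terminalLog.sectionEnd());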

View File

@@ -1150,16 +1150,27 @@ export const DocumentsTabComponent: React.FunctionComponent<IDocumentsTabCompone
deletePromise = _bulkDeleteNoSqlDocuments(_collection, toDeleteDocumentIds);
}
} else {
deletePromise = MongoProxyClient.deleteDocuments(
_collection.databaseId,
_collection as ViewModels.Collection,
toDeleteDocumentIds,
).then(({ deletedCount, isAcknowledged }) => {
if (deletedCount === toDeleteDocumentIds.length && isAcknowledged) {
return toDeleteDocumentIds;
}
throw new Error(`Delete failed with deletedCount: ${deletedCount} and isAcknowledged: ${isAcknowledged}`);
});
if (isMongoBulkDeleteDisabled) {
// TODO: Once new mongo proxy is available for all users, remove the call for MongoProxyClient.deleteDocument().
// MongoProxyClient.deleteDocuments() should be called for all users.
deletePromise = MongoProxyClient.deleteDocument(
_collection.databaseId,
_collection as ViewModels.Collection,
toDeleteDocumentIds[0],
).then(() => [toDeleteDocumentIds[0]]);
// ----------------------------------------------------------------------------------------------------
} else {
deletePromise = MongoProxyClient.deleteDocuments(
_collection.databaseId,
_collection as ViewModels.Collection,
toDeleteDocumentIds,
).then(({ deletedCount, isAcknowledged }) => {
if (deletedCount === toDeleteDocumentIds.length && isAcknowledged) {
return toDeleteDocumentIds;
}
throw new Error(`Delete failed with deletedCount: ${deletedCount} and isAcknowledged: ${isAcknowledged}`);
});
}
}
return deletePromise
@@ -2043,8 +2054,11 @@ export const DocumentsTabComponent: React.FunctionComponent<IDocumentsTabCompone
}
}, [prevSelectedColumnIds, refreshDocumentsGrid, selectedColumnIds]);
// TODO: remove isMongoBulkDeleteDisabled when new mongo proxy is enabled for all users
// TODO: remove partitionKey.systemKey when JS SDK bug is fixed
const isBulkDeleteDisabled = partitionKey.systemKey && !isPreferredApiMongoDB;
const isMongoBulkDeleteDisabled = !MongoProxyClient.useMongoProxyEndpoint(Constants.MongoProxyApi.BulkDelete);
const isBulkDeleteDisabled =
(partitionKey.systemKey && !isPreferredApiMongoDB) || (isPreferredApiMongoDB && isMongoBulkDeleteDisabled);
// -------------------------------------------------------
const getFilterChoices = (): InputDatalistDropdownOptionSection[] => {

View File

@@ -1,4 +1,6 @@
import { useMongoProxyEndpoint } from "Common/MongoProxyClient";
import React, { Component } from "react";
import * as Constants from "../../../Common/Constants";
import { configContext } from "../../../ConfigContext";
import * as ViewModels from "../../../Contracts/ViewModels";
import { Action, ActionModifiers } from "../../../Shared/Telemetry/TelemetryConstants";
@@ -48,13 +50,15 @@ export default class MongoShellTabComponent extends Component<
IMongoShellTabComponentStates
> {
private _logTraces: Map<string, number>;
private _useMongoProxyEndpoint: boolean;
constructor(props: IMongoShellTabComponentProps) {
super(props);
this._logTraces = new Map();
this._useMongoProxyEndpoint = useMongoProxyEndpoint(Constants.MongoProxyApi.LegacyMongoShell);
this.state = {
url: getMongoShellUrl(),
url: getMongoShellUrl(this._useMongoProxyEndpoint),
};
props.onMongoShellTabAccessor({
@@ -109,9 +113,17 @@ export default class MongoShellTabComponent extends Component<
const resourceId = databaseAccount?.id;
const accountName = databaseAccount?.name;
const documentEndpoint = databaseAccount?.properties.mongoEndpoint || databaseAccount?.properties.documentEndpoint;
const mongoEndpoint =
documentEndpoint.substr(
Constants.MongoDBAccounts.protocol.length + 3,
documentEndpoint.length -
(Constants.MongoDBAccounts.protocol.length + 2 + Constants.MongoDBAccounts.defaultPort.length),
) + Constants.MongoDBAccounts.defaultPort.toString();
const databaseId = this.props.collection.databaseId;
const collectionId = this.props.collection.id();
const apiEndpoint = configContext.MONGO_PROXY_ENDPOINT;
const apiEndpoint = this._useMongoProxyEndpoint
? configContext.MONGO_PROXY_ENDPOINT
: configContext.BACKEND_ENDPOINT;
const encryptedAuthToken: string = userContext.accessToken;
shellIframe.contentWindow.postMessage(
@@ -120,7 +132,7 @@ export default class MongoShellTabComponent extends Component<
data: {
resourceId: resourceId,
accountName: accountName,
mongoEndpoint: documentEndpoint,
mongoEndpoint: this._useMongoProxyEndpoint ? documentEndpoint : mongoEndpoint,
authorization: authorization,
databaseId: databaseId,
collectionId: collectionId,

View File

@@ -2,6 +2,8 @@ import { Platform, resetConfigContext, updateConfigContext } from "../../../Conf
import { updateUserContext, userContext } from "../../../UserContext";
import { getMongoShellUrl } from "./getMongoShellUrl";
const mongoBackendEndpoint = "https://localhost:1234";
describe("getMongoShellUrl", () => {
let queryString = "";
@@ -9,6 +11,7 @@ describe("getMongoShellUrl", () => {
resetConfigContext();
updateConfigContext({
BACKEND_ENDPOINT: mongoBackendEndpoint,
platform: Platform.Hosted,
});
@@ -34,7 +37,12 @@ describe("getMongoShellUrl", () => {
queryString = `resourceId=${userContext.databaseAccount.id}&accountName=${userContext.databaseAccount.name}&mongoEndpoint=${userContext.databaseAccount.properties.documentEndpoint}`;
});
it("should return /index.html by default", () => {
expect(getMongoShellUrl().toString()).toContain(`/index.html?${queryString}`);
it("should return /indexv2.html by default", () => {
expect(getMongoShellUrl().toString()).toContain(`/indexv2.html?${queryString}`);
});
it("should return /index.html when useMongoProxyEndpoint is true", () => {
const useMongoProxyEndpoint: boolean = true;
expect(getMongoShellUrl(useMongoProxyEndpoint).toString()).toContain(`/index.html?${queryString}`);
});
});

View File

@@ -1,11 +1,11 @@
import { userContext } from "../../../UserContext";
export function getMongoShellUrl(): string {
export function getMongoShellUrl(useMongoProxyEndpoint?: boolean): string {
const { databaseAccount: account } = userContext;
const resourceId = account?.id;
const accountName = account?.name;
const mongoEndpoint = account?.properties?.mongoEndpoint || account?.properties?.documentEndpoint;
const queryString = `resourceId=${resourceId}&accountName=${accountName}&mongoEndpoint=${mongoEndpoint}`;
return `/mongoshell/index.html?${queryString}`;
return useMongoProxyEndpoint ? `/mongoshell/index.html?${queryString}` : `/mongoshell/indexv2.html?${queryString}`;
}
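A hedged illustration of the two pages this helper resolves to (query-string values come from userContext and are elided here):
getMongoShellUrl(true);  // "/mongoshell/index.html?..."   - legacy shell page, used with the new Mongo proxy endpoint
getMongoShellUrl(false); // "/mongoshell/indexv2.html?..." - v2 page, used with the legacy BACKEND_ENDPOINT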

View File

@@ -375,7 +375,6 @@ class QueryTabComponentImpl extends React.Component<QueryTabComponentImplProps,
ruCapPerOperation: ruThreshold,
} as QueryOperationOptions;
}
const queryDocuments = async (firstItemIndex: number) =>
await queryDocumentsPage(
this.props.collection && this.props.collection.id(),

View File

@@ -1,6 +1,5 @@
import { configContext } from "ConfigContext";
import * as DataModels from "Contracts/DataModels";
import * as ViewModels from "Contracts/ViewModels";
import { userContext } from "UserContext";
import { armRequest } from "Utils/arm/request";
@@ -11,8 +10,16 @@ export async function checkFirewallRules(
setMessageFunc?: (message: string) => void,
message?: string,
): Promise<void> {
const isEnabled = await callFirewallAPis(apiVersion, firewallRulesPredicate);
const firewallRulesUri = `${userContext.databaseAccount.id}/firewallRules`;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const response: any = await armRequest({
host: configContext.ARM_ENDPOINT,
path: firewallRulesUri,
method: "GET",
apiVersion: apiVersion,
});
const firewallRules: DataModels.FirewallRule[] = response?.data?.value || response?.value || [];
const isEnabled = firewallRules.some(firewallRulesPredicate);
if (isAllPublicIPAddressesEnabled) {
isAllPublicIPAddressesEnabled(isEnabled);
@@ -35,89 +42,3 @@ export async function checkFirewallRules(
);
}
}
export async function callFirewallAPis(
apiVersion: string,
firewallRulesPredicate: (rule: DataModels.FirewallRule) => unknown):
Promise<boolean> {
const firewallRulesUri = `${userContext.databaseAccount.id}/firewallRules`;
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const response: any = await armRequest({
host: configContext.ARM_ENDPOINT,
path: firewallRulesUri,
method: "GET",
apiVersion: apiVersion,
});
const firewallRules: DataModels.FirewallRule[] = response?.data?.value || response?.value || [];
const isEnabled = firewallRules.some(firewallRulesPredicate);
return isEnabled;
}
export async function checkNetworkRules(kind: ViewModels.TerminalKind, isPublicAccessEnabledFlag: ko.Observable<boolean> | React.Dispatch<React.SetStateAction<boolean>>): Promise<void> {
if (kind === ViewModels.TerminalKind.Postgres) {
await checkFirewallRules(
"2022-11-08",
(rule) => rule.properties.startIpAddress === "0.0.0.0" && rule.properties.endIpAddress === "255.255.255.255",
isPublicAccessEnabledFlag,
);
}
if (kind === ViewModels.TerminalKind.VCoreMongo) {
await checkFirewallRules(
"2023-03-01-preview",
(rule) =>
rule.name.startsWith("AllowAllAzureServicesAndResourcesWithinAzureIps") ||
(rule.properties.startIpAddress === "0.0.0.0" && rule.properties.endIpAddress === "255.255.255.255"),
isPublicAccessEnabledFlag,
);
}
}
export async function IsPublicAccessAvailable(kind: ViewModels.TerminalKind): Promise<boolean> {
if (kind === ViewModels.TerminalKind.Postgres) {
return await callFirewallAPis(
"2022-11-08",
(rule) => rule.properties.startIpAddress === "0.0.0.0" && rule.properties.endIpAddress === "255.255.255.255"
);
}
if (kind === ViewModels.TerminalKind.VCoreMongo) {
return await callFirewallAPis(
"2023-03-01-preview",
(rule) =>
rule.name.startsWith("AllowAllAzureServicesAndResourcesWithinAzureIps") ||
(rule.properties.startIpAddress === "0.0.0.0" && rule.properties.endIpAddress === "255.255.255.255")
);
}
return !hasDatabaseNetworkRestrictions();
}
/**
* Checks if the database account has network restrictions
*/
const hasDatabaseNetworkRestrictions = (): boolean => {
return hasVNetRestrictions() || hasFirewallRestrictions() || hasPrivateEndpointsRestrictions();
};
/**
* Checks if the database account has Private Endpoint restrictions
*/
export const hasPrivateEndpointsRestrictions = (): boolean => {
return userContext.databaseAccount.properties.privateEndpointConnections && userContext.databaseAccount.properties.privateEndpointConnections.length > 0;
};
/**
* Checks if the database account has Firewall restrictions
*/
export const hasFirewallRestrictions = (): boolean => {
return userContext.databaseAccount.properties.isVirtualNetworkFilterEnabled;
};
/**
* Checks if the database account has VNet restrictions
*/
export const hasVNetRestrictions = (): boolean => {
return userContext.databaseAccount.properties.virtualNetworkRules && userContext.databaseAccount.properties.virtualNetworkRules.length > 0;
};

View File

@@ -1,3 +1,7 @@
import { IMessageBarStyles, MessageBar, MessageBarType } from "@fluentui/react";
import { CassandraProxyEndpoints, MongoProxyEndpoints } from "Common/Constants";
import { configContext } from "ConfigContext";
import { IpRule } from "Contracts/DataModels";
import { CollectionTabKind } from "Contracts/ViewModels";
import Explorer from "Explorer/Explorer";
import { useCommandBar } from "Explorer/Menus/CommandBar/CommandBarComponentAdapter";
@@ -8,8 +12,10 @@ import { PostgresConnectTab } from "Explorer/Tabs/PostgresConnectTab";
import { QuickstartTab } from "Explorer/Tabs/QuickstartTab";
import { VcoreMongoConnectTab } from "Explorer/Tabs/VCoreMongoConnectTab";
import { VcoreMongoQuickstartTab } from "Explorer/Tabs/VCoreMongoQuickstartTab";
import { LayoutConstants } from "Explorer/Theme/ThemeUtil";
import { KeyboardAction, KeyboardActionGroup, useKeyboardActionGroup } from "KeyboardShortcuts";
import { userContext } from "UserContext";
import { CassandraProxyOutboundIPs, MongoProxyOutboundIPs, PortalBackendIPs } from "Utils/EndpointUtils";
import { useTeachingBubble } from "hooks/useTeachingBubble";
import ko from "knockout";
import React, { MutableRefObject, useEffect, useRef, useState } from "react";
@@ -28,6 +34,10 @@ interface TabsProps {
export const Tabs = ({ explorer }: TabsProps): JSX.Element => {
const { openedTabs, openedReactTabs, activeTab, activeReactTab } = useTabs();
const [
showMongoAndCassandraProxiesNetworkSettingsWarningState,
setShowMongoAndCassandraProxiesNetworkSettingsWarningState,
] = useState<boolean>(showMongoAndCassandraProxiesNetworkSettingsWarning());
const setKeyboardHandlers = useKeyboardActionGroup(KeyboardActionGroup.TABS);
useEffect(() => {
@@ -38,8 +48,28 @@ export const Tabs = ({ explorer }: TabsProps): JSX.Element => {
});
}, [setKeyboardHandlers]);
const defaultMessageBarStyles: IMessageBarStyles = {
root: {
height: `${LayoutConstants.rowHeight}px`,
overflow: "hidden",
flexDirection: "row",
},
};
return (
<div className="tabsManagerContainer">
{showMongoAndCassandraProxiesNetworkSettingsWarningState && (
<MessageBar
messageBarType={MessageBarType.warning}
styles={defaultMessageBarStyles}
onDismiss={() => {
setShowMongoAndCassandraProxiesNetworkSettingsWarningState(false);
}}
>
{`We have migrated our middleware to new infrastructure. To avoid issues with Data Explorer access, please
re-enable "Allow access from Azure Portal" on the Networking blade for your account.`}
</MessageBar>
)}
<div className="nav-tabs-margin">
<ul className="nav nav-tabs level navTabHeight" id="navTabs" role="tablist">
{openedReactTabs.map((tab) => (
@@ -284,3 +314,57 @@ const getReactTabContent = (activeReactTab: ReactTabKind, explorer: Explorer): J
throw Error(`Unsupported tab kind ${ReactTabKind[activeReactTab]}`);
}
};
const showMongoAndCassandraProxiesNetworkSettingsWarning = (): boolean => {
const ipRules: IpRule[] = userContext.databaseAccount?.properties?.ipRules;
if (
((userContext.apiType === "Mongo" && configContext.MONGO_PROXY_ENDPOINT !== MongoProxyEndpoints.Development) ||
(userContext.apiType === "Cassandra" &&
configContext.CASSANDRA_PROXY_ENDPOINT !== CassandraProxyEndpoints.Development)) &&
ipRules?.length
) {
const legacyPortalBackendIPs: string[] = PortalBackendIPs[configContext.BACKEND_ENDPOINT];
const ipAddressesFromIPRules: string[] = ipRules.map((ipRule) => ipRule.ipAddressOrRange);
const ipRulesIncludeLegacyPortalBackend: boolean = legacyPortalBackendIPs.every((legacyPortalBackendIP: string) =>
ipAddressesFromIPRules.includes(legacyPortalBackendIP),
);
if (!ipRulesIncludeLegacyPortalBackend) {
return false;
}
if (userContext.apiType === "Mongo") {
const isProdOrMpacMongoProxyEndpoint: boolean = [MongoProxyEndpoints.Mpac, MongoProxyEndpoints.Prod].includes(
configContext.MONGO_PROXY_ENDPOINT,
);
const mongoProxyOutboundIPs: string[] = isProdOrMpacMongoProxyEndpoint
? [...MongoProxyOutboundIPs[MongoProxyEndpoints.Mpac], ...MongoProxyOutboundIPs[MongoProxyEndpoints.Prod]]
: MongoProxyOutboundIPs[configContext.MONGO_PROXY_ENDPOINT];
const ipRulesIncludeMongoProxy: boolean = mongoProxyOutboundIPs.every((mongoProxyOutboundIP: string) =>
ipAddressesFromIPRules.includes(mongoProxyOutboundIP),
);
return !ipRulesIncludeMongoProxy;
} else if (userContext.apiType === "Cassandra") {
const isProdOrMpacCassandraProxyEndpoint: boolean = [
CassandraProxyEndpoints.Mpac,
CassandraProxyEndpoints.Prod,
].includes(configContext.CASSANDRA_PROXY_ENDPOINT);
const cassandraProxyOutboundIPs: string[] = isProdOrMpacCassandraProxyEndpoint
? [
...CassandraProxyOutboundIPs[CassandraProxyEndpoints.Mpac],
...CassandraProxyOutboundIPs[CassandraProxyEndpoints.Prod],
]
: CassandraProxyOutboundIPs[configContext.CASSANDRA_PROXY_ENDPOINT];
const ipRulesIncludeCassandraProxy: boolean = cassandraProxyOutboundIPs.every(
(cassandraProxyOutboundIP: string) => ipAddressesFromIPRules.includes(cassandraProxyOutboundIP),
);
return !ipRulesIncludeCassandraProxy;
}
}
return false;
};
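To make the IP-rule comparison concrete, a hedged sketch with hypothetical addresses: the warning is shown only when the legacy backend IPs are still allow-listed but the new proxy outbound IPs are not.
const ipAddressesFromIPRules = ["40.80.1.1", "40.80.1.2"]; // hypothetical account ipRules
const legacyPortalBackendIPs = ["40.80.1.1"];              // hypothetical legacy backend IP
const mongoProxyOutboundIPs = ["52.10.2.2"];               // hypothetical proxy outbound IPs
const showWarning =
  legacyPortalBackendIPs.every((ip) => ipAddressesFromIPRules.includes(ip)) &&
  !mongoProxyOutboundIPs.every((ip) => ipAddressesFromIPRules.includes(ip)); // true in this example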

View File

@@ -1,7 +1,7 @@
import { Spinner, SpinnerSize } from "@fluentui/react";
import { MessageTypes } from "Contracts/ExplorerContracts";
import { QuickstartFirewallNotification } from "Explorer/Quickstart/QuickstartFirewallNotification";
import { checkNetworkRules } from "Explorer/Tabs/Shared/CheckFirewallRules";
import { checkFirewallRules } from "Explorer/Tabs/Shared/CheckFirewallRules";
import * as ko from "knockout";
import * as React from "react";
import FirewallRuleScreenshot from "../../../images/firewallRule.png";
@@ -13,10 +13,8 @@ import { CommandButtonComponentProps } from "../Controls/CommandButton/CommandBu
import { NotebookTerminalComponent } from "../Controls/Notebook/NotebookTerminalComponent";
import Explorer from "../Explorer";
import { useNotebook } from "../Notebook/useNotebook";
import { CloudShellTerminalComponent } from "./CloudShellTab/CloudShellTabComponent";
import TabsBase from "./TabsBase";
export interface TerminalTabOptions extends ViewModels.TabOptions {
account: DataModels.DatabaseAccount;
container: Explorer;
@@ -45,98 +43,81 @@ class NotebookTerminalComponentAdapter implements ReactAdapter {
<QuickstartFirewallNotification
messageType={MessageTypes.OpenPostgresNetworkingBlade}
screenshot={FirewallRuleScreenshot}
shellName={getShellNameForDisplay(this.kind)}
shellName={this.getShellNameForDisplay(this.kind)}
/>
);
}
return this.parameters() ? (
<NotebookTerminalComponent
notebookServerInfo={this.getNotebookServerInfo()}
databaseAccount={this.getDatabaseAccount()}
tabId={this.getTabId()}
username={this.getUsername()}
/>): (
<Spinner styles={{ root: { marginTop: 10 } }} size={SpinnerSize.large}></Spinner>
);
}
}
/**
* CloudShell terminal tab
*/
class CloudShellTerminalComponentAdapter implements ReactAdapter {
// parameters: true: show, false: hide
public parameters: ko.Computed<boolean>;
constructor(
private kind: ViewModels.TerminalKind,
) {}
public renderComponent(): JSX.Element {
console.log("this.parameters() " + this.parameters() );
return this.parameters() ? (
<CloudShellTerminalComponent
shellType={this.kind}
/>
) : (
<Spinner styles={{ root: { marginTop: 10 } }} size={SpinnerSize.large}></Spinner>
);
}
}
export const getShellNameForDisplay = (terminalKind: ViewModels.TerminalKind): string => {
switch (terminalKind) {
case ViewModels.TerminalKind.Postgres:
return "PostgreSQL";
case ViewModels.TerminalKind.Mongo:
case ViewModels.TerminalKind.VCoreMongo:
return "MongoDB";
default:
return "";
private getShellNameForDisplay(terminalKind: ViewModels.TerminalKind): string {
switch (terminalKind) {
case ViewModels.TerminalKind.Postgres:
return "PostgreSQL";
case ViewModels.TerminalKind.Mongo:
case ViewModels.TerminalKind.VCoreMongo:
return "MongoDB";
default:
return "";
}
}
}
export default class TerminalTab extends TabsBase {
public readonly html = '<div style="height: 100%" data-bind="react: terminalComponentAdapter"></div>';
public readonly html = '<div style="height: 100%" data-bind="react:notebookTerminalComponentAdapter"></div> ';
private container: Explorer;
private terminalComponentAdapter: any;
private notebookTerminalComponentAdapter: NotebookTerminalComponentAdapter;
private isAllPublicIPAddressesEnabled: ko.Observable<boolean>;
constructor (options: TerminalTabOptions) {
constructor(options: TerminalTabOptions) {
super(options);
this.container = options.container;
this.isAllPublicIPAddressesEnabled = ko.observable(true);
checkNetworkRules(options.kind, this.isAllPublicIPAddressesEnabled);
this.initializeNotebookTerminalAdapter(options);
}
private async initializeNotebookTerminalAdapter(options: TerminalTabOptions): Promise<void> {
if (userContext.features.enableCloudShell) {
this.terminalComponentAdapter = new CloudShellTerminalComponentAdapter(
options.kind
);
this.terminalComponentAdapter.parameters = ko.computed<boolean>(() =>
this.isTemplateReady()
);
}
else {
this.terminalComponentAdapter = new NotebookTerminalComponentAdapter(
() => this.getNotebookServerInfo(options),
() => userContext?.databaseAccount,
() => this.tabId,
() => this.getUsername(),
this.isAllPublicIPAddressesEnabled,
options.kind
);
this.terminalComponentAdapter.parameters = ko.computed<boolean>(() =>
this.notebookTerminalComponentAdapter = new NotebookTerminalComponentAdapter(
() => this.getNotebookServerInfo(options),
() => userContext?.databaseAccount,
() => this.tabId,
() => this.getUsername(),
this.isAllPublicIPAddressesEnabled,
options.kind,
);
this.notebookTerminalComponentAdapter.parameters = ko.computed<boolean>(() => {
if (
this.isTemplateReady() &&
useNotebook.getState().isNotebookEnabled &&
useNotebook.getState().notebookServerInfo?.notebookServerEndpoint &&
this.isAllPublicIPAddressesEnabled()
) {
return true;
}
return false;
});
if (options.kind === ViewModels.TerminalKind.Postgres) {
checkFirewallRules(
"2022-11-08",
(rule) => rule.properties.startIpAddress === "0.0.0.0" && rule.properties.endIpAddress === "255.255.255.255",
this.isAllPublicIPAddressesEnabled,
);
}
if (options.kind === ViewModels.TerminalKind.VCoreMongo) {
checkFirewallRules(
"2023-03-01-preview",
(rule) =>
rule.name.startsWith("AllowAllAzureServicesAndResourcesWithinAzureIps") ||
(rule.properties.startIpAddress === "0.0.0.0" && rule.properties.endIpAddress === "255.255.255.255"),
this.isAllPublicIPAddressesEnabled,
);
}
}

View File

@@ -1,11 +1,13 @@
import { useBoolean } from "@fluentui/react-hooks";
import { userContext } from "UserContext";
import { useNewPortalBackendEndpoint } from "Utils/EndpointUtils";
import * as React from "react";
import ConnectImage from "../../../../images/HdeConnectCosmosDB.svg";
import ErrorImage from "../../../../images/error.svg";
import { AuthType } from "../../../AuthType";
import { HttpHeaders } from "../../../Common/Constants";
import { BackendApi, HttpHeaders } from "../../../Common/Constants";
import { configContext } from "../../../ConfigContext";
import { GenerateTokenResponse } from "../../../Contracts/DataModels";
import { isResourceTokenConnectionString } from "../Helpers/ResourceTokenUtils";
interface Props {
@@ -17,6 +19,10 @@ interface Props {
}
export const fetchEncryptedToken = async (connectionString: string): Promise<string> => {
if (!useNewPortalBackendEndpoint(BackendApi.GenerateToken)) {
return await fetchEncryptedToken_ToBeDeprecated(connectionString);
}
const headers = new Headers();
headers.append(HttpHeaders.connectionString, connectionString);
const url = configContext.PORTAL_BACKEND_ENDPOINT + "/api/connectionstring/token/generatetoken";
@@ -29,10 +35,28 @@ export const fetchEncryptedToken = async (connectionString: string): Promise<str
return decodeURIComponent(encryptedTokenResponse);
};
export const fetchEncryptedToken_ToBeDeprecated = async (connectionString: string): Promise<string> => {
const headers = new Headers();
headers.append(HttpHeaders.connectionString, connectionString);
const url = configContext.BACKEND_ENDPOINT + "/api/guest/tokens/generateToken";
const response = await fetch(url, { headers, method: "POST" });
if (!response.ok) {
throw response;
}
// This API has a quirk where it must be parsed twice
const result: GenerateTokenResponse = JSON.parse(await response.json());
return decodeURIComponent(result.readWrite || result.read);
};
export const isAccountRestrictedForConnectionStringLogin = async (connectionString: string): Promise<boolean> => {
const headers = new Headers();
headers.append(HttpHeaders.connectionString, connectionString);
const url = configContext.PORTAL_BACKEND_ENDPOINT + "/api/guest/accountrestrictions/checkconnectionstringlogin";
const backendEndpoint: string = useNewPortalBackendEndpoint(BackendApi.AccountRestrictions)
? configContext.PORTAL_BACKEND_ENDPOINT
: configContext.BACKEND_ENDPOINT;
const url = backendEndpoint + "/api/guest/accountrestrictions/checkconnectionstringlogin";
const response = await fetch(url, { headers, method: "POST" });
if (!response.ok) {
throw response;

View File

@@ -16,7 +16,6 @@ export type Features = {
readonly enableAadDataPlane: boolean;
readonly enableResourceGraph: boolean;
readonly enableKoResourceTree: boolean;
readonly enableThroughputBuckets: boolean;
readonly hostedDataExplorer: boolean;
readonly junoEndpoint?: string;
readonly phoenixEndpoint?: string;
@@ -39,7 +38,6 @@ export type Features = {
readonly copilotChatFixedMonacoEditorHeight: boolean;
readonly enablePriorityBasedExecution: boolean;
readonly disableConnectionStringLogin: boolean;
readonly enableCloudShell: boolean;
// can be set via both flight and feature flag
autoscaleDefault: boolean;
@@ -83,7 +81,6 @@ export function extractFeatures(given = new URLSearchParams(window.location.sear
enableSpark: "true" === get("enablespark"),
enableTtl: "true" === get("enablettl"),
enableKoResourceTree: "true" === get("enablekoresourcetree"),
enableThroughputBuckets: "true" === get("enablethroughputbuckets"),
executeSproc: "true" === get("dataexplorerexecutesproc"),
hostedDataExplorer: "true" === get("hosteddataexplorerenabled"),
mongoProxyEndpoint: get("mongoproxyendpoint"),
@@ -111,7 +108,6 @@ export function extractFeatures(given = new URLSearchParams(window.location.sear
copilotChatFixedMonacoEditorHeight: "true" === get("copilotchatfixedmonacoeditorheight"),
enablePriorityBasedExecution: "true" === get("enableprioritybasedexecution"),
disableConnectionStringLogin: "true" === get("disableconnectionstringlogin"),
enableCloudShell: "true" === get("enablecloudshell"),
};
}

View File

@@ -46,14 +46,14 @@ export function decryptJWTToken(token: string) {
return JSON.parse(tokenPayload);
}
export async function getMsalInstance(clientId: string = "203f1145-856a-4232-83d4-a43568fba23d"){
export async function getMsalInstance() {
const msalConfig: msal.Configuration = {
cache: {
cacheLocation: "localStorage",
},
auth: {
authority: `${configContext.AAD_ENDPOINT}organizations`,
clientId: clientId,
clientId: "203f1145-856a-4232-83d4-a43568fba23d",
},
};
@@ -68,8 +68,7 @@ export async function getMsalInstance(clientId: string = "203f1145-856a-4232-83d
export async function acquireMsalTokenForAccount(
account: DatabaseAccount,
silent: boolean = false,
clientId: string = "203f1145-856a-4232-83d4-a43568fba23d",
user_hint?: string
user_hint?: string,
) {
if (userContext.databaseAccount.properties?.documentEndpoint === undefined) {
throw new Error("Database account has no document endpoint defined");
@@ -78,7 +77,7 @@ export async function acquireMsalTokenForAccount(
/\/+$/,
"/.default",
);
const msalInstance = await getMsalInstance(clientId);
const msalInstance = await getMsalInstance();
const knownAccounts = msalInstance.getAllAccounts();
// If user_hint is provided, we will try to use it to find the account.
// If no account is found, we will use the current active account or first account in the list.

View File

@@ -1,4 +1,11 @@
import { CassandraProxyEndpoints, JunoEndpoints, MongoProxyEndpoints, PortalBackendEndpoints } from "Common/Constants";
import {
BackendApi,
CassandraProxyEndpoints,
JunoEndpoints,
MongoProxyEndpoints,
PortalBackendEndpoints,
} from "Common/Constants";
import { configContext } from "ConfigContext";
import * as Logger from "../Common/Logger";
export function validateEndpoint(
@@ -66,6 +73,9 @@ export const PortalBackendIPs: { [key: string]: string[] } = {
//"https://main2.documentdb.ext.azure.com": ["104.42.196.69"],
"https://main.documentdb.ext.azure.cn": ["139.217.8.252"],
"https://main.documentdb.ext.azure.us": ["52.244.48.71"],
// Add ussec and usnat when endpoint address is known:
//ussec: ["29.26.26.67", "29.26.26.66"],
//usnat: ["7.28.202.68"],
};
export const PortalBackendOutboundIPs: { [key: string]: string[] } = {
@@ -90,6 +100,14 @@ export const defaultAllowedMongoProxyEndpoints: ReadonlyArray<string> = [
MongoProxyEndpoints.Mooncake,
];
export const allowedMongoProxyEndpoints_ToBeDeprecated: ReadonlyArray<string> = [
"https://main.documentdb.ext.azure.com",
"https://main.documentdb.ext.azure.cn",
"https://main.documentdb.ext.azure.us",
"https://main.cosmos.ext.azure",
"https://localhost:12901",
];
export const defaultAllowedCassandraProxyEndpoints: ReadonlyArray<string> = [
CassandraProxyEndpoints.Development,
CassandraProxyEndpoints.Mpac,
@@ -135,3 +153,53 @@ export const allowedJunoOrigins: ReadonlyArray<string> = [
];
export const allowedNotebookServerUrls: ReadonlyArray<string> = [];
//
// Temporary function to determine if a portal backend API is supported by the
// new backend in this environment.
//
// TODO: Remove this function once new backend migration is completed for all environments.
//
export function useNewPortalBackendEndpoint(backendApi: string): boolean {
// This maps backend APIs to the environments supported by the new backend.
const newBackendApiEnvironmentMap: { [key: string]: string[] } = {
[BackendApi.GenerateToken]: [
PortalBackendEndpoints.Development,
PortalBackendEndpoints.Mpac,
PortalBackendEndpoints.Prod,
],
[BackendApi.PortalSettings]: [
PortalBackendEndpoints.Development,
PortalBackendEndpoints.Mpac,
PortalBackendEndpoints.Prod,
],
[BackendApi.AccountRestrictions]: [
PortalBackendEndpoints.Development,
PortalBackendEndpoints.Mpac,
PortalBackendEndpoints.Prod,
],
[BackendApi.RuntimeProxy]: [
PortalBackendEndpoints.Development,
PortalBackendEndpoints.Mpac,
PortalBackendEndpoints.Prod,
],
[BackendApi.DisallowedLocations]: [
PortalBackendEndpoints.Development,
PortalBackendEndpoints.Mpac,
PortalBackendEndpoints.Prod,
PortalBackendEndpoints.Fairfax,
PortalBackendEndpoints.Mooncake,
],
[BackendApi.SampleData]: [
PortalBackendEndpoints.Development,
PortalBackendEndpoints.Mpac,
PortalBackendEndpoints.Prod,
],
};
if (!newBackendApiEnvironmentMap[backendApi] || !configContext.PORTAL_BACKEND_ENDPOINT) {
return false;
}
return newBackendApiEnvironmentMap[backendApi].includes(configContext.PORTAL_BACKEND_ENDPOINT);
}
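A usage sketch mirroring how callers in this change pick an endpoint per API (the pattern follows the connection-string token code above):
const generateTokenEndpoint: string = useNewPortalBackendEndpoint(BackendApi.GenerateToken)
  ? configContext.PORTAL_BACKEND_ENDPOINT
  : configContext.BACKEND_ENDPOINT; // fall back to the legacy backend until migration completes for this environment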

View File

@@ -3,13 +3,13 @@
Run "npm run generateARMClients" to regenerate
Editing this file directly should be done with extreme caution so as not to diverge from ARM REST specs
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-12-01-preview/cosmos-db.json
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json
*/
import { configContext } from "../../../../ConfigContext";
import { armRequest } from "../../request";
import * as Types from "./types";
const apiVersion = "2024-12-01-preview";
const apiVersion = "2024-02-15-preview";
/* Lists the Cassandra keyspaces under an existing Azure Cosmos DB database account. */
export async function listCassandraKeyspaces(

View File

@@ -3,13 +3,13 @@
Run "npm run generateARMClients" to regenerate
Editing this file directly should be done with extreme caution so as not to diverge from ARM REST specs
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-12-01-preview/cosmos-db.json
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json
*/
import { configContext } from "../../../../ConfigContext";
import { armRequest } from "../../request";
import * as Types from "./types";
const apiVersion = "2024-12-01-preview";
const apiVersion = "2024-02-15-preview";
/* Retrieves the metrics determined by the given filter for the given database account and collection. */
export async function listMetrics(

View File

@@ -3,13 +3,13 @@
Run "npm run generateARMClients" to regenerate
Editing this file directly should be done with extreme caution so as not to diverge from ARM REST specs
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-12-01-preview/cosmos-db.json
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json
*/
import { configContext } from "../../../../ConfigContext";
import { armRequest } from "../../request";
import * as Types from "./types";
const apiVersion = "2024-12-01-preview";
const apiVersion = "2024-02-15-preview";
/* Retrieves the metrics determined by the given filter for the given collection, split by partition. */
export async function listMetrics(

View File

@@ -3,13 +3,13 @@
Run "npm run generateARMClients" to regenerate
Editing this file directly should be done with extreme caution so as not to diverge from ARM REST specs
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-12-01-preview/cosmos-db.json
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json
*/
import { configContext } from "../../../../ConfigContext";
import { armRequest } from "../../request";
import * as Types from "./types";
const apiVersion = "2024-12-01-preview";
const apiVersion = "2024-02-15-preview";
/* Retrieves the metrics determined by the given filter for the given collection and region, split by partition. */
export async function listMetrics(

View File

@@ -3,13 +3,13 @@
Run "npm run generateARMClients" to regenerate
Editing this file directly should be done with extreme caution so as not to diverge from ARM REST specs
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-12-01-preview/cosmos-db.json
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json
*/
import { configContext } from "../../../../ConfigContext";
import { armRequest } from "../../request";
import * as Types from "./types";
const apiVersion = "2024-12-01-preview";
const apiVersion = "2024-02-15-preview";
/* Retrieves the metrics determined by the given filter for the given database account, collection and region. */
export async function listMetrics(

View File

@@ -3,13 +3,13 @@
Run "npm run generateARMClients" to regenerate
Editing this file directly should be done with extreme caution so as not to diverge from ARM REST specs
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-12-01-preview/cosmos-db.json
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json
*/
import { configContext } from "../../../../ConfigContext";
import { armRequest } from "../../request";
import * as Types from "./types";
const apiVersion = "2024-12-01-preview";
const apiVersion = "2024-02-15-preview";
/* Retrieves the metrics determined by the given filter for the given database account and database. */
export async function listMetrics(

View File

@@ -3,13 +3,13 @@
Run "npm run generateARMClients" to regenerate
Editing this file directly should be done with extreme caution so as not to diverge from ARM REST specs
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-12-01-preview/cosmos-db.json
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json
*/
import { configContext } from "../../../../ConfigContext";
import { armRequest } from "../../request";
import * as Types from "./types";
const apiVersion = "2024-12-01-preview";
const apiVersion = "2024-02-15-preview";
/* Retrieves the metrics determined by the given filter for the given database account and region. */
export async function listMetrics(

View File

@@ -3,13 +3,13 @@
Run "npm run generateARMClients" to regenerate
Editing this file directly should be done with extreme caution so as not to diverge from ARM REST specs
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-12-01-preview/cosmos-db.json
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json
*/
import { configContext } from "../../../../ConfigContext";
import { armRequest } from "../../request";
import * as Types from "./types";
const apiVersion = "2024-12-01-preview";
const apiVersion = "2024-02-15-preview";
/* Retrieves the properties of an existing Azure Cosmos DB database account. */
export async function get(

View File

@@ -3,13 +3,13 @@
Run "npm run generateARMClients" to regenerate
Editing this file directly should be done with extreme caution so as not to diverge from ARM REST specs
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-12-01-preview/cosmos-db.json
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json
*/
import { configContext } from "../../../../ConfigContext";
import { armRequest } from "../../request";
import * as Types from "./types";
const apiVersion = "2024-12-01-preview";
const apiVersion = "2024-02-15-preview";
/* Lists the graphs under an existing Azure Cosmos DB database account. */
export async function listGraphs(

View File

@@ -3,13 +3,13 @@
Run "npm run generateARMClients" to regenerate
Editing this file directly should be done with extreme caution so as not to diverge from ARM REST specs
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-12-01-preview/cosmos-db.json
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json
*/
import { configContext } from "../../../../ConfigContext";
import { armRequest } from "../../request";
import * as Types from "./types";
const apiVersion = "2024-12-01-preview";
const apiVersion = "2024-02-15-preview";
/* Lists the Gremlin databases under an existing Azure Cosmos DB database account. */
export async function listGremlinDatabases(

View File

@@ -3,13 +3,13 @@
Run "npm run generateARMClients" to regenerate
Editing this file directly should be done with extreme caution so as not to diverge from ARM REST specs
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-12-01-preview/cosmos-db.json
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json
*/
import { configContext } from "../../../../ConfigContext";
import { armRequest } from "../../request";
import * as Types from "./types";
const apiVersion = "2024-12-01-preview";
const apiVersion = "2024-02-15-preview";
/* List Cosmos DB locations and their properties */
export async function list(subscriptionId: string): Promise<Types.LocationListResult | Types.CloudError> {

View File

@@ -3,13 +3,13 @@
Run "npm run generateARMClients" to regenerate
Editing this file directly should be done with extreme caution so as not to diverge from ARM REST specs
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-12-01-preview/cosmos-db.json
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json
*/
import { configContext } from "../../../../ConfigContext";
import { armRequest } from "../../request";
import * as Types from "./types";
const apiVersion = "2024-12-01-preview";
const apiVersion = "2024-02-15-preview";
/* Lists the MongoDB databases under an existing Azure Cosmos DB database account. */
export async function listMongoDBDatabases(

View File

@@ -3,13 +3,13 @@
Run "npm run generateARMClients" to regenerate
Editing this file directly should be done with extreme caution so as not to diverge from ARM REST specs
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-12-01-preview/cosmos-db.json
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json
*/
import { configContext } from "../../../../ConfigContext";
import { armRequest } from "../../request";
import * as Types from "./types";
const apiVersion = "2024-12-01-preview";
const apiVersion = "2024-02-15-preview";
/* Lists all of the available Cosmos DB Resource Provider operations. */
export async function list(): Promise<Types.OperationListResult> {

View File

@@ -3,13 +3,13 @@
Run "npm run generateARMClients" to regenerate
Editing this file directly should be done with extreme caution so as not to diverge from ARM REST specs
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-12-01-preview/cosmos-db.json
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json
*/
import { configContext } from "../../../../ConfigContext";
import { armRequest } from "../../request";
import * as Types from "./types";
const apiVersion = "2024-12-01-preview";
const apiVersion = "2024-02-15-preview";
/* Retrieves the metrics determined by the given filter for the given partition key range id. */
export async function listMetrics(

View File

@@ -3,13 +3,13 @@
Run "npm run generateARMClients" to regenerate
Editing this file directly should be done with extreme caution so as not to diverge from ARM REST specs
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-12-01-preview/cosmos-db.json
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json
*/
import { configContext } from "../../../../ConfigContext";
import { armRequest } from "../../request";
import * as Types from "./types";
const apiVersion = "2024-12-01-preview";
const apiVersion = "2024-02-15-preview";
/* Retrieves the metrics determined by the given filter for the given partition key range id and region. */
export async function listMetrics(

View File

@@ -3,13 +3,13 @@
Run "npm run generateARMClients" to regenerate
Editing this file directly should be done with extreme caution so as not to diverge from ARM REST specs
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-12-01-preview/cosmos-db.json
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json
*/
import { configContext } from "../../../../ConfigContext";
import { armRequest } from "../../request";
import * as Types from "./types";
const apiVersion = "2024-12-01-preview";
const apiVersion = "2024-02-15-preview";
/* Retrieves the metrics determined by the given filter for the given database account. This url is only for PBS and Replication Latency data */
export async function listMetrics(

View File

@@ -3,13 +3,13 @@
Run "npm run generateARMClients" to regenerate
Editing this file directly should be done with extreme caution so as not to diverge from ARM REST specs
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-12-01-preview/cosmos-db.json
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json
*/
import { configContext } from "../../../../ConfigContext";
import { armRequest } from "../../request";
import * as Types from "./types";
const apiVersion = "2024-12-01-preview";
const apiVersion = "2024-02-15-preview";
/* Retrieves the metrics determined by the given filter for the given account, source and target region. This url is only for PBS and Replication Latency data */
export async function listMetrics(

View File

@@ -3,13 +3,13 @@
Run "npm run generateARMClients" to regenerate
Editing this file directly should be done with extreme caution so as not to diverge from ARM REST specs
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-12-01-preview/cosmos-db.json
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json
*/
import { configContext } from "../../../../ConfigContext";
import { armRequest } from "../../request";
import * as Types from "./types";
const apiVersion = "2024-12-01-preview";
const apiVersion = "2024-02-15-preview";
/* Retrieves the metrics determined by the given filter for the given account target region. This url is only for PBS and Replication Latency data */
export async function listMetrics(

View File

@@ -3,13 +3,13 @@
Run "npm run generateARMClients" to regenerate
Editing this file directly should be done with extreme caution so as not to diverge from ARM REST specs
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-12-01-preview/cosmos-db.json
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json
*/
import { configContext } from "../../../../ConfigContext";
import { armRequest } from "../../request";
import * as Types from "./types";
const apiVersion = "2024-12-01-preview";
const apiVersion = "2024-05-15-preview";
/* Lists the SQL databases under an existing Azure Cosmos DB database account. */
export async function listSqlDatabases(

View File

@@ -3,13 +3,13 @@
Run "npm run generateARMClients" to regenerate
Editing this file directly should be done with extreme caution so as not to diverge from ARM REST specs
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-12-01-preview/cosmos-db.json
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json
*/
import { configContext } from "../../../../ConfigContext";
import { armRequest } from "../../request";
import * as Types from "./types";
const apiVersion = "2024-12-01-preview";
const apiVersion = "2024-02-15-preview";
/* Lists the Tables under an existing Azure Cosmos DB database account. */
export async function listTables(

View File

@@ -3,7 +3,7 @@
Run "npm run generateARMClients" to regenerate
Editing this file directly should be done with extreme caution so as not to diverge from ARM REST specs
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-12-01-preview/cosmos-db.json
Generated from: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json
*/
/* The List operation response, that contains the client encryption keys and their properties. */
@@ -553,12 +553,6 @@ export interface DatabaseAccountGetProperties {
/* The object that represents all properties related to capacity enforcement on an account. */
capacity?: Capacity;
/* Indicates the capacityMode of the Cosmos DB account. */
capacityMode?: CapacityMode;
/* The object that represents the migration state for the CapacityMode of the Cosmos DB account. */
capacityModeChangeTransitionState?: CapacityModeChangeTransitionState;
/* Flag to indicate whether to enable MaterializedViews on the Cosmos DB account */
enableMaterializedViews?: boolean;
/* The object that represents the metadata for the Account Keys of the Cosmos DB account. */
@@ -658,9 +652,6 @@ export interface DatabaseAccountCreateUpdateProperties {
/* The object that represents all properties related to capacity enforcement on an account. */
capacity?: Capacity;
/* Indicates the capacityMode of the Cosmos DB account. */
capacityMode?: CapacityMode;
/* Flag to indicate whether to enable MaterializedViews on the Cosmos DB account */
enableMaterializedViews?: boolean;
/* This property is ignored during the update/create operation, as the metadata is read-only. The object represents the metadata for the Account Keys of the Cosmos DB account. */
@@ -763,9 +754,6 @@ export interface DatabaseAccountUpdateProperties {
/* The object that represents all properties related to capacity enforcement on an account. */
capacity?: Capacity;
/* Indicates the capacityMode of the Cosmos DB account. */
capacityMode?: CapacityMode;
/* Flag to indicate whether to enable MaterializedViews on the Cosmos DB account */
enableMaterializedViews?: boolean;
/* This property is ignored during the update operation, as the metadata is read-only. The object represents the metadata for the Account Keys of the Cosmos DB account. */
@@ -1099,8 +1087,6 @@ export interface ThroughputSettingsResource {
readonly instantMaximumThroughput?: string;
/* The maximum throughput value or the maximum maxThroughput value (for autoscale) that can be specified */
readonly softAllowedMaximumThroughput?: string;
/* Array of Throughput Bucket limits to be applied to the Cosmos DB container */
throughputBuckets?: ThroughputBucketResource[];
}
/* Cosmos DB provisioned throughput settings object */
@@ -1128,14 +1114,6 @@ export interface ThroughputPolicyResource {
incrementPercent?: number;
}
/* Cosmos DB throughput bucket object */
export interface ThroughputBucketResource {
/* Represents the throughput bucket id */
id: number;
/* Represents maximum percentage throughput that can be used by the bucket */
maxThroughputPercentage: number;
}
/* Cosmos DB options resource object */
export interface OptionsResource {
/* Value of the Cosmos DB resource throughput or autoscaleSettings. Use the ThroughputSetting resource when retrieving offer details. */
@@ -1257,6 +1235,10 @@ export interface SqlDatabaseResource {
export interface SqlContainerResource {
/* Name of the Cosmos DB SQL container */
id: string;
vectorEmbeddingPolicy?: VectorEmbeddingPolicy;
fullTextPolicy?: FullTextPolicy;
/* The configuration of the indexing policy. By default, the indexing is automatic for all document paths within the container */
indexingPolicy?: IndexingPolicy;
@@ -1287,11 +1269,39 @@ export interface SqlContainerResource {
/* List of computed properties */
computedProperties?: ComputedProperty[];
}
/* The vector embedding policy for the container. */
vectorEmbeddingPolicy?: VectorEmbeddingPolicy;
export interface VectorEmbeddingPolicy {
vectorEmbeddings: VectorEmbedding[];
}
fullTextPolicy?: FullTextPolicy;
export interface VectorEmbedding {
path?: string;
dataType?: string;
dimensions?: number;
distanceFunction?: string;
}
export interface FullTextPolicy {
/**
* The default language for the full text.
*/
defaultLanguage: string;
/**
* The paths to be indexed for full text search.
*/
fullTextPaths: FullTextPath[];
}
export interface FullTextPath {
/**
* The path to be indexed for full text search.
*/
path: string;
/**
* The language for the full text path.
*/
language: string;
}
/* Cosmos DB indexing policy */
@@ -1313,14 +1323,19 @@ export interface IndexingPolicy {
/* List of spatial specifics */
spatialIndexes?: SpatialSpec[];
/* List of paths to include in the vector indexing */
vectorIndexes?: VectorIndex[];
fullTextIndexes?: FullTextIndex[];
}
/* Cosmos DB Vector Embedding Policy */
export interface VectorEmbeddingPolicy {
/* List of vector embeddings */
vectorEmbeddings?: VectorEmbedding[];
export interface VectorIndex {
path?: string;
type?: string;
}
export interface FullTextIndex {
/** The path in the JSON document to index. */
path: string;
}
/* undocumented */
@@ -1348,50 +1363,6 @@ export interface Indexes {
kind?: "Hash" | "Range" | "Spatial";
}
/* undocumented */
export interface VectorIndex {
/* The path to the vector field in the document. */
path: string;
/* The index type of the vector. Currently, flat, diskANN, and quantizedFlat are supported. */
type: "flat" | "diskANN" | "quantizedFlat";
}
/* Represents a vector embedding. A vector embedding is used to define a vector field in the documents. */
export interface VectorEmbedding {
/* The path to the vector field in the document. */
path: string;
/* Indicates the data type of vector. */
dataType: "float16" | "float32" | "uint8" | "int8";
/* The distance function to use for distance calculation in between vectors. */
distanceFunction: "euclidean" | "cosine" | "dotproduct";
/* The number of dimensions in the vector. */
dimensions: number;
}
export interface FullTextPolicy {
/**
* The default language for the full text.
*/
defaultLanguage: string;
/**
* The paths to be indexed for full text search.
*/
fullTextPaths: FullTextPath[];
}
export interface FullTextPath {
/**
* The path to be indexed for full text search.
*/
path: string;
/**
* The language for the full text path.
*/
language: string;
}
/* List of composite path */
export type CompositePathList = CompositePath[];
@@ -1717,28 +1688,6 @@ export interface Capacity {
totalThroughputLimit?: number;
}
/* Indicates the capacity mode of the account. */
export type CapacityMode = "None" | "Provisioned" | "Serverless";
/* The transition state information related capacity mode change with update request. */
export interface CapacityModeChangeTransitionState {
/* The transition status of capacity mode. */
capacityModeTransitionStatus?: "Invalid" | "Initialized" | "InProgress" | "Completed" | "Failed";
/* Indicates the current capacity mode of the account. */
currentCapacityMode?: "None" | "Provisioned" | "Serverless";
/* Indicates the previous capacity mode of the account before successful transition. */
previousCapacityMode?: "None" | "Provisioned" | "Serverless";
/* Begin time in UTC of the capacity mode change. */
readonly capacityModeTransitionBeginTimestamp?: string;
/* End time in UTC of the capacity mode change. */
readonly capacityModeTransitionEndTimestamp?: string;
/* End time in UTC of the last successful capacity mode change. */
readonly capacityModeLastSuccessfulTransitionEndTimestamp?: string;
}
/* Tags are a list of key-value pairs that describe the resource. These tags can be used in viewing and grouping this resource (across resource groups). A maximum of 15 tags can be provided for a resource. Each tag must have a key no greater than 128 characters and value no greater than 256 characters. For example, the default experience for a template type is set with "defaultExperience": "Cassandra". Current "defaultExperience" values also include "Table", "Graph", "DocumentDB", and "MongoDB". */
export type Tags = { [key: string]: string };
@@ -2004,8 +1953,8 @@ export type PublicNetworkAccess = "Enabled" | "Disabled" | "SecuredByPerimeter";
/* undocumented */
export interface ApiProperties {
/* Describes the version of the MongoDB account. */
serverVersion?: "3.2" | "3.6" | "4.0" | "4.2" | "5.0" | "6.0" | "7.0";
/* Describes the ServerVersion of a MongoDB account. */
serverVersion?: "3.2" | "3.6" | "4.0" | "4.2";
}
/* Analytical storage specific properties. */
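
For orientation, a minimal sketch of a container definition that exercises the looser vector and full-text shapes this change restores. It is illustrative only: the import path and the optionality of the remaining SqlContainerResource fields are assumptions, and none of the values come from a real account.

import * as Types from "./types"; // path assumed

// Illustrative only: exercises the restored VectorEmbeddingPolicy,
// FullTextPolicy and IndexingPolicy shapes from the hunks above.
const exampleContainer: Types.SqlContainerResource = {
  id: "products",
  vectorEmbeddingPolicy: {
    vectorEmbeddings: [
      { path: "/embedding", dataType: "float32", dimensions: 1536, distanceFunction: "cosine" },
    ],
  },
  fullTextPolicy: {
    defaultLanguage: "en-US",
    fullTextPaths: [{ path: "/description", language: "en-US" }],
  },
  indexingPolicy: {
    vectorIndexes: [{ path: "/embedding", type: "quantizedFlat" }],
    fullTextIndexes: [{ path: "/description" }],
  },
};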

View File

@@ -47,7 +47,6 @@ interface Options {
body?: unknown;
queryParams?: ARMQueryParams;
contentType?: string;
customHeaders?: Record<string, string>;
}
export async function armRequestWithoutPolling<T>({
@@ -58,7 +57,6 @@ export async function armRequestWithoutPolling<T>({
body: requestBody,
queryParams,
contentType,
customHeaders
}: Options): Promise<{ result: T; operationStatusUrl: string }> {
const url = new URL(path, host);
url.searchParams.append("api-version", configContext.armAPIVersion || apiVersion);
@@ -67,7 +65,7 @@ export async function armRequestWithoutPolling<T>({
queryParams.metricNames && url.searchParams.append("metricnames", queryParams.metricNames);
}
if (!userContext.authorizationToken && !customHeaders["Authorization"]) {
if (!userContext.authorizationToken) {
throw new Error("No authority token provided");
}
@@ -76,7 +74,6 @@ export async function armRequestWithoutPolling<T>({
headers: {
Authorization: userContext.authorizationToken,
[HttpHeaders.contentType]: contentType || "application/json",
...customHeaders
},
body: requestBody ? JSON.stringify(requestBody) : undefined,
});
@@ -99,15 +96,8 @@ export async function armRequestWithoutPolling<T>({
}
const operationStatusUrl = (response.headers && response.headers.get("location")) || "";
if(!response || response.status === 204) {
return { result: {} as T, operationStatusUrl: operationStatusUrl };
}
const responseBody = await response.json().catch((error) => {
console.error("armRequestWithoutPolling: Error parsing JSON response:", error);
return response.text; // Return an empty object if JSON parsing fails
});
return { result: responseBody as T, operationStatusUrl: operationStatusUrl };
const responseBody = (await response.json()) as T;
return { result: responseBody, operationStatusUrl: operationStatusUrl };
}
// TODO: This is very similar to what is happening in ResourceProviderClient.ts. Should probably merge them.
@@ -119,7 +109,6 @@ export async function armRequest<T>({
body: requestBody,
queryParams,
contentType,
customHeaders
}: Options): Promise<T> {
const armRequestResult = await armRequestWithoutPolling<T>({
host,
@@ -129,7 +118,6 @@ export async function armRequest<T>({
body: requestBody,
queryParams,
contentType,
customHeaders
});
const operationStatusUrl = armRequestResult.operationStatusUrl;
if (operationStatusUrl) {
@@ -218,14 +206,6 @@ export async function getOfferingIdsRequest<T>({
}
const operationStatusUrl = (response.headers && response.headers.get("location")) || "";
if(!response || response.status === 204) {
return { result: {} as T, operationStatusUrl: operationStatusUrl };
}
const responseBody = await response.json().catch((error) => {
console.error("getOfferingIdsRequest: Error parsing JSON response:", error);
return response.text; // Return an empty object if JSON parsing fails
});
return { result: responseBody as T, operationStatusUrl: operationStatusUrl };
const responseBody = (await response.json()) as T;
return { result: responseBody, operationStatusUrl: operationStatusUrl };
}
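
For context, a hedged sketch of how a caller would invoke these helpers once customHeaders is gone. The host/path/method/apiVersion option names are inferred from the surrounding file rather than shown in this hunk, the import paths are assumed, and the resource id is a placeholder.

import { configContext } from "ConfigContext";  // path assumed
import { armRequest } from "Utils/arm/request"; // path assumed

// Illustrative only: the Authorization header now always comes from
// userContext.authorizationToken; callers can no longer add their own headers.
async function fetchArmResource<T>(resourceId: string): Promise<T> {
  return armRequest<T>({
    host: configContext.ARM_ENDPOINT,
    path: resourceId,                 // e.g. an ARM databaseAccounts resource id
    method: "GET",                    // option name assumed
    apiVersion: "2024-02-15-preview", // option name assumed
  });
}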

View File

@@ -13,6 +13,7 @@ import {
readSubComponentState,
} from "Shared/AppStatePersistenceUtility";
import { LocalStorageUtility, StorageKey } from "Shared/StorageUtility";
import { useNewPortalBackendEndpoint } from "Utils/EndpointUtils";
import { logConsoleError } from "Utils/NotificationConsoleUtils";
import { useQueryCopilot } from "hooks/useQueryCopilot";
import { ReactTabKind, useTabs } from "hooks/useTabs";
@@ -547,6 +548,14 @@ async function configurePortal(): Promise<Explorer> {
const inputs = message?.inputs;
const openAction = message?.openAction;
if (inputs) {
if (
configContext.BACKEND_ENDPOINT &&
configContext.platform === Platform.Portal &&
process.env.NODE_ENV === "development"
) {
inputs.extensionEndpoint = configContext.PROXY_PATH;
}
updateContextsFromPortalMessage(inputs);
const { databaseAccount: account, subscriptionId, resourceGroup } = userContext;
@@ -671,17 +680,18 @@ function updateAADEndpoints(portalEnv: PortalEnv) {
function updateContextsFromPortalMessage(inputs: DataExplorerInputsFrame) {
if (
configContext.PORTAL_BACKEND_ENDPOINT &&
configContext.BACKEND_ENDPOINT &&
configContext.platform === Platform.Portal &&
process.env.NODE_ENV === "development"
) {
inputs.portalBackendEndpoint = configContext.PROXY_PATH;
inputs.extensionEndpoint = configContext.PROXY_PATH;
}
const authorizationToken = inputs.authorizationToken || "";
const databaseAccount = inputs.databaseAccount;
updateConfigContext({
BACKEND_ENDPOINT: inputs.extensionEndpoint || configContext.BACKEND_ENDPOINT,
ARM_ENDPOINT: normalizeArmEndpoint(inputs.csmEndpoint || configContext.ARM_ENDPOINT),
MONGO_PROXY_ENDPOINT: inputs.mongoProxyEndpoint,
CASSANDRA_PROXY_ENDPOINT: inputs.cassandraProxyEndpoint,
@@ -784,7 +794,17 @@ async function updateContextForSampleData(explorer: Explorer): Promise<void> {
return;
}
const url: string = createUri(configContext.PORTAL_BACKEND_ENDPOINT, "/api/sampledata");
let url: string;
if (useNewPortalBackendEndpoint(Constants.BackendApi.SampleData)) {
url = createUri(configContext.PORTAL_BACKEND_ENDPOINT, "/api/sampledata");
} else {
const sampleDatabaseEndpoint = useQueryCopilot.getState().copilotUserDBEnabled
? `/api/tokens/sampledataconnection/v2`
: `/api/tokens/sampledataconnection`;
url = createUri(`${configContext.BACKEND_ENDPOINT}`, sampleDatabaseEndpoint);
}
const authorizationHeader = getAuthorizationHeader();
const headers = { [authorizationHeader.header]: authorizationHeader.token };
@@ -800,7 +820,7 @@ async function updateContextForSampleData(explorer: Explorer): Promise<void> {
const sampleDataConnectionInfo = parseResourceTokenConnectionString(data.connectionString);
updateUserContext({ sampleDataConnectionInfo });
explorer.refreshSampleData();
await explorer.refreshSampleData();
}
interface SampledataconnectionResponse {

View File

@@ -1,9 +1,16 @@
import { useEffect, useState } from "react";
import { HttpHeaders } from "../Common/Constants";
import { useNewPortalBackendEndpoint } from "Utils/EndpointUtils";
import { ApiEndpoints, BackendApi, HttpHeaders } from "../Common/Constants";
import { configContext } from "../ConfigContext";
import { AccessInputMetadata } from "../Contracts/DataModels";
const url = `${configContext.BACKEND_ENDPOINT}${ApiEndpoints.guestRuntimeProxy}/accessinputmetadata?_=1609359229955`;
export async function fetchAccessData(portalToken: string): Promise<AccessInputMetadata> {
if (!useNewPortalBackendEndpoint(BackendApi.RuntimeProxy)) {
return fetchAccessData_ToBeDeprecated(portalToken);
}
const headers = new Headers();
// Portal encrypted token API quirk: The token header must be URL encoded
headers.append(HttpHeaders.guestAccessToken, encodeURIComponent(portalToken));
@@ -18,6 +25,25 @@ export async function fetchAccessData(portalToken: string): Promise<AccessInputM
.catch((error) => console.error(error));
}
export async function fetchAccessData_ToBeDeprecated(portalToken: string): Promise<AccessInputMetadata> {
const headers = new Headers();
// Portal encrypted token API quirk: The token header must be URL encoded
headers.append(HttpHeaders.guestAccessToken, encodeURIComponent(portalToken));
const options = {
method: "GET",
headers: headers,
};
return (
fetch(url, options)
.then((response) => response.json())
// Portal encrypted token API quirk: The response is double JSON encoded
.then((json) => JSON.parse(json))
.catch((error) => console.error(error))
);
}
export function useTokenMetadata(token: string): AccessInputMetadata | undefined {
const [state, setState] = useState<AccessInputMetadata | undefined>();

View File

@@ -39,6 +39,7 @@ const initTestExplorer = async (): Promise<void> => {
csmEndpoint: "https://management.azure.com",
dnsSuffix: "documents.azure.com",
serverId: "prod1",
extensionEndpoint: "/proxy",
portalBackendEndpoint: "https://cdb-ms-mpac-pbe.cosmos.azure.com",
mongoProxyEndpoint: "https://cdb-ms-mpac-mp.cosmos.azure.com",
cassandraProxyEndpoint: "https://cdb-ms-mpac-cp.cosmos.azure.com",

View File

@@ -16,13 +16,13 @@ Results of this file should be checked into the repo.
*/
// CHANGE THESE VALUES TO GENERATE NEW CLIENTS
const version = "2024-12-01-preview";
/* The following are legal options for resourceName but you generally will only use cosmos:
"cosmos" | "managedCassandra" | "mongorbac" | "notebook" | "privateEndpointConnection" | "privateLinkResources" |
const version = "2024-02-15-preview";
/* The following are legal options for resourceName but you generally will only use cosmos-db:
"cosmos-db" | "managedCassandra" | "mongorbac" | "notebook" | "privateEndpointConnection" | "privateLinkResources" |
"rbac" | "restorable" | "services" | "dataTransferService"
*/
const githubResourceName = "cosmos-db";
const deResourceName = "cosmos";
const deResourceName = "cosmos-db";
const schemaURL = `https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/${version}/${githubResourceName}.json`;
const outputDir = path.join(__dirname, `../../src/Utils/arm/generatedClients/${deResourceName}`);
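
With the values this change restores (version "2024-02-15-preview", githubResourceName and deResourceName both "cosmos-db"), the two template literals resolve as sketched below; only the tail of outputDir is shown, since the __dirname prefix depends on where the generator script runs.

// With version = "2024-02-15-preview" and githubResourceName = "cosmos-db":
const resolvedSchemaURL =
  "https://raw.githubusercontent.com/Azure/azure-rest-api-specs/main/specification/cosmos-db/resource-manager/Microsoft.DocumentDB/preview/2024-02-15-preview/cosmos-db.json";
// ...which matches the "Generated from" headers in the regenerated clients above.
// outputDir (with deResourceName = "cosmos-db") ends in:
//   src/Utils/arm/generatedClients/cosmos-db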

View File

@@ -248,8 +248,7 @@ module.exports = function (_env = {}, argv = {}) {
new TerserPlugin({
terserOptions: {
// These options increase our initial bundle size by ~5% but the builds are significantly faster and won't run out of memory
// Update 2/11/2025: we are removing this flag as our bundles sizes grew so that it can remove dead and unreachable code with compromise of build time
// compress: false,
compress: false,
mangle: {
keep_fnames: true,
keep_classnames: true,