mirror of
https://github.com/Azure/cosmos-explorer.git
synced 2026-01-08 20:17:03 +00:00
* Temporarily re-enable key based auth for Mongo and Cassandra tests. * Increase number of shards for playwright tests. * Another small bump to test shard count. * click global new... button then collection in playwright tests * get new table button * create and delete container for every individual scale test * for scale and settings, dont create sample data in container * run scale tests serially * refactor scale setup and tear down to be within each test * record network traces * record network calls on all retries * when disposing of database during playwright test, refresh tree to remove deleted database * refresh tree before opening scale and settings * When opening scale and settings, refresh databases * reload all databases before loading offers * increase time for change partition key request * increase time for change partition key request * refresh databases in test instead of product code * when refreshing containers, open console window to check for status completion * close notification console window after seeing desired log * create and delete a container for each individual test * dont delete database after every test. 
leave it to the CI * Don't refresh databases when opening Scale+Settings and only delete database if running locally * only open scale and settings at the beginning of each test suite * get it back to working * change settings.spec.ts from serial to parallel * don't delete database after each test * update container creation throughput to be 5000 * run tests with no throughput limit on the account * adjust scale test to reflect no throughput limit on account * remove test container throughput * don't refresh collections when clicking settings in product code * refactor and run cleanup during pr check * copy cleanup accounts * run cleanup after playwright tests * run cleanup every three hours * revert ci.yml * update cpk test * remove cpk * remove cleanup accounts and add cpk * add cpk * remove cpk changes * revert ci.yml * run cleanup every two hours --------- Co-authored-by: Jade Welton <jawelton@microsoft.com> Co-authored-by: Asier Isayas <aisayas@microsoft.com>
132 lines
5.8 KiB
JavaScript
132 lines
5.8 KiB
JavaScript
const { AzureCliCredential } = require("@azure/identity");
|
|
const { CosmosDBManagementClient } = require("@azure/arm-cosmosdb");
|
|
const ms = require("ms");
|
|
|
|
// Subscription that owns the e2e test accounts; supplied by the CI environment.
const subscriptionId = process.env["AZURE_SUBSCRIPTION_ID"];

// Resource group that contains all Data Explorer end-to-end test accounts.
const resourceGroupName = "de-e2e-tests";

// Staleness cutoff in epoch milliseconds, computed once at startup:
// any database created before this moment is eligible for deletion.
const thirtyMinutesAgo = new Date(Date.now() - 1000 * 60 * 30).getTime();
|
/**
 * Formats a millisecond duration as a short human-readable string via the `ms` package.
 * @param {number} date - Duration in milliseconds (callers pass `Date.now() - timestamp`).
 * @returns {string} Friendly duration string (e.g. "45m"), or "Unknown" if `ms` throws
 *   (e.g. when the input is NaN because a timestamp could not be parsed).
 */
function friendlyTime(date) {
  let label;
  try {
    label = ms(date);
  } catch (error) {
    label = "Unknown";
  }
  return label;
}
|
/**
 * Deletes databases/keyspaces/tables/graphs older than thirty minutes from every
 * Cosmos DB account in the e2e test resource group. Accounts whose name ends in
 * "-readonly" are left untouched. SQL (GlobalDocumentDB) deletions run in parallel
 * through deleteWithRetry() to cope with ARM throttling.
 */
async function main() {
  const credentials = new AzureCliCredential();
  const client = new CosmosDBManagementClient(credentials, subscriptionId);
  const accounts = await client.databaseAccounts.list(resourceGroupName);

  // `capabilities` can be absent on some account kinds; defaulting to [] avoids
  // a TypeError that previously aborted the whole cleanup run.
  const hasCapability = (account, capabilityName) =>
    (account.capabilities ?? []).some((c) => c.name === capabilityName);

  // Shared delete-or-skip loop for API kinds whose resources expose `resource._ts`
  // (epoch seconds). Deletes anything older than the thirty-minute cutoff.
  const cleanupTimestamped = async (accountName, resources, deleteFn) => {
    for (const database of resources) {
      const timestamp = Number(database.resource._ts) * 1000;
      if (timestamp && timestamp < thirtyMinutesAgo) {
        await deleteFn(database.name);
        console.log(`DELETED: ${accountName} | ${database.name} | Age: ${friendlyTime(Date.now() - timestamp)}`);
      } else {
        console.log(`SKIPPED: ${accountName} | ${database.name} | Age: ${friendlyTime(Date.now() - timestamp)}`);
      }
    }
  };

  for (const account of accounts) {
    if (account.name.endsWith("-readonly")) {
      console.log(`SKIPPED: ${account.name}`);
      continue;
    }
    if (account.kind === "MongoDB") {
      const mongoDatabases = await client.mongoDBResources.listMongoDBDatabases(resourceGroupName, account.name);
      for (const database of mongoDatabases) {
        // Unfortunately Mongo does not provide a timestamp in ARM. There is no way
        // to tell how old the DB is other than encoding it in the ID :(
        const timestamp = Number(database.name.split("_").pop());
        if (timestamp && timestamp < thirtyMinutesAgo) {
          await client.mongoDBResources.deleteMongoDBDatabase(resourceGroupName, account.name, database.name);
          console.log(`DELETED: ${account.name} | ${database.name} | Age: ${friendlyTime(Date.now() - timestamp)}`);
        } else {
          console.log(`SKIPPED: ${account.name} | ${database.name} | Age: ${friendlyTime(Date.now() - timestamp)}`);
        }
      }
    } else if (hasCapability(account, "EnableCassandra")) {
      const keyspaces = await client.cassandraResources.listCassandraKeyspaces(resourceGroupName, account.name);
      await cleanupTimestamped(account.name, keyspaces, (name) =>
        client.cassandraResources.deleteCassandraKeyspace(resourceGroupName, account.name, name),
      );
    } else if (hasCapability(account, "EnableTable")) {
      const tables = await client.tableResources.listTables(resourceGroupName, account.name);
      await cleanupTimestamped(account.name, tables, (name) =>
        client.tableResources.deleteTable(resourceGroupName, account.name, name),
      );
    } else if (hasCapability(account, "EnableGremlin")) {
      const graphs = await client.gremlinResources.listGremlinDatabases(resourceGroupName, account.name);
      await cleanupTimestamped(account.name, graphs, (name) =>
        client.gremlinResources.deleteGremlinDatabase(resourceGroupName, account.name, name),
      );
    } else if (account.kind === "GlobalDocumentDB") {
      // SQL accounts see the most churn; delete in parallel with retry/backoff.
      const sqlDatabases = await client.sqlResources.listSqlDatabases(resourceGroupName, account.name);
      await Promise.all(sqlDatabases.map((database) => deleteWithRetry(client, database, account.name)));
    }
  }
}
|
/**
 * Deletes a SQL database if it is older than thirty minutes, retrying with
 * exponential backoff when ARM throttles the delete (HTTP 429). Non-throttling
 * errors are logged and abandoned immediately — they will not succeed on retry.
 * @param {object} client - CosmosDBManagementClient used for the delete call.
 * @param {object} database - SQL database resource; reads `name` and `resource._ts`.
 * @param {string} accountName - Cosmos DB account that owns the database.
 */
async function deleteWithRetry(client, database, accountName) {
  const maxRetries = 5;
  let attempt = 0;
  let backoffTime = 1000; // Start with 1 second

  while (attempt < maxRetries) {
    try {
      // `_ts` is epoch seconds; convert to ms. Falsy (missing _ts) means skip.
      const timestamp = Number(database.resource._ts) * 1000;
      if (timestamp && timestamp < thirtyMinutesAgo) {
        await client.sqlResources.deleteSqlDatabase(resourceGroupName, accountName, database.name);
        console.log(`DELETED: ${accountName} | ${database.name} | Age: ${friendlyTime(Date.now() - timestamp)}`);
      } else {
        console.log(`SKIPPED: ${accountName} | ${database.name} | Age: ${friendlyTime(Date.now() - timestamp)}`);
      }
      return;
    } catch (error) {
      if (error.statusCode === 429) {
        // Throttling error (HTTP 429), apply exponential backoff
        console.log(`Throttling detected, retrying ${database.name}... (Attempt ${attempt + 1})`);
        await delay(backoffTime);
        attempt++;
        backoffTime *= 2; // Exponential backoff
      } else {
        // For other errors, log and give up on this database. Previously this
        // `break`-ed and then fell through to the misleading
        // "Failed to delete ... after 5 attempts" message below.
        console.error(`Error deleting ${database.name}:`, error);
        return;
      }
    }
  }
  // Only reached when all throttle retries were exhausted.
  console.log(`Failed to delete ${database.name} after ${maxRetries} attempts.`);
}
|
/**
 * Helper function to delay the retry attempts.
 * @param {number} durationMs - Time to wait, in milliseconds.
 * @returns {Promise<void>} Resolves once the delay has elapsed.
 */
function delay(durationMs) {
  // Parameter renamed from `ms` to `durationMs`: it previously shadowed the
  // module-level `ms` package import, which is confusing inside this function.
  return new Promise((resolve) => setTimeout(resolve, durationMs));
}
|
// Entry point. Cleanup must never fail the CI pipeline ("fail safe"), so both
// the success and the failure path exit with status code 0.
(async () => {
  try {
    await main();
    console.log("Completed");
  } catch (err) {
    console.log(err);
    console.log("Cleanup failed! Exiting with success. Cleanup should always fail safe.");
  }
  process.exit(0);
})();