diff --git a/.DS_Store b/.DS_Store
new file mode 100644
index 0000000..9c4cf3d
Binary files /dev/null and b/.DS_Store differ
diff --git a/.github/workflows/playwright-tests.yaml b/.github/workflows/playwright-tests.yaml
new file mode 100644
index 0000000..8b1e720
--- /dev/null
+++ b/.github/workflows/playwright-tests.yaml
@@ -0,0 +1,34 @@
+# CI configuration for Playwright
+name: Playwright Tests
+
+on:
+  push:
+    branches:
+      - main
+      - integration
+  pull_request:
+    branches:
+      - main
+      - integration
+
+jobs:
+  test:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v3
+
+      - name: Install Node.js
+        uses: actions/setup-node@v3
+        with:
+          node-version: '18'
+
+      - name: Install dependencies
+        run: npm ci
+
+      - name: Install Playwright Browsers
+        run: npx playwright install --with-deps
+
+      - name: Run Playwright Tests
+        run: npx playwright test
\ No newline at end of file
diff --git a/frontend/tests/README.md b/frontend/tests/README.md
new file mode 100644
index 0000000..5c88124
--- /dev/null
+++ b/frontend/tests/README.md
@@ -0,0 +1,136 @@
+# End-to-End Workflow Navigation Testing
+
+### Overview
+The following tests are designed to validate the functionality and correctness of [user workflows](https://quantum-machines.atlassian.net/wiki/spaces/hlsw/pages/3223912481/QAPP+UX+Design+Brief#User-workflows) in the QUAlibrate application.
+
+---
+
+### Prerequisites
+1. Ensure you have cloned the **QUAlibrate** repository from GitHub.
+2. Verify that you have the necessary node calibration scripts available to execute the workflows.
+
+To start the QUAlibrate server locally, use the following command in your terminal:
+
+```bash
+qualibrate start
+```
+Once the server is running, access the application at http://127.0.0.1:8001/.
+
+## Workflow 1: Running a calibration node
+
+### This test validates the end-to-end workflow of running a calibration node in the QUAlibrate application.
+
+1. **Navigate to the Application**
+   - Open http://127.0.0.1:8001/ in your web browser.
+   - Verify the landing page loads successfully, and the Node Library is visible.
+2. **Verify Calibration Nodes**
+   - Check that at least one calibration node (e.g., `test_cal`) is displayed in the Node Library.
+   - Check that the node has no visible parameters.
+3. **Select a Calibration Node**
+   - Click the test_cal node.
+   - Verify:
+     - ~~The node label and parameters are displayed.~~
+     - Check that the 3 different labels exist, their fields are modifiable, and have corresponding default parameters.
+4. **Change a node parameter value**
+   - Verify that it is possible to replace the default parameter values with new ones.
+5. **Run the Calibration Node**
+   - Click the Run button for test_cal.
+   - Verify:
+     - The Running Job section appears, showing parameters and status.
+     - Job status changes to finished upon completion, along with other stats.
+     - Parameters here match the parameters in the node parameter fields.
+6. **Check Results**
+   - Confirm the Results section is populated with:
+     - Numerical values.
+     - A generated figure.
+     - Data storage location.
+7. **Check/Update State Values**
+   - Verify the State Updates section displays suggested changes.
+   - Modify values, click the up-arrow to apply changes, and ensure updates are successful.
+
+
+## Workflow 2: Running a Calibration Graph
+
+### The following workflow tests the functionality of running a calibration graph within the QUAlibrate application.
+
+1. **Navigate to the Graph Library**:
+   - Ensure the application is running at http://127.0.0.1:8001/.
+   - Verify that the main page loads successfully.
+   - Click on the "Graph Library" tab in the sidebar.
+   - Confirm that the main page elements have loaded.
+2. **Select a Calibration Graph**:
+   - Identify and click on a specific calibration graph (e.g., `Single Qubit Tuneup`).
+   - Verify that the graph parameters are displayed.
+   - You see the calibration nodes populated on the left-hand side.
+   - You see the calibration graph populated on the right-hand side.
+   - Ensure the qubits section is editable to include qubits such as `Q0`, `Q2`, and `Q3`.
+3. **Modify Node-Specific Parameters**:
+   - Navigate to a specific node in the calibration graph.
+   - Update a parameter, such as changing the sampling points from `100` to `1000`.
+   - Ensure that the updated parameter value is correctly reflected.
+4. **Run the Calibration Graph**:
+   - Click the "Play" button to start running the graph.
+   - Verify that the application navigates to the "Graph Status" page.
+   - Confirm that the graph status shows `Running` and that progress (e.g., "1 out of 3 nodes completed") is displayed.
+5. **Monitor Graph Execution**:
+   - Wait for the graph to finish executing.
+   - Verify that the status updates to `Finished` and displays the total runtime (e.g., `12 seconds`).
+6. **View Results**:
+   - Check the results section to ensure that data (e.g., qubit spectroscopy) is displayed.
+   - Confirm that failed nodes or operations are clearly marked, along with the corresponding parameters.
+7. **Inspect Additional Nodes**:
+   - Navigate through the results of other nodes in the graph (e.g., `Rabi` and `Ramsey`).
+   - Verify that all available results are displayed, or confirm that no results are present if the node has not generated data.
+
+
+## Workflow 3: Viewing Past Data
+
+### This workflow ensures users can effectively interact with the "Data" section to review previously recorded measurements, inspect quantum system states, and view saved calibration results. A Playwright sketch for automating these steps appears after the list below.
+
+1. **Navigate to the Data Section**:
+   - Open http://127.0.0.1:8001/ in your web browser.
+   - Click on the "Data" tab in the sidebar.
+   - Verify that the "Measurement History" heading is visible and that a list of past measurements is displayed.
+
+2. **Verify Measurements**:
+   - Check that at least two measurements (e.g., `qubit_spectroscopy`, `Power Rabi`) are present in the measurement list.
+   - Confirm each measurement has a unique identifier and associated metadata.
+
+3. **Search Quantum States**:
+   - Use the search bar to filter measurements (e.g., search for "channel1").
+   - Verify that the results update dynamically and only display relevant entries.
+
+4. **Expand and Collapse Quantum States**:
+   - Click on a quantum state entry to expand it.
+   - Verify that the entry shows detailed data, such as channels and their respective values.
+   - Collapse the entry and ensure it hides the details.
+
+5. **Validate Qual Updates**:
+   - Select a measurement with qual updates (e.g., `Power Rabi`).
+   - Verify that the updates show old and new values for relevant parameters.
+   - Check that updates correspond accurately to the recorded calibration data.
+
+6. **Handle Missing Data**:
+   - Identify a measurement without saved qual states.
+   - Verify that an appropriate message (e.g., "No qual state saved for this measurement.") is displayed to the user.
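+
+Workflows 1 and 2 above have corresponding Playwright specs under `frontend/tests/e2e/`; workflow 3 does not yet. The snippet below is only a starting sketch for such a spec: the tab name, heading text, and search placeholder are assumptions taken from the manual steps above, not verified selectors.
+
+```ts
+import { test, expect } from '@playwright/test';
+
+// Sketch for Workflow 3 (locator names are assumptions; adjust to the real UI)
+test('Workflow3 (sketch)', async ({ page }) => {
+  // 1. Navigate to the Data section
+  await page.goto('http://localhost:8001/');
+  await page.getByRole('button', { name: 'Data' }).click();
+  await expect(page.getByText('Measurement History')).toBeVisible();
+
+  // 2. Verify that expected measurements are listed
+  await expect(page.getByText('qubit_spectroscopy')).toBeVisible();
+  await expect(page.getByText('Power Rabi')).toBeVisible();
+
+  // 3. Filter measurements via the search bar (placeholder text is an assumption)
+  await page.getByPlaceholder('Search').fill('channel1');
+  await expect(page.getByText('channel1').first()).toBeVisible();
+});
+```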
+
+
+#### ~~Workflow 4: Typical runtime workflows~~
+- ~~Verify seamless switching between tabs during runtime.~~
+- ~~Validate runtime updates appear dynamically in the "Running Job" container.~~
+- ~~Test runtime responsiveness to changes in parameters mid-execution.~~
+- ~~Verify logs and results update dynamically during node execution.~~
+- ~~Check the correctness of intermediate runtime results (e.g., partial graphs, logs).~~
+- ~~Test edge cases, such as disconnecting from the server mid-execution.~~
+- ~~Ensure error handling for unexpected runtime failures.~~
+
+
+#### ~~Workflow 5: Changing project~~
+- ~~Verify navigation to the Project tab.~~
+- ~~Validate project selection from the list.~~
+- ~~Test search functionality for projects (case-insensitive).~~
+- ~~Verify double-click opens a project directly.~~
+- ~~Ensure switching projects reloads associated nodes and graphs.~~
+- ~~Test project state retention (e.g., saving/loading).~~
+- ~~Validate project creation via the "+" button.~~
+- ~~Ensure feedback is provided for invalid or duplicate project names.~~
diff --git a/frontend/tests/e2e/workflow1.test.ts b/frontend/tests/e2e/workflow1.test.ts
new file mode 100644
index 0000000..d8a8dde
--- /dev/null
+++ b/frontend/tests/e2e/workflow1.test.ts
@@ -0,0 +1,113 @@
+import { test, expect } from '@playwright/test';
+
+// Test for Workflow 1
+test('Workflow1', {
+  annotation: {
+    type: 'First User Workflow',
+    description: 'Running a calibration node',
+  },
+}, async ({ page }) => {
+
+  // 0. Prerequisite:
+  // Be sure that the QUAlibrate application is running locally at http://127.0.0.1:8001/
+
+  // 1. Navigate to the Application
+  // Open http://127.0.0.1:8001/ in your web browser.
+  await page.goto('http://localhost:8001/');
+  expect(page.url()).toBe('http://localhost:8001/'); // page loaded successfully
+
+  // 2. Verify Calibration Nodes
+  // Check that at least one calibration node (e.g., test_cal) is displayed in the Node Library.
+  const nodeLibrary = page.locator('.node-library');
+  await expect(nodeLibrary).toBeVisible(); // node-library is showing as the landing page
+  await expect(page.getByText('test_cal', { exact: true })).toBeVisible(); // test_cal label is visible in the node library
+  await expect(page.getByText('test_calRun')).toBeVisible(); // test_cal 'calibration node tab' is visible in the node library
+  // Check that the test_cal node has no visible parameters
+  await expect(page.getByText('ParametersResonator:Sampling').first()).toBeHidden();
+
+  // 3. Select a Calibration Node
+  // Click the test_cal node.
+  await page.getByText('test_calRun').click();
+  // Check that the 3 different labels exist
+  await expect(page.getByText('ParametersResonator:Sampling').first()).toBeVisible();
+  await expect(page.locator('div').filter({ hasText: /^Resonator:$/ }).first()).toBeVisible();
+  await expect(page.locator('div[class^="Parameters-module__parametersWrapper__"] > div:nth-child(3)').first()).toBeVisible();
+  await expect(page.locator('div:nth-child(4)').first()).toBeVisible();
+  // Has corresponding default parameters
+  await expect(page.getByRole('textbox', { name: 'resonator' })).toHaveValue('q1.resonator');
+  await expect(page.getByRole('textbox', { name: 'sampling_points' })).toHaveValue('100');
+  await expect(page.getByRole('textbox', { name: 'noise_factor' })).toHaveValue('0.1');
+  // Their fields are modifiable
+  await page.getByRole('textbox', { name: 'resonator' }).click();
+  await page.getByRole('textbox', { name: 'sampling_points' }).click();
+  await page.getByRole('textbox', { name: 'noise_factor' }).click();
+
+  // 4. Change a node parameter value
+  // Verify that it's possible to replace the default parameter values with new ones
+  await page.getByRole('textbox', { name: 'resonator' }).click();
+  await page.getByRole('textbox', { name: 'resonator' }).fill('q2.resonator');
+  await page.getByRole('textbox', { name: 'sampling_points' }).click();
+  await page.getByRole('textbox', { name: 'sampling_points' }).fill('1000');
+  await page.getByRole('textbox', { name: 'noise_factor' }).click();
+  await page.getByRole('textbox', { name: 'noise_factor' }).fill('0.2');
+  await expect(page.getByRole('textbox', { name: 'resonator' })).toHaveValue('q2.resonator');
+  await expect(page.getByRole('textbox', { name: 'sampling_points' })).toHaveValue('1000');
+  await expect(page.getByRole('textbox', { name: 'noise_factor' })).toHaveValue('0.2');
+
+  // 5. Run the Calibration Node
+  // Click the Run button for test_cal.
+  await page.locator('div').filter({ hasText: /^test_calRun$/ }).getByRole('button').click();
+  await expect(page.getByRole('progressbar').getByRole('img')).toBeVisible(); // spinning loading icon appears
+  await expect(page.getByText('Status: running')).toBeVisible(); // status changes to running
+  // Verify:
+  // The Running Job section appears, showing parameters and status.
+  await expect(page.getByText('Running job : test_cal')).toBeVisible();
+  await expect(page.getByText(/Run start:\s+(\d{4})\/(\d{2})\/(\d{2}) (\d{2}):(\d{2}):(\d{2})/)).toBeVisible(); // Matches the format: Run start: 2021/09/30 15:00:00
+  // await page.waitForSelector('text=/Run duration:\\s*\\d+\\.\\d{2}\\s+seconds/', { timeout: 10000 });
+  await expect(page.getByText(/Run duration:\s*\d+\.\d{2}\s+seconds/)).toBeVisible(); // Matches the format: Run duration: 4.00 seconds
+  await expect(page.getByText('Parameters:')).toBeVisible();
+  await expect(page.getByText('Resonator:q2.resonator')).toBeVisible();
+  await expect(page.getByText('Sampling Points:1000')).toBeVisible();
+  // Job status changes to finished upon completion, along with other stats.
+  await expect(page.getByText('Status: finished')).toBeVisible(); // status changes to finished
+  await expect(page.locator('[class^="RunningJob-module__dot__"]')).toHaveCSS('background-color', 'rgb(50, 205, 50)'); // green color
+  // Parameters here match parameters in node parameter fields
+  await expect(page.getByRole('textbox', { name: 'resonator' })).toHaveValue('q2.resonator');
+  await expect(page.locator('#root')).toContainText('Resonator:q2.resonator');
+  await expect(page.getByRole('textbox', { name: 'sampling_points' })).toHaveValue('1000');
+  await expect(page.locator('#root')).toContainText('Sampling Points:1000');
+
+  // 6. Check Results
+  // Confirm the Results section is populated with:
+  // Numerical values.
+  await expect(page.getByTestId('data-key-pairfrequency_shift')).toBeVisible();
+  await expect(page.getByTestId('data-key-pairfrequency_shift')).toContainText(/"frequency_shift":\d+(\.\d+)?/); // Matches the format of any number
+  await expect(page.getByTestId('data-key-pairresults_fig')).toContainText('"results_fig":{1 Items');
+  await expect(page.getByTestId('data-key-pairresults_fig../results_fig.png')).toContainText('"./results_fig.png":');
+  // A generated figure.
+  await expect(page.getByTestId('data-key-pairresults_fig').locator('div').filter({ hasText: '"./results_fig.png":' }).first()).toBeVisible();
+  await expect(page.locator('a')).toBeVisible(); // the pyplot image is visible
+  // Data storage location.
+  await expect(page.getByTestId('data-key-pairarr')).toBeVisible();
+
+  // 7. Check/Update State Values
+  // Verify the State Updates section displays suggested changes.
+  await expect(page.locator('[class^="RunningJob-module__stateUpdateWrapper__"]').first()).toBeVisible();
+  await expect(page.locator('[class^="RunningJob-module__stateUpdatesTopWrapper__"] > div:nth-child(2)')).toBeVisible();
+  await expect(page.locator('#root')).toContainText('#/channels/ch1/intermediate_frequency100000000 50000000');
+  await expect(page.locator('#root')).toContainText('#/channels/ch2/intermediate_frequency[1,2,3] [1,2,4]');
+  // Update intermediate frequency
+  await page.locator('[class^="RunningJob-module__editIconWrapper__"] > svg').first().click();
+  await page.getByRole('textbox', { name: 'Enter a value' }).click();
+  await page.getByRole('textbox', { name: 'Enter a value' }).fill('20000000'); // Manually updating the frequency to 20000000
+  await page.locator('[class^="RunningJob-module__stateUpdateWrapper__"] > div > div').first().click();
+  await expect(page.locator('[class^="RunningJob-module__stateUpdateIconWrapper__"] > svg')).toBeVisible(); // Green checkmark icon appears
+  // Update channels from [1,2,4] to [1,2,4,5]
+  await page.locator('[class^="RunningJob-module__editIconWrapper__"] > svg').click();
+  await page.getByPlaceholder('Enter a value').nth(1).click();
+  await page.getByPlaceholder('Enter a value').nth(1).fill('[1,2,4,5]'); // manually updating the channels to [1,2,4,5]
+  await page.locator('[class^="RunningJob-module__stateUpdatesTopWrapper__"] > div:nth-child(2) > div > div > svg').click();
+  await expect(page.locator('div:nth-child(2) > div > [class^="RunningJob-module__stateUpdateIconWrapper__"] > svg')).toBeVisible(); // Green checkmark icon appears
+});
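Both workflow specs begin by navigating to the app and asserting the URL. If more specs are added (for example, for workflow 3 in the README), that shared preamble could be factored into a small helper module. The sketch below is only a suggestion; `helpers.ts` is a hypothetical file name, not part of this change.

```ts
// frontend/tests/e2e/helpers.ts (hypothetical)
import { expect, type Page } from '@playwright/test';

export const BASE_URL = 'http://localhost:8001/';

// Open the locally running QUAlibrate app and confirm the page loaded.
export async function openApp(page: Page): Promise<void> {
  await page.goto(BASE_URL);
  expect(page.url()).toBe(BASE_URL);
}
```

Each spec could then start with `await openApp(page);` instead of repeating the goto/URL check.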
diff --git a/frontend/tests/e2e/workflow2.test.ts b/frontend/tests/e2e/workflow2.test.ts
new file mode 100644
index 0000000..474bd5f
--- /dev/null
+++ b/frontend/tests/e2e/workflow2.test.ts
@@ -0,0 +1,123 @@
+import { test, expect } from '@playwright/test';
+
+// Test for Workflow 2
+// Still in Progress
+test('Workflow2', {
+  annotation: {
+    type: 'Second User Workflow',
+    description: 'Running a calibration graph',
+  },
+}, async ({ page }) => {
+
+  // 1. Navigate to the Graph Library:
+  // Ensure the application is running at http://127.0.0.1:8001/.
+  await page.goto('http://localhost:8001/');
+  // Verify that the main page loads successfully.
+  expect(page.url()).toBe('http://localhost:8001/'); // page loaded successfully
+  // Click on the "Graph Library" tab in the sidebar.
+  await page.getByRole('button', { name: 'Graph library' }).click();
+  // Confirm main page elements have loaded
+  await expect(page.getByRole('heading', { name: 'Run calibration graph' })).toBeVisible();
+  await expect(page.getByPlaceholder('graph name')).toBeVisible();
+  await expect(page.getByRole('button', { name: 'Refresh' })).toBeVisible();
+  await expect(page.getByText('Single Qubit TuneupParametersQubits:Qubit_spectroscopyRabiRamsey')).toBeVisible();
+  await expect(page.getByText('test_workflowParametersQubits')).toBeVisible();
+
+  // 2. Select a Calibration Graph:
+  // Identify and click on a specific calibration graph (e.g., `Single Qubit Tuneup`).
+  await page.getByText('Single Qubit TuneupParametersQubits:Qubit_spectroscopyRabiRamsey').click();
+
+  // Verify that the graph parameters are displayed.
+  await expect(page.getByRole('textbox', { name: 'qubits' })).toBeVisible();
+
+  // Ensure the qubits section is editable to include qubits such as `Q0`, `Q2`, and `Q3`.
+  const qubitsInput = page.getByRole('textbox', { name: 'qubits' });
+  await qubitsInput.fill('q0, q2, q3');
+  await expect(qubitsInput).toHaveValue('q0, q2, q3');
+
+  // 3. Modify Node-Specific Parameters:
+  // Navigate to a specific node in the calibration graph.
+  const parameterTitle3 = page.locator('div:nth-child(3) > [class^="Parameters-module__parameterTitle__"]');
+  const arrowIcon3 = parameterTitle3.locator('[class^="Parameters-module__arrowIconWrapper__"]');
+  const parameterTitle4 = page.locator('div:nth-child(4) > [class^="Parameters-module__parameterTitle__"]');
+  const arrowIcon4 = parameterTitle4.locator('[class^="Parameters-module__arrowIconWrapper__"]');
+  await arrowIcon3.first().click();
+  await arrowIcon4.first().click();
+
+  await page.locator('div').filter({ hasText: /^Rabi$/ }).locator('div').click();
+  // Update a parameter, such as changing the sampling points from `100` to `1000`.
+  const samplingPointsInput = page.getByPlaceholder('sampling_points');
+  await samplingPointsInput.click();
+  await samplingPointsInput.fill('1000');
+  await expect(samplingPointsInput).toHaveValue('1000');
+
+  // 4. Run the Calibration Graph:
+  // Click the "Play" button to start running the graph.
+  await page.locator('[class^="GraphElement-module__iconWrapper__"] > div > svg').first().click();
+
+  // Verify that the application navigates to the "Graph Status" page.
+  // await expect(page).toHaveURL(/.*graph-status/);
+
+  // Confirm that the graph status shows `Running`.
+  await expect(page.getByText('Status: Running')).toBeVisible();
+
+  // Confirm progress (e.g., "1 out of 3 nodes completed") is displayed.
+  await expect(page.getByText(/Graph progress: \d+\/\d+ nodes completed/)).toBeVisible();
+
+  // 5. Monitor Graph Execution:
+  // Wait for the graph to finish executing.
+  await page.waitForTimeout(5000); // Adjust timeout as per actual runtime.
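+  // Note: a fixed waitForTimeout can be flaky if the graph takes longer than expected.
+  // A condition-based wait, e.g. an explicit timeout on the status assertion below
+  // (await expect(page.getByText('Status: Finished')).toBeVisible({ timeout: 60_000 });),
+  // would be a more robust alternative.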
+  await expect(page.getByText('Status: Finished')).toBeVisible();
+
+  // Verify total runtime is displayed (e.g., `12 seconds`).
+  await expect(page.getByText(/Run duration: \d+\.\d{1,3}s/)).toBeVisible();
+
+  // 6. View Results:
+  // Check the results section to ensure that data is displayed.
+  await expect(page.getByRole('textbox', { name: 'results' })).toBeVisible();
+
+  // Confirm failed nodes or operations are marked.
+  await expect(page.getByText('Failed Nodes')).not.toBeVisible(); // Update if necessary.
+
+  // 7. Inspect Additional Nodes:
+  // Navigate through the results of other nodes in the graph.
+  await page.getByText('Rabi').click();
+  await expect(page.getByRole('textbox', { name: 'results' })).toBeVisible();
+
+  await page.getByText('Ramsey').click();
+  await expect(page.getByRole('textbox', { name: 'results' })).toBeVisible();
+
+});
\ No newline at end of file
diff --git a/frontend/tests/package.json b/frontend/tests/package.json
new file mode 100644
index 0000000..a118699
--- /dev/null
+++ b/frontend/tests/package.json
@@ -0,0 +1,34 @@
+{
+  "name": "qualibrate",
+  "version": "1.0.0",
+  "main": "index.js",
+  "scripts": {
+    "test": "playwright test",
+    "build": "tsc",
+    "test:workflow1": "playwright test frontend/tests/e2e/workflow1.test.ts",
+    "test:workflow2": "playwright test frontend/tests/e2e/workflow2.test.ts",
+    "test:ci": "npx playwright test --reporter=github",
+    "test:master": "playwright test tests/master.test.ts"
+  },
+  "keywords": [],
+  "author": "",
+  "license": "ISC",
+  "description": "",
+  "devDependencies": {
+    "@playwright/test": "^1.49.0",
+    "@types/node": "^22.10.1",
+    "typescript": "^5.7.2"
+  },
+  "dependencies": {
+    "playwright": "^1.49.0"
+  },
+  "playwright": {
+    "browsers": [
+      "chromium",
+      "firefox",
+      "webkit",
+      "msedge"
+    ]
+  }
+}
+
diff --git a/frontend/tests/playwright.config.ts b/frontend/tests/playwright.config.ts
new file mode 100644
index 0000000..6033279
--- /dev/null
+++ b/frontend/tests/playwright.config.ts
@@ -0,0 +1,45 @@
+// Playwright configuration file
+import { defineConfig } from '@playwright/test';
+
+export default defineConfig({
+  testDir: './e2e', // Test directory (the e2e specs live next to this config)
+  /* Run tests in files in parallel */
+  fullyParallel: true,
+  /* Fail the build on CI if you accidentally left test.only in the source code. */
+  forbidOnly: !!process.env.CI,
+  timeout: 30000, // Test timeout in milliseconds
+  /* Retry on CI only */
+  retries: process.env.CI ? 2 : 0,
+  /* Opt out of parallel tests on CI. */
+  workers: process.env.CI ? 1 : undefined,
+  /* Reporter to use. See https://playwright.dev/docs/test-reporters */
+  reporter: 'html',
+  /* Shared settings for all the projects below. See https://playwright.dev/docs/api/class-testoptions. */
+  use: {
+    /* Base URL to use in actions like `await page.goto('/')`. */
+    // baseURL: 'http://127.0.0.1:3000',
+
+    /* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */
+    trace: 'on-first-retry',
+    screenshot: 'only-on-failure', // Take screenshots only on failure
+    video: 'retain-on-failure', // Record videos only for failed tests
+  },
+  projects: [
+    {
+      name: 'Chromium',
+      use: { browserName: 'chromium' },
+    },
+    {
+      name: 'Firefox',
+      use: { browserName: 'firefox' },
+    },
+    {
+      name: 'WebKit',
+      use: { browserName: 'webkit' },
+    },
+    // {
+    //   name: 'Microsoft Edge',
+    //   use: { channel: 'msedge' },
+    // },
+  ],
+});
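One gap worth noting: the CI workflow runs `npx playwright test`, but nothing in this change starts the QUAlibrate server that the specs target at http://127.0.0.1:8001/. Playwright's built-in `webServer` option is one way to close that gap. The block below is a sketch only, assuming the `qualibrate` CLI is installed and on the PATH in the CI environment.

```ts
// Possible addition to playwright.config.ts (not part of this change)
import { defineConfig } from '@playwright/test';

export default defineConfig({
  // ...existing options...
  webServer: {
    command: 'qualibrate start',          // assumes the qualibrate CLI is available in CI
    url: 'http://127.0.0.1:8001/',        // the address the e2e specs point at
    reuseExistingServer: !process.env.CI, // reuse a locally running server during development
    timeout: 120_000,                     // allow time for the server to start
  },
});
```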