diff --git a/x-pack/plugins/search_inference_endpoints/common/translations.ts b/x-pack/plugins/search_inference_endpoints/common/translations.ts
index a61d185c406c0..0f1aa4a8abcfc 100644
--- a/x-pack/plugins/search_inference_endpoints/common/translations.ts
+++ b/x-pack/plugins/search_inference_endpoints/common/translations.ts
@@ -10,7 +10,7 @@ import { i18n } from '@kbn/i18n';
 export const INFERENCE_ENDPOINT_LABEL = i18n.translate(
   'xpack.searchInferenceEndpoints.inferenceEndpointsLabel',
   {
-    defaultMessage: 'Inference Endpoints',
+    defaultMessage: 'Inference endpoints',
   }
 );
 
@@ -21,7 +21,8 @@ export const CANCEL = i18n.translate('xpack.searchInferenceEndpoints.cancel', {
 export const MANAGE_INFERENCE_ENDPOINTS_LABEL = i18n.translate(
   'xpack.searchInferenceEndpoints.allInferenceEndpoints.description',
   {
-    defaultMessage: 'View and manage your deployed inference endpoints.',
+    defaultMessage:
+      'Inference endpoints streamline the deployment and management of machine learning models in Elasticsearch. Set up and manage NLP tasks using unique endpoints, to build AI-powered search.',
   }
 );
 
@@ -69,9 +70,16 @@ export const SEMANTIC_SEARCH_WITH_E5_LINK = i18n.translate(
 );
 
 export const VIEW_YOUR_MODELS_LINK = i18n.translate(
-  'xpack.searchInferenceEndpoints.addEmptyPrompt.viewYourModels',
+  'xpack.searchInferenceEndpoints.viewYourModels',
+  {
+    defaultMessage: 'ML Trained Models',
+  }
+);
+
+export const API_DOCUMENTATION_LINK = i18n.translate(
+  'xpack.searchInferenceEndpoints.apiDocumentationLink',
   {
-    defaultMessage: 'View your models',
+    defaultMessage: 'API Documentation',
   }
 );
 
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/constants.ts b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/constants.ts
index b3fd13dc5383a..3e60bc33b049c 100644
--- a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/constants.ts
+++ b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/constants.ts
@@ -14,7 +14,7 @@ import {
 } from './types';
 
 export const DEFAULT_TABLE_ACTIVE_PAGE = 1;
-export const DEFAULT_TABLE_LIMIT = 10;
+export const DEFAULT_TABLE_LIMIT = 25;
 
 export const DEFAULT_QUERY_PARAMS: QueryParams = {
   page: DEFAULT_TABLE_ACTIVE_PAGE,
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_actions/actions/delete/delete_action.tsx b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_actions/actions/delete/delete_action.tsx
index 7bc428f49affa..caedf4e913387 100644
--- a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_actions/actions/delete/delete_action.tsx
+++ b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_actions/actions/delete/delete_action.tsx
@@ -28,7 +28,7 @@ export const DeleteAction: React.FC<DeleteActionProps> = ({ selectedEndpoint })
 
     deleteEndpoint({
       type: selectedEndpoint.type,
-      id: selectedEndpoint.endpoint.inference_id,
+      id: selectedEndpoint.endpoint,
     });
   };
 
@@ -37,7 +37,7 @@ export const DeleteAction: React.FC<DeleteActionProps> = ({ selectedEndpoint })
       <EuiButtonIcon
         aria-label={i18n.translate('xpack.searchInferenceEndpoints.actions.deleteEndpoint', {
           defaultMessage: 'Delete inference endpoint {selectedEndpointName}',
-          values: { selectedEndpointName: selectedEndpoint?.endpoint.inference_id },
+          values: { selectedEndpointName: selectedEndpoint?.endpoint },
         })}
         key="delete"
         iconType="trash"
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_deployment_status/deployment_status.test.tsx b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_deployment_status/deployment_status.test.tsx
deleted file mode 100644
index d8b13d9f00d69..0000000000000
--- a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_deployment_status/deployment_status.test.tsx
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import { render, screen } from '@testing-library/react';
-import React from 'react';
-import { DeploymentStatus } from './deployment_status';
-import { DeploymentState } from '@kbn/ml-trained-models-utils';
-
-describe('DeploymentStatus component', () => {
-  it('starting renders with warning status', () => {
-    render(<DeploymentStatus status={'starting' as DeploymentState} />);
-    const healthComponent = screen.getByTestId(`table-column-deployment-starting`);
-    expect(healthComponent).toBeInTheDocument();
-    expect(healthComponent).toHaveAttribute('color', 'warning');
-  });
-  it('stopping renders with danger status', () => {
-    render(<DeploymentStatus status={'stopping' as DeploymentState} />);
-    const healthComponent = screen.getByTestId(`table-column-deployment-stopping`);
-    expect(healthComponent).toBeInTheDocument();
-    expect(healthComponent).toHaveAttribute('color', 'danger');
-  });
-
-  it('started renders with success status', () => {
-    render(<DeploymentStatus status={'started' as DeploymentState} />);
-    const healthComponent = screen.getByTestId(`table-column-deployment-started`);
-    expect(healthComponent).toBeInTheDocument();
-    expect(healthComponent).toHaveAttribute('color', 'success');
-  });
-});
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_deployment_status/deployment_status.tsx b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_deployment_status/deployment_status.tsx
deleted file mode 100644
index 61bee26333a88..0000000000000
--- a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_deployment_status/deployment_status.tsx
+++ /dev/null
@@ -1,62 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import React from 'react';
-import { EuiIcon, EuiToolTip } from '@elastic/eui';
-import { DeploymentState } from '@kbn/ml-trained-models-utils';
-import * as i18n from './translations';
-
-interface DeploymentStatusProps {
-  status: DeploymentState | undefined;
-}
-
-function getStatus(status: DeploymentState | undefined) {
-  switch (status) {
-    case 'started':
-      return {
-        statusColor: 'success',
-        type: 'dot',
-        tooltip: i18n.MODEL_DEPLOYED,
-      };
-
-    case 'starting':
-      return {
-        statusColor: 'warning',
-        type: 'warning',
-        tooltip: i18n.MODEL_STARTING,
-      };
-
-    case 'stopping':
-      return {
-        statusColor: 'danger',
-        type: 'dot',
-        tooltip: i18n.MODEL_STOPPING,
-      };
-
-    case undefined:
-      return {
-        statusColor: 'danger',
-        type: 'dot',
-        tooltip: i18n.MODEL_NOT_DEPLOYED,
-      };
-  }
-}
-
-export const DeploymentStatus: React.FC<DeploymentStatusProps> = ({ status }) => {
-  const { statusColor, type, tooltip } = getStatus(status);
-
-  return (
-    <EuiToolTip content={tooltip}>
-      <EuiIcon
-        aria-label={tooltip}
-        type={type}
-        data-test-subj={`table-column-deployment-${status}`}
-        color={statusColor}
-      />
-    </EuiToolTip>
-  );
-};
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_deployment_status/translations.ts b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_deployment_status/translations.ts
deleted file mode 100644
index 5d811c0db45c0..0000000000000
--- a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_deployment_status/translations.ts
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import { i18n } from '@kbn/i18n';
-
-export const MODEL_DEPLOYED = i18n.translate(
-  'xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelDeployed',
-  {
-    defaultMessage: 'Model is deployed',
-  }
-);
-
-export const MODEL_STARTING = i18n.translate(
-  'xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelNotDeployed',
-  {
-    defaultMessage: 'Model starting',
-  }
-);
-
-export const MODEL_NOT_DEPLOYED = i18n.translate(
-  'xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelNotDeployed',
-  {
-    defaultMessage: 'Model is not deployed',
-  }
-);
-
-export const MODEL_STOPPING = i18n.translate(
-  'xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelStopped',
-  {
-    defaultMessage: 'Model stopping',
-  }
-);
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_endpoint/endpoint_info.test.tsx b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_endpoint/endpoint_info.test.tsx
index afb320dbf5ce9..1c91dcfd1aec3 100644
--- a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_endpoint/endpoint_info.test.tsx
+++ b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_endpoint/endpoint_info.test.tsx
@@ -9,315 +9,10 @@ import { render, screen } from '@testing-library/react';
 import React from 'react';
 import { EndpointInfo } from './endpoint_info';
 
-jest.mock('@kbn/ml-trained-models-utils', () => ({
-  ...jest.requireActual('@kbn/ml-trained-models-utils'),
-  ELASTIC_MODEL_DEFINITIONS: {
-    'model-with-mit-license': {
-      license: 'MIT',
-      licenseUrl: 'https://abc.com',
-    },
-  },
-}));
-
 describe('RenderEndpoint component tests', () => {
-  describe('with cohere service', () => {
-    const mockEndpoint = {
-      inference_id: 'cohere-2',
-      service: 'cohere',
-      service_settings: {
-        similarity: 'cosine',
-        dimensions: 384,
-        model_id: 'embed-english-light-v3.0',
-        rate_limit: {
-          requests_per_minute: 10000,
-        },
-        embedding_type: 'byte',
-      },
-      task_settings: {},
-    } as any;
-
-    it('renders the component with endpoint details for Cohere service', () => {
-      render(<EndpointInfo endpoint={mockEndpoint} />);
-
-      expect(screen.getByText('cohere-2')).toBeInTheDocument();
-      expect(screen.getByText('byte')).toBeInTheDocument();
-      expect(screen.getByText('embed-english-light-v3.0')).toBeInTheDocument();
-    });
-
-    it('does not render model_id badge if serviceSettings.model_id is not provided for Cohere service', () => {
-      const modifiedEndpoint = {
-        ...mockEndpoint,
-        service_settings: { ...mockEndpoint.service_settings, model_id: undefined },
-      };
-      render(<EndpointInfo endpoint={modifiedEndpoint} />);
-
-      expect(screen.queryByText('embed-english-light-v3.0')).not.toBeInTheDocument();
-    });
-
-    it('renders only model_id if other settings are not provided for Cohere service', () => {
-      const modifiedEndpoint = {
-        ...mockEndpoint,
-        service_settings: { model_id: 'embed-english-light-v3.0' },
-      };
-      render(<EndpointInfo endpoint={modifiedEndpoint} />);
-
-      expect(screen.getByText('embed-english-light-v3.0')).toBeInTheDocument();
-      expect(screen.queryByText(',')).not.toBeInTheDocument();
-    });
-  });
-
-  describe('with elasticsearch service', () => {
-    const mockEndpoint = {
-      inference_id: 'model-123',
-      service: 'elasticsearch',
-      service_settings: {
-        num_allocations: 5,
-        num_threads: 10,
-        model_id: 'settings-model-123',
-      },
-    } as any;
-
-    it('renders the component with endpoint model_id and model settings', () => {
-      render(<EndpointInfo endpoint={mockEndpoint} />);
-
-      expect(screen.getByText('model-123')).toBeInTheDocument();
-      expect(screen.getByText('settings-model-123')).toBeInTheDocument();
-      expect(screen.getByText('Threads: 10 | Allocations: 5')).toBeInTheDocument();
-    });
-
-    it('renders the component with only model_id if num_threads and num_allocations are not provided', () => {
-      const modifiedSettings = {
-        ...mockEndpoint.service_settings,
-        num_threads: undefined,
-        num_allocations: undefined,
-      };
-      const modifiedEndpoint = { ...mockEndpoint, service_settings: modifiedSettings };
-      render(<EndpointInfo endpoint={modifiedEndpoint} />);
-
-      expect(screen.getByText('model-123')).toBeInTheDocument();
-      expect(screen.getByText('settings-model-123')).toBeInTheDocument();
-      expect(screen.queryByText('Threads: 10 | Allocations: 5')).not.toBeInTheDocument();
-    });
-  });
-
-  describe('with azureaistudio service', () => {
-    const mockEndpoint = {
-      inference_id: 'azure-ai-1',
-      service: 'azureaistudio',
-      service_settings: {
-        target: 'westus',
-        provider: 'microsoft_phi',
-        endpoint_type: 'realtime',
-      },
-    } as any;
-
-    it('renders the component with endpoint details', () => {
-      render(<EndpointInfo endpoint={mockEndpoint} />);
-
-      expect(screen.getByText('azure-ai-1')).toBeInTheDocument();
-      expect(screen.getByText('microsoft_phi, realtime, westus')).toBeInTheDocument();
-    });
-
-    it('renders correctly when some service settings are missing', () => {
-      const modifiedEndpoint = {
-        ...mockEndpoint,
-        service_settings: { target: 'westus', provider: 'microsoft_phi' },
-      };
-      render(<EndpointInfo endpoint={modifiedEndpoint} />);
-
-      expect(screen.getByText('microsoft_phi, westus')).toBeInTheDocument();
-    });
-
-    it('does not render a comma when only one service setting is provided', () => {
-      const modifiedEndpoint = {
-        ...mockEndpoint,
-        service_settings: { target: 'westus' },
-      };
-      render(<EndpointInfo endpoint={modifiedEndpoint} />);
-
-      expect(screen.getByText('westus')).toBeInTheDocument();
-      expect(screen.queryByText(',')).not.toBeInTheDocument();
-    });
-
-    it('renders nothing related to service settings when all are missing', () => {
-      const modifiedEndpoint = {
-        ...mockEndpoint,
-        service_settings: {},
-      };
-      render(<EndpointInfo endpoint={modifiedEndpoint} />);
-
-      expect(screen.getByText('azure-ai-1')).toBeInTheDocument();
-      expect(screen.queryByText('westus')).not.toBeInTheDocument();
-      expect(screen.queryByText('microsoft_phi')).not.toBeInTheDocument();
-      expect(screen.queryByText('realtime')).not.toBeInTheDocument();
-    });
-  });
-
-  describe('with azureopenai service', () => {
-    const mockEndpoint = {
-      inference_id: 'azure-openai-1',
-      service: 'azureopenai',
-      service_settings: {
-        resource_name: 'resource-xyz',
-        deployment_id: 'deployment-123',
-        api_version: 'v1',
-      },
-    } as any;
-
-    it('renders the component with all required endpoint details', () => {
-      render(<EndpointInfo endpoint={mockEndpoint} />);
-
-      expect(screen.getByText('azure-openai-1')).toBeInTheDocument();
-      expect(screen.getByText('resource-xyz, deployment-123, v1')).toBeInTheDocument();
-    });
-  });
-
-  describe('with mistral service', () => {
-    const mockEndpoint = {
-      inference_id: 'mistral-ai-1',
-      service: 'mistral',
-      service_settings: {
-        model: 'model-xyz',
-        max_input_tokens: 512,
-        rate_limit: {
-          requests_per_minute: 1000,
-        },
-      },
-    } as any;
-
-    it('renders the component with endpoint details', () => {
-      render(<EndpointInfo endpoint={mockEndpoint} />);
-
-      expect(screen.getByText('mistral-ai-1')).toBeInTheDocument();
-      expect(screen.getByText('model-xyz')).toBeInTheDocument();
-      expect(screen.getByText('max_input_tokens: 512, rate_limit: 1000')).toBeInTheDocument();
-    });
-
-    it('renders correctly when some service settings are missing', () => {
-      const modifiedEndpoint = {
-        ...mockEndpoint,
-        service_settings: {
-          model: 'model-xyz',
-          max_input_tokens: 512,
-        },
-      };
-      render(<EndpointInfo endpoint={modifiedEndpoint} />);
-
-      expect(screen.getByText('max_input_tokens: 512')).toBeInTheDocument();
-    });
-
-    it('does not render a comma when only one service setting is provided', () => {
-      const modifiedEndpoint = {
-        ...mockEndpoint,
-        service_settings: { model: 'model-xyz' },
-      };
-      render(<EndpointInfo endpoint={modifiedEndpoint} />);
-
-      expect(screen.getByText('model-xyz')).toBeInTheDocument();
-      expect(screen.queryByText(',')).not.toBeInTheDocument();
-    });
-
-    it('renders nothing related to service settings when all are missing', () => {
-      const modifiedEndpoint = {
-        ...mockEndpoint,
-        service_settings: {},
-      };
-      render(<EndpointInfo endpoint={modifiedEndpoint} />);
-
-      expect(screen.getByText('mistral-ai-1')).toBeInTheDocument();
-      expect(screen.queryByText('model-xyz')).not.toBeInTheDocument();
-      expect(screen.queryByText('max_input_tokens: 512')).not.toBeInTheDocument();
-      expect(screen.queryByText('rate_limit: 1000')).not.toBeInTheDocument();
-    });
-  });
-
-  describe('with googleaistudio service', () => {
-    const mockEndpoint = {
-      inference_id: 'google-ai-1',
-      service: 'googleaistudio',
-      service_settings: {
-        model_id: 'model-abc',
-        rate_limit: {
-          requests_per_minute: 500,
-        },
-      },
-    } as any;
-
-    it('renders the component with endpoint details', () => {
-      render(<EndpointInfo endpoint={mockEndpoint} />);
-
-      expect(screen.getByText('model-abc')).toBeInTheDocument();
-      expect(screen.getByText('rate_limit: 500')).toBeInTheDocument();
-    });
-
-    it('renders correctly when rate limit is missing', () => {
-      const modifiedEndpoint = {
-        ...mockEndpoint,
-        service_settings: {
-          model_id: 'model-abc',
-        },
-      };
-
-      render(<EndpointInfo endpoint={modifiedEndpoint} />);
-
-      expect(screen.getByText('model-abc')).toBeInTheDocument();
-      expect(screen.queryByText('Rate limit:')).not.toBeInTheDocument();
-    });
-  });
-
-  describe('with amazonbedrock service', () => {
-    const mockEndpoint = {
-      inference_id: 'amazon-bedrock-1',
-      service: 'amazonbedrock',
-      service_settings: {
-        region: 'us-west-1',
-        provider: 'AMAZONTITAN',
-        model: 'model-bedrock-xyz',
-      },
-    } as any;
-
-    it('renders the component with endpoint details', () => {
-      render(<EndpointInfo endpoint={mockEndpoint} />);
-
-      expect(screen.getByText('amazon-bedrock-1')).toBeInTheDocument();
-      expect(screen.getByText('model-bedrock-xyz')).toBeInTheDocument();
-      expect(screen.getByText('region: us-west-1, provider: amazontitan')).toBeInTheDocument();
-    });
-  });
-
-  describe('for MIT licensed models', () => {
-    const mockEndpointWithMitLicensedModel = {
-      inference_id: 'model-123',
-      service: 'elasticsearch',
-      service_settings: {
-        num_allocations: 5,
-        num_threads: 10,
-        model_id: 'model-with-mit-license',
-      },
-    } as any;
-
-    it('renders the MIT license badge if the model is eligible', () => {
-      render(<EndpointInfo endpoint={mockEndpointWithMitLicensedModel} />);
-
-      const mitBadge = screen.getByTestId('mit-license-badge');
-      expect(mitBadge).toBeInTheDocument();
-      expect(mitBadge).toHaveAttribute('href', 'https://abc.com');
-    });
-
-    it('does not render the MIT license badge if the model is not eligible', () => {
-      const mockEndpointWithNonMitLicensedModel = {
-        inference_id: 'model-123',
-        service: 'elasticsearch',
-        service_settings: {
-          num_allocations: 5,
-          num_threads: 10,
-          model_id: 'model-without-mit-license',
-        },
-      } as any;
-
-      render(<EndpointInfo endpoint={mockEndpointWithNonMitLicensedModel} />);
+  it('renders the component with inference id', () => {
+    render(<EndpointInfo inferenceId={'cohere-2'} />);
 
-      expect(screen.queryByTestId('mit-license-badge')).not.toBeInTheDocument();
-    });
+    expect(screen.getByText('cohere-2')).toBeInTheDocument();
   });
 });
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_endpoint/endpoint_info.tsx b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_endpoint/endpoint_info.tsx
index b09b260e8cf15..3d810a24a9ffc 100644
--- a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_endpoint/endpoint_info.tsx
+++ b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_endpoint/endpoint_info.tsx
@@ -6,184 +6,13 @@
  */
 
 import React from 'react';
-import {
-  InferenceAPIConfigResponse,
-  ELASTIC_MODEL_DEFINITIONS,
-} from '@kbn/ml-trained-models-utils';
-import { EuiFlexGroup, EuiFlexItem, EuiText, EuiBadge } from '@elastic/eui';
-import { ServiceProviderKeys } from '../../types';
-import { ModelBadge } from './model_badge';
-import * as i18n from './translations';
 
 export interface EndpointInfoProps {
-  endpoint: InferenceAPIConfigResponse;
+  inferenceId: string;
 }
 
-export const EndpointInfo: React.FC<EndpointInfoProps> = ({ endpoint }) => {
-  return (
-    <EuiFlexGroup gutterSize="xs" direction="column">
-      <EuiFlexItem>
-        <strong>{endpoint.inference_id}</strong>
-      </EuiFlexItem>
-      <EuiFlexItem css={{ textWrap: 'wrap' }}>
-        <EndpointModelInfo endpoint={endpoint} />
-      </EuiFlexItem>
-    </EuiFlexGroup>
-  );
-};
-
-const EndpointModelInfo: React.FC<EndpointInfoProps> = ({ endpoint }) => {
-  const serviceSettings = endpoint.service_settings;
-  const modelId =
-    'model_id' in serviceSettings
-      ? serviceSettings.model_id
-      : 'model' in serviceSettings
-      ? serviceSettings.model
-      : undefined;
-
-  const isEligibleForMITBadge = modelId && ELASTIC_MODEL_DEFINITIONS[modelId]?.license === 'MIT';
-
-  return (
-    <>
-      <EuiText color="subdued" size="xs">
-        {modelId && <ModelBadge model={modelId} />}
-        {isEligibleForMITBadge ? (
-          <EuiBadge
-            color="hollow"
-            iconType="popout"
-            iconSide="right"
-            href={ELASTIC_MODEL_DEFINITIONS[modelId].licenseUrl ?? ''}
-            target="_blank"
-            data-test-subj={'mit-license-badge'}
-          >
-            {i18n.MIT_LICENSE}
-          </EuiBadge>
-        ) : null}{' '}
-        {endpointModelAtrributes(endpoint)}
-      </EuiText>
-    </>
-  );
-};
-
-function endpointModelAtrributes(endpoint: InferenceAPIConfigResponse) {
-  switch (endpoint.service) {
-    case ServiceProviderKeys.elser:
-    case ServiceProviderKeys.elasticsearch:
-      return elasticsearchAttributes(endpoint);
-    case ServiceProviderKeys.cohere:
-      return cohereAttributes(endpoint);
-    case ServiceProviderKeys.hugging_face:
-      return huggingFaceAttributes(endpoint);
-    case ServiceProviderKeys.openai:
-      return openAIAttributes(endpoint);
-    case ServiceProviderKeys.azureaistudio:
-      return azureOpenAIStudioAttributes(endpoint);
-    case ServiceProviderKeys.azureopenai:
-      return azureOpenAIAttributes(endpoint);
-    case ServiceProviderKeys.mistral:
-      return mistralAttributes(endpoint);
-    case ServiceProviderKeys.googleaistudio:
-      return googleAIStudioAttributes(endpoint);
-    case ServiceProviderKeys.amazonbedrock:
-      return amazonBedrockAttributes(endpoint);
-    default:
-      return null;
-  }
-}
-
-function elasticsearchAttributes(endpoint: InferenceAPIConfigResponse) {
-  const serviceSettings = endpoint.service_settings;
-
-  const numAllocations =
-    'num_allocations' in serviceSettings ? serviceSettings.num_allocations : undefined;
-  const numThreads = 'num_threads' in serviceSettings ? serviceSettings.num_threads : undefined;
-
-  return `${numThreads ? i18n.THREADS(numThreads) : ''}${
-    numThreads && numAllocations ? ' | ' : ''
-  }${numAllocations ? i18n.ALLOCATIONS(numAllocations) : ''}`;
-}
-
-function cohereAttributes(endpoint: InferenceAPIConfigResponse) {
-  const serviceSettings = endpoint.service_settings;
-  const embeddingType =
-    'embedding_type' in serviceSettings ? serviceSettings.embedding_type : undefined;
-
-  const taskSettings = endpoint.task_settings;
-  const inputType = 'input_type' in taskSettings ? taskSettings.input_type : undefined;
-  const truncate = 'truncate' in taskSettings ? taskSettings.truncate : undefined;
-
-  return [embeddingType, inputType, truncate && `truncate: ${truncate}`].filter(Boolean).join(', ');
-}
-
-function huggingFaceAttributes(endpoint: InferenceAPIConfigResponse) {
-  const serviceSettings = endpoint.service_settings;
-  const url = 'url' in serviceSettings ? serviceSettings.url : null;
-
-  return url;
-}
-
-function openAIAttributes(endpoint: InferenceAPIConfigResponse) {
-  const serviceSettings = endpoint.service_settings;
-  const url = 'url' in serviceSettings ? serviceSettings.url : null;
-
-  return url;
-}
-
-function azureOpenAIStudioAttributes(endpoint: InferenceAPIConfigResponse) {
-  const serviceSettings = endpoint.service_settings;
-  const provider = 'provider' in serviceSettings ? serviceSettings?.provider : undefined;
-  const endpointType =
-    'endpoint_type' in serviceSettings ? serviceSettings.endpoint_type : undefined;
-  const target = 'target' in serviceSettings ? serviceSettings.target : undefined;
-
-  return [provider, endpointType, target].filter(Boolean).join(', ');
-}
-
-function azureOpenAIAttributes(endpoint: InferenceAPIConfigResponse) {
-  const serviceSettings = endpoint.service_settings;
-
-  const resourceName =
-    'resource_name' in serviceSettings ? serviceSettings.resource_name : undefined;
-  const deploymentId =
-    'deployment_id' in serviceSettings ? serviceSettings.deployment_id : undefined;
-  const apiVersion = 'api_version' in serviceSettings ? serviceSettings.api_version : undefined;
-
-  return [resourceName, deploymentId, apiVersion].filter(Boolean).join(', ');
-}
-
-function mistralAttributes(endpoint: InferenceAPIConfigResponse) {
-  const serviceSettings = endpoint.service_settings;
-
-  const maxInputTokens =
-    'max_input_tokens' in serviceSettings ? serviceSettings.max_input_tokens : undefined;
-  const rateLimit =
-    'rate_limit' in serviceSettings ? serviceSettings.rate_limit.requests_per_minute : undefined;
-
-  return [
-    maxInputTokens && `max_input_tokens: ${maxInputTokens}`,
-    rateLimit && `rate_limit: ${rateLimit}`,
-  ]
-    .filter(Boolean)
-    .join(', ');
-}
-
-function amazonBedrockAttributes(endpoint: InferenceAPIConfigResponse) {
-  const serviceSettings = endpoint.service_settings;
-
-  const region = 'region' in serviceSettings ? serviceSettings.region : undefined;
-  const provider =
-    'provider' in serviceSettings ? serviceSettings.provider.toLocaleLowerCase() : undefined;
-
-  return [region && `region: ${region}`, provider && `provider: ${provider}`]
-    .filter(Boolean)
-    .join(', ');
-}
-
-function googleAIStudioAttributes(endpoint: InferenceAPIConfigResponse) {
-  const serviceSettings = endpoint.service_settings;
-
-  const rateLimit =
-    'rate_limit' in serviceSettings ? serviceSettings.rate_limit.requests_per_minute : undefined;
-
-  return rateLimit && `rate_limit: ${rateLimit}`;
-}
+export const EndpointInfo: React.FC<EndpointInfoProps> = ({ inferenceId }) => (
+  <span>
+    <strong>{inferenceId}</strong>
+  </span>
+);
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_endpoint/model_badge.tsx b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_endpoint/model_badge.tsx
deleted file mode 100644
index e4b241abd8199..0000000000000
--- a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_endpoint/model_badge.tsx
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
- * or more contributor license agreements. Licensed under the Elastic License
- * 2.0; you may not use this file except in compliance with the Elastic License
- * 2.0.
- */
-
-import React from 'react';
-import { EuiBadge, useEuiTheme } from '@elastic/eui';
-
-interface ModelBadgeProps {
-  model?: string;
-}
-
-export const ModelBadge: React.FC<ModelBadgeProps> = ({ model }) => {
-  const { euiTheme } = useEuiTheme();
-
-  if (!model) return null;
-
-  return <EuiBadge color={euiTheme.colors.body}>{model}</EuiBadge>;
-};
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_service_provider/service_provider.test.tsx b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_service_provider/service_provider.test.tsx
index a592569abb0aa..bcb6599632177 100644
--- a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_service_provider/service_provider.test.tsx
+++ b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_service_provider/service_provider.test.tsx
@@ -8,25 +8,264 @@
 import { render, screen } from '@testing-library/react';
 import React from 'react';
 import { ServiceProvider } from './service_provider';
-import { ServiceProviderKeys } from '../../types';
 
-jest.mock('../../../../assets/images/providers/elastic.svg', () => 'elasticIcon.svg');
-jest.mock('../../../../assets/images/providers/hugging_face.svg', () => 'huggingFaceIcon.svg');
-jest.mock('../../../../assets/images/providers/cohere.svg', () => 'cohereIcon.svg');
-jest.mock('../../../../assets/images/providers/open_ai.svg', () => 'openAIIcon.svg');
+jest.mock('@kbn/ml-trained-models-utils', () => ({
+  ...jest.requireActual('@kbn/ml-trained-models-utils'),
+  ELASTIC_MODEL_DEFINITIONS: {
+    'model-with-mit-license': {
+      license: 'MIT',
+      licenseUrl: 'https://abc.com',
+    },
+  },
+}));
 
 describe('ServiceProvider component', () => {
-  it('renders Hugging Face icon and name when providerKey is hugging_face', () => {
-    render(<ServiceProvider providerKey={ServiceProviderKeys.hugging_face} />);
-    expect(screen.getByText('Hugging Face')).toBeInTheDocument();
-    const icon = screen.getByTestId('table-column-service-provider-hugging_face');
-    expect(icon).toBeInTheDocument();
-  });
-
-  it('renders Open AI icon and name when providerKey is openai', () => {
-    render(<ServiceProvider providerKey={ServiceProviderKeys.openai} />);
-    expect(screen.getByText('OpenAI')).toBeInTheDocument();
-    const icon = screen.getByTestId('table-column-service-provider-openai');
-    expect(icon).toBeInTheDocument();
+  describe('with HuggingFace service', () => {
+    const mockEndpoint = {
+      inference_id: 'my-hugging-face',
+      service: 'hugging_face',
+      service_settings: {
+        api_key: 'aaaa',
+        url: 'https://dummy.huggingface.com',
+      },
+      task_settings: {},
+    } as any;
+    it('renders the component with service and model details', () => {
+      render(<ServiceProvider providerEndpoint={mockEndpoint} />);
+
+      expect(screen.getByText('Hugging Face')).toBeInTheDocument();
+      const icon = screen.getByTestId('table-column-service-provider-hugging_face');
+      expect(icon).toBeInTheDocument();
+      expect(screen.getByText('https://dummy.huggingface.com')).toBeInTheDocument();
+    });
+  });
+
+  describe('with openai service', () => {
+    const mockEndpoint = {
+      inference_id: 'my-openai-endpoint',
+      service: 'openai',
+      service_settings: {
+        api_key: 'aaaa',
+        model_id: 'text-embedding-3-small',
+      },
+      task_settings: {},
+    } as any;
+    it('renders the component with service and model details', () => {
+      render(<ServiceProvider providerEndpoint={mockEndpoint} />);
+
+      expect(screen.getByText('OpenAI')).toBeInTheDocument();
+      const icon = screen.getByTestId('table-column-service-provider-openai');
+      expect(icon).toBeInTheDocument();
+      expect(screen.getByText('text-embedding-3-small')).toBeInTheDocument();
+    });
+  });
+
+  describe('with cohere service', () => {
+    const mockEndpoint = {
+      inference_id: 'cohere-2',
+      service: 'cohere',
+      service_settings: {
+        similarity: 'cosine',
+        dimensions: 384,
+        model_id: 'embed-english-light-v3.0',
+        rate_limit: {
+          requests_per_minute: 10000,
+        },
+        embedding_type: 'byte',
+      },
+      task_settings: {},
+    } as any;
+
+    it('renders the component with service and model details', () => {
+      render(<ServiceProvider providerEndpoint={mockEndpoint} />);
+
+      expect(screen.getByText('Cohere')).toBeInTheDocument();
+      const icon = screen.getByTestId('table-column-service-provider-cohere');
+      expect(icon).toBeInTheDocument();
+      expect(screen.getByText('embed-english-light-v3.0')).toBeInTheDocument();
+    });
+
+    it('does not render model_id badge if serviceSettings.model_id is not provided', () => {
+      const modifiedEndpoint = {
+        ...mockEndpoint,
+        service_settings: { ...mockEndpoint.service_settings, model_id: undefined },
+      };
+      render(<ServiceProvider providerEndpoint={modifiedEndpoint} />);
+
+      expect(screen.queryByText('embed-english-light-v3.0')).not.toBeInTheDocument();
+    });
+  });
+
+  describe('with azureaistudio service', () => {
+    const mockEndpoint = {
+      inference_id: 'azure-ai-1',
+      service: 'azureaistudio',
+      service_settings: {
+        target: 'westus',
+        provider: 'microsoft_phi',
+        endpoint_type: 'realtime',
+      },
+    } as any;
+
+    it('renders the component with endpoint details', () => {
+      render(<ServiceProvider providerEndpoint={mockEndpoint} />);
+
+      expect(screen.getByText('Azure AI Studio')).toBeInTheDocument();
+      const icon = screen.getByTestId('table-column-service-provider-azureaistudio');
+      expect(icon).toBeInTheDocument();
+      expect(screen.getByText('microsoft_phi')).toBeInTheDocument();
+    });
+
+    it('renders nothing related to service settings when all are missing', () => {
+      const modifiedEndpoint = {
+        ...mockEndpoint,
+        service_settings: {},
+      };
+      render(<ServiceProvider providerEndpoint={modifiedEndpoint} />);
+
+      expect(screen.getByText('Azure AI Studio')).toBeInTheDocument();
+      const icon = screen.getByTestId('table-column-service-provider-azureaistudio');
+      expect(icon).toBeInTheDocument();
+      expect(screen.queryByText('microsoft_phi')).not.toBeInTheDocument();
+    });
+  });
+
+  describe('with azureopenai service', () => {
+    const mockEndpoint = {
+      inference_id: 'azure-openai-1',
+      service: 'azureopenai',
+      service_settings: {
+        resource_name: 'resource-xyz',
+        deployment_id: 'deployment-123',
+        api_version: 'v1',
+      },
+    } as any;
+
+    it('renders the component with all required endpoint details', () => {
+      render(<ServiceProvider providerEndpoint={mockEndpoint} />);
+
+      expect(screen.getByText('Azure OpenAI')).toBeInTheDocument();
+      const icon = screen.getByTestId('table-column-service-provider-azureopenai');
+      expect(icon).toBeInTheDocument();
+      expect(screen.getByText('resource-xyz')).toBeInTheDocument();
+    });
+  });
+
+  describe('with mistral service', () => {
+    const mockEndpoint = {
+      inference_id: 'mistral-ai-1',
+      service: 'mistral',
+      service_settings: {
+        model: 'model-xyz',
+        max_input_tokens: 512,
+        rate_limit: {
+          requests_per_minute: 1000,
+        },
+      },
+    } as any;
+
+    it('renders the component with endpoint details', () => {
+      render(<ServiceProvider providerEndpoint={mockEndpoint} />);
+
+      expect(screen.getByText('Mistral')).toBeInTheDocument();
+      const icon = screen.getByTestId('table-column-service-provider-mistral');
+      expect(icon).toBeInTheDocument();
+      expect(screen.getByText('model-xyz')).toBeInTheDocument();
+    });
+
+    it('does not render model id if not provided', () => {
+      const modifiedEndpoint = {
+        ...mockEndpoint,
+        service_settings: {},
+      };
+      render(<ServiceProvider providerEndpoint={modifiedEndpoint} />);
+
+      const icon = screen.getByTestId('table-column-service-provider-mistral');
+      expect(icon).toBeInTheDocument();
+      expect(screen.getByText('Mistral')).toBeInTheDocument();
+      expect(screen.queryByText('model-xyz')).not.toBeInTheDocument();
+    });
+  });
+
+  describe('with elasticsearch service', () => {
+    const mockEndpoint = {
+      inference_id: 'model-123',
+      service: 'elasticsearch',
+      service_settings: {
+        num_allocations: 5,
+        num_threads: 10,
+        model_id: 'settings-model-123',
+      },
+    } as any;
+
+    it('renders the component with endpoint model_id', () => {
+      render(<ServiceProvider providerEndpoint={mockEndpoint} />);
+
+      expect(screen.getByText('Elasticsearch')).toBeInTheDocument();
+      const icon = screen.getByTestId('table-column-service-provider-elasticsearch');
+      expect(icon).toBeInTheDocument();
+      expect(screen.getByText('settings-model-123')).toBeInTheDocument();
+    });
+
+    it('renders the MIT license badge if the model is eligible', () => {
+      const modifiedEndpoint = {
+        ...mockEndpoint,
+        service_settings: { ...mockEndpoint.service_settings, model_id: 'model-with-mit-license' },
+      };
+      render(<ServiceProvider providerEndpoint={modifiedEndpoint} />);
+
+      const mitBadge = screen.getByTestId('mit-license-badge');
+      expect(mitBadge).toBeInTheDocument();
+      expect(mitBadge).toHaveAttribute('href', 'https://abc.com');
+    });
+
+    it('does not render the MIT license badge if the model is not eligible', () => {
+      render(<ServiceProvider providerEndpoint={mockEndpoint} />);
+
+      expect(screen.queryByTestId('mit-license-badge')).not.toBeInTheDocument();
+    });
+  });
+
+  describe('with googleaistudio service', () => {
+    const mockEndpoint = {
+      inference_id: 'google-ai-1',
+      service: 'googleaistudio',
+      service_settings: {
+        model_id: 'model-abc',
+        rate_limit: {
+          requests_per_minute: 500,
+        },
+      },
+    } as any;
+
+    it('renders the component with service and model details', () => {
+      render(<ServiceProvider providerEndpoint={mockEndpoint} />);
+
+      expect(screen.getByText('Google AI Studio')).toBeInTheDocument();
+      const icon = screen.getByTestId('table-column-service-provider-googleaistudio');
+      expect(icon).toBeInTheDocument();
+      expect(screen.getByText('model-abc')).toBeInTheDocument();
+    });
+  });
+
+  describe('with amazonbedrock service', () => {
+    const mockEndpoint = {
+      inference_id: 'amazon-bedrock-1',
+      service: 'amazonbedrock',
+      service_settings: {
+        region: 'us-west-1',
+        provider: 'AMAZONTITAN',
+        model: 'model-bedrock-xyz',
+      },
+    } as any;
+
+    it('renders the component with model and service details', () => {
+      render(<ServiceProvider providerEndpoint={mockEndpoint} />);
+
+      expect(screen.getByText('Amazon Bedrock')).toBeInTheDocument();
+      const icon = screen.getByTestId('table-column-service-provider-amazonbedrock');
+      expect(icon).toBeInTheDocument();
+      expect(screen.getByText('model-bedrock-xyz')).toBeInTheDocument();
+    });
   });
 });
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_service_provider/service_provider.tsx b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_service_provider/service_provider.tsx
index f4c3511c26ff3..b926f590335fb 100644
--- a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_service_provider/service_provider.tsx
+++ b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_service_provider/service_provider.tsx
@@ -5,8 +5,12 @@
  * 2.0.
  */
 
-import { EuiIcon } from '@elastic/eui';
+import { EuiBadge, EuiFlexGroup, EuiFlexItem, EuiIcon, EuiText } from '@elastic/eui';
 import React from 'react';
+import {
+  ELASTIC_MODEL_DEFINITIONS,
+  InferenceAPIConfigResponse,
+} from '@kbn/ml-trained-models-utils';
 import elasticIcon from '../../../../assets/images/providers/elastic.svg';
 import huggingFaceIcon from '../../../../assets/images/providers/hugging_face.svg';
 import cohereIcon from '../../../../assets/images/providers/cohere.svg';
@@ -17,9 +21,10 @@ import googleAIStudioIcon from '../../../../assets/images/providers/google_ai_st
 import mistralIcon from '../../../../assets/images/providers/mistral.svg';
 import amazonBedrockIcon from '../../../../assets/images/providers/amazon_bedrock.svg';
 import { ServiceProviderKeys } from '../../types';
+import * as i18n from './translations';
 
 interface ServiceProviderProps {
-  providerKey: ServiceProviderKeys;
+  providerEndpoint: InferenceAPIConfigResponse;
 }
 
 interface ServiceProviderRecord {
@@ -50,7 +55,7 @@ export const SERVICE_PROVIDERS: Record<ServiceProviderKeys, ServiceProviderRecor
   },
   [ServiceProviderKeys.elser]: {
     icon: elasticIcon,
-    name: 'ELSER',
+    name: 'Elasticsearch',
   },
   [ServiceProviderKeys.googleaistudio]: {
     icon: googleAIStudioIcon,
@@ -70,19 +75,107 @@ export const SERVICE_PROVIDERS: Record<ServiceProviderKeys, ServiceProviderRecor
   },
 };
 
-export const ServiceProvider: React.FC<ServiceProviderProps> = ({ providerKey }) => {
-  const provider = SERVICE_PROVIDERS[providerKey];
+export const ServiceProvider: React.FC<ServiceProviderProps> = ({ providerEndpoint }) => {
+  const { service } = providerEndpoint;
+  const provider = SERVICE_PROVIDERS[service];
 
   return provider ? (
-    <>
-      <EuiIcon
-        data-test-subj={`table-column-service-provider-${providerKey}`}
-        type={provider.icon}
-        style={{ marginRight: '8px' }}
-      />
-      <span>{provider.name}</span>
-    </>
+    <EuiFlexGroup gutterSize="xs" direction="row" alignItems="center">
+      <EuiFlexItem grow={0}>
+        <EuiIcon
+          data-test-subj={`table-column-service-provider-${service}`}
+          type={provider.icon}
+          style={{ marginRight: '8px' }}
+        />
+      </EuiFlexItem>
+      <EuiFlexItem>
+        <EuiFlexGroup gutterSize="xs" direction="column">
+          <EuiFlexItem>
+            <EuiText size="s" color="subdued">
+              {provider.name}
+            </EuiText>
+          </EuiFlexItem>
+          <EuiFlexItem>
+            <EndpointModelInfo providerEndpoint={providerEndpoint} />
+          </EuiFlexItem>
+        </EuiFlexGroup>
+      </EuiFlexItem>
+    </EuiFlexGroup>
   ) : (
-    <span>{providerKey}</span>
+    <span>{service}</span>
+  );
+};
+
+const EndpointModelInfo: React.FC<ServiceProviderProps> = ({ providerEndpoint }) => {
+  const serviceSettings = providerEndpoint.service_settings;
+  const modelId =
+    'model_id' in serviceSettings
+      ? serviceSettings.model_id
+      : 'model' in serviceSettings
+      ? serviceSettings.model
+      : undefined;
+
+  const isEligibleForMITBadge = modelId && ELASTIC_MODEL_DEFINITIONS[modelId]?.license === 'MIT';
+
+  return (
+    <EuiFlexGroup gutterSize="xs" direction="column">
+      <EuiFlexItem>
+        <EuiFlexGroup gutterSize="xs" direction="row">
+          <EuiFlexItem grow={0}>
+            {modelId && (
+              <EuiText size="s" color="subdued">
+                {modelId}
+              </EuiText>
+            )}
+          </EuiFlexItem>
+          <EuiFlexItem grow={0}>
+            {isEligibleForMITBadge ? (
+              <EuiBadge
+                color="hollow"
+                iconType="popout"
+                iconSide="right"
+                href={ELASTIC_MODEL_DEFINITIONS[modelId].licenseUrl ?? ''}
+                target="_blank"
+                data-test-subj={'mit-license-badge'}
+              >
+                {i18n.MIT_LICENSE}
+              </EuiBadge>
+            ) : null}
+          </EuiFlexItem>
+        </EuiFlexGroup>
+      </EuiFlexItem>
+      <EuiFlexItem>{endpointModelAttributes(providerEndpoint)}</EuiFlexItem>
+    </EuiFlexGroup>
   );
 };
+
+function endpointModelAttributes(endpoint: InferenceAPIConfigResponse) {
+  switch (endpoint.service) {
+    case ServiceProviderKeys.hugging_face:
+      return huggingFaceAttributes(endpoint);
+    case ServiceProviderKeys.azureaistudio:
+      return azureAIStudioAttributes(endpoint);
+    case ServiceProviderKeys.azureopenai:
+      return azureOpenAIAttributes(endpoint);
+    default:
+      return null;
+  }
+}
+
+function huggingFaceAttributes(endpoint: InferenceAPIConfigResponse) {
+  const serviceSettings = endpoint.service_settings;
+  const url = 'url' in serviceSettings ? serviceSettings.url : null;
+
+  return url;
+}
+
+function azureAIStudioAttributes(endpoint: InferenceAPIConfigResponse) {
+  const serviceSettings = endpoint.service_settings;
+  return 'provider' in serviceSettings ? serviceSettings.provider : undefined;
+}
+
+function azureOpenAIAttributes(endpoint: InferenceAPIConfigResponse) {
+  const serviceSettings = endpoint.service_settings;
+
+  return 'resource_name' in serviceSettings ? serviceSettings.resource_name : undefined;
+}
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_endpoint/translations.ts b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_service_provider/translations.ts
similarity index 50%
rename from x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_endpoint/translations.ts
rename to x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_service_provider/translations.ts
index d3e002505e64a..4ccb08108d6a3 100644
--- a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_endpoint/translations.ts
+++ b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/render_table_columns/render_service_provider/translations.ts
@@ -7,18 +7,6 @@
 
 import { i18n } from '@kbn/i18n';
 
-export const THREADS = (numThreads: number) =>
-  i18n.translate('xpack.searchInferenceEndpoints.elasticsearch.threads', {
-    defaultMessage: 'Threads: {numThreads}',
-    values: { numThreads },
-  });
-
-export const ALLOCATIONS = (numAllocations: number) =>
-  i18n.translate('xpack.searchInferenceEndpoints.elasticsearch.allocations', {
-    defaultMessage: 'Allocations: {numAllocations}',
-    values: { numAllocations },
-  });
-
 export const MIT_LICENSE = i18n.translate(
   'xpack.searchInferenceEndpoints.elasticsearch.mitLicense',
   {
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/tabular_page.test.tsx b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/tabular_page.test.tsx
index 68041996b8808..cb23ba650e5fa 100644
--- a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/tabular_page.test.tsx
+++ b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/tabular_page.test.tsx
@@ -10,8 +10,6 @@ import { screen } from '@testing-library/react';
 import { render } from '@testing-library/react';
 import { TabularPage } from './tabular_page';
 import { InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
-import { TRAINED_MODEL_STATS_QUERY_KEY } from '../../../common/constants';
-import { QueryClient, QueryClientProvider } from '@tanstack/react-query';
 
 const inferenceEndpoints = [
   {
@@ -56,39 +54,26 @@ jest.mock('../../hooks/use_delete_endpoint', () => ({
 }));
 
 describe('When the tabular page is loaded', () => {
-  beforeEach(() => {
-    const queryClient = new QueryClient();
-    queryClient.setQueryData([TRAINED_MODEL_STATS_QUERY_KEY], {
-      trained_model_stats: [
-        {
-          model_id: '.elser_model_2',
-          deployment_stats: { deployment_id: 'my-elser-model-05', state: 'started' },
-        },
-        {
-          model_id: '.own_model',
-          deployment_stats: { deployment_id: 'local-model', state: 'started' },
-        },
-      ],
-    });
-    const wrapper = ({ children }: { children: React.ReactNode }) => {
-      return <QueryClientProvider client={queryClient}>{children}</QueryClientProvider>;
-    };
-    render(wrapper({ children: <TabularPage inferenceEndpoints={inferenceEndpoints} /> }));
-  });
+  it('should display all inference ids in the table', () => {
+    render(<TabularPage inferenceEndpoints={inferenceEndpoints} />);
 
-  it('should display all model_ids in the table', () => {
     const rows = screen.getAllByRole('row');
     expect(rows[1]).toHaveTextContent('local-model');
     expect(rows[2]).toHaveTextContent('my-elser-model-05');
     expect(rows[3]).toHaveTextContent('third-party-model');
   });
-  it('should render deployment status for inference endpoints with local trained models', () => {
-    const deploymentStatusStarted = screen.getAllByTestId('table-column-deployment-started');
-    expect(deploymentStatusStarted).toHaveLength(2);
-  });
-  it('should not render deployment status for third-party endpoints', () => {
-    expect(screen.queryByTestId('table-column-deployment-undefined')).not.toBeInTheDocument();
-    expect(screen.queryByTestId('table-column-deployment-starting')).not.toBeInTheDocument();
-    expect(screen.queryByTestId('table-column-deployment-stopping')).not.toBeInTheDocument();
+
+  it('should display all service and model ids in the table', () => {
+    render(<TabularPage inferenceEndpoints={inferenceEndpoints} />);
+
+    const rows = screen.getAllByRole('row');
+    expect(rows[1]).toHaveTextContent('Elasticsearch');
+    expect(rows[1]).toHaveTextContent('.own_model');
+
+    expect(rows[2]).toHaveTextContent('Elasticsearch');
+    expect(rows[2]).toHaveTextContent('.elser_model_2');
+
+    expect(rows[3]).toHaveTextContent('OpenAI');
+    expect(rows[3]).toHaveTextContent('.own_model');
   });
 });
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/tabular_page.tsx b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/tabular_page.tsx
index 391eae63c1a29..373cfb676c36e 100644
--- a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/tabular_page.tsx
+++ b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/tabular_page.tsx
@@ -7,26 +7,18 @@
 
 import React, { useCallback } from 'react';
 
-import {
-  EuiBasicTable,
-  EuiBasicTableColumn,
-  EuiFlexGroup,
-  EuiFlexItem,
-  HorizontalAlignment,
-} from '@elastic/eui';
+import { EuiBasicTable, EuiBasicTableColumn, EuiFlexGroup, EuiFlexItem } from '@elastic/eui';
 import { InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
-import { isLocalModel } from '@kbn/ml-trained-models-utils/src/constants/trained_models';
 import { TaskTypes } from '../../../common/types';
 import * as i18n from '../../../common/translations';
 
 import { useTableData } from '../../hooks/use_table_data';
-import { FilterOptions, InferenceEndpointUI, ServiceProviderKeys } from './types';
+import { FilterOptions, InferenceEndpointUI } from './types';
 
 import { useAllInferenceEndpointsState } from '../../hooks/use_all_inference_endpoints_state';
 import { ServiceProviderFilter } from './filter/service_provider_filter';
 import { TaskTypeFilter } from './filter/task_type_filter';
 import { TableSearch } from './search/table_search';
-import { DeploymentStatus } from './render_table_columns/render_deployment_status/deployment_status';
 import { EndpointInfo } from './render_table_columns/render_endpoint/endpoint_info';
 import { ServiceProvider } from './render_table_columns/render_service_provider/service_provider';
 import { TaskType } from './render_table_columns/render_task_type/task_type';
@@ -57,38 +49,32 @@ export const TabularPage: React.FC<TabularPageProps> = ({ inferenceEndpoints })
   );
 
   const tableColumns: Array<EuiBasicTableColumn<InferenceEndpointUI>> = [
-    {
-      name: '',
-      render: ({ endpoint, deployment }: InferenceEndpointUI) =>
-        isLocalModel(endpoint) ? <DeploymentStatus status={deployment} /> : null,
-      align: 'center' as HorizontalAlignment,
-      width: '64px',
-    },
     {
       field: 'endpoint',
       name: i18n.ENDPOINT,
-      render: (endpoint: InferenceAPIConfigResponse) => {
+      render: (endpoint: string) => {
         if (endpoint) {
-          return <EndpointInfo endpoint={endpoint} />;
+          return <EndpointInfo inferenceId={endpoint} />;
         }
 
         return null;
       },
       sortable: true,
       truncateText: true,
+      width: '400px',
     },
     {
       field: 'provider',
       name: i18n.SERVICE_PROVIDER,
-      render: (provider: ServiceProviderKeys) => {
+      render: (provider: InferenceAPIConfigResponse) => {
         if (provider) {
-          return <ServiceProvider providerKey={provider} />;
+          return <ServiceProvider providerEndpoint={provider} />;
         }
 
         return null;
       },
       sortable: false,
-      width: '185px',
+      width: '592px',
     },
     {
       field: 'type',
@@ -107,7 +93,7 @@ export const TabularPage: React.FC<TabularPageProps> = ({ inferenceEndpoints })
       actions: [
         {
           render: (inferenceEndpoint: InferenceEndpointUI) => (
-            <CopyIDAction inferenceId={inferenceEndpoint.endpoint.inference_id} />
+            <CopyIDAction inferenceId={inferenceEndpoint.endpoint} />
           ),
         },
         {
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/types.ts b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/types.ts
index d7b1ca58424dd..0a5da7288607d 100644
--- a/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/types.ts
+++ b/x-pack/plugins/search_inference_endpoints/public/components/all_inference_endpoints/types.ts
@@ -5,9 +5,9 @@
  * 2.0.
  */
 
-import { DeploymentState, InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
+import { InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
 import { TaskTypes } from '../../types';
-export const INFERENCE_ENDPOINTS_TABLE_PER_PAGE_VALUES = [10, 25, 50, 100];
+export const INFERENCE_ENDPOINTS_TABLE_PER_PAGE_VALUES = [25, 50, 100];
 
 export enum ServiceProviderKeys {
   amazonbedrock = 'amazonbedrock',
@@ -56,8 +56,7 @@ export interface EuiBasicTableSortTypes {
 }
 
 export interface InferenceEndpointUI {
-  deployment: DeploymentState | undefined;
-  endpoint: InferenceAPIConfigResponse;
-  provider: string;
+  endpoint: string;
+  provider: InferenceAPIConfigResponse;
   type: string;
 }
diff --git a/x-pack/plugins/search_inference_endpoints/public/components/inference_endpoints_header.tsx b/x-pack/plugins/search_inference_endpoints/public/components/inference_endpoints_header.tsx
index 014a596ec9e79..f12ef3e9fe8cd 100644
--- a/x-pack/plugins/search_inference_endpoints/public/components/inference_endpoints_header.tsx
+++ b/x-pack/plugins/search_inference_endpoints/public/components/inference_endpoints_header.tsx
@@ -5,7 +5,7 @@
  * 2.0.
  */
 
-import { EuiPageTemplate, EuiLink, EuiText, EuiSpacer } from '@elastic/eui';
+import { EuiPageTemplate, EuiLink } from '@elastic/eui';
 import React from 'react';
 import * as i18n from '../../common/translations';
 import { docLinks } from '../../common/doc_links';
@@ -18,21 +18,16 @@ export const InferenceEndpointsHeader: React.FC = () => {
     <EuiPageTemplate.Header
       data-test-subj="allInferenceEndpointsPage"
       pageTitle={i18n.INFERENCE_ENDPOINT_LABEL}
-      description={
-        <EuiText>
-          {i18n.MANAGE_INFERENCE_ENDPOINTS_LABEL}
-          <EuiSpacer size="s" />
-          <EuiLink
-            href={docLinks.createInferenceEndpoint}
-            target="_blank"
-            data-test-subj="learn-how-to-create-inference-endpoints"
-          >
-            {i18n.LEARN_HOW_TO_CREATE_INFERENCE_ENDPOINTS_LINK}
-          </EuiLink>
-        </EuiText>
-      }
+      description={i18n.MANAGE_INFERENCE_ENDPOINTS_LABEL}
       bottomBorder={true}
       rightSideItems={[
+        <EuiLink
+          href={docLinks.createInferenceEndpoint}
+          target="_blank"
+          data-test-subj="api-documentation"
+        >
+          {i18n.API_DOCUMENTATION_LINK}
+        </EuiLink>,
         <EuiLink href={trainedModelPageUrl} target="_blank" data-test-subj="view-your-models">
           {i18n.VIEW_YOUR_MODELS_LINK}
         </EuiLink>,
diff --git a/x-pack/plugins/search_inference_endpoints/public/hooks/use_table_data.test.tsx b/x-pack/plugins/search_inference_endpoints/public/hooks/use_table_data.test.tsx
index b3b23b4e88f58..b16fb5f7675cb 100644
--- a/x-pack/plugins/search_inference_endpoints/public/hooks/use_table_data.test.tsx
+++ b/x-pack/plugins/search_inference_endpoints/public/hooks/use_table_data.test.tsx
@@ -118,9 +118,7 @@ describe('useTableData', () => {
       b.inference_id.localeCompare(a.inference_id)
     );
 
-    const sortedEndpoints = result.current.sortedTableData.map(
-      (item) => item.endpoint.inference_id
-    );
+    const sortedEndpoints = result.current.sortedTableData.map((item) => item.endpoint);
     const expectedModelIds = expectedSortedData.map((item) => item.inference_id);
 
     expect(sortedEndpoints).toEqual(expectedModelIds);
@@ -153,19 +151,6 @@ describe('useTableData', () => {
       { wrapper }
     );
     const filteredData = result.current.sortedTableData;
-    expect(
-      filteredData.every((item) => item.endpoint.inference_id.includes(searchKey))
-    ).toBeTruthy();
-  });
-
-  it('should update deployment status based on deploymentStatus object', () => {
-    const { result } = renderHook(
-      () => useTableData(inferenceEndpoints, queryParams, filterOptions, searchKey),
-      { wrapper }
-    );
-
-    const updatedData = result.current.sortedTableData;
-
-    expect(updatedData[2].deployment).toEqual('started');
+    expect(filteredData.every((item) => item.endpoint.includes(searchKey))).toBeTruthy();
   });
 });
diff --git a/x-pack/plugins/search_inference_endpoints/public/hooks/use_table_data.tsx b/x-pack/plugins/search_inference_endpoints/public/hooks/use_table_data.tsx
index 811a84e7e46f8..775bea270559d 100644
--- a/x-pack/plugins/search_inference_endpoints/public/hooks/use_table_data.tsx
+++ b/x-pack/plugins/search_inference_endpoints/public/hooks/use_table_data.tsx
@@ -7,7 +7,7 @@
 
 import type { EuiTableSortingType } from '@elastic/eui';
 import { Pagination } from '@elastic/eui';
-import { DeploymentState, InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
+import { InferenceAPIConfigResponse } from '@kbn/ml-trained-models-utils';
 import { useMemo } from 'react';
 import { TaskTypes } from '../../common/types';
 import { DEFAULT_TABLE_LIMIT } from '../components/all_inference_endpoints/constants';
@@ -19,7 +19,6 @@ import {
   SortOrder,
   ServiceProviderKeys,
 } from '../components/all_inference_endpoints/types';
-import { useTrainedModelStats } from './use_trained_model_stats';
 
 interface UseTableDataReturn {
   tableData: InferenceEndpointUI[];
@@ -35,15 +34,6 @@ export const useTableData = (
   filterOptions: FilterOptions,
   searchKey: string
 ): UseTableDataReturn => {
-  const { data: trainedModelStats } = useTrainedModelStats();
-
-  const deploymentStatus = trainedModelStats?.trained_model_stats.reduce((acc, modelStat) => {
-    if (modelStat.deployment_stats?.deployment_id) {
-      acc[modelStat.deployment_stats.deployment_id] = modelStat?.deployment_stats?.state;
-    }
-    return acc;
-  }, {} as Record<string, DeploymentState | undefined>);
-
   const tableData: InferenceEndpointUI[] = useMemo(() => {
     let filteredEndpoints = inferenceEndpoints;
 
@@ -61,21 +51,12 @@ export const useTableData = (
 
     return filteredEndpoints
       .filter((endpoint) => endpoint.inference_id.includes(searchKey))
-      .map((endpoint) => {
-        const isElasticService =
-          endpoint.service === ServiceProviderKeys.elasticsearch ||
-          endpoint.service === ServiceProviderKeys.elser;
-        const deploymentId = isElasticService ? endpoint.inference_id : undefined;
-        const deployment = (deploymentId && deploymentStatus?.[deploymentId]) || undefined;
-
-        return {
-          deployment,
-          endpoint,
-          provider: endpoint.service,
-          type: endpoint.task_type,
-        };
-      });
-  }, [inferenceEndpoints, searchKey, filterOptions, deploymentStatus]);
+      .map((endpoint) => ({
+        endpoint: endpoint.inference_id,
+        provider: endpoint,
+        type: endpoint.task_type,
+      }));
+  }, [inferenceEndpoints, searchKey, filterOptions]);
 
   const sortedTableData: InferenceEndpointUI[] = useMemo(() => {
     return [...tableData].sort((a, b) => {
@@ -83,9 +64,9 @@ export const useTableData = (
       const bValue = b[queryParams.sortField];
 
       if (queryParams.sortOrder === SortOrder.asc) {
-        return aValue.inference_id.localeCompare(bValue.inference_id);
+        return aValue.localeCompare(bValue);
       } else {
-        return bValue.inference_id.localeCompare(aValue.inference_id);
+        return bValue.localeCompare(aValue);
       }
     });
   }, [tableData, queryParams]);
diff --git a/x-pack/plugins/translations/translations/fr-FR.json b/x-pack/plugins/translations/translations/fr-FR.json
index 6b6cdefdaab5c..837c0549468dc 100644
--- a/x-pack/plugins/translations/translations/fr-FR.json
+++ b/x-pack/plugins/translations/translations/fr-FR.json
@@ -34246,10 +34246,6 @@
     "xpack.searchInferenceEndpoints.confirmDeleteEndpoint.title": "Supprimer le point de terminaison d'inférence",
     "xpack.searchInferenceEndpoints.deleteEndpoint.deleteSuccess": "Le point de terminaison d’inférence a été supprimé avec succès.",
     "xpack.searchInferenceEndpoints.deleteEndpoint.endpointDeletionFailed": "Échec de la suppression du point de terminaison",
-    "xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelDeployed": "Le modèle est déployé",
-    "xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelNotDeployed": "Le modèle n’est pas déployé",
-    "xpack.searchInferenceEndpoints.elasticsearch.allocations": "Allocations : {numAllocations}",
-    "xpack.searchInferenceEndpoints.elasticsearch.threads": "Threads : {numThreads}",
     "xpack.searchInferenceEndpoints.endpoint": "Point de terminaison",
     "xpack.searchInferenceEndpoints.filter.emptyMessage": "Aucune option",
     "xpack.searchInferenceEndpoints.filter.options": "{totalCount, plural, one {# option} other {# options}}",
diff --git a/x-pack/plugins/translations/translations/ja-JP.json b/x-pack/plugins/translations/translations/ja-JP.json
index 642b64be05b21..d99db47f71194 100644
--- a/x-pack/plugins/translations/translations/ja-JP.json
+++ b/x-pack/plugins/translations/translations/ja-JP.json
@@ -34230,10 +34230,6 @@
     "xpack.searchInferenceEndpoints.confirmDeleteEndpoint.title": "推論エンドポイントを削除",
     "xpack.searchInferenceEndpoints.deleteEndpoint.deleteSuccess": "推論エンドポイントは正常に削除されました。",
     "xpack.searchInferenceEndpoints.deleteEndpoint.endpointDeletionFailed": "エンドポイントの削除が失敗しました",
-    "xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelDeployed": "モデルはデプロイされます",
-    "xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelNotDeployed": "モデルはデプロイされません",
-    "xpack.searchInferenceEndpoints.elasticsearch.allocations": "割り当て:{numAllocations}",
-    "xpack.searchInferenceEndpoints.elasticsearch.threads": "スレッド:{numThreads}",
     "xpack.searchInferenceEndpoints.endpoint": "エンドポイント",
     "xpack.searchInferenceEndpoints.filter.emptyMessage": "オプションなし",
     "xpack.searchInferenceEndpoints.filter.options": "{totalCount, plural, other {# オプション}}",
diff --git a/x-pack/plugins/translations/translations/zh-CN.json b/x-pack/plugins/translations/translations/zh-CN.json
index 6c17d6f86db8d..2d535a3729dc8 100644
--- a/x-pack/plugins/translations/translations/zh-CN.json
+++ b/x-pack/plugins/translations/translations/zh-CN.json
@@ -34271,10 +34271,6 @@
     "xpack.searchInferenceEndpoints.confirmDeleteEndpoint.title": "删除推理终端",
     "xpack.searchInferenceEndpoints.deleteEndpoint.deleteSuccess": "推理终端已成功删除。",
     "xpack.searchInferenceEndpoints.deleteEndpoint.endpointDeletionFailed": "终端删除失败",
-    "xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelDeployed": "已部署模型",
-    "xpack.searchInferenceEndpoints.deploymentStatus.tooltip.modelNotDeployed": "未部署模型",
-    "xpack.searchInferenceEndpoints.elasticsearch.allocations": "分配:{numAllocations}",
-    "xpack.searchInferenceEndpoints.elasticsearch.threads": "线程:{numThreads}",
     "xpack.searchInferenceEndpoints.endpoint": "终端",
     "xpack.searchInferenceEndpoints.filter.emptyMessage": "无选项",
     "xpack.searchInferenceEndpoints.filter.options": "{totalCount, plural, other {# 个选项}}",