diff --git a/blazor-toc.html b/blazor-toc.html
index 5d2cc67eb5..17b7b5df9a 100644
--- a/blazor-toc.html
+++ b/blazor-toc.html
@@ -205,7 +205,15 @@
Getting Started
+
+ AI Services
+
diff --git a/blazor/smart-rich-text-editor/azure-openai-service.md b/blazor/smart-rich-text-editor/azure-openai-service.md
new file mode 100644
index 0000000000..09feaaa142
--- /dev/null
+++ b/blazor/smart-rich-text-editor/azure-openai-service.md
@@ -0,0 +1,349 @@
+---
+layout: post
+title: Azure OpenAI Configuration for Syncfusion Smart Rich Text Editor
+description: "Configure Azure OpenAI for Syncfusion Blazor Smart Rich Text Editor: authentication, client setup, DI registration, and usage examples."
+platform: Blazor
+control: Smart Rich Text Editor
+documentation: ug
+---
+
+# Azure OpenAI Configuration
+
+The Syncfusion® Blazor Smart Rich Text Editor supports Azure OpenAI for enterprise-grade AI capabilities with enhanced security and compliance features.
+
+## Prerequisites
+
+* Active Azure subscription
+* Azure OpenAI Service resource deployed
+* Deployed model (e.g., gpt-4, gpt-35-turbo)
+* Azure credentials with necessary permissions
+
+## Deploy Azure OpenAI Service
+
+### Step 1: Create Azure OpenAI Resource
+
+1. Sign in to [Azure Portal](https://portal.azure.com/)
+2. Click **Create a resource**
+3. Search for **Azure OpenAI**
+4. Click **Create**
+5. Fill in the resource details:
+ - **Subscription**: Select your subscription
+ - **Resource group**: Create or select existing
+ - **Region**: Choose appropriate region
+ - **Name**: Give your resource a unique name
+ - **Pricing tier**: Select S0 or higher
+
+### Step 2: Deploy a Model
+
+1. Go to **Azure AI Studio** (https://oai.azure.com/)
+2. Select your Azure OpenAI resource
+3. Navigate to **Deployments**
+4. Click **Create new deployment**
+5. Configure:
+ - **Deployment name**: e.g., `gpt-35-turbo-deployment`
+ - **Model name**: Select model (e.g., `gpt-35-turbo`, `gpt-4`)
+ - **Model version**: Choose version
+ - **Deployment type**: Standard
+
+### Step 3: Obtain Credentials
+
+From your Azure OpenAI resource in Azure Portal, copy:
+- **Endpoint**: `https://your-resource-name.openai.azure.com/`
+- **Key**: Found under **Keys and Endpoint**
+- **Deployment name**: Created in Step 2
+
+## Installation
+
+Install required NuGet packages:
+
+{% tabs %}
+{% highlight c# tabtitle="Package Manager" %}
+
+Install-Package Syncfusion.Blazor.SmartRichTextEditor
+Install-Package Syncfusion.Blazor.Themes
+Install-Package Microsoft.Extensions.AI
+Install-Package Microsoft.Extensions.AI.OpenAI
+Install-Package Azure.AI.OpenAI
+
+{% endhighlight %}
+{% endtabs %}
+
+## Configuration
+
+### Step 1: Setup in Program.cs
+
+Add the following configuration to your **Program.cs** file:
+
+```csharp
+using Syncfusion.Blazor;
+using Syncfusion.Blazor.AI;
+using Azure.AI.OpenAI;
+using Microsoft.Extensions.AI;
+using System.ClientModel;
+
+var builder = WebApplication.CreateBuilder(args);
+
+// Add services to the container
+builder.Services.AddRazorPages();
+builder.Services.AddServerSideBlazor();
+
+// Register Syncfusion Blazor Service
+builder.Services.AddSyncfusionBlazor();
+
+// Configure Azure OpenAI
+string azureOpenAIKey = "your-azure-key";
+string azureOpenAIEndpoint = "https://your-resource-name.openai.azure.com/";
+string azureOpenAIDeployment = "your-deployment-name";
+
+AzureOpenAIClient azureOpenAIClient = new AzureOpenAIClient(
+ new Uri(azureOpenAIEndpoint),
+ new ApiKeyCredential(azureOpenAIKey)
+);
+
+IChatClient azureOpenAIChatClient = azureOpenAIClient
+ .GetChatClient(azureOpenAIDeployment)
+ .AsIChatClient();
+
+builder.Services.AddChatClient(azureOpenAIChatClient);
+
+// Register Smart Rich Text Editor Components with Azure OpenAI
+builder.Services.AddSingleton();
+
+var app = builder.Build();
+
+// ... rest of your application setup
+```
+
+### Step 2: Add Imports to _Imports.razor
+
+Update **~/_Imports.razor**:
+
+```razor
+@using Syncfusion.Blazor
+@using Syncfusion.Blazor.SmartRichTextEditor
+```
+
+### Step 3: Use Azure OpenAI with Smart Rich Text Editor Component
+
+```razor
+@page "/editor"
+@using Syncfusion.Blazor.SmartRichTextEditor
+
+
+
+
+
Tips:
+
+ - Select text and click AI Commands for quick improvements
+ - Press Alt+Enter to open AI Query dialog
+ - Use AI to fix grammar, adjust tone, or rephrase content
+
+
Welcome to the Smart Rich Text Editor — try selecting a sentence to see AI suggestions.
+
+
+```
+
+## Using Configuration Files
+
+### appsettings.json
+
+Store Azure credentials in configuration:
+
+```json
+{
+ "AzureOpenAI": {
+ "Key": "your-azure-key",
+ "Endpoint": "https://your-resource-name.openai.azure.com/",
+ "DeploymentName": "your-deployment-name"
+ }
+}
+```
+
+### Reading from Configuration
+
+```csharp
+string azureOpenAIKey = builder.Configuration["AzureOpenAI:Key"];
+string azureOpenAIEndpoint = builder.Configuration["AzureOpenAI:Endpoint"];
+string azureOpenAIDeployment = builder.Configuration["AzureOpenAI:DeploymentName"];
+
+AzureOpenAIClient azureOpenAIClient = new AzureOpenAIClient(
+ new Uri(azureOpenAIEndpoint),
+ new ApiKeyCredential(azureOpenAIKey)
+);
+
+IChatClient azureOpenAIChatClient = azureOpenAIClient
+ .GetChatClient(azureOpenAIDeployment)
+ .AsIChatClient();
+
+builder.Services.AddChatClient(azureOpenAIChatClient);
+```
+
+## Using User Secrets
+
+For development environment:
+
+```bash
+dotnet user-secrets init
+dotnet user-secrets set "AzureOpenAI:Key" "your-azure-key"
+dotnet user-secrets set "AzureOpenAI:Endpoint" "https://your-resource-name.openai.azure.com/"
+dotnet user-secrets set "AzureOpenAI:DeploymentName" "your-deployment-name"
+```
+
+## Advanced Configuration
+
+### Model Parameters
+
+Customize Azure OpenAI behavior:
+
+```csharp
+builder.Services.AddSingleton<SmartRichTextEditorAIService>()
+    .ConfigureAzureOpenAI(options =>
+ {
+ options.MaxTokens = 500;
+ options.Temperature = 0.7f;
+ options.TopP = 0.9f;
+ options.FrequencyPenalty = 0.5f;
+ options.PresencePenalty = 0.5f;
+ });
+```
+
+### Multiple Deployments
+
+For production with multiple model deployments:
+
+```csharp
+// Create clients for different deployments
+var gpt35Client = azureOpenAIClient
+ .GetChatClient("gpt-35-turbo-deployment")
+ .AsIChatClient();
+
+var gpt4Client = azureOpenAIClient
+ .GetChatClient("gpt-4-deployment")
+ .AsIChatClient();
+
+// Register based on use case
+builder.Services.AddKeyedSingleton("gpt-35-turbo", gpt35Client);
+builder.Services.AddKeyedSingleton("gpt-4", gpt4Client);
+```
+
+## Supported Models
+
+Azure OpenAI supports various models:
+
+| Model | Deployment Name | Use Case |
+|-------|-----------------|----------|
+| GPT-4 | `gpt-4` | Complex reasoning, high quality |
+| GPT-4 Turbo | `gpt-4-turbo` | Latest capabilities, balanced cost |
+| GPT-3.5 Turbo | `gpt-35-turbo` | Fast responses, cost-effective |
+
+## Security Features
+
+### Benefits of Azure OpenAI
+
+1. **Enterprise Security**
+ - Virtual Network support
+ - Private endpoints
+ - Azure security standards
+
+2. **Compliance**
+ - HIPAA compliance
+ - SOC 2 compliance
+ - Data residency options
+
+3. **Access Control**
+ - Azure AD integration
+ - Role-based access control
+ - Managed identities
+
+4. **Monitoring**
+ - Azure Monitor integration
+ - Detailed logging
+ - Usage analytics
+
+### Managed Identity (Recommended)
+
+For enhanced security using Managed Identity:
+
+```csharp
+// Enable Managed Identity in Azure
+// In Program.cs
+using Azure.Identity;
+
+var credential = new DefaultAzureCredential();
+string azureOpenAIEndpoint = "https://your-resource-name.openai.azure.com/";
+string azureOpenAIDeployment = "your-deployment-name";
+
+AzureOpenAIClient azureOpenAIClient = new AzureOpenAIClient(
+ new Uri(azureOpenAIEndpoint),
+ credential
+);
+
+IChatClient azureOpenAIChatClient = azureOpenAIClient
+ .GetChatClient(azureOpenAIDeployment)
+ .AsIChatClient();
+
+builder.Services.AddChatClient(azureOpenAIChatClient);
+```
+
+## Troubleshooting
+
+### Common Issues
+
+**Error: ResourceNotFound (404)**
+- Verify endpoint URL is correct
+- Check resource name matches your Azure resource
+- Ensure resource exists in specified region
+
+**Error: InvalidAuthenticationTokenTenant (401)**
+- Verify API key is correct
+- Check key hasn't expired
+- Ensure using the correct region's key
+
+**Error: Model not found (404)**
+- Verify deployment name matches your Azure deployment
+- Check deployment is active and ready
+- Ensure model is properly deployed
+
+**Timeout Issues**
+- Check Azure OpenAI resource capacity
+- Verify network connectivity
+- Consider timeout configuration
+
+## Monitoring and Analytics
+
+### Azure Monitor Integration
+
+Monitor your Smart Rich Text Editor usage:
+
+1. Go to your Azure OpenAI resource
+2. Click **Monitoring** > **Metrics**
+3. View metrics:
+ - Requests per minute
+ - Token usage
+ - Response latency
+ - Error rates
+
+### Application Insights
+
+Add Application Insights for detailed tracing:
+
+```csharp
+builder.Services.AddApplicationInsightsTelemetry();
+```
+
+## Cost Optimization
+
+### Tips for Azure OpenAI
+
+1. **Right-size deployments**: Use appropriate TPM (tokens per minute)
+2. **Monitor usage**: Check metrics regularly
+3. **Use appropriate models**: GPT-3.5-turbo for most cases
+4. **Implement caching**: Reduce repeated requests
+5. **Batch operations**: Process multiple requests efficiently
+
+## See also
+
+* [Getting Started with Smart Rich Text Editor](getting-started.md)
+* [OpenAI Configuration](openai-service.md)
+* [AI Features and Customization](ai-features.md)
+* [Azure OpenAI Documentation](https://learn.microsoft.com/en-us/azure/ai-services/openai/)
\ No newline at end of file
diff --git a/blazor/smart-rich-text-editor/custom-inference-backend.md b/blazor/smart-rich-text-editor/custom-inference-backend.md
new file mode 100644
index 0000000000..4a053f657f
--- /dev/null
+++ b/blazor/smart-rich-text-editor/custom-inference-backend.md
@@ -0,0 +1,544 @@
+---
+layout: post
+title: Custom Inference Backend for Syncfusion Smart Rich Text Editor
+description: Learn how to configure custom AI inference back-ends for the Syncfusion Blazor Smart Rich Text Editor component.
+platform: Blazor
+control: Smart Rich Text Editor
+documentation: ug
+---
+
+# Custom Inference Backend
+
+The Syncfusion® Blazor Smart Rich Text Editor supports custom inference back-ends, allowing you to integrate with proprietary AI services or internal AI infrastructure.
+
+## Overview
+
+Custom back-ends enable:
+
+- Integration with internal AI services
+- Compliance with corporate data policies
+- Custom model fine-tuning
+- Specialized domain models
+- Proprietary inference engines
+
+## Prerequisites
+
+* Existing AI/ML inference service
+* API endpoint for inference
+* Authentication credentials (if required)
+* Understanding of your backend's API format
+
+## Creating a Custom Backend
+
+### Step 1: Implement IChatClient Interface
+
+Create a custom class implementing `IChatClient`:
+
+```csharp
+using Microsoft.Extensions.AI;
+using System.Collections.Generic;
+using System.Threading.Tasks;
+
+namespace YourNamespace.AI
+{
+ public class CustomAIBackend : IChatClient
+ {
+ private readonly string _endpoint;
+ private readonly string _apiKey;
+ private readonly HttpClient _httpClient;
+
+ public CustomAIBackend(string endpoint, string apiKey)
+ {
+ _endpoint = endpoint;
+ _apiKey = apiKey;
+ _httpClient = new HttpClient();
+ }
+
+ public async ValueTask<ChatCompletion> CompleteAsync(
+ IList<ChatMessage> chatMessages,
+ ChatOptions? options = null,
+ CancellationToken cancellationToken = default)
+ {
+ // Format request for your custom backend
+ var requestBody = new
+ {
+ messages = ConvertChatMessages(chatMessages),
+ max_tokens = options?.MaxOutputTokens ?? 500,
+ temperature = options?.Temperature ?? 0.7f
+ };
+
+ // Call your custom endpoint
+ var request = new HttpRequestMessage(HttpMethod.Post, _endpoint)
+ {
+ Content = new StringContent(
+ System.Text.Json.JsonSerializer.Serialize(requestBody),
+ System.Text.Encoding.UTF8,
+ "application/json"
+ )
+ };
+
+ // Add authentication if needed
+ if (!string.IsNullOrEmpty(_apiKey))
+ {
+ request.Headers.Add("Authorization", $"Bearer {_apiKey}");
+ }
+
+ var response = await _httpClient.SendAsync(request, cancellationToken);
+ response.EnsureSuccessStatusCode();
+
+ var responseContent = await response.Content.ReadAsStringAsync();
+ var result = System.Text.Json.JsonSerializer.Deserialize<CustomBackendResponse>(responseContent);
+
+ return new ChatCompletion(new ChatMessage(
+ ChatRole.Assistant,
+ result?.choices?[0]?.message?.content ?? "No response"
+ ));
+ }
+
+ private object ConvertChatMessages(IList<ChatMessage> messages)
+ {
+ return messages.Select(m => new
+ {
+ role = m.Role.ToString().ToLower(),
+ content = m.Content?[0]?.Text ?? string.Empty
+ }).ToList();
+ }
+
+ public void Dispose()
+ {
+ _httpClient?.Dispose();
+ }
+ }
+
+ public class CustomBackendResponse
+ {
+ public Choice[]? choices { get; set; }
+ }
+
+ public class Choice
+ {
+ public Message? message { get; set; }
+ }
+
+ public class Message
+ {
+ public string? content { get; set; }
+ }
+}
+```
+
+### Step 2: Register in Program.cs
+
+Register your custom backend:
+
+```csharp
+using Syncfusion.Blazor;
+using Syncfusion.Blazor.AI;
+using YourNamespace.AI;
+
+var builder = WebApplication.CreateBuilder(args);
+
+// Add services
+builder.Services.AddRazorPages();
+builder.Services.AddServerSideBlazor();
+builder.Services.AddSyncfusionBlazor();
+
+// Register custom backend
+string customEndpoint = "https://your-ai-service.com/api/inference";
+string customApiKey = "your-api-key";
+
+var customBackend = new CustomAIBackend(customEndpoint, customApiKey);
+builder.Services.AddChatClient(customBackend);
+
+// Register Smart Rich Text Editor Components
+builder.Services.AddSingleton();
+
+var app = builder.Build();
+
+// ... rest of setup
+```
+
+### Step 3: Use in Blazor Component
+
+```razor
+@page "/editor"
+@using Syncfusion.Blazor.SmartRichTextEditor
+
+
+
+
+
Welcome to the AI-assisted editor
+
Try selecting a sentence and use AI Commands to improve tone or clarity.
+
+ - Select text and request suggestions
+ - Press Alt+Enter for AI Query
+
+
+
+```
+
+## Advanced Implementation
+
+### With Configuration Support
+
+```csharp
+public class CustomAIBackend : IChatClient
+{
+ private readonly IConfiguration _configuration;
+ private readonly string _endpoint;
+ private readonly string _apiKey;
+ private readonly HttpClient _httpClient;
+
+ public CustomAIBackend(IConfiguration configuration)
+ {
+ _configuration = configuration;
+ _endpoint = _configuration["CustomAI:Endpoint"]
+ ?? throw new InvalidOperationException("CustomAI:Endpoint not configured");
+ _apiKey = _configuration["CustomAI:ApiKey"]
+ ?? throw new InvalidOperationException("CustomAI:ApiKey not configured");
+ _httpClient = new HttpClient();
+ }
+
+ public async ValueTask<ChatCompletion> CompleteAsync(
+ IList<ChatMessage> chatMessages,
+ ChatOptions? options = null,
+ CancellationToken cancellationToken = default)
+ {
+ // Implementation...
+ throw new NotImplementedException();
+ }
+
+ public void Dispose()
+ {
+ _httpClient?.Dispose();
+ }
+}
+```
+
+### Configuration in appsettings.json
+
+```json
+{
+ "CustomAI": {
+ "Endpoint": "https://your-ai-service.com/api/inference",
+ "ApiKey": "your-api-key",
+ "MaxRetries": 3,
+ "TimeoutSeconds": 30
+ }
+}
+```
+
+### Registration with Configuration
+
+```csharp
+var customBackend = new CustomAIBackend(builder.Configuration);
+builder.Services.AddSingleton(customBackend);
+```
+
+## Streaming Responses
+
+For better UX with custom back-ends:
+
+```csharp
+public class StreamingCustomAIBackend : IChatClient
+{
+ private readonly string _endpoint;
+ private readonly HttpClient _httpClient;
+
+ public StreamingCustomAIBackend(string endpoint)
+ {
+ _endpoint = endpoint;
+ _httpClient = new HttpClient();
+ }
+
+ public async ValueTask<ChatCompletion> CompleteAsync(
+ IList<ChatMessage> chatMessages,
+ ChatOptions? options = null,
+ CancellationToken cancellationToken = default)
+ {
+ var request = new HttpRequestMessage(HttpMethod.Post, _endpoint);
+ request.Content = new StringContent(
+ System.Text.Json.JsonSerializer.Serialize(new { messages = chatMessages }),
+ System.Text.Encoding.UTF8,
+ "application/json"
+ );
+
+ // Enable streaming
+ request.Headers.Add("Accept", "text/event-stream");
+
+ var response = await _httpClient.SendAsync(
+ request,
+ HttpCompletionOption.ResponseHeadersRead,
+ cancellationToken
+ );
+
+ using var stream = await response.Content.ReadAsStreamAsync(cancellationToken);
+ using var reader = new System.IO.StreamReader(stream);
+
+ var fullContent = new System.Text.StringBuilder();
+
+ while (!reader.EndOfStream)
+ {
+ var line = await reader.ReadLineAsync();
+ if (line?.StartsWith("data: ") == true)
+ {
+ var data = line.Substring("data: ".Length);
+ var chunk = System.Text.Json.JsonSerializer.Deserialize<StreamChunk>(data);
+ if (chunk?.content != null)
+ {
+ fullContent.Append(chunk.content);
+ }
+ }
+ }
+
+ return new ChatCompletion(new ChatMessage(
+ ChatRole.Assistant,
+ fullContent.ToString()
+ ));
+ }
+
+ public void Dispose()
+ {
+ _httpClient?.Dispose();
+ }
+
+ private class StreamChunk
+ {
+ public string? content { get; set; }
+ }
+}
+```
+
+## Error Handling
+
+Implement robust error handling:
+
+```csharp
+public class RobustCustomAIBackend : IChatClient
+{
+ private readonly string _endpoint;
+ private readonly HttpClient _httpClient;
+ private readonly ILogger _logger;
+ private readonly int _maxRetries = 3;
+
+ public RobustCustomAIBackend(string endpoint, ILogger logger)
+ {
+ _endpoint = endpoint;
+ _logger = logger;
+ _httpClient = new HttpClient();
+ }
+
+ public async ValueTask<ChatCompletion> CompleteAsync(
+ IList<ChatMessage> chatMessages,
+ ChatOptions? options = null,
+ CancellationToken cancellationToken = default)
+ {
+ for (int attempt = 1; attempt <= _maxRetries; attempt++)
+ {
+ try
+ {
+ return await AttemptCompletion(chatMessages, options, cancellationToken);
+ }
+ catch (HttpRequestException ex) when (attempt < _maxRetries)
+ {
+ _logger.LogWarning($"Attempt {attempt} failed: {ex.Message}. Retrying...");
+ await Task.Delay(TimeSpan.FromSeconds(attempt * 2), cancellationToken);
+ }
+ catch (Exception ex)
+ {
+ _logger.LogError($"Unrecoverable error: {ex.Message}");
+ throw;
+ }
+ }
+
+ throw new InvalidOperationException("Failed to get response after max retries");
+ }
+
+ private async ValueTask<ChatCompletion> AttemptCompletion(
+ IList<ChatMessage> chatMessages,
+ ChatOptions? options,
+ CancellationToken cancellationToken)
+ {
+ using var cts = CancellationTokenSource.CreateLinkedTokenSource(cancellationToken);
+ cts.CancelAfter(TimeSpan.FromSeconds(30)); // 30 second timeout
+
+ var request = new HttpRequestMessage(HttpMethod.Post, _endpoint);
+ // ... request setup ...
+
+ var response = await _httpClient.SendAsync(request, cts.Token);
+ response.EnsureSuccessStatusCode();
+
+ var content = await response.Content.ReadAsStringAsync();
+ return ParseResponse(content);
+ }
+
+ private ChatCompletion ParseResponse(string content)
+ {
+ try
+ {
+ var result = System.Text.Json.JsonSerializer.Deserialize<CustomResponse>(content);
+ return new ChatCompletion(new ChatMessage(
+ ChatRole.Assistant,
+ result?.output ?? "No response"
+ ));
+ }
+ catch (System.Text.Json.JsonException ex)
+ {
+ _logger.LogError($"Failed to parse response: {ex.Message}");
+ throw;
+ }
+ }
+
+ public void Dispose()
+ {
+ _httpClient?.Dispose();
+ }
+
+ private class CustomResponse
+ {
+ public string? output { get; set; }
+ }
+}
+```
+
+## Example: Internal AI Service Integration
+
+### Scenario: Company-Internal AI API
+
+```csharp
+public class CorporateAIBackend : IChatClient
+{
+ private readonly string _internalEndpoint;
+ private readonly string _bearerToken;
+ private readonly HttpClient _httpClient;
+ private readonly ILogger _logger;
+
+ public CorporateAIBackend(string endpoint, string token, ILogger logger)
+ {
+ _internalEndpoint = endpoint;
+ _bearerToken = token;
+ _logger = logger;
+ _httpClient = new HttpClient();
+ _httpClient.Timeout = TimeSpan.FromSeconds(60);
+ }
+
+ public async ValueTask<ChatCompletion> CompleteAsync(
+ IList<ChatMessage> chatMessages,
+ ChatOptions? options = null,
+ CancellationToken cancellationToken = default)
+ {
+ try
+ {
+ var requestBody = new
+ {
+ messages = chatMessages.Select(m => new
+ {
+ role = m.Role == ChatRole.User ? "user" : "assistant",
+ content = m.Content?[0]?.Text ?? string.Empty
+ }),
+ parameters = new
+ {
+ temperature = options?.Temperature ?? 0.7f,
+ max_tokens = options?.MaxOutputTokens ?? 1000
+ }
+ };
+
+ var request = new HttpRequestMessage(HttpMethod.Post, _internalEndpoint);
+ request.Headers.Authorization = new System.Net.Http.Headers.AuthenticationHeaderValue("Bearer", _bearerToken);
+ request.Content = new StringContent(
+ System.Text.Json.JsonSerializer.Serialize(requestBody),
+ System.Text.Encoding.UTF8,
+ "application/json"
+ );
+
+ var response = await _httpClient.SendAsync(request, cancellationToken);
+
+ if (!response.IsSuccessStatusCode)
+ {
+ _logger.LogError($"API Error: {response.StatusCode}");
+ throw new InvalidOperationException($"AI service returned: {response.StatusCode}");
+ }
+
+ var content = await response.Content.ReadAsStringAsync(cancellationToken);
+ var result = System.Text.Json.JsonSerializer.Deserialize<CorporateResponse>(content);
+
+ return new ChatCompletion(new ChatMessage(
+ ChatRole.Assistant,
+ result?.result?.text ?? "Unable to generate response"
+ ));
+ }
+ catch (Exception ex)
+ {
+ _logger.LogError($"Error calling corporate AI: {ex.Message}");
+ throw;
+ }
+ }
+
+ public void Dispose()
+ {
+ _httpClient?.Dispose();
+ }
+
+ private class CorporateResponse
+ {
+ public ResultData? result { get; set; }
+ }
+
+ private class ResultData
+ {
+ public string? text { get; set; }
+ }
+}
+```
+
+## Testing Your Custom Backend
+
+### Unit Test Example
+
+```csharp
+[TestClass]
+public class CustomAIBackendTests
+{
+ [TestMethod]
+ public async Task CompleteAsync_WithValidMessages_ReturnsResponse()
+ {
+ // Arrange
+ var backend = new CustomAIBackend("https://test-endpoint.com", "test-key");
+ var messages = new List<ChatMessage>
+ {
+ new ChatMessage(ChatRole.User, "Hello")
+ };
+
+ // Act
+ var result = await backend.CompleteAsync(messages);
+
+ // Assert
+ Assert.IsNotNull(result);
+ Assert.IsNotNull(result.Content);
+ }
+
+ [TestMethod]
+ public async Task CompleteAsync_WithInvalidEndpoint_ThrowsException()
+ {
+ // Arrange
+ var backend = new CustomAIBackend("https://invalid-endpoint.com", "test-key");
+ var messages = new List<ChatMessage>
+ {
+ new ChatMessage(ChatRole.User, "Test")
+ };
+
+ // Act & Assert
+ await Assert.ThrowsExceptionAsync<HttpRequestException>(
+ () => backend.CompleteAsync(messages).AsTask()
+ );
+ }
+}
+```
+
+## See also
+
+* [Getting Started with Smart Rich Text Editor](getting-started.md)
+* [OpenAI Configuration](openai-service.md)
+* [Azure OpenAI Configuration](azure-openai-service.md)
+* [Ollama Configuration](ollama.md)
+* [AI Features and Customization](ai-features.md)
\ No newline at end of file
diff --git a/blazor/smart-rich-text-editor/getting-started.md b/blazor/smart-rich-text-editor/getting-started.md
index 7a3ba8e642..25f0fca679 100644
--- a/blazor/smart-rich-text-editor/getting-started.md
+++ b/blazor/smart-rich-text-editor/getting-started.md
@@ -108,8 +108,6 @@ Open **~/_Imports.razor** file and import the `Syncfusion.Blazor` and `Syncfusio
Now, register the Syncfusion® Blazor Service in the **~/Program.cs** file of your Blazor Server App.
-Now, register the Syncfusion® Blazor Service in the **~/Program.cs** file of your Blazor Server App.
-
{% tabs %}
{% highlight C# tabtitle="Blazor Server App" hl_lines="4 11" %}
@@ -312,7 +310,14 @@ N> Check out the [Blazor Themes](https://blazor.syncfusion.com/documentation/app
## Add Syncfusion® Blazor Smart Rich Text Editor component
-Add the Syncfusion® **Blazor Smart Rich Text Editor** component in the **~/Pages/Index.razor** file.
+Add the Syncfusion® **Blazor Smart Rich Text Editor** component in the **~/Pages/Index.razor** file. If the Interactivity Location is set to `Per page/component`, define a render mode at the top of the `Index.razor` page.
+
+N> If an Interactivity Location is set to `Global` and the **Render Mode** is set to `Server`, the render mode is configured in the `App.razor` file by default.
+
+```
+@* desired render mode define here *@
+@rendermode InteractiveServer
+```
{% tabs %}
{% highlight razor tabtitle="~/Index.razor" %}
diff --git a/blazor/smart-rich-text-editor/ollama.md b/blazor/smart-rich-text-editor/ollama.md
new file mode 100644
index 0000000000..4950d9c84b
--- /dev/null
+++ b/blazor/smart-rich-text-editor/ollama.md
@@ -0,0 +1,386 @@
+---
+layout: post
+title: Ollama Configuration for Blazor Smart Rich Text Editor | Syncfusion
+description: Step-by-step guide to configure Ollama for the Syncfusion Blazor Smart Rich Text Editor, covering installation, client setup, and usage examples for local AI.
+platform: Blazor
+control: Smart Rich Text Editor
+documentation: ug
+---
+
+# Ollama Configuration
+
+The Syncfusion® Blazor Smart Rich Text Editor supports Ollama for running open-source models locally. This is ideal for privacy-conscious applications and development environments.
+
+## What is Ollama?
+
+Ollama is a lightweight, open-source framework for running large language models locally on your machine. It provides:
+
+- **Privacy**: All processing happens locally
+- **No API costs**: No cloud service fees
+- **Offline capability**: Works without internet connection
+- **Model variety**: Access to many open-source models
+- **Easy setup**: Simple installation and management
+
+## Prerequisites
+
+* Windows 10/11, macOS, or Linux
+* At least 8GB RAM (16GB+ recommended)
+* GPU support optional but recommended for performance
+* Docker (optional, for containerized deployment)
+
+## Installation
+
+### Step 1: Download and Install Ollama
+
+Visit [Ollama's Official Website](https://ollama.com) and download the installer for your operating system.
+
+#### Windows
+
+1. Download the Windows installer
+2. Run the installer and follow the setup wizard
+3. Accept default installation path or customize
+4. Complete installation
+
+#### macOS
+
+1. Download the macOS installer
+2. Open the `.dmg` file
+3. Drag Ollama to Applications folder
+4. Launch from Applications
+
+#### Linux
+
+Use the installation script:
+
+```bash
+curl -fsSL https://ollama.com/install.sh | sh
+```
+
+### Step 2: Verify Installation
+
+Open a terminal/command prompt and verify:
+
+```bash
+ollama --version
+```
+
+### Step 3: Start Ollama Service
+
+#### Windows
+
+Ollama starts automatically. Access at `http://localhost:11434`
+
+#### macOS/Linux
+
+Start the Ollama service:
+
+```bash
+ollama serve
+```
+
+## Installing Models
+
+### Available Models
+
+Browse available models at [Ollama Library](https://ollama.com/library)
+
+Popular models for text generation:
+
+- **Mistral**: `mistral` - Fast, efficient
+- **Llama 2**: `llama2` - General purpose
+- **Neural Chat**: `neural-chat` - Conversational
+- **Orca Mini**: `orca-mini` - Lightweight
+- **Dolphin**: `dolphin-mixtral` - Advanced
+
+### Install a Model
+
+```bash
+ollama pull mistral
+```
+
+This downloads and prepares the model (may take several minutes).
+
+### List Installed Models
+
+```bash
+ollama list
+```
+
+### Run a Model Directly
+
+Test the model:
+
+```bash
+ollama run mistral
+```
+
+Type your prompt and press Enter. Type `/bye` to exit.
+
+## Configuration in Blazor
+
+### Installation
+
+Install required NuGet packages:
+
+{% tabs %}
+{% highlight c# tabtitle="Package Manager" %}
+
+Install-Package Syncfusion.Blazor.SmartRichTextEditor
+Install-Package Syncfusion.Blazor.Themes
+Install-Package Microsoft.Extensions.AI
+Install-Package OllamaSharp
+
+{% endhighlight %}
+{% endtabs %}
+
+### Setup in Program.cs
+
+```csharp
+using Syncfusion.Blazor;
+using Syncfusion.Blazor.AI;
+using Microsoft.Extensions.AI;
+using OllamaSharp;
+
+var builder = WebApplication.CreateBuilder(args);
+
+// Add services
+builder.Services.AddRazorPages();
+builder.Services.AddServerSideBlazor();
+builder.Services.AddSyncfusionBlazor();
+
+// Configure Ollama
+string ollamaEndpoint = "http://localhost:11434";
+string modelName = "mistral"; // or any other installed model
+
+// Create Ollama client
+IOllamaApiClient ollamaClient = new OllamaApiClient(ollamaEndpoint, modelName);
+
+// Convert to IChatClient
+IChatClient chatClient = ollamaClient;
+
+builder.Services.AddChatClient(chatClient);
+
+// Register Smart Rich Text Editor Components with Ollama
+builder.Services.AddSingleton();
+
+var app = builder.Build();
+
+// ... rest of setup
+```
+
+### Add to _Imports.razor
+
+```razor
+@using Syncfusion.Blazor
+@using Syncfusion.Blazor.SmartRichTextEditor
+```
+
+### Use Ollama AI with Smart Rich Text Editor Component
+
+```razor
+@page "/editor"
+@using Syncfusion.Blazor.SmartRichTextEditor
+
+
+
+
+
Ollama + Smart Rich Text Editor
+
Use AI Commands to improve selected text or change tone.
+
+ - Select text and request suggestions
+ - Press Alt+Enter for AI Query
+
+
+
+```
+
+## Configuration Options
+
+### Custom Endpoint
+
+If Ollama runs on different host/port:
+
+```csharp
+string ollamaEndpoint = "http://192.168.1.100:11434";
+IOllamaApiClient ollamaClient = new OllamaApiClient(ollamaEndpoint, "mistral");
+```
+
+### Model Parameters
+
+Configure model behavior:
+
+```csharp
+// Create client with parameters
+IOllamaApiClient ollamaClient = new OllamaApiClient("http://localhost:11434", "mistral");
+
+// Set parameters (varies by OllamaSharp version)
+var request = new GenerateRequest
+{
+ Model = "mistral",
+ Prompt = "Your prompt",
+ Temperature = 0.7f,
+ TopK = 40,
+ TopP = 0.9f,
+};
+```
+
+### Dynamic Model Selection
+
+```csharp
+// Read model name from configuration
+string modelName = builder.Configuration["Ollama:ModelName"] ?? "mistral";
+string endpoint = builder.Configuration["Ollama:Endpoint"] ?? "http://localhost:11434";
+
+IOllamaApiClient ollamaClient = new OllamaApiClient(endpoint, modelName);
+```
+
+## Docker Deployment
+
+### Docker Compose Setup
+
+```yaml
+version: '3.8'
+services:
+ ollama:
+ image: ollama/ollama:latest
+ ports:
+ - "11434:11434"
+ volumes:
+ - ollama_data:/root/.ollama
+ environment:
+ - OLLAMA_HOST=0.0.0.0:11434
+
+volumes:
+ ollama_data:
+```
+
+### Start with Docker
+
+```bash
+docker-compose up -d
+```
+
+### Pull Models in Container
+
+```bash
+docker exec -it ollama ollama pull mistral
+```
+
+## Advanced Configuration
+
+### Multiple Models
+
+Run different models for different purposes:
+
+```csharp
+// For general editing
+var generalClient = new OllamaApiClient("http://localhost:11434", "mistral");
+
+// For code-related tasks
+var codeClient = new OllamaApiClient("http://localhost:11434", "neural-chat");
+
+// Conditionally use based on context
+IChatClient chatClient = useForCode ? codeClient : generalClient;
+```
+
+## Performance Optimization
+
+### Tips for Better Performance
+
+1. **Use GPU Acceleration**
+ - NVIDIA GPUs: Install CUDA toolkit
+ - AMD GPUs: Install ROCm
+ - Intel GPUs: Install Intel oneAPI
+
+2. **Increase Memory Allocation**
+ ```bash
+ # Linux/macOS
+ export OLLAMA_NUM_PARALLEL=1
+ ollama serve
+ ```
+
+3. **Choose Efficient Models**
+ - Mistral is fastest
+ - `Orca-mini` for lightweight deployment
+ - `Dolphin-mixtral` for best quality
+
+4. **Implement Caching**
+ - Cache common prompts
+ - Reuse model responses
+
+5. **Optimize Request Size**
+ - Keep prompts concise
+ - Limit response length with max tokens
+
+## Troubleshooting
+
+### Connection Issues
+
+**Error: Unable to connect to Ollama**
+
+1. Verify Ollama is running:
+ ```bash
+ curl http://localhost:11434/api/tags
+ ```
+
+2. Check endpoint configuration in Program.cs
+
+3. If running on different machine, update endpoint URL
+
+### Model Issues
+
+**Model not found**
+```bash
+# List available models
+ollama list
+
+# Pull the model
+ollama pull mistral
+```
+
+**Out of memory errors**
+- Use smaller models (`orca-mini`, `mistral`)
+- Close other applications
+- Restart Ollama service
+
+### Performance Issues
+
+**Slow response times**
+- Check CPU/GPU usage
+- Enable GPU acceleration if available
+- Use faster models
+- Increase system RAM
+
+## Model Recommendations
+
+| Use Case | Model | Pros | Cons |
+|----------|-------|------|------|
+| General editing | Mistral | Fast, good quality | Less context |
+| Content creation | Llama 2 | Balanced | Larger model |
+| Code assistance | Neural-chat | Good reasoning | Slower |
+| Lightweight | Orca-mini | Very fast | Limited capability |
+| Best quality | Dolphin-mixtral | Excellent | Resource heavy |
+
+## Security Considerations
+
+### Local Processing Benefits
+
+- All data stays on your machine
+- No external API calls
+- Compliance with data regulations
+- Full control over data retention
+
+### Best Practices
+
+1. Restrict network access to Ollama
+2. Use firewall rules if networked
+3. Keep Ollama updated
+4. Monitor resource usage
+
+## See also
+
+* [Getting Started with Smart Rich Text Editor](getting-started.md)
+* [OpenAI Configuration](openai-service.md)
+* [Azure OpenAI Configuration](azure-openai-service.md)
+* [AI Features and Customization](ai-features.md)
+* [Ollama Official Documentation](https://github.com/ollama/ollama/tree/main/docs)
\ No newline at end of file
diff --git a/blazor/smart-rich-text-editor/openai-service.md b/blazor/smart-rich-text-editor/openai-service.md
new file mode 100644
index 0000000000..aeb776a22c
--- /dev/null
+++ b/blazor/smart-rich-text-editor/openai-service.md
@@ -0,0 +1,236 @@
+---
+layout: post
+title: OpenAI Configuration for Blazor Smart Rich Text Editor | Syncfusion
+description: "Configure OpenAI for Syncfusion Blazor Smart Rich Text Editor: API keys, client setup, DI registration, usage examples, and best practices."
+platform: Blazor
+control: Smart Rich Text Editor
+documentation: ug
+---
+
+# OpenAI Configuration
+
+The Syncfusion® Blazor Smart Rich Text Editor supports OpenAI's GPT models for intelligent content assistance.
+
+## Prerequisites
+
+* Active OpenAI account with API access
+* Valid API key from OpenAI
+* Internet connectivity to access OpenAI services
+
+## Get Your OpenAI API Key
+
+1. Visit [OpenAI Platform](https://platform.openai.com/)
+2. Sign in with your OpenAI account (create one if needed)
+3. Navigate to **API Keys** section
+4. Click **Create new secret key**
+5. Copy and securely store your API key
+
+## Supported Models
+
+OpenAI offers various models for different use cases:
+
+- `gpt-4-turbo`: Most capable, latest model
+- `gpt-4`: Advanced reasoning and complex tasks
+- `gpt-3.5-turbo`: Fast and cost-effective
+- `gpt-3.5-turbo-16k`: Extended context window
+
+> **Note**: Model availability and names may change. Refer to [OpenAI documentation](https://platform.openai.com/docs/models) for current options.
+
+## Installation
+
+Install the required NuGet packages:
+
+{% tabs %}
+{% highlight c# tabtitle="Package Manager" %}
+
+Install-Package Syncfusion.Blazor.SmartRichTextEditor
+Install-Package Syncfusion.Blazor.Themes
+Install-Package Microsoft.Extensions.AI
+Install-Package Microsoft.Extensions.AI.OpenAI
+
+{% endhighlight %}
+{% endtabs %}
+
+## Configuration
+
+### Step 1: Setup in Program.cs
+
+Add the following configuration to your **Program.cs** file:
+
+```csharp
+using Syncfusion.Blazor;
+using Syncfusion.Blazor.AI;
+using Microsoft.Extensions.AI;
+using OpenAI;
+
+var builder = WebApplication.CreateBuilder(args);
+
+// Add services to the container
+builder.Services.AddRazorPages();
+builder.Services.AddServerSideBlazor();
+
+// Register Syncfusion Blazor Service
+builder.Services.AddSyncfusionBlazor();
+
+// Configure OpenAI
+string openAIApiKey = "your-api-key-here";
+string openAIModel = "gpt-3.5-turbo"; // or gpt-4, gpt-4-turbo, etc.
+
+OpenAIClient openAIClient = new OpenAIClient(openAIApiKey);
+IChatClient openAIChatClient = openAIClient.GetChatClient(openAIModel).AsIChatClient();
+builder.Services.AddChatClient(openAIChatClient);
+
+// Register Smart Rich Text Editor Components with OpenAI
+builder.Services.AddSingleton<IChatInferenceService, SyncfusionAIService>(); // NOTE(review): generic type arguments were stripped during markup conversion — confirm exact types against the Syncfusion Smart RTE getting-started guide
+
+
+var app = builder.Build();
+
+// ... rest of your application setup
+```
+
+### Step 2: Add Imports to _Imports.razor
+
+Update **~/_Imports.razor** to include necessary namespaces:
+
+```razor
+@using Syncfusion.Blazor
+@using Syncfusion.Blazor.SmartRichTextEditor
+```
+
+### Step 3: Use OpenAI with Smart Rich Text Editor Component
+
+Add the Smart Rich Text Editor to your Blazor page:
+
+```razor
+@page "/editor"
+@using Syncfusion.Blazor.SmartRichTextEditor
+
+<SfSmartRichTextEditor>
+    <p>Tips:</p>
+    <ul>
+        <li>Select text and click "AI Commands" for quick improvements</li>
+        <li>Press Alt+Enter to open AI Query dialog</li>
+        <li>Use AI to fix grammar, adjust tone, or rephrase content</li>
+    </ul>
+</SfSmartRichTextEditor>
+```
+
+## Using Environment Variables
+
+For security, store your API key in environment variables:
+
+### Windows
+
+```powershell
+$env:OPENAI_API_KEY = "your-api-key"
+```
+
+### Linux/macOS
+
+```bash
+export OPENAI_API_KEY="your-api-key"
+```
+
+### Reading from Environment in Program.cs
+
+```csharp
+string openAIApiKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY")
+ ?? throw new InvalidOperationException("OpenAI API key not found in environment variables");
+```
+
+## Using User Secrets
+
+For development, use User Secrets:
+
+```bash
+dotnet user-secrets init
+dotnet user-secrets set "OpenAI:ApiKey" "your-api-key"
+```
+
+Access in Program.cs:
+
+```csharp
+var openAIApiKey = builder.Configuration["OpenAI:ApiKey"];
+```
+
+## Advanced Configuration
+
+### Model Parameters
+
+Customize OpenAI model behavior:
+
+```csharp
+builder.Services.AddSingleton<IChatInferenceService, SyncfusionAIService>() // NOTE(review): generic type arguments were stripped during markup conversion — confirm exact types
+    .ConfigureOpenAI(options =>
+ {
+ options.MaxTokens = 500; // Limit response length
+ options.Temperature = 0.7f; // Creativity level (0-2)
+ options.TopP = 0.9f; // Diversity control
+ options.FrequencyPenalty = 0.5f; // Repeat penalty
+ options.PresencePenalty = 0.5f; // New topic penalty
+ });
+```
+
+### Parameter Explanation
+
+| Parameter | Range | Default | Description |
+|-----------|-------|---------|-------------|
+| MaxTokens | 1-4096 | 500 | Maximum length of AI response |
+| Temperature | 0-2 | 0.7 | Higher = more creative, Lower = more focused |
+| TopP | 0-1 | 0.9 | Nucleus sampling - controls diversity |
+| FrequencyPenalty | -2 to 2 | 0 | Reduces likelihood of repetition |
+| PresencePenalty | -2 to 2 | 0 | Encourages new topics |
+
+## Troubleshooting
+
+### Common Issues
+
+**Authentication Error: 401 Unauthorized**
+- Verify API key is correct
+- Check API key is still valid and hasn't expired
+- Ensure API key has necessary permissions
+
+**Rate Limit Error: 429 Too Many Requests**
+- Reduce request frequency
+- Implement request throttling
+- Check usage limits on OpenAI account
+
+**Model Not Found Error**
+- Verify model name is correct (check [model list](https://platform.openai.com/docs/models))
+- Ensure model is available in your region/account
+
+**No Response from AI**
+- Check internet connectivity
+- Verify API key permissions
+- Review OpenAI service status
+
+## Cost Optimization
+
+### Tips for Reducing Costs
+
+1. **Use Efficient Models**: `gpt-3.5-turbo` is significantly cheaper than `gpt-4`
+2. **Limit Token Count**: Set reasonable `MaxTokens` values
+3. **Implement Caching**: Cache common requests
+4. **Monitor Usage**: Track API calls and costs
+5. **Batch Operations**: Process multiple requests efficiently
+
+## Security Best Practices
+
+1. **Never hardcode API keys** - Use environment variables or secure configuration
+2. **Use User Secrets** for development - Not for production
+3. **Implement Rate Limiting** - Prevent abuse of your application
+4. **Validate User Input** - Before sending to OpenAI
+5. **Review Content** - Always review AI-generated content before publishing
+6. **Set Spending Limits** - Configure usage limits on OpenAI account
+
+## See also
+
+* [Getting Started with Smart Rich Text Editor](getting-started.md)
+* [AI Features and Customization](ai-features.md)
+* [Azure OpenAI Configuration](azure-openai-service.md)
+* [Ollama Configuration](ollama.md)
+* [OpenAI Documentation](https://platform.openai.com/docs)
\ No newline at end of file