Commit: formatting
scald committed Nov 25, 2024
1 parent a878336 commit f3e34dc
Showing 9 changed files with 184 additions and 149 deletions.
14 changes: 7 additions & 7 deletions src/errors.rs
@@ -4,22 +4,22 @@ use thiserror::Error;
pub enum ProcessorError {
#[error("Failed to load image: {0}")]
ImageLoadError(#[from] image::error::ImageError),

#[error("AI Provider error: {0}")]
AIProviderError(String),

#[error("Failed to encode/decode base64: {0}")]
Base64Error(String),

#[error("Environment variable error: {0}")]
EnvError(#[from] std::env::VarError),

#[error("Network request failed: {0}")]
RequestError(#[from] reqwest::Error),

#[error("Invalid API response: {0}")]
ResponseParseError(String),

#[error("Thumbnail generation failed: {0}")]
ThumbnailError(String),
}
}
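
For context on how these variants get constructed: the `#[from]` attributes let callers convert `image`, `std::env::VarError`, and `reqwest` errors with the `?` operator. A minimal, hypothetical sketch (not part of this commit):

```rust
use eyeris::ProcessorError;

// Hypothetical helper: `?` converts the `image::error::ImageError` returned by
// `load_from_memory` into `ProcessorError::ImageLoadError` via `#[from]`.
fn decode_image(bytes: &[u8]) -> Result<image::DynamicImage, ProcessorError> {
    Ok(image::load_from_memory(bytes)?)
}
```
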
16 changes: 8 additions & 8 deletions src/lib.rs
@@ -1,16 +1,16 @@
//! eyeris is a high-performance image analysis service.
//!
//!
//! # Features
//!
//!
//! - Multiple AI provider support (OpenAI, Ollama)
//! - Image optimization and processing
//! - Customizable analysis formats
//!
//!
//! # Example
//!
//!
//! ```rust,no_run
//! use eyeris::{ImageProcessor, AIProvider};
//!
//!
//! #[tokio::main]
//! async fn main() {
//! let processor = ImageProcessor::new(
@@ -26,14 +26,14 @@
//! }
//! ```
pub mod errors;
pub mod processor;
pub mod prompts;
pub mod providers;
pub mod errors;
pub mod utils;

// Re-export commonly used types
pub use errors::ProcessorError;
pub use processor::ImageProcessor;
pub use prompts::{ImagePrompt, PromptFormat};
pub use errors::ProcessorError;
pub use providers::AIProvider;
pub use providers::AIProvider;
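
The crate-level doc example is cut off by the collapsed hunk above; based on the `ImageProcessor::new(provider, model, format)` signature visible in src/processor.rs, a complete call would look roughly like this. The input path is illustrative, and `None` is passed for the optional model and prompt format:

```rust
use eyeris::{AIProvider, ImageProcessor};

#[tokio::main]
async fn main() {
    // Provider, optional model name, optional prompt format.
    let processor = ImageProcessor::new(AIProvider::OpenAI, None, None);

    // Illustrative input path; `process` takes the raw image bytes.
    let image_bytes = std::fs::read("photo.jpg").expect("failed to read image");
    match processor.process(&image_bytes).await {
        Ok(analysis) => println!("{analysis}"),
        Err(e) => eprintln!("processing failed: {e}"),
    }
}
```
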
35 changes: 18 additions & 17 deletions src/main.rs
@@ -1,22 +1,18 @@
use anyhow::Result;
use axum::{
extract::{Multipart, Query, State},
http::StatusCode,
response::Json,
routing::post,
Router,
};
use bytes::Bytes;
use eyeris::{processor::ImageProcessor, prompts::PromptFormat, providers::AIProvider};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use tokio::sync::Semaphore;
use anyhow::Result;
use bytes::Bytes;
use std::sync::OnceLock;
use std::time::Instant;
use eyeris::{
providers::AIProvider,
processor::ImageProcessor,
prompts::PromptFormat,
};
use tokio::sync::Semaphore;

// Create a static processor pool
static PROCESSOR_POOL: OnceLock<Arc<ImageProcessor>> = OnceLock::new();
@@ -82,7 +78,7 @@ async fn main() -> Result<()> {
// Start server
let addr = "0.0.0.0:3000";
tracing::info!("Starting server on {}", addr);

let listener = tokio::net::TcpListener::bind(addr).await?;
axum::serve(listener, app).await?;

@@ -115,16 +111,21 @@ async fn process_image(
});

let permit_start = Instant::now();
let _permit = state.semaphore.acquire().await
let _permit = state
.semaphore
.acquire()
.await
.map_err(|_| StatusCode::SERVICE_UNAVAILABLE)?;
tracing::info!(
duration_ms = permit_start.elapsed().as_millis(),
"Rate limit permit acquired"
);

let multipart_start = Instant::now();
let image_data: Bytes = if let Some(field) = multipart.next_field().await
.map_err(|_| StatusCode::BAD_REQUEST)?
let image_data: Bytes = if let Some(field) = multipart
.next_field()
.await
.map_err(|_| StatusCode::BAD_REQUEST)?
{
if field.name().unwrap_or("") != "image" {
return Ok(Json(ProcessResponse {
@@ -163,24 +164,24 @@
Ok(analysis) => {
// Get current token stats
let token_stats = processor.token_stats.read();

Ok(Json(ProcessResponse {
success: true,
message: "Image processed successfully".to_string(),
data: Some(ProcessedData {
data: Some(ProcessedData {
analysis,
token_usage: TokenUsage {
prompt_tokens: token_stats.prompt_tokens,
completion_tokens: token_stats.completion_tokens,
total_tokens: token_stats.total_tokens,
}
},
}),
}))
},
}
Err(e) => Ok(Json(ProcessResponse {
success: false,
message: format!("Processing failed: {}", e),
data: None,
})),
}
}
}
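
The handler above leans on two pieces that are mostly collapsed in this view: a process-wide `ImageProcessor` stored in the `PROCESSOR_POOL` static, and a semaphore used as an admission limit. A minimal sketch of that pattern follows; only the static itself appears in the diff, so the `AppState` fields and the `processor()` helper are assumptions:

```rust
use std::sync::{Arc, OnceLock};

use eyeris::{AIProvider, ImageProcessor};
use tokio::sync::Semaphore;

// Mirrors the static declared in src/main.rs.
static PROCESSOR_POOL: OnceLock<Arc<ImageProcessor>> = OnceLock::new();

// Hypothetical shared state; the handler's `state.semaphore.acquire()` call
// implies something along these lines.
#[derive(Clone)]
struct AppState {
    semaphore: Arc<Semaphore>,
}

// Hypothetical accessor: build the processor once, then share it across requests.
fn processor() -> Arc<ImageProcessor> {
    PROCESSOR_POOL
        .get_or_init(|| Arc::new(ImageProcessor::new(AIProvider::OpenAI, None, None)))
        .clone()
}
```
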
98 changes: 55 additions & 43 deletions src/processor.rs
@@ -1,12 +1,12 @@
use image::{ImageBuffer, DynamicImage};
use rayon::prelude::*;
use crate::errors::ProcessorError;
use crate::prompts::{ImagePrompt, PromptFormat};
use crate::providers::{AIProvider, OllamaProvider, OpenAIProvider, Provider};
use base64::Engine;
use image::codecs::jpeg::JpegEncoder;
use image::imageops::FilterType;
use crate::prompts::{ImagePrompt, PromptFormat};
use image::{DynamicImage, ImageBuffer};
use rayon::prelude::*;
use std::time::Instant;
use crate::providers::{Provider, OpenAIProvider, OllamaProvider, AIProvider};
use crate::errors::ProcessorError;

#[derive(Debug, Clone, Default)]
pub struct TokenStats {
@@ -44,7 +44,7 @@ impl ImageProcessor {
/// ```
pub fn new(provider: AIProvider, model: Option<String>, format: Option<PromptFormat>) -> Self {
let token_stats = std::sync::Arc::new(parking_lot::RwLock::new(TokenStats::default()));

let provider: Box<dyn Provider> = match provider {
AIProvider::OpenAI => Box::new(OpenAIProvider::new(model, token_stats.clone())),
AIProvider::Ollama => Box::new(OllamaProvider::new(model)),
@@ -59,7 +59,7 @@

pub async fn process(&self, image_data: &[u8]) -> Result<String, ProcessorError> {
let start = Instant::now();

// Pre-allocate the base64 string
let base64_start = Instant::now();
let base64_image = base64::engine::general_purpose::STANDARD.encode(image_data);
@@ -78,7 +78,7 @@
};

let (analysis, _thumbnail_result) = tokio::join!(analysis_future, thumbnail_future);

tracing::info!(
duration_ms = parallel_start.elapsed().as_millis(),
"Parallel processing completed"
@@ -94,20 +94,21 @@

async fn analyze_image(&self, base64_image: &str) -> Result<String, ProcessorError> {
let start = Instant::now();

// Get the length before moving the string
let original_size = base64_image.len();

// Clone the string before moving into spawn_blocking
let base64_image_owned = base64_image.to_string();
let optimized_image = tokio::task::spawn_blocking(move || {
// Use base64_image_owned instead of base64_image
let image_data = base64::engine::general_purpose::STANDARD.decode(&base64_image_owned)
let image_data = base64::engine::general_purpose::STANDARD
.decode(&base64_image_owned)
.map_err(|e| ProcessorError::Base64Error(e.to_string()))?;

// Load image
let img = image::load_from_memory(&image_data)?;

// Resize to smaller dimensions while maintaining aspect ratio
// Most vision models don't need images larger than 768px
let max_dim = 768;
@@ -122,22 +123,27 @@

// Convert to RGB and compress as JPEG with moderate quality
let mut buffer = Vec::new();
let mut encoder = JpegEncoder::new_with_quality(&mut buffer, 10);
encoder.encode(
resized.to_rgb8().as_raw(),
resized.width(),
resized.height(),
image::ColorType::Rgb8
).map_err(|e| ProcessorError::ImageLoadError(e))?;
let mut encoder = JpegEncoder::new_with_quality(&mut buffer, 10);
encoder
.encode(
resized.to_rgb8().as_raw(),
resized.width(),
resized.height(),
image::ColorType::Rgb8,
)
.map_err(|e| ProcessorError::ImageLoadError(e))?;

// Convert back to base64
Ok::<_, ProcessorError>(base64::engine::general_purpose::STANDARD.encode(&buffer))
}).await.map_err(|e| ProcessorError::ThumbnailError(e.to_string()))??;
})
.await
.map_err(|e| ProcessorError::ThumbnailError(e.to_string()))??;

tracing::info!(
original_size = original_size,
optimized_size = optimized_image.len(),
reduction_percent = ((original_size - optimized_image.len()) as f32 / original_size as f32 * 100.0),
reduction_percent =
((original_size - optimized_image.len()) as f32 / original_size as f32 * 100.0),
"Image optimization completed"
);
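
Functionally, the hunk above is untouched by the reformat: the incoming image is downscaled to at most 768px, re-encoded as JPEG at quality 10, and returned as base64 for the vision model. A standalone sketch of that transformation using the same `image` and `base64` APIs as this file; the free function is illustrative, and the exact resize guard and filter are collapsed in the diff, so they are assumed here:

```rust
use base64::Engine;
use eyeris::ProcessorError;
use image::{codecs::jpeg::JpegEncoder, imageops::FilterType};

// Illustrative helper mirroring the optimization in `analyze_image`:
// decode base64 -> downscale to <= 768px -> JPEG at quality 10 -> base64.
fn optimize_for_vision_model(base64_image: &str) -> Result<String, ProcessorError> {
    let bytes = base64::engine::general_purpose::STANDARD
        .decode(base64_image)
        .map_err(|e| ProcessorError::Base64Error(e.to_string()))?;
    let img = image::load_from_memory(&bytes)?;

    // Assumed guard: only shrink, leave images already within 768px alone.
    // Triangle filter assumed; the original's choice is collapsed in this view.
    let max_dim = 768;
    let resized = if img.width() > max_dim || img.height() > max_dim {
        img.resize(max_dim, max_dim, FilterType::Triangle)
    } else {
        img
    };

    // Aggressive compression: quality 10 keeps the payload small for the model.
    let mut buffer = Vec::new();
    let mut encoder = JpegEncoder::new_with_quality(&mut buffer, 10);
    encoder
        .encode(
            resized.to_rgb8().as_raw(),
            resized.width(),
            resized.height(),
            image::ColorType::Rgb8,
        )
        .map_err(ProcessorError::ImageLoadError)?;

    Ok(base64::engine::general_purpose::STANDARD.encode(&buffer))
}
```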

@@ -156,7 +162,7 @@

async fn create_thumbnail(&self, image: DynamicImage) -> Result<Vec<u8>, ProcessorError> {
let start = Instant::now();

let result = tokio::task::spawn_blocking(move || {
let resize_start = Instant::now();
let thumbnail = image.resize(300, 300, FilterType::Triangle);
@@ -165,23 +171,26 @@
duration_ms = resize_start.elapsed().as_millis(),
"Image resize completed"
);

let enhance_start = Instant::now();
let width = rgb_image.width() as usize;
let height = rgb_image.height() as usize;

let enhanced_pixels: Vec<_> = rgb_image.chunks_exact(width * 3)

let enhanced_pixels: Vec<_> = rgb_image
.chunks_exact(width * 3)
.enumerate()
.par_bridge()
.flat_map(|(y, row)| {
(0..width).map(move |x| {
let pixel = image::Rgb([
(row[x * 3] as f32 * 1.1).min(255.0) as u8,
(row[x * 3 + 1] as f32 * 1.1).min(255.0) as u8,
(row[x * 3 + 2] as f32 * 1.1).min(255.0) as u8,
]);
(x as u32, y as u32, pixel)
}).collect::<Vec<_>>()
(0..width)
.map(move |x| {
let pixel = image::Rgb([
(row[x * 3] as f32 * 1.1).min(255.0) as u8,
(row[x * 3 + 1] as f32 * 1.1).min(255.0) as u8,
(row[x * 3 + 2] as f32 * 1.1).min(255.0) as u8,
]);
(x as u32, y as u32, pixel)
})
.collect::<Vec<_>>()
})
.collect();

Expand All @@ -198,20 +207,23 @@ impl ImageProcessor {

let mut output = Vec::with_capacity(width * height * 3);
let mut encoder = JpegEncoder::new_with_quality(&mut output, 85);
encoder.encode(
enhanced.as_raw(),
enhanced.width(),
enhanced.height(),
image::ColorType::Rgb8
).map_err(|e| ProcessorError::ThumbnailError(e.to_string()))?;
encoder
.encode(
enhanced.as_raw(),
enhanced.width(),
enhanced.height(),
image::ColorType::Rgb8,
)
.map_err(|e| ProcessorError::ThumbnailError(e.to_string()))?;

tracing::info!(
duration_ms = buffer_start.elapsed().as_millis(),
"Buffer creation and JPEG encoding completed"
);

Ok(output)
}).await
})
.await
.map_err(|e| ProcessorError::ThumbnailError(e.to_string()))?;

tracing::info!(
Expand All @@ -221,4 +233,4 @@ impl ImageProcessor {

result
}
}
}
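
The thumbnail hunks above reformat, but do not change, a rayon-parallel brightness boost: every RGB channel is scaled by 1.1 and clamped at 255 before the quality-85 JPEG encode. The same per-pixel operation can be expressed more directly; a minimal sketch, with an illustrative helper name:

```rust
use rayon::prelude::*;

// Illustrative helper: brighten packed RGB bytes in place, one 3-byte pixel
// per chunk in parallel, matching the 1.1x scale-and-clamp applied in
// `create_thumbnail`.
fn brighten_rgb_bytes(pixels: &mut [u8]) {
    pixels.par_chunks_mut(3).for_each(|px| {
        for channel in px.iter_mut() {
            *channel = (*channel as f32 * 1.1).min(255.0) as u8;
        }
    });
}
```

Writing it this way avoids materializing the intermediate `Vec` of `(x, y, pixel)` tuples that the current code collects before building the enhanced buffer.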