How to Integrate ruv-FANN Components

A comprehensive guide to integrating ruv-FANN's neural networks, swarm intelligence, and WASM modules into your applications.

Integration Overview

ruv-FANN provides multiple integration patterns:

  • Rust Library Integration: Native Rust applications
  • WASM Module Integration: Web and edge applications
  • MCP Integration: Claude Code and AI assistants
  • REST API Integration: Language-agnostic HTTP services
  • Swarm Coordination: Multi-agent distributed systems

Rust Library Integration

1. Core Neural Network Integration

Add Dependencies to Cargo.toml

[dependencies]
# Core neural network functionality
ruv-fann = "0.3.0"
semantic-cartan-matrix = { path = "path/to/Semantic_Cartan_Matrix" }

# Individual components (optional)
micro-core = { path = "path/to/micro_core" }
micro-cartan-attn = { path = "path/to/micro_cartan_attn" }
micro-swarm = { path = "path/to/micro_swarm" }

# Supporting libraries
tokio = { version = "1.0", features = ["full"] }
serde = { version = "1.0", features = ["derive"] }
anyhow = "1.0"

Basic Neural Network Setup

use ruv_fann::prelude::*;
use semantic_cartan_matrix::prelude::*;
use anyhow::Result;

#[tokio::main]
async fn main() -> Result<()> {
    // Initialize neural network with custom architecture
    let mut network = NetworkBuilder::new()
        .input_layer(784)  // MNIST input size
        .hidden_layer(128, ActivationFunction::ReLU)
        .hidden_layer(64, ActivationFunction::ReLU)
        .output_layer(10, ActivationFunction::SoftMax)
        .build()?;

    // Create Cartan attention mechanism
    let cartan_matrix = CartanMatrix::identity();
    let mut attention = CartanAttention::new(cartan_matrix)?;

    // Setup training data
    let training_data = TrainingData::from_file("data/mnist_train.csv")?;
    
    // Train the network
    let trainer = NetworkTrainer::new()
        .learning_rate(0.001)
        .epochs(100)
        .batch_size(32)
        .optimizer(OptimizerType::Adam);

    trainer.train(&mut network, &training_data).await?;

    // Apply attention mechanism to enhance predictions
    let input_vector = RootVector::from_slice(&[0.1, 0.2, 0.3, /* ... */]);
    let attended_input = attention.apply_attention(&[input_vector])?;
    
    // Make predictions
    let prediction = network.predict(&attended_input[0])?;
    println!("Prediction: {:?}", prediction);

    Ok(())
}
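
To sanity-check training, a short evaluation pass can follow inside the same main function. The sketch below reuses only TrainingData::from_file and predict from the example above; the iter() accessor, the (input, label) pair layout, and integer labels are assumptions about the API, not confirmed by it.

// Hedged sketch: accuracy on a held-out set, continuing inside main() above.
// Assumes TrainingData yields (input, label) pairs via iter(), with integer labels.
let test_data = TrainingData::from_file("data/mnist_test.csv")?;
let (mut correct, mut total) = (0usize, 0usize);
for (input, label) in test_data.iter() {
    let output = network.predict(&input)?;
    // Argmax over the 10 softmax outputs
    let predicted = output
        .iter()
        .enumerate()
        .max_by(|a, b| a.1.partial_cmp(b.1).unwrap())
        .map(|(i, _)| i)
        .unwrap_or(0);
    if predicted == label {
        correct += 1;
    }
    total += 1;
}
println!("Accuracy: {:.2}%", 100.0 * correct as f64 / total as f64);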

2. Swarm Integration

Distributed Neural Processing

use std::time::Duration;
use anyhow::Result;
use micro_swarm::{SwarmOrchestrator, AgentType, TaskPriority, TopologyType, ProcessingTask, ProcessingType};

async fn distributed_processing() -> Result<()> {
    // Initialize swarm orchestrator
    let mut orchestrator = SwarmOrchestrator::new()
        .topology(TopologyType::Mesh)
        .max_agents(8)
        .coordination_timeout(Duration::from_secs(30))
        .build().await?;

    // Spawn specialized agents
    let agents = vec![
        (AgentType::Researcher, "data-analysis"),
        (AgentType::Coder, "model-optimization"), 
        (AgentType::Analyst, "performance-monitoring"),
        (AgentType::Tester, "validation-testing"),
    ];

    for (agent_type, task_description) in agents {
        orchestrator.spawn_agent(agent_type, task_description).await?;
    }

    // Define a complex neural processing task
    let task = ProcessingTask::new()
        .input_data(vec![/* large dataset */])
        .processing_type(ProcessingType::ParallelInference)
        .priority(TaskPriority::High)
        .timeout(Duration::from_secs(300));

    // Distribute task across swarm
    let results = orchestrator.distribute_task(task).await?;
    
    // Aggregate results from all agents
    let final_result = orchestrator.aggregate_results(results).await?;
    
    println!("Distributed processing complete: {:?}", final_result);
    Ok(())
}

3. Advanced Integration Patterns

Custom Neural Architecture with Cartan Matrix

use anyhow::Result;
use ruv_fann::prelude::*;
use semantic_cartan_matrix::{CartanMatrix, CartanAttention, RootSpace};
use micro_swarm::{SwarmOrchestrator, SwarmProcessingTask, SwarmConfig, ProcessingType, CoordinationStrategy};

struct CustomNeuralProcessor {
    network: NeuralNetwork,
    cartan_attention: CartanAttention,
    root_space: RootSpace,
    swarm_coordinator: Option<SwarmOrchestrator>,
}

impl CustomNeuralProcessor {
    pub fn new(config: ProcessorConfig) -> Result<Self> {
        // Initialize 32-dimensional root space with Cartan normalization
        let root_space = RootSpace::new(32);
        
        // Create custom Cartan matrix for attention
        let mut cartan_matrix = CartanMatrix::identity();
        cartan_matrix.apply_custom_constraints(&config.attention_constraints)?;
        
        let cartan_attention = CartanAttention::new(cartan_matrix)?;

        // Build neural network with custom layers
        let network = NetworkBuilder::new()
            .custom_layer(config.input_size, LayerType::CartanProjection)
            .attention_layer(cartan_attention.clone())
            .hidden_layer(config.hidden_size, ActivationFunction::GELU)
            .dropout_layer(0.1)
            .output_layer(config.output_size, ActivationFunction::Linear)
            .build()?;

        Ok(Self {
            network,
            cartan_attention,
            root_space,
            swarm_coordinator: None,
        })
    }

    pub async fn process_with_swarm(&mut self, input: &[f32]) -> Result<Vec<f32>> {
        // Convert input to a 32-dimensional root vector (zero-padded if the input is shorter;
        // a plain copy_from_slice would panic on a length mismatch)
        let mut root_vector = self.root_space.create_vector();
        let len = input.len().min(32);
        root_vector[..len].copy_from_slice(&input[..len]);
        
        // Apply Cartan normalization
        self.root_space.normalize_vector(&mut root_vector)?;
        
        // Apply attention mechanism
        let attended = self.cartan_attention.apply_attention(&[root_vector])?;
        
        // If swarm is available, distribute processing
        if let Some(ref mut coordinator) = self.swarm_coordinator {
            let swarm_task = SwarmProcessingTask::new()
                .input_vectors(attended)
                .processing_type(ProcessingType::ParallelInference)
                .coordination_strategy(CoordinationStrategy::Consensus);
                
            let swarm_results = coordinator.process_task(swarm_task).await?;
            Ok(swarm_results.aggregate_outputs())
        } else {
            // Fallback to local processing
            let network_input: Vec<f32> = attended[0].iter().cloned().collect();
            self.network.predict(&network_input)
        }
    }

    pub async fn enable_swarm_coordination(&mut self, config: SwarmConfig) -> Result<()> {
        let coordinator = SwarmOrchestrator::new()
            .topology(config.topology)
            .max_agents(config.max_agents)
            .coordination_protocol(config.protocol)
            .build().await?;
            
        self.swarm_coordinator = Some(coordinator);
        Ok(())
    }
}
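
A minimal end-to-end sketch of the processor defined above, assuming ProcessorConfig and SwarmConfig implement Default (not shown in this guide):

#[tokio::main]
async fn main() -> Result<()> {
    // Construct the processor with default configuration (assumed Default impls)
    let mut processor = CustomNeuralProcessor::new(ProcessorConfig::default())?;

    // Optionally enable distributed processing before the first call
    processor.enable_swarm_coordination(SwarmConfig::default()).await?;

    // 32-dimensional input, matching the root space above
    let input = vec![0.5f32; 32];
    let output = processor.process_with_swarm(&input).await?;
    println!("Output: {:?}", output);
    Ok(())
}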

WASM Integration

1. Building WASM Modules

Build Configuration

# Install required tools
cargo install wasm-pack

# Build optimized WASM module
cd Semantic_Cartan_Matrix
wasm-pack build --target web --release -- --features wasm

# Optimize for size
wasm-opt -Os -o pkg/semantic_cartan_matrix_bg.wasm pkg/semantic_cartan_matrix_bg.wasm

# Build for bundlers (TypeScript bindings are generated by default)
wasm-pack build --target bundler --release

# For the Node.js examples later in this guide, build a Node-compatible package
wasm-pack build --target nodejs --release

WASM Module Wrapper

// src/wasm_bindings.rs
use wasm_bindgen::prelude::*;
use micro_core::{RootVector, RootSpace};
use micro_cartan_attn::{CartanAttention, CartanMatrix};

#[wasm_bindgen]
pub struct WasmNeuralProcessor {
    attention: CartanAttention,
    root_space: RootSpace,
}

#[wasm_bindgen]
impl WasmNeuralProcessor {
    #[wasm_bindgen(constructor)]
    pub fn new() -> Result<WasmNeuralProcessor, JsValue> {
        console_error_panic_hook::set_once();
        
        let cartan_matrix = CartanMatrix::identity();
        let attention = CartanAttention::new(cartan_matrix)
            .map_err(|e| JsValue::from_str(&e.to_string()))?;
            
        let root_space = RootSpace::new(32);
        
        Ok(WasmNeuralProcessor {
            attention,
            root_space,
        })
    }

    #[wasm_bindgen]
    pub fn process_vector(&mut self, input: &[f32]) -> Result<Vec<f32>, JsValue> {
        let mut root_vector = self.root_space.create_vector();
        
        // Copy input data (truncate to 32 dimensions)
        let len = input.len().min(32);
        root_vector[..len].copy_from_slice(&input[..len]);
        
        // Normalize using Cartan space constraints
        self.root_space.normalize_vector(&mut root_vector)
            .map_err(|e| JsValue::from_str(&e.to_string()))?;
        
        // Apply attention mechanism
        let result = self.attention.apply_attention(&[root_vector])
            .map_err(|e| JsValue::from_str(&e.to_string()))?;
        
        Ok(result[0].iter().cloned().collect())
    }

    #[wasm_bindgen]
    pub fn batch_process(&mut self, batch: &[f32], _batch_size: usize) -> Result<Vec<f32>, JsValue> {
        let mut results = Vec::new();

        // Vectors are fixed at 32 dimensions, so the flat batch is chunked accordingly
        for chunk in batch.chunks(32) {
            let processed = self.process_vector(chunk)?;
            results.extend(processed);
        }
        
        Ok(results)
    }
}
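
The wrapper above can be exercised before shipping with wasm-bindgen-test (an assumed dev-dependency). The sketch below calls only the methods defined in this section and assumes the attention step preserves the 32-dimensional shape:

// tests/wasm.rs
use wasm_bindgen_test::*;

wasm_bindgen_test_configure!(run_in_browser);

#[wasm_bindgen_test]
fn processes_a_32_dimensional_vector() {
    let mut processor = WasmNeuralProcessor::new().unwrap();
    let input = vec![0.5f32; 32];
    let output = processor.process_vector(&input).unwrap();
    // Assumption: attention preserves the vector dimension
    assert_eq!(output.len(), 32);
}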

2. JavaScript Integration

Web Application Integration

// web-integration.js
import init, { WasmNeuralProcessor } from './pkg/semantic_cartan_matrix.js';

class RuvFannWebIntegration {
    constructor() {
        this.processor = null;
        this.initialized = false;
    }

    async initialize() {
        await init();
        this.processor = new WasmNeuralProcessor();
        this.initialized = true;
        console.log('RUV-FANN WASM integration initialized');
    }

    async processData(inputData) {
        if (!this.initialized) {
            await this.initialize();
        }

        try {
            // Convert JavaScript array to Float32Array for WASM
            const float32Input = new Float32Array(inputData);
            
            // Process through neural network
            const result = this.processor.process_vector(float32Input);
            
            return Array.from(result);
        } catch (error) {
            console.error('WASM processing failed:', error);
            throw error;
        }
    }

    async batchProcess(batchData, batchSize = 32) {
        if (!this.initialized) {
            await this.initialize();
        }

        const flattened = batchData.flat();
        const float32Batch = new Float32Array(flattened);
        
        const results = this.processor.batch_process(float32Batch, batchSize);
        
        // Reshape results back to original structure
        const reshapedResults = [];
        for (let i = 0; i < results.length; i += 32) {
            reshapedResults.push(Array.from(results.slice(i, i + 32)));
        }
        
        return reshapedResults;
    }
}

// Usage example
async function main() {
    const integration = new RuvFannWebIntegration();
    
    // Process single vector
    const input = Array.from({length: 32}, () => Math.random());
    const result = await integration.processData(input);
    console.log('Processed result:', result);
    
    // Process batch of vectors
    const batchInput = Array.from({length: 10}, () => 
        Array.from({length: 32}, () => Math.random())
    );
    const batchResults = await integration.batchProcess(batchInput);
    console.log('Batch results:', batchResults);
}

main().catch(console.error);

Node.js Integration

// node-integration.js
// Note: pkg/ must be built with `wasm-pack build --target nodejs --release` for require() to work
const { WasmNeuralProcessor } = require('./pkg/semantic_cartan_matrix');

class RuvFannNodeIntegration {
    constructor() {
        this.processor = new WasmNeuralProcessor();
    }

    processStream(inputStream) {
        return new Promise((resolve, reject) => {
            const results = [];
            
            inputStream.on('data', (chunk) => {
                try {
                    // Respect the Buffer's byte offset and length rather than reading its whole ArrayBuffer
                    const float32Data = new Float32Array(chunk.buffer, chunk.byteOffset, chunk.length / 4);
                    const result = this.processor.process_vector(float32Data);
                    results.push(result);
                } catch (error) {
                    reject(error);
                }
            });
            
            inputStream.on('end', () => {
                resolve(results);
            });
            
            inputStream.on('error', reject);
        });
    }

    async processFile(filePath) {
        const fs = require('fs').promises;
        const data = await fs.readFile(filePath);
        
        // Interpret the file as packed 32-bit floats, honoring the Buffer's byte offset
        const float32Array = new Float32Array(data.buffer, data.byteOffset, data.byteLength / 4);
        
        return this.processor.batch_process(float32Array, 32);
    }
}

module.exports = { RuvFannNodeIntegration };

MCP (Model Context Protocol) Integration

1. Claude Code Integration

MCP Server Setup

// mcp-server.ts
import { Server } from '@modelcontextprotocol/sdk/server/index.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import { CallToolRequestSchema, ListToolsRequestSchema } from '@modelcontextprotocol/sdk/types.js';
import { RuvFannNodeIntegration } from './node-integration.js';

const server = new Server(
  {
    name: 'ruv-fann-mcp',
    version: '1.0.0',
  },
  {
    capabilities: {
      tools: {},
    },
  }
);

const ruvFann = new RuvFannNodeIntegration();

// Neural network processing tool
server.setRequestHandler(CallToolRequestSchema, async (request) => {
  const { name, arguments: args } = request.params;

  switch (name) {
    case 'ruv_fann_process':
      const { input_data, processing_type } = args;
      
      let result;
      switch (processing_type) {
        case 'single':
          result = await ruvFann.processor.process_vector(new Float32Array(input_data));
          break;
        case 'batch':
          result = await ruvFann.processor.batch_process(new Float32Array(input_data.flat()), 32);
          break;
        default:
          throw new Error(`Unknown processing type: ${processing_type}`);
      }
      
      return {
        content: [
          {
            type: 'text',
            text: `Neural processing complete. Result: ${JSON.stringify(Array.from(result))}`
          }
        ]
      };

    case 'ruv_fann_swarm_init':
      // Initialize swarm coordination
      const { topology, max_agents } = args;
      // Implementation would connect to Rust swarm orchestrator
      return {
        content: [
          {
            type: 'text',
            text: `Swarm initialized with ${topology} topology and ${max_agents} agents`
          }
        ]
      };

    default:
      throw new Error(`Unknown tool: ${name}`);
  }
});

// List available tools (the SDK expects the request schema object, not a string)
server.setRequestHandler(ListToolsRequestSchema, async () => {
  return {
    tools: [
      {
        name: 'ruv_fann_process',
        description: 'Process data through RUV-FANN neural networks',
        inputSchema: {
          type: 'object',
          properties: {
            input_data: {
              type: 'array',
              description: 'Input data for neural processing'
            },
            processing_type: {
              type: 'string',
              enum: ['single', 'batch'],
              description: 'Type of processing to perform'
            }
          },
          required: ['input_data', 'processing_type']
        }
      },
      {
        name: 'ruv_fann_swarm_init',
        description: 'Initialize swarm intelligence coordination',
        inputSchema: {
          type: 'object',
          properties: {
            topology: {
              type: 'string',
              enum: ['mesh', 'hierarchical', 'ring', 'star'],
              description: 'Swarm topology type'
            },
            max_agents: {
              type: 'number',
              description: 'Maximum number of agents in swarm'
            }
          },
          required: ['topology', 'max_agents']
        }
      }
    ]
  };
});

// Start server
const transport = new StdioServerTransport();
await server.connect(transport);

Claude Code Usage

# In the Claude Code environment, the MCP tools are available as:

# Process neural data
result = mcp_ruv_fann_process(
    input_data=[0.1, 0.2, 0.3, ...],  # 32-dimensional vector
    processing_type="single"
)

# Initialize swarm for distributed processing
swarm_status = mcp_ruv_fann_swarm_init(
    topology="mesh",
    max_agents=8
)

2. API Gateway Integration

REST API Wrapper

// src/api/mod.rs
use axum::{
    extract::State,
    http::StatusCode,
    response::Json,
    routing::{get, post},
    Router,
};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use crate::CustomNeuralProcessor;

#[derive(Clone)]
pub struct ApiState {
    processor: Arc<tokio::sync::Mutex<CustomNeuralProcessor>>,
}

#[derive(Deserialize)]
pub struct ProcessRequest {
    pub input: Vec<f32>,
    pub use_swarm: Option<bool>,
}

#[derive(Serialize)]
pub struct ProcessResponse {
    pub output: Vec<f32>,
    pub processing_time_ms: u64,
    pub swarm_agents_used: Option<usize>,
}

pub fn create_router(processor: CustomNeuralProcessor) -> Router {
    let state = ApiState {
        processor: Arc::new(tokio::sync::Mutex::new(processor)),
    };

    // Only the process_data handler is shown in this excerpt; the health,
    // batch and swarm handlers follow the same pattern.
    Router::new()
        .route("/health", get(health))
        .route("/process", post(process_data))
        .route("/batch", post(batch_process))
        .route("/swarm/init", post(init_swarm))
        .route("/swarm/status", get(swarm_status))
        .with_state(state)
}

async fn process_data(
    State(state): State<ApiState>,
    Json(request): Json<ProcessRequest>,
) -> Result<Json<ProcessResponse>, StatusCode> {
    let start = std::time::Instant::now();
    
    let mut processor = state.processor.lock().await;
    
    let output = if request.use_swarm.unwrap_or(false) {
        processor.process_with_swarm(&request.input).await
    } else {
        processor.network.predict(&request.input)
    };
    
    let output = output.map_err(|_| StatusCode::INTERNAL_SERVER_ERROR)?;
    let processing_time = start.elapsed().as_millis() as u64;
    
    let response = ProcessResponse {
        output,
        processing_time_ms: processing_time,
        swarm_agents_used: if request.use_swarm.unwrap_or(false) {
            processor
                .swarm_coordinator
                .as_ref()
                .map(|c| c.active_agents_count())
        } else {
            None
        },
    };
    
    Ok(Json(response))
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let processor = CustomNeuralProcessor::new(ProcessorConfig::default())?;
    let app = create_router(processor);
    
    let listener = tokio::net::TcpListener::bind("0.0.0.0:8080").await?;
    println!("RUV-FANN API server running on http://0.0.0.0:8080");
    
    axum::serve(listener, app).await?;
    Ok(())
}
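
For a quick end-to-end check of the /process endpoint, a client-side sketch is shown below. It assumes the server above is running locally and that reqwest (with the json feature) and serde_json are added as dependencies:

use serde_json::json;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = reqwest::Client::new();
    let response = client
        .post("http://localhost:8080/process")
        .json(&json!({ "input": vec![0.5f32; 32], "use_swarm": false }))
        .send()
        .await?
        .error_for_status()?;

    // Fields mirror the ProcessResponse struct defined above
    let body: serde_json::Value = response.json().await?;
    println!("output: {}", body["output"]);
    println!("processing_time_ms: {}", body["processing_time_ms"]);
    Ok(())
}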

Cross-Language Integration

1. Python Integration via PyO3

Python Binding Setup

// src/python_bindings.rs
use pyo3::prelude::*;
use numpy::{IntoPyArray, PyArray1, PyReadonlyArray1};
use crate::{CustomNeuralProcessor, ProcessorConfig, SwarmConfig, TopologyType, CoordinationProtocol};

#[pyclass]
struct PyNeuralProcessor {
    processor: CustomNeuralProcessor,
}

#[pymethods]
impl PyNeuralProcessor {
    #[new]
    fn new() -> PyResult<Self> {
        let processor = CustomNeuralProcessor::new(ProcessorConfig::default())
            .map_err(|e| PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(e.to_string()))?;
        
        Ok(PyNeuralProcessor { processor })
    }

    fn process(&mut self, py: Python, input: PyReadonlyArray1<f32>) -> PyResult<Py<PyArray1<f32>>> {
        let input_slice = input.as_slice()?;
        
        // Creating a runtime per call keeps the example short; a shared
        // runtime is preferable for repeated calls
        let rt = tokio::runtime::Runtime::new().unwrap();
        let result = rt.block_on(async {
            self.processor.process_with_swarm(input_slice).await
        }).map_err(|e| PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(e.to_string()))?;
        
        Ok(result.into_pyarray(py).to_owned())
    }

    fn enable_swarm(&mut self, max_agents: usize, topology: &str) -> PyResult<()> {
        let topology = match topology {
            "mesh" => TopologyType::Mesh,
            "hierarchical" => TopologyType::Hierarchical,
            "ring" => TopologyType::Ring,
            "star" => TopologyType::Star,
            _ => return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>("Invalid topology")),
        };
        
        let config = SwarmConfig {
            max_agents,
            topology,
            protocol: CoordinationProtocol::Consensus,
        };
        
        let rt = tokio::runtime::Runtime::new().unwrap();
        rt.block_on(async {
            self.processor.enable_swarm_coordination(config).await
        }).map_err(|e| PyErr::new::<pyo3::exceptions::PyRuntimeError, _>(e.to_string()))?;
        
        Ok(())
    }
}

#[pymodule]
fn ruv_fann_py(_py: Python, m: &PyModule) -> PyResult<()> {
    m.add_class::<PyNeuralProcessor>()?;
    Ok(())
}

Python Usage

# python_example.py
# Build and install the extension first, e.g. with `maturin develop --release`
import numpy as np
import ruv_fann_py

# Initialize processor
processor = ruv_fann_py.PyNeuralProcessor()

# Enable swarm coordination
processor.enable_swarm(max_agents=8, topology="mesh")

# Process data
input_data = np.random.rand(32).astype(np.float32)
result = processor.process(input_data)

print(f"Input shape: {input_data.shape}")
print(f"Output shape: {result.shape}")
print(f"Processed result: {result}")

2. C++ Integration via FFI

C++ Header Generation

// src/ffi.rs
use std::os::raw::{c_float, c_int};
use crate::{CustomNeuralProcessor, ProcessorConfig};

// Build this crate with crate-type = ["cdylib"] (or "staticlib") so C/C++ can link it.
// NOTE: a static mut keeps the example short, but it is not thread-safe.
static mut PROCESSOR: Option<CustomNeuralProcessor> = None;

#[no_mangle]
pub extern "C" fn ruv_fann_init() -> c_int {
    unsafe {
        match CustomNeuralProcessor::new(ProcessorConfig::default()) {
            Ok(processor) => {
                PROCESSOR = Some(processor);
                0 // Success
            }
            Err(_) => -1 // Error
        }
    }
}

#[no_mangle]
pub extern "C" fn ruv_fann_process(
    input: *const c_float,
    input_size: c_int,
    output: *mut c_float,
    output_size: c_int,
) -> c_int {
    unsafe {
        if let Some(ref mut processor) = PROCESSOR {
            let input_slice = std::slice::from_raw_parts(input, input_size as usize);
            
            let rt = tokio::runtime::Runtime::new().unwrap();
            match rt.block_on(processor.process_with_swarm(input_slice)) {
                Ok(result) => {
                    let copy_size = result.len().min(output_size as usize);
                    let output_slice = std::slice::from_raw_parts_mut(output, copy_size);
                    output_slice.copy_from_slice(&result[..copy_size]);
                    copy_size as c_int
                }
                Err(_) => -1
            }
        } else {
            -1
        }
    }
}

#[no_mangle]
pub extern "C" fn ruv_fann_cleanup() {
    unsafe {
        PROCESSOR = None;
    }
}

C++ Usage

// cpp_example.cpp
#include <iostream>
#include <vector>
#include <random>

extern "C" {
    int ruv_fann_init();
    int ruv_fann_process(const float* input, int input_size, float* output, int output_size);
    void ruv_fann_cleanup();
}

int main() {
    // Initialize RUV-FANN processor
    if (ruv_fann_init() != 0) {
        std::cerr << "Failed to initialize RUV-FANN" << std::endl;
        return -1;
    }

    // Generate random input data
    std::vector<float> input(32);
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_real_distribution<float> dis(0.0f, 1.0f);
    
    for (auto& val : input) {
        val = dis(gen);
    }

    // Process data
    std::vector<float> output(32);
    int result_size = ruv_fann_process(input.data(), static_cast<int>(input.size()),
                                       output.data(), static_cast<int>(output.size()));
    
    if (result_size > 0) {
        std::cout << "Processing successful, output size: " << result_size << std::endl;
        
        std::cout << "Input: ";
        for (const auto& val : input) {
            std::cout << val << " ";
        }
        std::cout << std::endl;
        
        std::cout << "Output: ";
        for (int i = 0; i < result_size; ++i) {
            std::cout << output[i] << " ";
        }
        std::cout << std::endl;
    } else {
        std::cerr << "Processing failed" << std::endl;
    }

    // Cleanup
    ruv_fann_cleanup();
    return 0;
}

Performance Integration Patterns

1. Memory-Optimized Integration

use std::sync::Arc;
use tokio::sync::Mutex;

pub struct OptimizedIntegration {
    // Pool of processors, each behind an async Mutex so a spawned task can
    // hold its processor across .await points (a parking_lot guard is not Send)
    processor_pool: Vec<Arc<Mutex<CustomNeuralProcessor>>>,
    // Memory-mapped data for large datasets
    mmap_data: Option<memmap2::Mmap>,
    // SIMD-optimized processing buffers
    processing_buffers: Vec<AlignedBuffer<f32>>,
}

impl OptimizedIntegration {
    pub fn new(pool_size: usize) -> Result<Self> {
        let mut processors = Vec::with_capacity(pool_size);
        for _ in 0..pool_size {
            processors.push(Arc::new(Mutex::new(
                CustomNeuralProcessor::new(ProcessorConfig::default())?,
            )));
        }

        Ok(Self {
            processor_pool: processors,
            mmap_data: None,
            processing_buffers: (0..pool_size).map(|_| AlignedBuffer::new(1024)).collect(),
        })
    }

    pub async fn parallel_batch_process(&self, batches: Vec<Vec<f32>>) -> Result<Vec<Vec<f32>>> {
        let tasks: Vec<_> = batches.into_iter().enumerate().map(|(idx, batch)| {
            // Round-robin the batches over the processor pool
            let processor = Arc::clone(&self.processor_pool[idx % self.processor_pool.len()]);
            tokio::spawn(async move {
                let mut processor = processor.lock().await;
                processor.process_with_swarm(&batch).await
            })
        }).collect();

        let mut results = Vec::new();
        for task in tasks {
            results.push(task.await??);
        }

        Ok(results)
    }
}
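
A hedged usage sketch for the pool above; the batch contents are placeholders:

#[tokio::main]
async fn main() -> Result<()> {
    // Four processors shared across sixteen batches
    let integration = OptimizedIntegration::new(4)?;
    let batches: Vec<Vec<f32>> = (0..16).map(|_| vec![0.5f32; 32]).collect();

    let results = integration.parallel_batch_process(batches).await?;
    println!("Processed {} batches", results.len());
    Ok(())
}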

2. Real-time Streaming Integration

use tokio_stream::{Stream, StreamExt};
use futures::pin_mut;

pub struct StreamingProcessor {
    processor: CustomNeuralProcessor,
    buffer: Vec<f32>,
    batch_size: usize,
}

impl StreamingProcessor {
    // Consumes self so the spawned task can own the processor and buffer;
    // a &mut self borrow cannot be moved into a 'static task.
    pub async fn process_stream<S>(mut self, stream: S) -> Result<impl Stream<Item = Vec<f32>>>
    where
        S: Stream<Item = f32> + Unpin + Send + 'static,
    {
        let (tx, rx) = tokio::sync::mpsc::channel(100);

        tokio::spawn(async move {
            pin_mut!(stream);

            while let Some(value) = stream.next().await {
                self.buffer.push(value);

                if self.buffer.len() >= self.batch_size {
                    let batch = self.buffer.drain(..self.batch_size).collect::<Vec<_>>();

                    match self.processor.process_with_swarm(&batch).await {
                        Ok(result) => {
                            if tx.send(result).await.is_err() {
                                break; // Receiver dropped
                            }
                        }
                        Err(e) => {
                            eprintln!("Stream processing error: {}", e);
                            break;
                        }
                    }
                }
            }
            // Any partial batch left in the buffer is dropped here
        });

        Ok(tokio_stream::wrappers::ReceiverStream::new(rx))
    }
}
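
A usage sketch for the streaming processor, assuming the struct fields are visible at the construction site (the section defines no constructor) and using an in-memory stream in place of a real source:

use futures::pin_mut;
use tokio_stream::StreamExt;

async fn run(processor: CustomNeuralProcessor) -> anyhow::Result<()> {
    let streaming = StreamingProcessor {
        processor,
        buffer: Vec::new(),
        batch_size: 32,
    };

    // Synthetic input stream of 1024 samples
    let input = tokio_stream::iter((0..1024).map(|i| i as f32 / 1024.0));

    let results = streaming.process_stream(input).await?;
    pin_mut!(results);
    while let Some(batch_output) = results.next().await {
        println!("Received {} output values", batch_output.len());
    }
    Ok(())
}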

This comprehensive integration guide provides multiple patterns for incorporating ruv-FANN into various application architectures, from simple library usage to complex distributed systems.
