Privacy Infrastructure for Enterprise AI — iCommunity Labs
The Privaro SDK intercepts prompts and agent steps, tokenises PII before any LLM call, and generates a blockchain-certified audit trail per interaction.
pip install privaro
pip install privaro[async]  # for AsyncAgentRun + CrewAI support
import privaro
privaro.init(
api_key="prvr_your_key",
pipeline_id="your-pipeline-uuid"
)
result = privaro.protect("Patient: María García, DNI 34521789X, Tel: 612 345 678")
print(result.protected)
# "Patient: [NM-0001], DNI [ID-0001], Tel: [PH-0001]"
response = your_llm.complete(result.protected)  # LLM never sees real PII
from privaro.agent import AgentRun
with AgentRun(api_key="prvr_...", pipeline_id="...") as run:
# Step 1: protect before LLM
step = run.protect([
{"role": "user", "content": "Review contract for Juan García, IBAN ES91 2100 0418 4502 0005 1332"}
])
# Step 2: send protected messages to LLM
response = your_llm.complete(step.protected_messages)
# Step 3: detokenise final output
final = run.reveal(response)
print(f"Risk score: {step.risk_score}") # 0.0–1.0
print(f"PII detected: {step.total_pii_detected}")
import asyncio
from privaro.async_client import AsyncAgentRun
async def analyse_document(text: str) -> str:
async with AsyncAgentRun(
api_key="prvr_...",
pipeline_id="...",
agent_framework="crewai",
) as run:
step = await run.protect(text)
response = await your_llm.acomplete(step.first_content)
return await run.reveal(response)
result = asyncio.run(analyse_document("Client: Ana López, DNI 87654321X"))
from langchain_openai import ChatOpenAI
from langchain.agents import AgentExecutor
from privaro.agent import PrivaroCallbackHandler
import privaro
privaro.init(api_key="prvr_...", pipeline_id="...")
handler = PrivaroCallbackHandler(agent_name="contract-agent")
llm = ChatOpenAI(model="gpt-4", callbacks=[handler])
agent_executor = AgentExecutor(agent=agent, tools=tools, callbacks=[handler])
# All prompts and tool outputs are automatically protected
result = agent_executor.invoke({"input": "Analyse this contract for María García"})
from privaro.async_client import CrewAIPrivaroTool
from crewai import Tool
class ContractAnalyser(CrewAIPrivaroTool):
name = "Contract Analyser"
description = "Analyses contracts with built-in PII protection"
async def _execute(self, protected_input: str, run) -> str:
# protected_input has PII tokenised — safe to send to LLM
return await your_llm.acomplete(protected_input)
tool = ContractAnalyser(api_key="prvr_...", pipeline_id="...")
result = privaro.protect("...")
result.protected # Tokenised prompt — send this to your LLM
result.original # Original text (never sent to Privaro servers)
result.has_pii # True if PII was detected
result.is_safe # True if all PII masked, no leaks
result.risk_level # "high" | "medium" | "low"
result.risk_score # 0.0–1.0
result.gdpr_compliant # True if all critical PII was masked
result.total_detected # Count of PII entities found
result.total_masked # Count of PII entities masked
result.processing_ms # Latency in milliseconds
result.audit_log_id # Supabase audit log UUID
result.summary()        # One-line log string
from privaro.exceptions import (
PrivaroError, # Base exception
AuthError, # Invalid or missing API key
PipelineNotFoundError, # Pipeline UUID not found
PolicyBlockError, # Request blocked by policy
RateLimitError, # Too many requests
ProxyUnavailableError, # Cannot reach proxy
)
try:
result = privaro.protect("...")
except PolicyBlockError:
# PII blocked by policy — do not proceed
pass
except AuthError:
# Check API key
    pass
- Python 3.9+
- No required dependencies (uses `urllib` only for the sync client)
- `aiohttp>=3.8` for async support (pip install privaro[async])
- Docs: privaro.ai/docs
- Dashboard: privaro.ai/app
- Issues: github.com/Maperez1972/privaro-sdk-python
- Partners: partners@privaro.ai