API

patronus.api.api_client.PatronusAPIClient

PatronusAPIClient(
    *,
    client_http_async: AsyncClient,
    client_http: Client,
    base_url: str,
    api_key: str,
)

Bases: BaseAPIClient

Source code in src/patronus/api/api_client_base.py
def __init__(
    self,
    *,
    client_http_async: httpx.AsyncClient,
    client_http: httpx.Client,
    base_url: str,
    api_key: str,
):
    self.version = importlib.metadata.version("patronus")
    self.http = client_http_async
    self.http_sync = client_http
    self.base_url = base_url.rstrip("/")
    self.api_key = api_key
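
The constructor takes pre-built httpx clients rather than creating its own. A minimal construction sketch; the base URL shown is an assumption and the API key is a placeholder:

```python
import httpx

from patronus.api.api_client import PatronusAPIClient

client = PatronusAPIClient(
    client_http_async=httpx.AsyncClient(),
    client_http=httpx.Client(),
    base_url="https://api.patronus.ai",  # assumed endpoint; use your deployment's URL
    api_key="<your-api-key>",            # placeholder
)
```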

add_evaluator_criteria_revision async

add_evaluator_criteria_revision(
    evaluator_criteria_id,
    request: AddEvaluatorCriteriaRevisionRequest,
) -> api_types.AddEvaluatorCriteriaRevisionResponse

Adds a revision to existing evaluator criteria.

Source code in src/patronus/api/api_client.py
async def add_evaluator_criteria_revision(
    self,
    evaluator_criteria_id,
    request: api_types.AddEvaluatorCriteriaRevisionRequest,
) -> api_types.AddEvaluatorCriteriaRevisionResponse:
    """Adds a revision to existing evaluator criteria."""
    resp = await self.call(
        "POST",
        f"/v1/evaluator-criteria/{evaluator_criteria_id}/revision",
        body=request,
        response_cls=api_types.AddEvaluatorCriteriaRevisionResponse,
    )
    resp.raise_for_status()
    return resp.data

add_evaluator_criteria_revision_sync

add_evaluator_criteria_revision_sync(
    evaluator_criteria_id,
    request: AddEvaluatorCriteriaRevisionRequest,
) -> api_types.AddEvaluatorCriteriaRevisionResponse

Adds a revision to existing evaluator criteria.

Source code in src/patronus/api/api_client.py
def add_evaluator_criteria_revision_sync(
    self,
    evaluator_criteria_id,
    request: api_types.AddEvaluatorCriteriaRevisionRequest,
) -> api_types.AddEvaluatorCriteriaRevisionResponse:
    """Adds a revision to existing evaluator criteria."""
    resp = self.call_sync(
        "POST",
        f"/v1/evaluator-criteria/{evaluator_criteria_id}/revision",
        body=request,
        response_cls=api_types.AddEvaluatorCriteriaRevisionResponse,
    )
    resp.raise_for_status()
    return resp.data
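
A hedged sketch of the sync variant, assuming `client` is a `PatronusAPIClient` constructed as in the earlier example; the criteria ID and config contents are placeholders:

```python
from patronus.api import api_types

resp = client.add_evaluator_criteria_revision_sync(
    "<evaluator-criteria-id>",  # placeholder
    api_types.AddEvaluatorCriteriaRevisionRequest(
        config={"pass_threshold": 0.8},  # assumed config shape, for illustration only
    ),
)
print(resp.evaluator_criteria.revision)  # revision number after the update
```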

annotate async

annotate(
    request: AnnotateRequest,
) -> api_types.AnnotateResponse

Annotates a log based on the given request.

Source code in src/patronus/api/api_client.py
async def annotate(self, request: api_types.AnnotateRequest) -> api_types.AnnotateResponse:
    """Annotates log based on the given request."""
    resp = await self.call(
        "POST",
        "/v1/annotate",
        body=request,
        response_cls=api_types.AnnotateResponse,
    )
    resp.raise_for_status()
    return resp.data

annotate_sync

annotate_sync(
    request: AnnotateRequest,
) -> api_types.AnnotateResponse

Annotates a log based on the given request.

Source code in src/patronus/api/api_client.py
def annotate_sync(self, request: api_types.AnnotateRequest) -> api_types.AnnotateResponse:
    """Annotates log based on the given request."""
    resp = self.call_sync(
        "POST",
        "/v1/annotate",
        body=request,
        response_cls=api_types.AnnotateResponse,
    )
    resp.raise_for_status()
    return resp.data
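
A hedged sketch, assuming `client` from the constructor example; the criteria and log IDs are placeholders:

```python
from patronus.api import api_types

resp = client.annotate_sync(
    api_types.AnnotateRequest(
        annotation_criteria_id="<annotation-criteria-id>",  # placeholder
        log_id="<log-id>",                                  # placeholder
        value_pass=True,
        explanation="Output matched the gold answer.",
    )
)
print(resp.evaluation.id)
```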

batch_create_evaluations async

batch_create_evaluations(
    request: BatchCreateEvaluationsRequest,
) -> api_types.BatchCreateEvaluationsResponse

Creates multiple evaluations in a single request.

Source code in src/patronus/api/api_client.py
async def batch_create_evaluations(
    self, request: api_types.BatchCreateEvaluationsRequest
) -> api_types.BatchCreateEvaluationsResponse:
    """Creates multiple evaluations in a single request."""
    resp = await self.call(
        "POST",
        "/v1/evaluations/batch",
        body=request,
        response_cls=api_types.BatchCreateEvaluationsResponse,
    )
    resp.raise_for_status()
    return resp.data

batch_create_evaluations_sync

batch_create_evaluations_sync(
    request: BatchCreateEvaluationsRequest,
) -> api_types.BatchCreateEvaluationsResponse

Creates multiple evaluations in a single request.

Source code in src/patronus/api/api_client.py
def batch_create_evaluations_sync(
    self, request: api_types.BatchCreateEvaluationsRequest
) -> api_types.BatchCreateEvaluationsResponse:
    """Creates multiple evaluations in a single request."""
    resp = self.call_sync(
        "POST",
        "/v1/evaluations/batch",
        body=request,
        response_cls=api_types.BatchCreateEvaluationsResponse,
    )
    resp.raise_for_status()
    return resp.data
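
A hedged sketch that records a single client-side evaluation (the request accepts 1 to 1000); the evaluator ID and values are placeholders:

```python
import uuid

from patronus.api import api_types

resp = client.batch_create_evaluations_sync(
    api_types.BatchCreateEvaluationsRequest(
        evaluations=[
            api_types.ClientEvaluation(
                evaluator_id="my-local-evaluator",  # placeholder local evaluator ID
                log_id=uuid.uuid4(),                # normally the ID of the log being scored
                pass_=True,
                score=1.0,
            )
        ]
    )
)
print(len(resp.evaluations))
```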

create_annotation_criteria async

create_annotation_criteria(
    request: CreateAnnotationCriteriaRequest,
) -> api_types.CreateAnnotationCriteriaResponse

Creates annotation criteria based on the given request.

Source code in src/patronus/api/api_client.py
async def create_annotation_criteria(
    self, request: api_types.CreateAnnotationCriteriaRequest
) -> api_types.CreateAnnotationCriteriaResponse:
    """Creates annotation criteria based on the given request."""
    resp = await self.call(
        "POST",
        "/v1/annotation-criteria",
        body=request,
        response_cls=api_types.CreateAnnotationCriteriaResponse,
    )
    resp.raise_for_status()
    return resp.data

create_annotation_criteria_sync

create_annotation_criteria_sync(
    request: CreateAnnotationCriteriaRequest,
) -> api_types.CreateAnnotationCriteriaResponse

Creates annotation criteria based on the given request.

Source code in src/patronus/api/api_client.py
def create_annotation_criteria_sync(
    self, request: api_types.CreateAnnotationCriteriaRequest
) -> api_types.CreateAnnotationCriteriaResponse:
    """Creates annotation criteria based on the given request."""
    resp = self.call_sync(
        "POST",
        "/v1/annotation-criteria",
        body=request,
        response_cls=api_types.CreateAnnotationCriteriaResponse,
    )
    resp.raise_for_status()
    return resp.data
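
A hedged sketch that registers a binary annotation criterion; the project ID and naming are placeholders:

```python
from patronus.api import api_types

resp = client.create_annotation_criteria_sync(
    api_types.CreateAnnotationCriteriaRequest(
        project_id="<project-id>",  # placeholder
        name="helpfulness",
        description="Was the response helpful to the user?",
        annotation_type=api_types.AnnotationType.binary,
    )
)
print(resp.annotation_criteria.id)
```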

create_criteria async

create_criteria(
    request: CreateCriteriaRequest,
) -> api_types.CreateCriteriaResponse

Creates evaluation criteria based on the given request.

Source code in src/patronus/api/api_client.py
async def create_criteria(self, request: api_types.CreateCriteriaRequest) -> api_types.CreateCriteriaResponse:
    """Creates evaluation criteria based on the given request."""
    resp = await self.call(
        "POST",
        "/v1/evaluator-criteria",
        body=request,
        response_cls=api_types.CreateCriteriaResponse,
    )
    resp.raise_for_status()
    return resp.data

create_criteria_sync

create_criteria_sync(
    request: CreateCriteriaRequest,
) -> api_types.CreateCriteriaResponse

Creates evaluation criteria based on the given request.

Source code in src/patronus/api/api_client.py
def create_criteria_sync(self, request: api_types.CreateCriteriaRequest) -> api_types.CreateCriteriaResponse:
    """Creates evaluation criteria based on the given request."""
    resp = self.call_sync(
        "POST",
        "/v1/evaluator-criteria",
        body=request,
        response_cls=api_types.CreateCriteriaResponse,
    )
    resp.raise_for_status()
    return resp.data
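
A hedged sketch; the evaluator family name and config keys are assumptions used only to show the request shape:

```python
from patronus.api import api_types

resp = client.create_criteria_sync(
    api_types.CreateCriteriaRequest(
        name="my-judge-criteria",
        evaluator_family="judge",  # assumed family name
        config={"pass_criteria": "The answer must cite a source."},  # assumed config shape
    )
)
print(resp.evaluator_criteria.public_id, resp.evaluator_criteria.revision)
```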

create_experiment async

create_experiment(
    request: CreateExperimentRequest,
) -> api_types.Experiment

Creates a new experiment based on the given request.

Source code in src/patronus/api/api_client.py
async def create_experiment(self, request: api_types.CreateExperimentRequest) -> api_types.Experiment:
    """Creates a new experiment based on the given request."""
    resp = await self.call(
        "POST",
        "/v1/experiments",
        body=request,
        response_cls=api_types.CreateExperimentResponse,
    )
    resp.raise_for_status()
    return resp.data.experiment

create_experiment_sync

create_experiment_sync(
    request: CreateExperimentRequest,
) -> api_types.Experiment

Creates a new experiment based on the given request.

Source code in src/patronus/api/api_client.py
def create_experiment_sync(self, request: api_types.CreateExperimentRequest) -> api_types.Experiment:
    """Creates a new experiment based on the given request."""
    resp = self.call_sync(
        "POST",
        "/v1/experiments",
        body=request,
        response_cls=api_types.CreateExperimentResponse,
    )
    resp.raise_for_status()
    return resp.data.experiment

create_project async

create_project(
    request: CreateProjectRequest,
) -> api_types.Project

Creates a new project based on the given request.

Source code in src/patronus/api/api_client.py
async def create_project(self, request: api_types.CreateProjectRequest) -> api_types.Project:
    """Creates a new project based on the given request."""
    resp = await self.call("POST", "/v1/projects", body=request, response_cls=api_types.Project)
    resp.raise_for_status()
    return resp.data

create_project_sync

create_project_sync(
    request: CreateProjectRequest,
) -> api_types.Project

Creates a new project based on the given request.

Source code in src/patronus/api/api_client.py
def create_project_sync(self, request: api_types.CreateProjectRequest) -> api_types.Project:
    """Creates a new project based on the given request."""
    resp = self.call_sync("POST", "/v1/projects", body=request, response_cls=api_types.Project)
    resp.raise_for_status()
    return resp.data
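
A hedged sketch that creates a project and then an experiment inside it, assuming `client` from the constructor example; names and tags are placeholders:

```python
from patronus.api import api_types

project = client.create_project_sync(
    api_types.CreateProjectRequest(name="docs-demo")
)
experiment = client.create_experiment_sync(
    api_types.CreateExperimentRequest(
        project_id=project.id,
        name="baseline-run",
        tags={"model": "gpt-4o"},  # placeholder tags
    )
)
print(experiment.id)
```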

delete_annotation_criteria async

delete_annotation_criteria(criteria_id: str) -> None

Deletes annotation criteria by its ID.

Source code in src/patronus/api/api_client.py
async def delete_annotation_criteria(self, criteria_id: str) -> None:
    """Deletes annotation criteria by its ID."""
    resp = await self.call(
        "DELETE",
        f"/v1/annotation-criteria/{criteria_id}",
        response_cls=None,
    )
    resp.raise_for_status()

delete_annotation_criteria_sync

delete_annotation_criteria_sync(criteria_id: str) -> None

Deletes annotation criteria by its ID.

Source code in src/patronus/api/api_client.py
def delete_annotation_criteria_sync(self, criteria_id: str) -> None:
    """Deletes annotation criteria by its ID."""
    resp = self.call_sync(
        "DELETE",
        f"/v1/annotation-criteria/{criteria_id}",
        response_cls=None,
    )
    resp.raise_for_status()

evaluate async

evaluate(
    request: EvaluateRequest,
) -> api_types.EvaluateResponse

Evaluates content using the specified evaluators.

Source code in src/patronus/api/api_client.py
async def evaluate(self, request: api_types.EvaluateRequest) -> api_types.EvaluateResponse:
    """Evaluates content using the specified evaluators."""
    resp = await self.call(
        "POST",
        "/v1/evaluate",
        body=request,
        response_cls=api_types.EvaluateResponse,
    )
    resp.raise_for_status()
    return resp.data

evaluate_one async

evaluate_one(
    request: EvaluateRequest,
) -> api_types.EvaluationResult

Evaluates content using a single evaluator.

Source code in src/patronus/api/api_client.py
async def evaluate_one(self, request: api_types.EvaluateRequest) -> api_types.EvaluationResult:
    """Evaluates content using a single evaluator."""
    if len(request.evaluators) > 1:
        raise ValueError("'evaluate_one()' cannot accept more than one evaluator in the request body")
    resp = await self.call(
        "POST",
        "/v1/evaluate",
        body=request,
        response_cls=api_types.EvaluateResponse,
    )
    return self._evaluate_one_process_resp(resp)

evaluate_one_sync

evaluate_one_sync(
    request: EvaluateRequest,
) -> api_types.EvaluationResult

Evaluates content using a single evaluator.

Source code in src/patronus/api/api_client.py
def evaluate_one_sync(self, request: api_types.EvaluateRequest) -> api_types.EvaluationResult:
    """Evaluates content using a single evaluator."""
    if len(request.evaluators) > 1:
        raise ValueError("'evaluate_one_sync()' cannot accept more than one evaluator in the request body")
    resp = self.call_sync(
        "POST",
        "/v1/evaluate",
        body=request,
        response_cls=api_types.EvaluateResponse,
    )
    return self._evaluate_one_process_resp(resp)

evaluate_sync

evaluate_sync(
    request: EvaluateRequest,
) -> api_types.EvaluateResponse

Evaluates content using the specified evaluators.

Source code in src/patronus/api/api_client.py
def evaluate_sync(self, request: api_types.EvaluateRequest) -> api_types.EvaluateResponse:
    """Evaluates content using the specified evaluators."""
    resp = self.call_sync(
        "POST",
        "/v1/evaluate",
        body=request,
        response_cls=api_types.EvaluateResponse,
    )
    resp.raise_for_status()
    return resp.data
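
A hedged sketch using `evaluate_one_sync` with a single evaluator; the evaluator alias and criteria name are assumptions, use `list_evaluators` and `list_criteria` to discover real ones:

```python
from patronus.api import api_types

result = client.evaluate_one_sync(
    api_types.EvaluateRequest(
        evaluators=[
            api_types.EvaluateEvaluator(
                evaluator="lynx",                   # assumed evaluator alias
                criteria="patronus:hallucination",  # assumed criteria name
            )
        ],
        evaluated_model_input="What is the capital of France?",
        evaluated_model_output="Paris.",
        evaluated_model_retrieved_context=["Paris is the capital of France."],
    )
)
print(result.pass_, result.score_raw)
```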

export_evaluations async

export_evaluations(
    request: ExportEvaluationRequest,
) -> api_types.ExportEvaluationResponse

Exports evaluations based on the given request.

Source code in src/patronus/api/api_client.py
async def export_evaluations(
    self, request: api_types.ExportEvaluationRequest
) -> api_types.ExportEvaluationResponse:
    """Exports evaluations based on the given request."""
    resp = await self.call(
        "POST",
        "/v1/evaluation-results/batch",
        body=request,
        response_cls=api_types.ExportEvaluationResponse,
    )
    resp.raise_for_status()
    return resp.data

export_evaluations_sync

export_evaluations_sync(
    request: ExportEvaluationRequest,
) -> api_types.ExportEvaluationResponse

Exports evaluations based on the given request.

Source code in src/patronus/api/api_client.py
def export_evaluations_sync(self, request: api_types.ExportEvaluationRequest) -> api_types.ExportEvaluationResponse:
    """Exports evaluations based on the given request."""
    resp = self.call_sync(
        "POST",
        "/v1/evaluation-results/batch",
        body=request,
        response_cls=api_types.ExportEvaluationResponse,
    )
    resp.raise_for_status()
    return resp.data
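
A hedged sketch that exports one locally produced evaluation result; the evaluator ID and values are placeholders:

```python
from patronus.api import api_types

resp = client.export_evaluations_sync(
    api_types.ExportEvaluationRequest(
        evaluation_results=[
            api_types.ExportEvaluationResult(
                evaluator_id="my-local-evaluator",  # placeholder
                evaluated_model_input="What is the capital of France?",
                evaluated_model_output="Paris.",
                pass_=True,
                score_raw=1.0,
            )
        ]
    )
)
print(resp.evaluation_results[0].id)
```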

get_experiment async

get_experiment(
    experiment_id: str,
) -> Optional[api_types.Experiment]

Fetches an experiment by its ID or returns None if not found.

Source code in src/patronus/api/api_client.py
async def get_experiment(self, experiment_id: str) -> Optional[api_types.Experiment]:
    """Fetches an experiment by its ID or returns None if not found."""
    resp = await self.call(
        "GET",
        f"/v1/experiments/{experiment_id}",
        response_cls=api_types.GetExperimentResponse,
    )
    if resp.response.status_code == 404:
        return None
    resp.raise_for_status()
    return resp.data.experiment

get_experiment_sync

get_experiment_sync(
    experiment_id: str,
) -> Optional[api_types.Experiment]

Fetches an experiment by its ID or returns None if not found.

Source code in src/patronus/api/api_client.py
def get_experiment_sync(self, experiment_id: str) -> Optional[api_types.Experiment]:
    """Fetches an experiment by its ID or returns None if not found."""
    resp = self.call_sync(
        "GET",
        f"/v1/experiments/{experiment_id}",
        response_cls=api_types.GetExperimentResponse,
    )
    if resp.response.status_code == 404:
        return None
    resp.raise_for_status()
    return resp.data.experiment
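
Because a 404 is mapped to None rather than an exception, callers should check the result. A short sketch with a placeholder ID:

```python
experiment = client.get_experiment_sync("<experiment-id>")  # placeholder
if experiment is None:
    print("experiment not found")
else:
    print(experiment.name, experiment.project_id)
```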

get_project async

get_project(project_id: str) -> api_types.Project

Fetches a project by its ID.

Source code in src/patronus/api/api_client.py
async def get_project(self, project_id: str) -> api_types.Project:
    """Fetches a project by its ID."""
    resp = await self.call(
        "GET",
        f"/v1/projects/{project_id}",
        response_cls=api_types.GetProjectResponse,
    )
    resp.raise_for_status()
    return resp.data.project

get_project_sync

get_project_sync(project_id: str) -> api_types.Project

Fetches a project by its ID.

Source code in src/patronus/api/api_client.py
def get_project_sync(self, project_id: str) -> api_types.Project:
    """Fetches a project by its ID."""
    resp = self.call_sync(
        "GET",
        f"/v1/projects/{project_id}",
        response_cls=api_types.GetProjectResponse,
    )
    resp.raise_for_status()
    return resp.data.project

list_annotation_criteria async

list_annotation_criteria(
    *,
    project_id: Optional[str] = None,
    limit: Optional[int] = None,
    offset: Optional[int] = None,
) -> api_types.ListAnnotationCriteriaResponse

Retrieves a list of annotation criteria with optional filtering.

Source code in src/patronus/api/api_client.py
async def list_annotation_criteria(
    self, *, project_id: Optional[str] = None, limit: Optional[int] = None, offset: Optional[int] = None
) -> api_types.ListAnnotationCriteriaResponse:
    """Retrieves a list of annotation criteria with optional filtering."""
    params = {}
    if project_id is not None:
        params["project_id"] = project_id
    if limit is not None:
        params["limit"] = limit
    if offset is not None:
        params["offset"] = offset
    resp = await self.call(
        "GET",
        "/v1/annotation-criteria",
        params=params,
        response_cls=api_types.ListAnnotationCriteriaResponse,
    )
    resp.raise_for_status()
    return resp.data

list_annotation_criteria_sync

list_annotation_criteria_sync(
    *,
    project_id: Optional[str] = None,
    limit: Optional[int] = None,
    offset: Optional[int] = None,
) -> api_types.ListAnnotationCriteriaResponse

Retrieves a list of annotation criteria with optional filtering.

Source code in src/patronus/api/api_client.py
def list_annotation_criteria_sync(
    self, *, project_id: Optional[str] = None, limit: Optional[int] = None, offset: Optional[int] = None
) -> api_types.ListAnnotationCriteriaResponse:
    """Retrieves a list of annotation criteria with optional filtering."""
    params = {}
    if project_id is not None:
        params["project_id"] = project_id
    if limit is not None:
        params["limit"] = limit
    if offset is not None:
        params["offset"] = offset
    resp = self.call_sync(
        "GET",
        "/v1/annotation-criteria",
        params=params,
        response_cls=api_types.ListAnnotationCriteriaResponse,
    )
    resp.raise_for_status()
    return resp.data
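
A hedged sketch showing the optional filtering and paging parameters; the project ID is a placeholder:

```python
page = client.list_annotation_criteria_sync(
    project_id="<project-id>",  # placeholder
    limit=50,
    offset=0,
)
for criteria in page.annotation_criteria:
    print(criteria.id, criteria.name, criteria.annotation_type)
```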

list_criteria async

list_criteria(
    request: ListCriteriaRequest,
) -> api_types.ListCriteriaResponse

Retrieves a list of evaluation criteria based on the given request.

Source code in src/patronus/api/api_client.py
async def list_criteria(self, request: api_types.ListCriteriaRequest) -> api_types.ListCriteriaResponse:
    """Retrieves a list of evaluation criteria based on the given request."""
    params = request.model_dump(exclude_none=True)
    resp = await self.call(
        "GET",
        "/v1/evaluator-criteria",
        params=params,
        response_cls=api_types.ListCriteriaResponse,
    )
    resp.raise_for_status()
    return resp.data

list_criteria_sync

list_criteria_sync(
    request: ListCriteriaRequest,
) -> api_types.ListCriteriaResponse

Retrieves a list of evaluation criteria based on the given request.

Source code in src/patronus/api/api_client.py
def list_criteria_sync(self, request: api_types.ListCriteriaRequest) -> api_types.ListCriteriaResponse:
    """Retrieves a list of evaluation criteria based on the given request."""
    params = request.model_dump(exclude_none=True)
    resp = self.call_sync(
        "GET",
        "/v1/evaluator-criteria",
        params=params,
        response_cls=api_types.ListCriteriaResponse,
    )
    resp.raise_for_status()
    return resp.data
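
A hedged sketch that lists the latest revision of Patronus-managed criteria:

```python
from patronus.api import api_types

resp = client.list_criteria_sync(
    api_types.ListCriteriaRequest(
        is_patronus_managed=True,
        get_last_revision=True,
        limit=100,
    )
)
for criteria in resp.evaluator_criteria:
    print(criteria.name, criteria.revision)
```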

list_dataset_data async

list_dataset_data(
    dataset_id: str,
) -> api_types.ListDatasetData

Retrieves data from a dataset by its ID.

Source code in src/patronus/api/api_client.py
async def list_dataset_data(self, dataset_id: str) -> api_types.ListDatasetData:
    """Retrieves data from a dataset by its ID."""
    resp = await self.call(
        "GET",
        f"/v1/datasets/{dataset_id}/data",
        response_cls=api_types.ListDatasetData,
    )
    resp.raise_for_status()
    return resp.data

list_dataset_data_sync

list_dataset_data_sync(
    dataset_id: str,
) -> api_types.ListDatasetData

Retrieves data from a dataset by its ID.

Source code in src/patronus/api/api_client.py
def list_dataset_data_sync(self, dataset_id: str) -> api_types.ListDatasetData:
    """Retrieves data from a dataset by its ID."""
    resp = self.call_sync(
        "GET",
        f"/v1/datasets/{dataset_id}/data",
        response_cls=api_types.ListDatasetData,
    )
    resp.raise_for_status()
    return resp.data

list_datasets async

list_datasets(
    dataset_type: Optional[str] = None,
) -> list[api_types.Dataset]

Retrieves a list of datasets, optionally filtered by type.

Source code in src/patronus/api/api_client.py
async def list_datasets(self, dataset_type: Optional[str] = None) -> list[api_types.Dataset]:
    """
    Retrieves a list of datasets, optionally filtered by type.
    """
    params = {}
    if dataset_type is not None:
        params["type"] = dataset_type

    resp = await self.call(
        "GET",
        "/v1/datasets",
        params=params,
        response_cls=api_types.ListDatasetsResponse,
    )
    resp.raise_for_status()
    return resp.data.datasets

list_datasets_sync

list_datasets_sync(
    dataset_type: Optional[str] = None,
) -> list[api_types.Dataset]

Retrieves a list of datasets, optionally filtered by type.

Source code in src/patronus/api/api_client.py
def list_datasets_sync(self, dataset_type: Optional[str] = None) -> list[api_types.Dataset]:
    """
    Retrieves a list of datasets, optionally filtered by type.
    """
    params = {}
    if dataset_type is not None:
        params["type"] = dataset_type

    resp = self.call_sync(
        "GET",
        "/v1/datasets",
        params=params,
        response_cls=api_types.ListDatasetsResponse,
    )
    resp.raise_for_status()
    return resp.data.datasets
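
A hedged sketch that lists datasets and then fetches the rows of the first one, assuming `client` from the constructor example:

```python
datasets = client.list_datasets_sync()
for dataset in datasets:
    print(dataset.id, dataset.name, dataset.samples)

if datasets:
    rows = client.list_dataset_data_sync(datasets[0].id).data
    print(f"first dataset has {len(rows)} rows")
```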

list_evaluators async

list_evaluators() -> list[api_types.Evaluator]

Retrieves a list of available evaluators.

Source code in src/patronus/api/api_client.py
async def list_evaluators(self) -> list[api_types.Evaluator]:
    """Retrieves a list of available evaluators."""
    resp = await self.call("GET", "/v1/evaluators", response_cls=api_types.ListEvaluatorsResponse)
    resp.raise_for_status()
    return resp.data.evaluators

list_evaluators_sync

list_evaluators_sync() -> list[api_types.Evaluator]

Retrieves a list of available evaluators.

Source code in src/patronus/api/api_client.py
def list_evaluators_sync(self) -> list[api_types.Evaluator]:
    """Retrieves a list of available evaluators."""
    resp = self.call_sync("GET", "/v1/evaluators", response_cls=api_types.ListEvaluatorsResponse)
    resp.raise_for_status()
    return resp.data.evaluators
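
A short sketch for discovering evaluator IDs and aliases to use with the evaluate methods:

```python
for evaluator in client.list_evaluators_sync():
    print(evaluator.id, evaluator.evaluator_family, evaluator.aliases)
```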

search_evaluations async

search_evaluations(
    request: SearchEvaluationsRequest,
) -> api_types.SearchEvaluationsResponse

Searches for evaluations based on the given criteria.

Source code in src/patronus/api/api_client.py
async def search_evaluations(
    self, request: api_types.SearchEvaluationsRequest
) -> api_types.SearchEvaluationsResponse:
    """Searches for evaluations based on the given criteria."""
    resp = await self.call(
        "POST",
        "/v1/evaluations/search",
        body=request,
        response_cls=api_types.SearchEvaluationsResponse,
    )
    resp.raise_for_status()
    return resp.data

search_evaluations_sync

search_evaluations_sync(
    request: SearchEvaluationsRequest,
) -> api_types.SearchEvaluationsResponse

Searches for evaluations based on the given criteria.

Source code in src/patronus/api/api_client.py
def search_evaluations_sync(
    self, request: api_types.SearchEvaluationsRequest
) -> api_types.SearchEvaluationsResponse:
    """Searches for evaluations based on the given criteria."""
    resp = self.call_sync(
        "POST",
        "/v1/evaluations/search",
        body=request,
        response_cls=api_types.SearchEvaluationsResponse,
    )
    resp.raise_for_status()
    return resp.data
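
A hedged sketch; the filter field and operation values are assumptions used only to show the request shape:

```python
from patronus.api import api_types

resp = client.search_evaluations_sync(
    api_types.SearchEvaluationsRequest(
        filters=[
            api_types.SearchEvaluationsFilter(
                field="app",      # assumed filterable field
                operation="eq",   # assumed operation name
                value="my-app",
            )
        ]
    )
)
print(len(resp.evaluations))
```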

search_logs async

search_logs(
    request: SearchLogsRequest,
) -> api_types.SearchLogsResponse

Searches for logs based on the given request.

Source code in src/patronus/api/api_client.py
async def search_logs(self, request: api_types.SearchLogsRequest) -> api_types.SearchLogsResponse:
    """Searches for logs based on the given request."""
    resp = await self.call(
        "POST",
        "/v1/otel/logs/search",
        body=request,
        response_cls=api_types.SearchLogsResponse,
    )
    resp.raise_for_status()
    return resp.data

search_logs_sync

search_logs_sync(
    request: SearchLogsRequest,
) -> api_types.SearchLogsResponse

Searches for logs based on the given request.

Source code in src/patronus/api/api_client.py
def search_logs_sync(self, request: api_types.SearchLogsRequest) -> api_types.SearchLogsResponse:
    """Searches for logs based on the given request."""
    resp = self.call_sync(
        "POST",
        "/v1/otel/logs/search",
        body=request,
        response_cls=api_types.SearchLogsResponse,
    )
    resp.raise_for_status()
    return resp.data
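
A hedged sketch; note that this filter model uses `op` rather than `operation`, and the field and operation values below are assumptions:

```python
from patronus.api import api_types

resp = client.search_logs_sync(
    api_types.SearchLogsRequest(
        filters=[
            api_types.SearchLogsFilter(
                field="trace_id",  # assumed filterable field
                op="eq",           # assumed operation name
                value="<trace-id>",
            )
        ],
        limit=100,
    )
)
for log in resp.logs:
    print(log.timestamp, log.body)
```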

update_annotation_criteria async

update_annotation_criteria(
    criteria_id: str,
    request: UpdateAnnotationCriteriaRequest,
) -> api_types.UpdateAnnotationCriteriaResponse

Updates annotation criteria based on the given request.

Source code in src/patronus/api/api_client.py
async def update_annotation_criteria(
    self, criteria_id: str, request: api_types.UpdateAnnotationCriteriaRequest
) -> api_types.UpdateAnnotationCriteriaResponse:
    """Creates annotation criteria based on the given request."""
    resp = await self.call(
        "PUT",
        f"/v1/annotation-criteria/{criteria_id}",
        body=request,
        response_cls=api_types.UpdateAnnotationCriteriaResponse,
    )
    resp.raise_for_status()
    return resp.data

update_annotation_criteria_sync

update_annotation_criteria_sync(
    criteria_id: str,
    request: UpdateAnnotationCriteriaRequest,
) -> api_types.UpdateAnnotationCriteriaResponse

Updates annotation criteria based on the given request.

Source code in src/patronus/api/api_client.py
def update_annotation_criteria_sync(
    self, criteria_id: str, request: api_types.UpdateAnnotationCriteriaRequest
) -> api_types.UpdateAnnotationCriteriaResponse:
    """Creates annotation criteria based on the given request."""
    resp = self.call_sync(
        "PUT",
        f"/v1/annotation-criteria/{criteria_id}",
        body=request,
        response_cls=api_types.UpdateAnnotationCriteriaResponse,
    )
    resp.raise_for_status()
    return resp.data
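
A hedged sketch; the criteria ID is a placeholder and the update body follows the fields documented on UpdateAnnotationCriteriaRequest:

```python
from patronus.api import api_types

resp = client.update_annotation_criteria_sync(
    "<annotation-criteria-id>",  # placeholder
    api_types.UpdateAnnotationCriteriaRequest(
        name="helpfulness",
        description="Updated wording of the guideline.",
        annotation_type=api_types.AnnotationType.binary,
    ),
)
print(resp.annotation_criteria.updated_at)
```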

upload_dataset async

upload_dataset(
    file_path: str,
    dataset_name: str,
    dataset_description: Optional[str] = None,
    custom_field_mapping: Optional[
        dict[str, Union[str, list[str]]]
    ] = None,
) -> api_types.Dataset

Upload a dataset file to create a new dataset in Patronus.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| file_path | str | Path to the dataset file (CSV or JSONL format) | required |
| dataset_name | str | Name for the created dataset | required |
| dataset_description | Optional[str] | Optional description for the dataset | None |
| custom_field_mapping | Optional[dict[str, Union[str, list[str]]]] | Optional mapping of standard field names to custom field names in the dataset | None |

Returns:

| Type | Description |
| --- | --- |
| Dataset | Dataset object representing the created dataset |

Source code in src/patronus/api/api_client.py
async def upload_dataset(
    self,
    file_path: str,
    dataset_name: str,
    dataset_description: Optional[str] = None,
    custom_field_mapping: Optional[dict[str, Union[str, list[str]]]] = None,
) -> api_types.Dataset:
    """
    Upload a dataset file to create a new dataset in Patronus.

    Args:
        file_path: Path to the dataset file (CSV or JSONL format)
        dataset_name: Name for the created dataset
        dataset_description: Optional description for the dataset
        custom_field_mapping: Optional mapping of standard field names to custom field names in the dataset

    Returns:
        Dataset object representing the created dataset
    """
    with open(file_path, "rb") as f:
        return await self.upload_dataset_from_buffer(f, dataset_name, dataset_description, custom_field_mapping)

upload_dataset_from_buffer async

upload_dataset_from_buffer(
    file_obj: BinaryIO,
    dataset_name: str,
    dataset_description: Optional[str] = None,
    custom_field_mapping: Optional[
        dict[str, Union[str, list[str]]]
    ] = None,
) -> api_types.Dataset

Upload a dataset file to create a new dataset in Patronus AI Platform.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| file_obj | BinaryIO | File-like object containing dataset content (CSV or JSONL format) | required |
| dataset_name | str | Name for the created dataset | required |
| dataset_description | Optional[str] | Optional description for the dataset | None |
| custom_field_mapping | Optional[dict[str, Union[str, list[str]]]] | Optional mapping of standard field names to custom field names in the dataset | None |

Returns:

| Type | Description |
| --- | --- |
| Dataset | Dataset object representing the created dataset |

Source code in src/patronus/api/api_client.py
async def upload_dataset_from_buffer(
    self,
    file_obj: typing.BinaryIO,
    dataset_name: str,
    dataset_description: Optional[str] = None,
    custom_field_mapping: Optional[dict[str, Union[str, list[str]]]] = None,
) -> api_types.Dataset:
    """
    Upload a dataset file to create a new dataset in Patronus AI Platform.

    Args:
        file_obj: File-like object containing dataset content (CSV or JSONL format)
        dataset_name: Name for the created dataset
        dataset_description: Optional description for the dataset
        custom_field_mapping: Optional mapping of standard field names to custom field names in the dataset

    Returns:
        Dataset object representing the created dataset
    """
    data = {
        "dataset_name": dataset_name,
    }

    if dataset_description is not None:
        data["dataset_description"] = dataset_description

    if custom_field_mapping is not None:
        data["custom_field_mapping"] = json.dumps(custom_field_mapping)

    files = {"file": (dataset_name, file_obj)}

    resp = await self.call_multipart(
        "POST",
        "/v1/datasets",
        files=files,
        data=data,
        response_cls=api_types.CreateDatasetResponse,
    )

    resp.raise_for_status()
    return resp.data.dataset

upload_dataset_from_buffer_sync

upload_dataset_from_buffer_sync(
    file_obj: BinaryIO,
    dataset_name: str,
    dataset_description: Optional[str] = None,
    custom_field_mapping: Optional[
        dict[str, Union[str, list[str]]]
    ] = None,
) -> api_types.Dataset

Upload a dataset file to create a new dataset in Patronus AI Platform.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| file_obj | BinaryIO | File-like object containing dataset content (CSV or JSONL format) | required |
| dataset_name | str | Name for the created dataset | required |
| dataset_description | Optional[str] | Optional description for the dataset | None |
| custom_field_mapping | Optional[dict[str, Union[str, list[str]]]] | Optional mapping of standard field names to custom field names in the dataset | None |

Returns:

| Type | Description |
| --- | --- |
| Dataset | Dataset object representing the created dataset |

Source code in src/patronus/api/api_client.py
def upload_dataset_from_buffer_sync(
    self,
    file_obj: typing.BinaryIO,
    dataset_name: str,
    dataset_description: Optional[str] = None,
    custom_field_mapping: Optional[dict[str, Union[str, list[str]]]] = None,
) -> api_types.Dataset:
    """
    Upload a dataset file to create a new dataset in Patronus AI Platform.

    Args:
        file_obj: File-like object containing dataset content (CSV or JSONL format)
        dataset_name: Name for the created dataset
        dataset_description: Optional description for the dataset
        custom_field_mapping: Optional mapping of standard field names to custom field names in the dataset

    Returns:
        Dataset object representing the created dataset
    """
    data = {
        "dataset_name": dataset_name,
    }

    if dataset_description is not None:
        data["dataset_description"] = dataset_description

    if custom_field_mapping is not None:
        data["custom_field_mapping"] = json.dumps(custom_field_mapping)

    files = {"file": (dataset_name, file_obj)}

    resp = self.call_multipart_sync(
        "POST",
        "/v1/datasets",
        files=files,
        data=data,
        response_cls=api_types.CreateDatasetResponse,
    )

    resp.raise_for_status()
    return resp.data.dataset

upload_dataset_sync

upload_dataset_sync(
    file_path: str,
    dataset_name: str,
    dataset_description: Optional[str] = None,
    custom_field_mapping: Optional[
        dict[str, Union[str, list[str]]]
    ] = None,
) -> api_types.Dataset

Upload a dataset file to create a new dataset in Patronus AI Platform.

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| file_path | str | Path to the dataset file (CSV or JSONL format) | required |
| dataset_name | str | Name for the created dataset | required |
| dataset_description | Optional[str] | Optional description for the dataset | None |
| custom_field_mapping | Optional[dict[str, Union[str, list[str]]]] | Optional mapping of standard field names to custom field names in the dataset | None |

Returns:

| Type | Description |
| --- | --- |
| Dataset | Dataset object representing the created dataset |

Source code in src/patronus/api/api_client.py
def upload_dataset_sync(
    self,
    file_path: str,
    dataset_name: str,
    dataset_description: Optional[str] = None,
    custom_field_mapping: Optional[dict[str, Union[str, list[str]]]] = None,
) -> api_types.Dataset:
    """
    Upload a dataset file to create a new dataset in Patronus AI Platform.

    Args:
        file_path: Path to the dataset file (CSV or JSONL format)
        dataset_name: Name for the created dataset
        dataset_description: Optional description for the dataset
        custom_field_mapping: Optional mapping of standard field names to custom field names in the dataset

    Returns:
        Dataset object representing the created dataset
    """
    with open(file_path, "rb") as f:
        return self.upload_dataset_from_buffer_sync(f, dataset_name, dataset_description, custom_field_mapping)
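
A hedged sketch; the file path and the column name in custom_field_mapping are placeholders for your own data:

```python
dataset = client.upload_dataset_sync(
    "data/qa_pairs.csv",  # placeholder path to a CSV or JSONL file
    dataset_name="qa-pairs",
    dataset_description="QA pairs for regression testing",
    custom_field_mapping={"evaluated_model_input": "question"},  # assumed column name
)
print(dataset.id, dataset.samples)
```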

whoami async

whoami() -> api_types.WhoAmIResponse

Fetches information about the authenticated user.

Source code in src/patronus/api/api_client.py
async def whoami(self) -> api_types.WhoAmIResponse:
    """Fetches information about the authenticated user."""
    resp = await self.call("GET", "/v1/whoami", response_cls=api_types.WhoAmIResponse)
    resp.raise_for_status()
    return resp.data

whoami_sync

whoami_sync() -> api_types.WhoAmIResponse

Fetches information about the authenticated user.

Source code in src/patronus/api/api_client.py
def whoami_sync(self) -> api_types.WhoAmIResponse:
    """Fetches information about the authenticated user."""
    resp = self.call_sync("GET", "/v1/whoami", response_cls=api_types.WhoAmIResponse)
    resp.raise_for_status()
    return resp.data
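
A short sketch, useful as a credentials check right after constructing the client:

```python
me = client.whoami_sync()
print(me.caller.api_key.account.name)
```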

patronus.api.api_types

SanitizedApp module-attribute

SanitizedApp = Annotated[
    str,
    _create_field_sanitizer(
        "[^a-zA-Z0-9-_./ -]", max_len=50, replace_with="_"
    ),
]

SanitizedLocalEvaluatorID module-attribute

SanitizedLocalEvaluatorID = Annotated[
    Optional[str],
    _create_field_sanitizer(
        "[^a-zA-Z0-9\\-_./]", max_len=50, replace_with="-"
    ),
]

SanitizedProjectName module-attribute

SanitizedProjectName = Annotated[
    str, project_name_sanitizer
]

project_name_sanitizer module-attribute

project_name_sanitizer = (
    _create_field_sanitizer(
        "[^a-zA-Z0-9_ -]", max_len=50, replace_with="_"
    ),
)

Account

Bases: BaseModel

id instance-attribute

id: str

name instance-attribute

name: str

AddEvaluatorCriteriaRevisionRequest

Bases: BaseModel

config instance-attribute

config: dict[str, Any]

AddEvaluatorCriteriaRevisionResponse

Bases: BaseModel

evaluator_criteria instance-attribute

evaluator_criteria: EvaluatorCriteria

AnnotateRequest

Bases: BaseModel

annotation_criteria_id instance-attribute

annotation_criteria_id: str

explanation class-attribute instance-attribute

explanation: Optional[str] = None

log_id instance-attribute

log_id: str

value_pass class-attribute instance-attribute

value_pass: Optional[bool] = None

value_score class-attribute instance-attribute

value_score: Optional[float] = None

value_text class-attribute instance-attribute

value_text: Optional[str] = None

AnnotateResponse

Bases: BaseModel

evaluation instance-attribute

evaluation: Evaluation

AnnotationCategory

Bases: BaseModel

label class-attribute instance-attribute

label: Optional[str] = None

score class-attribute instance-attribute

score: Optional[float] = None

AnnotationCriteria

Bases: BaseModel

annotation_type instance-attribute

annotation_type: AnnotationType

categories class-attribute instance-attribute

categories: Optional[list[AnnotationCategory]] = None

created_at instance-attribute

created_at: datetime

description class-attribute instance-attribute

description: Optional[str] = None

id instance-attribute

id: str

name instance-attribute

name: str

project_id instance-attribute

project_id: str

updated_at instance-attribute

updated_at: datetime

AnnotationType

Bases: str, Enum

binary class-attribute instance-attribute

binary = 'binary'

categorical class-attribute instance-attribute

categorical = 'categorical'

continuous class-attribute instance-attribute

continuous = 'continuous'

discrete class-attribute instance-attribute

discrete = 'discrete'

text_annotation class-attribute instance-attribute

text_annotation = 'text_annotation'

BatchCreateEvaluationsRequest

Bases: BaseModel

evaluations class-attribute instance-attribute

evaluations: list[ClientEvaluation] = Field(
    min_length=1, max_length=1000
)

BatchCreateEvaluationsResponse

Bases: BaseModel

evaluations instance-attribute

evaluations: list[Evaluation]

ClientEvaluation

Bases: BaseModel

app class-attribute instance-attribute

app: Optional[SanitizedApp] = None

created_at class-attribute instance-attribute

created_at: Optional[datetime] = None

criteria class-attribute instance-attribute

criteria: Optional[str] = None

dataset_id class-attribute instance-attribute

dataset_id: Optional[str] = None

dataset_sample_id class-attribute instance-attribute

dataset_sample_id: Optional[str] = None

evaluation_duration class-attribute instance-attribute

evaluation_duration: Optional[timedelta] = None

evaluator_id instance-attribute

evaluator_id: SanitizedLocalEvaluatorID

experiment_id class-attribute instance-attribute

experiment_id: Optional[str] = None

explanation class-attribute instance-attribute

explanation: Optional[str] = None

explanation_duration class-attribute instance-attribute

explanation_duration: Optional[timedelta] = None

log_id instance-attribute

log_id: UUID

metadata class-attribute instance-attribute

metadata: Optional[dict[str, Any]] = None

metric_description class-attribute instance-attribute

metric_description: Optional[str] = None

metric_name class-attribute instance-attribute

metric_name: Optional[str] = None

pass_ class-attribute instance-attribute

pass_: Optional[bool] = Field(
    default=None, serialization_alias="pass"
)

project_id class-attribute instance-attribute

project_id: Optional[str] = None

project_name class-attribute instance-attribute

project_name: Optional[SanitizedProjectName] = None

score class-attribute instance-attribute

score: Optional[float] = None

span_id class-attribute instance-attribute

span_id: Optional[str] = None

tags class-attribute instance-attribute

tags: Optional[dict[str, str]] = None

text_output class-attribute instance-attribute

text_output: Optional[str] = None

trace_id class-attribute instance-attribute

trace_id: Optional[str] = None

CreateAnnotationCriteriaRequest

Bases: BaseModel

annotation_type instance-attribute

annotation_type: AnnotationType

categories class-attribute instance-attribute

categories: Optional[list[AnnotationCategory]] = None

description class-attribute instance-attribute

description: Optional[str] = None

name class-attribute instance-attribute

name: str = Field(min_length=1, max_length=100)

project_id instance-attribute

project_id: str

CreateAnnotationCriteriaResponse

Bases: BaseModel

annotation_criteria instance-attribute

annotation_criteria: AnnotationCriteria

CreateCriteriaRequest

Bases: BaseModel

config instance-attribute

config: dict[str, Any]

evaluator_family instance-attribute

evaluator_family: str

name instance-attribute

name: str

CreateCriteriaResponse

Bases: BaseModel

evaluator_criteria instance-attribute

evaluator_criteria: EvaluatorCriteria

CreateDatasetResponse

Bases: BaseModel

dataset instance-attribute

dataset: Dataset

dataset_id instance-attribute

dataset_id: str

CreateExperimentRequest

Bases: BaseModel

name instance-attribute

name: str

project_id instance-attribute

project_id: str

tags class-attribute instance-attribute

tags: dict[str, str] = Field(default_factory=dict)

CreateExperimentResponse

Bases: BaseModel

experiment instance-attribute

experiment: Experiment

CreateProjectRequest

Bases: BaseModel

name instance-attribute

name: SanitizedProjectName

Dataset

Bases: BaseModel

created_at instance-attribute

created_at: datetime

creation_at class-attribute instance-attribute

creation_at: Optional[datetime] = None

description class-attribute instance-attribute

description: Optional[str] = None

id instance-attribute

id: str

name instance-attribute

name: str

samples instance-attribute

samples: int

type instance-attribute

type: str

DatasetDatum

Bases: BaseModel

dataset_id instance-attribute

dataset_id: str

evaluated_model_gold_answer class-attribute instance-attribute

evaluated_model_gold_answer: Optional[str] = None

evaluated_model_input class-attribute instance-attribute

evaluated_model_input: Optional[str] = None

evaluated_model_output class-attribute instance-attribute

evaluated_model_output: Optional[str] = None

evaluated_model_retrieved_context class-attribute instance-attribute

evaluated_model_retrieved_context: Optional[list[str]] = (
    None
)

evaluated_model_system_prompt class-attribute instance-attribute

evaluated_model_system_prompt: Optional[str] = None

meta_evaluated_model_name class-attribute instance-attribute

meta_evaluated_model_name: Optional[str] = None

meta_evaluated_model_params class-attribute instance-attribute

meta_evaluated_model_params: Optional[
    dict[str, Union[str, int, float]]
] = None

meta_evaluated_model_provider class-attribute instance-attribute

meta_evaluated_model_provider: Optional[str] = None

meta_evaluated_model_selected_model class-attribute instance-attribute

meta_evaluated_model_selected_model: Optional[str] = None

sid instance-attribute

sid: int

EvaluateEvaluator

Bases: BaseModel

criteria class-attribute instance-attribute

criteria: Optional[str] = None

evaluator instance-attribute

evaluator: str

explain_strategy class-attribute instance-attribute

explain_strategy: str = 'always'

EvaluateRequest

Bases: BaseModel

app class-attribute instance-attribute

app: Optional[str] = None

capture class-attribute instance-attribute

capture: str = 'all'

dataset_id class-attribute instance-attribute

dataset_id: Optional[str] = None

dataset_sample_id class-attribute instance-attribute

dataset_sample_id: Optional[str] = None

evaluated_model_attachments class-attribute instance-attribute

evaluated_model_attachments: Optional[
    list[EvaluatedModelAttachment]
] = None

evaluated_model_gold_answer class-attribute instance-attribute

evaluated_model_gold_answer: Optional[str] = None

evaluated_model_input class-attribute instance-attribute

evaluated_model_input: Optional[str] = None

evaluated_model_output class-attribute instance-attribute

evaluated_model_output: Optional[str] = None

evaluated_model_retrieved_context class-attribute instance-attribute

evaluated_model_retrieved_context: Optional[
    Union[list[str], str]
] = None

evaluated_model_system_prompt class-attribute instance-attribute

evaluated_model_system_prompt: Optional[str] = None

evaluators class-attribute instance-attribute

evaluators: list[EvaluateEvaluator] = Field(min_length=1)

experiment_id class-attribute instance-attribute

experiment_id: Optional[str] = None

log_id class-attribute instance-attribute

log_id: Optional[str] = None

project_id class-attribute instance-attribute

project_id: Optional[str] = None

project_name class-attribute instance-attribute

project_name: Optional[str] = None

span_id class-attribute instance-attribute

span_id: Optional[str] = None

tags class-attribute instance-attribute

tags: Optional[dict[str, str]] = None

trace_id class-attribute instance-attribute

trace_id: Optional[str] = None

EvaluateResponse

Bases: BaseModel

results instance-attribute

results: list[EvaluateResult]

EvaluateResult

Bases: BaseModel

criteria instance-attribute

criteria: str

error_message instance-attribute

error_message: Optional[str]

evaluation_result instance-attribute

evaluation_result: Optional[EvaluationResult]

evaluator_id instance-attribute

evaluator_id: str

status instance-attribute

status: str

EvaluatedModelAttachment

Bases: BaseModel

media_type instance-attribute

media_type: str

url instance-attribute

url: str

usage_type class-attribute instance-attribute

usage_type: Optional[str] = 'evaluated_model_input'

Evaluation

Bases: BaseModel

annotation_criteria_id class-attribute instance-attribute

annotation_criteria_id: Optional[str] = None

app class-attribute instance-attribute

app: Optional[str] = None

created_at class-attribute instance-attribute

created_at: datetime

criteria class-attribute instance-attribute

criteria: Optional[str] = None

criteria_id class-attribute instance-attribute

criteria_id: Optional[str] = None

dataset_id class-attribute instance-attribute

dataset_id: Optional[str] = None

dataset_sample_id class-attribute instance-attribute

dataset_sample_id: Optional[str] = None

evaluation_duration class-attribute instance-attribute

evaluation_duration: Optional[timedelta] = None

evaluation_type class-attribute instance-attribute

evaluation_type: Optional[str] = None

evaluator_family class-attribute instance-attribute

evaluator_family: Optional[str] = None

evaluator_id class-attribute instance-attribute

evaluator_id: Optional[str] = None

experiment_id class-attribute instance-attribute

experiment_id: Optional[int] = None

explain_strategy class-attribute instance-attribute

explain_strategy: Optional[str] = None

explanation class-attribute instance-attribute

explanation: Optional[str] = None

explanation_duration class-attribute instance-attribute

explanation_duration: Optional[timedelta] = None

id instance-attribute

id: int

log_id instance-attribute

log_id: str

metadata class-attribute instance-attribute

metadata: Optional[dict[str, Any]] = None

metric_description class-attribute instance-attribute

metric_description: Optional[str] = None

metric_name class-attribute instance-attribute

metric_name: Optional[str] = None

pass_ class-attribute instance-attribute

pass_: Optional[bool] = Field(default=None, alias='pass')

project_id class-attribute instance-attribute

project_id: Optional[str] = None

score class-attribute instance-attribute

score: Optional[float] = None

span_id class-attribute instance-attribute

span_id: Optional[str] = None

tags class-attribute instance-attribute

tags: Optional[dict[str, str]] = None

text_output class-attribute instance-attribute

text_output: Optional[str] = None

trace_id class-attribute instance-attribute

trace_id: Optional[str] = None

usage class-attribute instance-attribute

usage: Optional[dict[str, Any]] = None

EvaluationResult

Bases: BaseModel

additional_info class-attribute instance-attribute

additional_info: Optional[dict[str, Any]] = None

app class-attribute instance-attribute

app: Optional[str] = None

created_at class-attribute instance-attribute

created_at: Optional[AwareDatetime] = None

criteria instance-attribute

criteria: str

dataset_id class-attribute instance-attribute

dataset_id: Optional[str] = None

dataset_sample_id class-attribute instance-attribute

dataset_sample_id: Optional[int] = None

evaluated_model_gold_answer class-attribute instance-attribute

evaluated_model_gold_answer: Optional[str] = None

evaluated_model_input class-attribute instance-attribute

evaluated_model_input: Optional[str] = None

evaluated_model_output class-attribute instance-attribute

evaluated_model_output: Optional[str] = None

evaluated_model_retrieved_context class-attribute instance-attribute

evaluated_model_retrieved_context: Optional[list[str]] = (
    None
)

evaluated_model_system_prompt class-attribute instance-attribute

evaluated_model_system_prompt: Optional[str] = None

evaluation_duration class-attribute instance-attribute

evaluation_duration: Optional[timedelta] = None

evaluation_metadata class-attribute instance-attribute

evaluation_metadata: Optional[dict] = None

evaluator_family instance-attribute

evaluator_family: str

evaluator_id instance-attribute

evaluator_id: str

evaluator_profile_public_id instance-attribute

evaluator_profile_public_id: str

experiment_id class-attribute instance-attribute

experiment_id: Optional[str] = None

explanation class-attribute instance-attribute

explanation: Optional[str] = None

explanation_duration class-attribute instance-attribute

explanation_duration: Optional[timedelta] = None

id class-attribute instance-attribute

id: Optional[str] = None

pass_ class-attribute instance-attribute

pass_: Optional[bool] = Field(default=None, alias='pass')

project_id class-attribute instance-attribute

project_id: Optional[str] = None

score_raw class-attribute instance-attribute

score_raw: Optional[float] = None

tags class-attribute instance-attribute

tags: Optional[dict[str, str]] = None

text_output class-attribute instance-attribute

text_output: Optional[str] = None

Evaluator

Bases: BaseModel

aliases instance-attribute

aliases: Optional[list[str]]

evaluator_family instance-attribute

evaluator_family: Optional[str]

id instance-attribute

id: str

name instance-attribute

name: str

EvaluatorCriteria

Bases: BaseModel

config instance-attribute

config: Optional[dict[str, Any]]

created_at instance-attribute

created_at: datetime

description instance-attribute

description: Optional[str]

evaluator_family instance-attribute

evaluator_family: str

is_patronus_managed instance-attribute

is_patronus_managed: bool

name instance-attribute

name: str

public_id instance-attribute

public_id: str

revision instance-attribute

revision: int

Experiment

Bases: BaseModel

id instance-attribute

id: str

name instance-attribute

name: str

project_id instance-attribute

project_id: str

tags class-attribute instance-attribute

tags: Optional[dict[str, str]] = None

ExportEvaluationRequest

Bases: BaseModel

evaluation_results instance-attribute

evaluation_results: list[ExportEvaluationResult]

ExportEvaluationResponse

Bases: BaseModel

evaluation_results instance-attribute

evaluation_results: list[ExportEvaluationResultPartial]

ExportEvaluationResult

Bases: BaseModel

app class-attribute instance-attribute

app: Optional[str] = None

criteria class-attribute instance-attribute

criteria: Optional[str] = None

dataset_id class-attribute instance-attribute

dataset_id: Optional[str] = None

dataset_sample_id class-attribute instance-attribute

dataset_sample_id: Optional[int] = None

evaluated_model_attachments class-attribute instance-attribute

evaluated_model_attachments: Optional[
    list[EvaluatedModelAttachment]
] = None

evaluated_model_gold_answer class-attribute instance-attribute

evaluated_model_gold_answer: Optional[str] = None

evaluated_model_input class-attribute instance-attribute

evaluated_model_input: Optional[str] = None

evaluated_model_name class-attribute instance-attribute

evaluated_model_name: Optional[str] = None

evaluated_model_output class-attribute instance-attribute

evaluated_model_output: Optional[str] = None

evaluated_model_params class-attribute instance-attribute

evaluated_model_params: Optional[
    dict[str, Union[str, int, float]]
] = None

evaluated_model_provider class-attribute instance-attribute

evaluated_model_provider: Optional[str] = None

evaluated_model_retrieved_context class-attribute instance-attribute

evaluated_model_retrieved_context: Optional[list[str]] = (
    None
)

evaluated_model_selected_model class-attribute instance-attribute

evaluated_model_selected_model: Optional[str] = None

evaluated_model_system_prompt class-attribute instance-attribute

evaluated_model_system_prompt: Optional[str] = None

evaluation_duration class-attribute instance-attribute

evaluation_duration: Optional[timedelta] = None

evaluation_metadata class-attribute instance-attribute

evaluation_metadata: Optional[dict[str, Any]] = None

evaluator_id instance-attribute

evaluator_id: SanitizedLocalEvaluatorID

experiment_id class-attribute instance-attribute

experiment_id: Optional[str] = None

explanation class-attribute instance-attribute

explanation: Optional[str] = None

explanation_duration class-attribute instance-attribute

explanation_duration: Optional[timedelta] = None

pass_ class-attribute instance-attribute

pass_: Optional[bool] = Field(
    default=None, serialization_alias="pass"
)

score_raw class-attribute instance-attribute

score_raw: Optional[float] = None

tags class-attribute instance-attribute

tags: Optional[dict[str, str]] = None

text_output class-attribute instance-attribute

text_output: Optional[str] = None

ExportEvaluationResultPartial

Bases: BaseModel

app instance-attribute

app: Optional[str]

created_at instance-attribute

created_at: AwareDatetime

evaluator_id instance-attribute

evaluator_id: str

id instance-attribute

id: str

GetAnnotationCriteriaResponse

Bases: BaseModel

annotation_criteria instance-attribute

annotation_criteria: AnnotationCriteria

GetEvaluationResponse

Bases: BaseModel

evaluation instance-attribute

evaluation: Evaluation

GetExperimentResponse

Bases: BaseModel

experiment instance-attribute

experiment: Experiment

GetProjectResponse

Bases: BaseModel

project instance-attribute

project: Project

ListAnnotationCriteriaResponse

Bases: BaseModel

annotation_criteria instance-attribute

annotation_criteria: list[AnnotationCriteria]

ListCriteriaRequest

Bases: BaseModel

evaluator_family class-attribute instance-attribute

evaluator_family: Optional[str] = None

evaluator_id class-attribute instance-attribute

evaluator_id: Optional[str] = None

get_last_revision class-attribute instance-attribute

get_last_revision: bool = False

is_patronus_managed class-attribute instance-attribute

is_patronus_managed: Optional[bool] = None

limit class-attribute instance-attribute

limit: int = 1000

name class-attribute instance-attribute

name: Optional[str] = None

offset class-attribute instance-attribute

offset: int = 0

public_id class-attribute instance-attribute

public_id: Optional[str] = None

revision class-attribute instance-attribute

revision: Optional[str] = None

ListCriteriaResponse

Bases: BaseModel

evaluator_criteria instance-attribute

evaluator_criteria: list[EvaluatorCriteria]

ListDatasetData

Bases: BaseModel

data instance-attribute

data: list[DatasetDatum]

ListDatasetsResponse

Bases: BaseModel

datasets instance-attribute

datasets: list[Dataset]

ListEvaluatorsResponse

Bases: BaseModel

evaluators instance-attribute

evaluators: list[Evaluator]

Log

Bases: BaseModel

body class-attribute instance-attribute

body: Any = None

log_attributes class-attribute instance-attribute

log_attributes: Optional[dict[str, str]] = None

resource_attributes class-attribute instance-attribute

resource_attributes: Optional[dict[str, str]] = None

resource_schema_url class-attribute instance-attribute

resource_schema_url: Optional[str] = None

scope_attributes class-attribute instance-attribute

scope_attributes: Optional[dict[str, str]] = None

scope_name class-attribute instance-attribute

scope_name: Optional[str] = None

scope_schema_url class-attribute instance-attribute

scope_schema_url: Optional[str] = None

scope_version class-attribute instance-attribute

scope_version: Optional[str] = None

service_name class-attribute instance-attribute

service_name: Optional[str] = None

severity_number class-attribute instance-attribute

severity_number: Optional[int] = None

severity_test class-attribute instance-attribute

severity_test: Optional[str] = None

span_id class-attribute instance-attribute

span_id: Optional[str] = None

timestamp class-attribute instance-attribute

timestamp: Optional[datetime] = None

trace_flags class-attribute instance-attribute

trace_flags: Optional[int] = None

trace_id class-attribute instance-attribute

trace_id: Optional[str] = None

Project

Bases: BaseModel

id instance-attribute

id: str

name instance-attribute

name: str

SearchEvaluationsFilter

Bases: BaseModel

and_ class-attribute instance-attribute

and_: Optional[list[SearchEvaluationsFilter]] = None

field class-attribute instance-attribute

field: Optional[str] = None

operation class-attribute instance-attribute

operation: Optional[str] = None

or_ class-attribute instance-attribute

or_: Optional[list[SearchEvaluationsFilter]] = None

value class-attribute instance-attribute

value: Optional[Any] = None

SearchEvaluationsRequest

Bases: BaseModel

filters class-attribute instance-attribute

filters: Optional[list[SearchEvaluationsFilter]] = None

SearchEvaluationsResponse

Bases: BaseModel

evaluations instance-attribute

evaluations: list[Evaluation]

SearchLogsFilter

Bases: BaseModel

and_ class-attribute instance-attribute

and_: Optional[list[SearchLogsFilter]] = None

field class-attribute instance-attribute

field: Optional[str] = None

op class-attribute instance-attribute

op: Optional[str] = None

or_ class-attribute instance-attribute

or_: Optional[list[SearchLogsFilter]] = None

value class-attribute instance-attribute

value: Optional[Any] = None

SearchLogsRequest

Bases: BaseModel

filters class-attribute instance-attribute

filters: Optional[list[SearchLogsFilter]] = None

limit class-attribute instance-attribute

limit: int = 1000

order class-attribute instance-attribute

order: str = 'timestamp desc'

SearchLogsResponse

Bases: BaseModel

logs instance-attribute

logs: list[Log]

UpdateAnnotationCriteriaRequest

Bases: BaseModel

annotation_type instance-attribute

annotation_type: AnnotationType

categories class-attribute instance-attribute

categories: Optional[list[AnnotationCategory]] = None

description class-attribute instance-attribute

description: Optional[str] = None

name class-attribute instance-attribute

name: str = Field(min_length=1, max_length=100)

UpdateAnnotationCriteriaResponse

Bases: BaseModel

annotation_criteria instance-attribute

annotation_criteria: AnnotationCriteria

WhoAmIAPIKey

Bases: BaseModel

account instance-attribute

account: Account

id instance-attribute

id: str

WhoAmICaller

Bases: BaseModel

api_key instance-attribute

api_key: WhoAmIAPIKey

WhoAmIResponse

Bases: BaseModel

caller instance-attribute

caller: WhoAmICaller

_create_field_sanitizer

_create_field_sanitizer(
    pattern: str,
    *,
    max_len: int,
    replace_with: str,
    strip: bool = True,
)

Source code in src/patronus/api/api_types.py
def _create_field_sanitizer(pattern: str, *, max_len: int, replace_with: str, strip: bool = True):
    def sanitize(value: typing.Any, _: pydantic.ValidationInfo) -> str:
        if not isinstance(value, str):
            return value
        if strip:
            value = value.strip()
        return re.sub(pattern, replace_with, value[:max_len])

    return pydantic.BeforeValidator(sanitize)
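
An illustrative sketch of how these sanitizers behave when applied through annotated model fields such as ClientEvaluation.evaluator_id and ClientEvaluation.app; the values are placeholders:

```python
import uuid

from patronus.api import api_types

ev = api_types.ClientEvaluation(
    evaluator_id="my evaluator!",  # characters outside the allowed set are replaced with "-"
    log_id=uuid.uuid4(),
    app="my app @ prod",           # characters outside the allowed set are replaced with "_"
)
print(ev.evaluator_id)  # "my-evaluator-"
print(ev.app)           # "my app _ prod"
```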

sanitize_field

sanitize_field(max_length: int, sub_pattern: str)

Source code in src/patronus/api/api_types.py
def sanitize_field(max_length: int, sub_pattern: str):
    def wrapper(value: str) -> str:
        if not value:
            return value
        value = value[:max_length]
        return re.sub(sub_pattern, "_", value).strip()

    return wrapper