curl --request POST \
--url https://api.galileo.ai/v2/projects/{project_id}/metrics-testing/available_columns \
--header 'Content-Type: application/json' \
--header 'Galileo-API-Key: <api-key>' \
--data '
{
"name": "<string>",
"log_stream_id": "<string>",
"experiment_id": "<string>",
"metrics_testing_id": "<string>",
"output_type": "boolean",
"cot_enabled": false
}
'
{
"columns": [
{
"id": "<string>",
"category": "standard",
"data_type": "uuid",
"label": "<string>",
"description": "<string>",
"group_label": "<string>",
"data_unit": "percentage",
"multi_valued": false,
"allowed_values": [
"<unknown>"
],
"sortable": true,
"filterable": true,
"is_empty": false,
"applicable_types": [
"llm"
],
"complex": false,
"is_optional": false,
"scorer_config": {
"id": "<string>",
"scorer_type": "llm",
"model_name": "<string>",
"num_judges": 123,
"filters": [
{
"value": "<string>",
"operator": "eq",
"name": "node_name",
"filter_type": "string",
"case_sensitive": true
}
],
"scoreable_node_types": [
"<string>"
],
"cot_enabled": true,
"output_type": "boolean",
"input_type": "basic",
"name": "<string>",
"model_type": "slm",
"scorer_version": {
"id": "<string>",
"version": 123,
"scorer_id": "<string>",
"generated_scorer": {
"id": "<string>",
"name": "<string>",
"chain_poll_template": {
"template": "<string>",
"metric_system_prompt": "<string>",
"metric_description": "<string>",
"value_field_name": "rating",
"explanation_field_name": "explanation",
"metric_few_shot_examples": [
{
"generation_prompt_and_response": "<string>",
"evaluating_response": "<string>"
}
],
"response_schema": {}
},
"instructions": "<string>",
"user_prompt": "<string>"
},
"registered_scorer": {
"id": "<string>",
"name": "<string>",
"score_type": "<string>"
},
"finetuned_scorer": {
"id": "<string>",
"name": "<string>",
"lora_task_id": 123,
"prompt": "<string>",
"luna_input_type": "span",
"luna_output_type": "float",
"class_name_to_vocab_ix": {},
"executor": "action_completion_luna"
},
"model_name": "<string>",
"num_judges": 123,
"scoreable_node_types": [
"<string>"
],
"cot_enabled": true,
"output_type": "boolean",
"input_type": "basic"
}
},
"scorer_id": "<string>",
"insight_type": "vertical_bar",
"filter_type": "id",
"threshold": {
"inverted": false,
"buckets": [
123
],
"display_value_levels": [
"<string>"
]
}
}
]
}

curl --request POST \
--url https://api.galileo.ai/v2/projects/{project_id}/metrics-testing/available_columns \
--header 'Content-Type: application/json' \
--header 'Galileo-API-Key: <api-key>' \
--data '
{
"name": "<string>",
"log_stream_id": "<string>",
"experiment_id": "<string>",
"metrics_testing_id": "<string>",
"output_type": "boolean",
"cot_enabled": false
}
'
{
"columns": [
{
"id": "<string>",
"category": "standard",
"data_type": "uuid",
"label": "<string>",
"description": "<string>",
"group_label": "<string>",
"data_unit": "percentage",
"multi_valued": false,
"allowed_values": [
"<unknown>"
],
"sortable": true,
"filterable": true,
"is_empty": false,
"applicable_types": [
"llm"
],
"complex": false,
"is_optional": false,
"scorer_config": {
"id": "<string>",
"scorer_type": "llm",
"model_name": "<string>",
"num_judges": 123,
"filters": [
{
"value": "<string>",
"operator": "eq",
"name": "node_name",
"filter_type": "string",
"case_sensitive": true
}
],
"scoreable_node_types": [
"<string>"
],
"cot_enabled": true,
"output_type": "boolean",
"input_type": "basic",
"name": "<string>",
"model_type": "slm",
"scorer_version": {
"id": "<string>",
"version": 123,
"scorer_id": "<string>",
"generated_scorer": {
"id": "<string>",
"name": "<string>",
"chain_poll_template": {
"template": "<string>",
"metric_system_prompt": "<string>",
"metric_description": "<string>",
"value_field_name": "rating",
"explanation_field_name": "explanation",
"metric_few_shot_examples": [
{
"generation_prompt_and_response": "<string>",
"evaluating_response": "<string>"
}
],
"response_schema": {}
},
"instructions": "<string>",
"user_prompt": "<string>"
},
"registered_scorer": {
"id": "<string>",
"name": "<string>",
"score_type": "<string>"
},
"finetuned_scorer": {
"id": "<string>",
"name": "<string>",
"lora_task_id": 123,
"prompt": "<string>",
"luna_input_type": "span",
"luna_output_type": "float",
"class_name_to_vocab_ix": {},
"executor": "action_completion_luna"
},
"model_name": "<string>",
"num_judges": 123,
"scoreable_node_types": [
"<string>"
],
"cot_enabled": true,
"output_type": "boolean",
"input_type": "basic"
}
},
"scorer_id": "<string>",
"insight_type": "vertical_bar",
"filter_type": "id",
"threshold": {
"inverted": false,
"buckets": [
123
],
"display_value_levels": [
"<string>"
]
}
}
]
}

Request to get the available columns for the metrics testing table.
Name of the metric that we are testing.
Log stream id associated with the traces.
Experiment id associated with the traces.
Metrics testing id associated with the traces.
Output type of the metrics testing table. If not provided, all columns are returned. Allowed values: boolean, categorical, count, discrete, freeform, percentage, multilabel.
Whether the metrics testing table is using chain-of-thought (CoT) enabled scorers. If True, the columns will be generated for CoT-enabled scorers.
Successful Response
Show child attributes
Was this page helpful?