Calibrate
List
Lists the Scoring Spec Calibration Jobs owned by a user.
GET /scoring_system/calibrate
import PiClient from 'withpi';
const client = new PiClient({
apiKey: process.env['WITHPI_API_KEY'], // This is the default and can be omitted
});
async function main() {
const scoringSpecCalibrationStatuses = await client.scoringSystem.calibrate.list();
console.log(scoringSpecCalibrationStatuses);
}
main();
[
{
"calibrated_scoring_spec": [
{
"custom_model_id": "your-model-id",
"is_lower_score_desirable": "False",
"label": "Relevance to Prompt",
"parameters": [
0.14285714285714285,
0.2857142857142857,
0.42857142857142855,
0.5714285714285714,
0.7142857142857143,
0.8571428571428571
],
"python_code": "\ndef score(response_text: str, input_text: str, kwargs: dict) -> dict:\n word_count = len(response_text.split())\n if word_count > 10:\n return {\"score\": 0.2, \"explanation\": \"Response has more than 10 words\"}\n elif word_count > 5:\n return{\"score\": 0.6, \"explanation\": \"Response has more than 5 words\"}\n else:\n return {\"score\": 1, \"explanation\": \"Response has 5 or fewer words\"}\n",
"question": "Is the response relevant to the prompt?",
"scoring_type": "PI_SCORER",
"tag": "Legal Formatting",
"weight": 1
}
],
"detailed_status": [
"Downloading model",
"Tuning prompt"
],
"job_id": "1234abcd",
"state": "RUNNING"
}
]
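Because calibration jobs run asynchronously, a common pattern is to scan the returned list for jobs that are still in progress. The sketch below uses only the fields shown in the sample response above; the helper name and control flow are illustrative, not part of the SDK:

import PiClient from 'withpi';

const client = new PiClient({
  apiKey: process.env['WITHPI_API_KEY'],
});

// Log every job that has not yet finished, along with its most recent status steps.
async function reportInProgressJobs() {
  const statuses = await client.scoringSystem.calibrate.list();
  for (const job of statuses) {
    if (job.state === 'QUEUED' || job.state === 'RUNNING') {
      console.log(`Job ${job.job_id} is ${job.state}`);
      console.log(`  Recent steps: ${job.detailed_status.join(' -> ')}`);
    }
  }
}

reportInProgressJobs();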
Authorizations
Query Parameters
Filter jobs by state.
Available options: QUEUED, RUNNING, DONE, ERROR, CANCELLED
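If the SDK forwards this filter as a query option, listing only running jobs might look like the sketch below. The state parameter name is an assumption inferred from the filter description above; check the SDK's request types for the exact shape:

// Assumes list() accepts a query object with a 'state' filter (unverified).
const runningJobs = await client.scoringSystem.calibrate.list({ state: 'RUNNING' });
console.log(runningJobs);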
Response
200 - application/json
Successful Response
The response is of type object[].
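For orientation, the element shape implied by the sample response can be sketched as a TypeScript interface. The field names and state values come from the example and the query options above; the interface name and nested types are assumptions, not the SDK's published definitions:

// Hypothetical shape of one element in the returned array, derived from the sample response.
interface ScoringSpecCalibrationStatus {
  calibrated_scoring_spec: Array<{
    custom_model_id: string;
    is_lower_score_desirable: boolean;
    label: string;
    parameters: number[];
    python_code: string;   // a serialized Python scorer, as in the example
    question: string;
    scoring_type: string;  // e.g. 'PI_SCORER'
    tag: string;
    weight: number;
  }>;
  detailed_status: string[];
  job_id: string;
  state: 'QUEUED' | 'RUNNING' | 'DONE' | 'ERROR' | 'CANCELLED';
}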