How to integrate an asset
This guide helps you understand how to integrate models, pipelines and agents into your workflows seamlessly.
Models
To find all the models available on aiXplain, explore our marketplace.
- Python
- Swift
- cURL
To see the list of available models, run the code below.
from aixplain.factories import ModelFactory
from aixplain.enums import Supplier

# List every model a given supplier offers on the aiXplain marketplace.
models = ModelFactory.list(suppliers=Supplier.GROQ)["results"]
for entry in models:
    print(entry.__dict__)
The aiXplain SDK allows you to run models synchronously or asynchronously. The examples shown use the GPT-4 model.
Synchronous
from aixplain.factories import ModelFactory

# Fetch the model by its marketplace ID (GPT-4 in this example).
model = ModelFactory.get("6414bd3cd09663e9225130e8")

# Build the payload first; optional generation parameters stay commented out.
payload = {
    "text": "TEXT_DATA",
    # "prompt": "<PROMPT_TEXT_DATA>",
    # "context": "<CONTEXT_TEXT_DATA>",
    # "temperature": "<TEMPERATURE_TEXT_DATA>",
    # "max_tokens": "<MAX_TOKENS_TEXT_DATA>",
    # "history": "<HISTORY_TEXT_DATA>",
}

# Synchronous call: blocks until the model returns.
result = model.run(payload)
print(result)
Asynchronous
import time

from aixplain.factories import ModelFactory

# Fetch the model by its marketplace ID (GPT-4 in this example).
model = ModelFactory.get("6414bd3cd09663e9225130e8")

# Kick off an asynchronous run; optional parameters stay commented out.
start_response = model.run_async({
    "text": "TEXT_DATA",
    # "prompt": "<PROMPT_TEXT_DATA>",
    # "context": "<CONTEXT_TEXT_DATA>",
    # "temperature": "<TEMPERATURE_TEXT_DATA>",
    # "max_tokens": "<MAX_TOKENS_TEXT_DATA>",
    # "history": "<HISTORY_TEXT_DATA>",
})

# Poll the returned URL until the request leaves the IN_PROGRESS state.
status = model.poll(start_response['url'])
print(status)
while status['status'] == 'IN_PROGRESS':
    time.sleep(5)  # wait 5 seconds between status checks
    status = model.poll(start_response['url'])
    print(status)
Use the code below to run your model. The example shown uses the GPT-4 model.
// Fetch the model by its marketplace ID (GPT-4 in this example).
let model = try? await ModelProvider().get("6414bd3cd09663e9225130e8")

// Request payload; optional generation parameters are commented out.
let payload: [String: Any] = [
    "text": "<TEXT_TEXT_DATA>",
    // "prompt": "<PROMPT_TEXT_DATA>",
    // "context": "<CONTEXT_TEXT_DATA>",
    // "temperature": "<TEMPERATURE_TEXT_DATA>",
    // "max_tokens": "<MAX_TOKENS_TEXT_DATA>",
    // "history": "<HISTORY_TEXT_DATA>"
]

let response = try await model.run(payload)
print(response)
Run models with cURL using the code below. The endpoint shown is for the GPT-4 model.
POST
# Start a GPT-4 model execution; the response contains a requestId for polling.
curl -X POST 'https://models.aixplain.com/api/v1/execute/6414bd3cd09663e9225130e8' \
-H 'x-api-key: TEAM_API_KEY' \
-H 'Content-Type: application/json' \
-d '{
"text": "<TEXT_TEXT_DATA>",
"prompt": "<PROMPT_TEXT_DATA>",
"context": "<CONTEXT_TEXT_DATA>",
"temperature": "<TEMPERATURE_TEXT_DATA>",
"max_tokens": "<MAX_TOKENS_TEXT_DATA>",
"history": "<HISTORY_TEXT_DATA>"
}'
GET
# Poll for the result using the requestId returned by the POST call.
# Fixed: the URL was wrapped in a stray <...> pair (markdown autolink artifact),
# which would have been sent to curl verbatim and produced an invalid URL.
curl -X GET 'https://models.aixplain.com/api/v1/data/<requestId>' \
-H 'x-api-key: TEAM_API_KEY' \
-H 'Content-Type: application/json'
Pipelines
To learn more about how to build pipelines, follow this guide.
- Python
- Swift
- cURL
To display pipelines that you have onboarded, run the code below.
from aixplain.factories import PipelineFactory

# List every pipeline onboarded under the current team API key.
pipelines = PipelineFactory.list()['results']
for entry in pipelines:
    print(entry.__dict__)
You can call a pipeline using this code.
pipeline = PipelineFactory.get('<pipeline_id>')
Then run the pipeline either synchronously or asynchronously.
Synchronous
result = pipeline.run("This is a sample text")
For multi-input pipelines, you can specify as input a dictionary where the keys are the label names of the input nodes and values are their corresponding content.
# Keys are the label names of the pipeline's input nodes.
inputs = {
    "Input 1": "This is a sample text to input node 1.",
    "Input 2": "This is a sample text to input node 2."
}
result = pipeline.run(inputs)
# or, passing the same mapping through the `data` keyword
result = pipeline.run(data=inputs)
Asynchronous
import time

from aixplain.factories import PipelineFactory

pipeline = PipelineFactory.get("<pipeline_id>")

# Start the pipeline asynchronously; the response carries the polling URL.
start_response = pipeline.run_async("This is a sample text")
poll_url = start_response["url"]

# Polling loop: wait for the completion of the asynchronous request.
while True:
    # Fixed: the original called `model.poll(poll_url)`, but neither `model`
    # nor `poll_url` was defined in this snippet — poll the pipeline instead.
    result = pipeline.poll(poll_url)
    if result.get("completed"):
        print(result)
        break
    time.sleep(5)  # Wait for 5 seconds before checking the result again
You can run your onboarded pipelines with this code.
// Fetch a pipeline by its ID and run it with one value per input node.
let pipeline = try? await PipelineProvider().get("<pipeline_id>")

// Keys are the label names of the pipeline's input nodes.
let payload: [String: Any] = [
    "Input 1": "<INPUT_1_TEXT_DATA>",
    "Input 2": "<INPUT_2_AUDIO_DATA>"
]

let response = try await pipeline.run(payload)
print(response)
You can run your existing pipelines using the code below.
POST
# Run a pipeline; keys in the payload are the label names of its input nodes.
# Fixed: the "Input 2" placeholder read <INPUT_1_AUDIO_DATA>, inconsistent with
# the Swift/Python/JS examples which use <INPUT_2_AUDIO_DATA>.
curl -X POST 'https://platform-api.aixplain.com/assets/pipeline/execution/run/<pipeline_id>' \
-H 'x-api-key: TEAM_API_KEY' \
-H 'Content-Type: application/json' \
-d '{
"Input 1": "<INPUT_1_TEXT_DATA>",
"Input 2": "<INPUT_2_AUDIO_DATA>"
}'
GET
# Check the pipeline run's status/result using the requestId from the POST call.
curl -X GET 'https://platform-api.aixplain.com/assets/pipeline/execution/check/<requestId>' \
-H 'x-api-key: TEAM_API_KEY' \
-H 'Content-Type: application/json'
Agents
Learn more about building agents through this guide.
- Python
- cURL
To display agents that you have onboarded, run the code below.
from aixplain.factories import AgentFactory

# List every agent onboarded under the current team API key.
agents = AgentFactory.list()["results"]
for entry in agents:
    print(entry.__dict__)
Once you know an agent's unique ID, you can access the agent directly.
# Fetch a single agent by its unique ID and inspect its attributes.
agent = AgentFactory.get("<agent_id>")
agent.__dict__
Run the agent using the following code.
# Run the agent with a plain-text query.
agent_response = agent.run(
    "This is an example"
)
# Fixed: `display` is an IPython/Jupyter builtin and raises NameError in a
# plain Python script; `print` works everywhere.
print(agent_response)
To run your agent, use the code below.
POST
# Run an agent with a text query.
# Fixed: the payload had a trailing comma after the "query" entry, which is
# invalid JSON and is rejected by strict parsers.
curl -X POST 'https://platform-api.aixplain.com/sdk/agents/<agent_id>/run' \
-H 'x-api-key: TEAM_API_KEY' \
-H 'Content-Type: application/json' \
-d '{
"query": "<QUERY_TEXT_DATA>"
}'
GET
# Fetch the agent run's result using the requestId returned by the POST call.
curl -X GET 'https://platform-api.aixplain.com/sdk/agents/<requestId>/result' \
-H 'x-api-key: TEAM_API_KEY' \
-H 'Content-Type: application/json'
API Requests
- Python
- JavaScript
Models
Run a model on aiXplain with POST requests and fetch results with GET. The example here utilises the GPT-4 model.
import requests
import time

AIXPLAIN_API_KEY = "TEAM_API_KEY"
MODEL_ID = "6414bd3cd09663e9225130e8"
POST_URL = f"https://models.aixplain.com/api/v1/execute/{MODEL_ID}"

headers = {
    "x-api-key": AIXPLAIN_API_KEY,
    "Content-Type": "application/json",
}

# Request payload; optional generation parameters are commented out.
data = {
    "text": "<TEXT_TEXT_DATA>",
    # "prompt": "<PROMPT_TEXT_DATA>",
    # "context": "<CONTEXT_TEXT_DATA>",
    # "temperature": "<TEMPERATURE_TEXT_DATA>",
    # "max_tokens": "<MAX_TOKENS_TEXT_DATA>",
    # "history": "<HISTORY_TEXT_DATA>"
}

# Start the model execution; the response carries a requestId for polling.
request_id = requests.post(POST_URL, headers=headers, json=data).json().get("requestId")
get_url = f"https://models.aixplain.com/api/v1/data/{request_id}"

# Poll the result endpoint until the run reports completion.
result = requests.get(get_url, headers=headers).json()
while not result.get("completed"):
    time.sleep(5)  # Wait for 5 seconds before checking the result again
    result = requests.get(get_url, headers=headers).json()
print(result)
Pipelines
Execute pipelines with inputs and retrieve results using requests.
import requests
import time

AIXPLAIN_API_KEY = "TEAM_API_KEY"
PIPELINE_ID = "<pipeline_id>"
POST_URL = f"https://platform-api.aixplain.com/assets/pipeline/execution/run/{PIPELINE_ID}"

headers = {
    'x-api-key': AIXPLAIN_API_KEY,
    'Content-Type': 'application/json',
}

# Keys are the label names of the pipeline's input nodes.
data = {
    "Input 1": "<INPUT_1_TEXT_DATA>",
    "Input 2": "<INPUT_2_AUDIO_DATA>",
}

# Start the pipeline; the response carries the URL to poll for the result.
get_url = requests.post(POST_URL, headers=headers, json=data).json().get("url")

# Poll that URL until the run reports completion.
result = requests.get(get_url, headers=headers).json()
while not result.get("completed"):
    time.sleep(5)  # Wait for 5 seconds before checking the result again
    result = requests.get(get_url, headers=headers).json()
print(result)
Agents
Send queries to aiXplain agents and fetch the results using API requests.
import requests
import time

AIXPLAIN_API_KEY = "TEAM_API_KEY"
AGENT_ID = "<agent_id>"
POST_URL = f"https://platform-api.aixplain.com/sdk/agents/{AGENT_ID}/run"

headers = {
    "x-api-key": AIXPLAIN_API_KEY,
    "Content-Type": 'application/json',
}

data = {
    "query": "<QUERY_TEXT_DATA>",
    # "sessionId": "<SESSIONID_TEXT_DATA>", # Optional: Specify sessionId from the previous message
}

# Start the agent run; the response carries a requestId for polling.
request_id = requests.post(POST_URL, headers=headers, json=data).json().get("requestId")
get_url = f"https://platform-api.aixplain.com/sdk/agents/{request_id}/result"

# Poll the result endpoint until the run reports completion.
result = requests.get(get_url, headers=headers).json()
while not result.get("completed"):
    time.sleep(5)  # Wait for 5 seconds before checking the result again
    result = requests.get(get_url, headers=headers).json()
print(result)
Models
Trigger a model on aiXplain and get results with API requests. Here, we use the GPT-4 model.
const AIXPLAIN_API_KEY = 'TEAM_API_KEY';
const MODEL_ID = '6414bd3cd09663e9225130e8';
const POST_URL = `https://models.aixplain.com/api/v1/execute/${MODEL_ID}`;

// Request payload; optional generation parameters are commented out.
const data = {
  "text": "<TEXT_TEXT_DATA>",
  // "prompt": "<PROMPT_TEXT_DATA>",
  // "context": "<CONTEXT_TEXT_DATA>",
  // "temperature": "<TEMPERATURE_TEXT_DATA>",
  // "max_tokens": "<MAX_TOKENS_TEXT_DATA>",
  // "history": "<HISTORY_TEXT_DATA>"
};

async function runModel() {
  const headers = {
    'x-api-key': AIXPLAIN_API_KEY,
    'Content-Type': 'application/json'
  };
  const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
  try {
    // Start the model execution; the response carries a requestId for polling.
    const postResponse = await fetch(POST_URL, {
      method: 'POST',
      headers,
      body: JSON.stringify(data)
    });
    const { requestId } = await postResponse.json();
    const getUrl = `https://models.aixplain.com/api/v1/data/${requestId}`;

    // Poll the result endpoint until the run reports completion.
    let result = await (await fetch(getUrl, { method: 'GET', headers })).json();
    while (!result.completed) {
      await sleep(5000); // Wait for 5 seconds before checking the result again
      result = await (await fetch(getUrl, { method: 'GET', headers })).json();
    }
    console.log(result);
  } catch (error) {
    console.error(error);
  }
}

runModel();
Pipelines
Run pipelines and get outputs via POST and GET requests.
const AIXPLAIN_API_KEY = 'TEAM_API_KEY';
const PIPELINE_ID = '<pipeline_id>';
const POST_URL = `https://platform-api.aixplain.com/assets/pipeline/execution/run/${PIPELINE_ID}`;

// Keys are the label names of the pipeline's input nodes.
const data = {
  "Input 1": "<INPUT_1_TEXT_DATA>",
  "Input 2": "<INPUT_2_AUDIO_DATA>"
};

async function runPipeline() {
  const headers = {
    'x-api-key': AIXPLAIN_API_KEY,
    'Content-Type': 'application/json'
  };
  const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
  try {
    // Start the pipeline; the response carries the URL to poll for the result.
    const postResponse = await fetch(POST_URL, {
      method: 'POST',
      headers,
      body: JSON.stringify(data)
    });
    const { url: getUrl } = await postResponse.json();

    // Poll that URL while the run is still in progress.
    let result = await (await fetch(getUrl, { method: 'GET', headers })).json();
    while (result.status === "IN_PROGRESS") {
      await sleep(5000); // Wait for 5 seconds before checking the result again
      result = await (await fetch(getUrl, { method: 'GET', headers })).json();
    }
    console.log(result);
  } catch (error) {
    console.error(error);
  }
}

runPipeline();
Agents
Send agent queries and fetch results with API calls.
const AIXPLAIN_API_KEY = 'TEAM_API_KEY';
const AGENT_ID = '<agent_id>';
const POST_URL = `https://platform-api.aixplain.com/sdk/agents/${AGENT_ID}/run`;

const data = {
  "query": "<QUERY_TEXT_DATA>",
  "sessionId": "<SESSIONID_TEXT_DATA>" // Optional: specify the sessionId from the previous message
};

async function runAgent() {
  const headers = {
    'x-api-key': AIXPLAIN_API_KEY,
    'Content-Type': 'application/json'
  };
  const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
  try {
    // Start the agent run; the response carries a requestId for polling.
    const postResponse = await fetch(POST_URL, {
      method: 'POST',
      headers,
      body: JSON.stringify(data)
    });
    const { requestId } = await postResponse.json();
    const getUrl = `https://platform-api.aixplain.com/sdk/agents/${requestId}/result`;

    // Poll the result endpoint until the run reports completion.
    let result = await (await fetch(getUrl, { method: 'GET', headers })).json();
    while (!result.completed) {
      await sleep(5000); // Wait for 5 seconds before checking the result again
      result = await (await fetch(getUrl, { method: 'GET', headers })).json();
    }
    console.log(result);
  } catch (error) {
    console.error(error);
  }
}

runAgent();
OpenAI API
You can call aiXplain model endpoints through the OpenAI client library. Here is an example to help you get started.
import openai

# Fixed: the API-key placeholder was missing its closing ">".
openai.api_key = "<OPENAI_API_KEY>"
# NOTE(review): `openai.chat.completions.create` below is the openai>=1.0
# interface, where the endpoint override is `base_url` — `api_base` was the
# pre-1.0 attribute name and is ignored by 1.x. Confirm against the pinned
# openai version if this sample must support 0.x.
openai.base_url = "https://models.aixplain.com/api/v1/execute/<model_id>"  # aiXplain model endpoint

response = openai.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is the importance of AI in modern industries?"}
    ],
    temperature=0.7,
    max_tokens=150,
    top_p=1.0,
    frequency_penalty=0.0,
    presence_penalty=0.0
)

print("Response from aiXplain:")
print(response)