Using variables from earlier in a sequential list of tasks
Question:
Let’s say I need to first create a company, then create a bunch of employees for that company. I could do this:
from locust import HttpUser, task
class CompanyAPI(HttpUser):
    """Locust user that creates one company and then 50 employees for it."""

    @task
    def create_company(self):
        # The company must exist first: every employee payload needs its id.
        company_resp = self.client.post(
            "/companies",
            json={"name": "Company Name"}
        )
        new_company_id = company_resp.json()["id"]

        # Create the employees one after another, all attached to the
        # company created above.
        for i in range(50):
            self.client.post(
                "/employees",
                json={
                    "first_name": f"{i}",
                    "last_name": "Smith",
                    "company": new_company_id,
                },
            )
But it seems like I should be able to break this up into two tasks so that I can create a bunch of these employees in parallel. For that to work though I’d need to somehow save off the company_id
in one task, and then use it in subsequent tasks.
Is this possible somehow?
Answers:
There are many ways to accomplish that, but the most obvious one would be to call /companies
in a test_start event handler, save it on module or class level and use it in your task.
https://docs.locust.io/en/stable/writing-a-locustfile.html#test-start-and-test-stop
Here's a starting point (this will reuse the same company id for all Users forever, so probably you’ll want to modify it a little, maybe by calling /companies
again once i
reaches 50)
import requests

from locust import HttpUser, events, task
from locust.runners import MasterRunner
class CompanyAPI(HttpUser):
    """Creates employees for a company whose id is fetched once at test start.

    The test_start listener in this snippet stores the shared company id on
    the class (``CompanyAPI.company_id``), so all simulated users reuse it.
    """

    # Class-level counter shared by every user instance; gives each created
    # employee a unique first name.
    i = -1

    @task
    def create_company(self):
        # BUG FIX: the original `i += 1` raised UnboundLocalError, because
        # `i` is a class attribute and augmented assignment treats it as a
        # local.  Update it explicitly on the class instead.
        CompanyAPI.i += 1
        resp = self.client.post(
            "/employees",
            json={
                "first_name": f"{CompanyAPI.i}",
                "last_name": "Smith",
                "company": self.company_id,
            },
        )
@events.test_start.add_listener
def _(environment, **_kwargs):
    """Create one shared company before the test starts.

    Skips the master runner: it spawns no users, so it needs no test data.
    """
    if not isinstance(environment.runner, MasterRunner):
        # BUG FIX: plain `requests` requires an absolute URL — a bare path
        # like "/companies" raises requests.exceptions.MissingSchema.
        # Prefix the target host of the test run.
        # NOTE(review): assumes --host (environment.host) is set — confirm.
        resp = requests.post(
            environment.host + "/companies",
            json={"name": "Company Name"}
        )
        CompanyAPI.company_id = resp.json()["id"]
For a full example: https://github.com/locustio/locust/blob/master/examples/test_data_management.py
I was able to get things working ok-ish, I guess, with this code:
from functools import partial
from typing import List, Optional
from gevent import pool as gpool
from locust import HttpUser, SequentialTaskSet, task
from urllib3 import PoolManager
# The backing database only accepts 10 connections at a time (per the
# author's note below), so cap concurrent HTTP connections accordingly.
CONNECTION_LIMIT = 10
# Shared gevent pool used to fan out employee-creation requests;
# sized below the limit presumably to leave headroom for the sequential
# task requests — TODO confirm the "- 2" rationale.
POOL = gpool.Pool(CONNECTION_LIMIT - 2)
class QuickStartSequence(SequentialTaskSet):
    """Create a company, then a workplace, then five employees in parallel.

    Because this is a SequentialTaskSet, tasks run in definition order, so
    ids stored by an earlier task are guaranteed to be set when a later
    task reads them.
    """

    company_id: Optional[str]
    workplace_id: Optional[str]
    employees: List[str]

    def on_start(self):
        # Reset per-run state before the sequence begins.
        self.company_id = None
        self.workplace_id = None
        self.employees = []

    @task
    def create_company(self):
        response = self.client.post("/companies", json={"legal_name": "Thunderbolts"})
        self.company_id = response.json()["id"]

    @task
    def create_workplace(self):
        response = self.client.post("/workplaces", json={"company": self.company_id})
        self.workplace_id = response.json()["id"]

    @task
    def create_employees(self):
        def post_employee(_, i: int):
            # Runs inside a pooled greenlet; records each new employee id.
            response = self.client.post(
                "/employees",
                json={
                    "first_name": "America",
                    "last_name": f"Chavez {i}",
                    "company": self.company_id,
                    "workplaces": [self.workplace_id],
                },
            )
            self.employees.append(response.json()["id"])

        # Fan the five creations out through the shared pool, then wait
        # for all of them to finish before the sequence continues.
        for i in range(5):
            POOL.spawn(partial(post_employee, i=i), self._parent.environment)
        POOL.join()
class AuthenticatedUser(HttpUser):
    """User that runs the quick-start sequence with a capped connection pool."""

    tasks = [QuickStartSequence]
    # Cap outgoing HTTP connections; block=True makes callers wait for a
    # free connection instead of opening extra ones past the limit.
    pool_manager = PoolManager(maxsize=CONNECTION_LIMIT, block=True)
The connection limit and pool are important because my backing database is only accepting 10 connections at a time.
Because I am using a SequentialTaskSet
I know that each method will be called in the order it’s defined in the code. So I can set things like self.company_id
in one method and know that it will be set already in the next method.
Then to get parallel execution for the creation of employees, I just need to spawn those requests with a pool executor.
Let’s say I need to first create a company, then create a bunch of employees for that company. I could do this:
from locust import HttpUser, task
class CompanyAPI(HttpUser):
    """Locust user that creates one company and then 50 employees for it."""

    @task
    def create_company(self):
        # The company must exist first: every employee payload needs its id.
        company_resp = self.client.post(
            "/companies",
            json={"name": "Company Name"}
        )
        new_company_id = company_resp.json()["id"]

        # Create the employees one after another, all attached to the
        # company created above.
        for i in range(50):
            self.client.post(
                "/employees",
                json={
                    "first_name": f"{i}",
                    "last_name": "Smith",
                    "company": new_company_id,
                },
            )
But it seems like I should be able to break this up into two tasks so that I can create a bunch of these employees in parallel. For that to work though I’d need to somehow save off the company_id
in one task, and then use it in subsequent tasks.
Is this possible somehow?
There are many ways to accomplish that, but the most obvious one would be to call /companies
in a test_start event handler, save it on module or class level and use it in your task.
https://docs.locust.io/en/stable/writing-a-locustfile.html#test-start-and-test-stop
Here's a starting point (this will reuse the same company id for all Users forever, so probably you’ll want to modify it a little, maybe by calling /companies
again once i
reaches 50)
from locust.runners import MasterRunner
import requests
class CompanyAPI(HttpUser):
    """Creates employees for a company whose id is fetched once at test start.

    The test_start listener in this snippet stores the shared company id on
    the class (``CompanyAPI.company_id``), so all simulated users reuse it.
    """

    # Class-level counter shared by every user instance; gives each created
    # employee a unique first name.
    i = -1

    @task
    def create_company(self):
        # BUG FIX: the original `i += 1` raised UnboundLocalError, because
        # `i` is a class attribute and augmented assignment treats it as a
        # local.  Update it explicitly on the class instead.
        CompanyAPI.i += 1
        resp = self.client.post(
            "/employees",
            json={
                "first_name": f"{CompanyAPI.i}",
                "last_name": "Smith",
                "company": self.company_id,
            },
        )
# NOTE(review): this snippet also needs `from locust import events` and
# `from locust import HttpUser, task` in its import block.
@events.test_start.add_listener
def _(environment, **_kwargs):
    """Create one shared company before the test starts.

    Skips the master runner: it spawns no users, so it needs no test data.
    """
    if not isinstance(environment.runner, MasterRunner):
        # BUG FIX: plain `requests` requires an absolute URL — a bare path
        # like "/companies" raises requests.exceptions.MissingSchema.
        # Prefix the target host of the test run.
        # NOTE(review): assumes --host (environment.host) is set — confirm.
        resp = requests.post(
            environment.host + "/companies",
            json={"name": "Company Name"}
        )
        CompanyAPI.company_id = resp.json()["id"]
For a full example: https://github.com/locustio/locust/blob/master/examples/test_data_management.py
I was able to get things working ok-ish, I guess, with this code:
from functools import partial
from typing import List, Optional
from gevent import pool as gpool
from locust import HttpUser, SequentialTaskSet, task
from urllib3 import PoolManager
# The backing database only accepts 10 connections at a time (per the
# author's note below), so cap concurrent HTTP connections accordingly.
CONNECTION_LIMIT = 10
# Shared gevent pool used to fan out employee-creation requests;
# sized below the limit presumably to leave headroom for the sequential
# task requests — TODO confirm the "- 2" rationale.
POOL = gpool.Pool(CONNECTION_LIMIT - 2)
class QuickStartSequence(SequentialTaskSet):
    """Create a company, then a workplace, then five employees in parallel.

    Because this is a SequentialTaskSet, tasks run in definition order, so
    ids stored by an earlier task are guaranteed to be set when a later
    task reads them.
    """

    company_id: Optional[str]
    workplace_id: Optional[str]
    employees: List[str]

    def on_start(self):
        # Reset per-run state before the sequence begins.
        self.company_id = None
        self.workplace_id = None
        self.employees = []

    @task
    def create_company(self):
        response = self.client.post("/companies", json={"legal_name": "Thunderbolts"})
        self.company_id = response.json()["id"]

    @task
    def create_workplace(self):
        response = self.client.post("/workplaces", json={"company": self.company_id})
        self.workplace_id = response.json()["id"]

    @task
    def create_employees(self):
        def post_employee(_, i: int):
            # Runs inside a pooled greenlet; records each new employee id.
            response = self.client.post(
                "/employees",
                json={
                    "first_name": "America",
                    "last_name": f"Chavez {i}",
                    "company": self.company_id,
                    "workplaces": [self.workplace_id],
                },
            )
            self.employees.append(response.json()["id"])

        # Fan the five creations out through the shared pool, then wait
        # for all of them to finish before the sequence continues.
        for i in range(5):
            POOL.spawn(partial(post_employee, i=i), self._parent.environment)
        POOL.join()
class AuthenticatedUser(HttpUser):
    """User that runs the quick-start sequence with a capped connection pool."""

    tasks = [QuickStartSequence]
    # Cap outgoing HTTP connections; block=True makes callers wait for a
    # free connection instead of opening extra ones past the limit.
    pool_manager = PoolManager(maxsize=CONNECTION_LIMIT, block=True)
The connection limit and pool are important because my backing database is only accepting 10 connections at a time.
Because I am using a SequentialTaskSet
I know that each method will be called in the order it’s defined in the code. So I can set things like self.company_id
in one method and know that it will be set already in the next method.
Then to get parallel execution for the creation of employees, I just need to spawn those requests with a pool executor.