Skip to content

CAT Example

PWardell86 edited this page Nov 11, 2025 · 2 revisions

Install the library

pip install adaptivetesting

Then you should be able to run it with `python this_file.py`. The correct answer is always option 0. Once you complete the test, it will print the ability estimate that was recorded after each answer. The question index is directly related to its difficulty, too.

from math import log, log1p

import pandas as pd

from adaptivetesting.models import ItemPool, TestItem
from adaptivetesting.implementations import TestAssembler
from adaptivetesting.math.estimators import BayesModal, NormalPrior

class Question:
    """A single multiple-choice question with a classical difficulty.

    Attributes:
        content: The question text shown to the participant.
        options: The answer options, presented by index.
        answer: Index into ``options`` of the correct choice.
        difficulty: Proportion-correct difficulty, expected in (0, 1).
    """

    def __init__(self, content, options, answer, difficulty):
        # Instance attributes only. The original class-level defaults
        # (notably the mutable ``options = []``) were shared across all
        # instances and immediately shadowed by __init__, so they are removed.
        self.content = content
        self.options = options
        self.answer = answer
        self.difficulty = difficulty

def generate_debugging_bank(length):
    """Create ``length`` debugging questions with decreasing p-values.

    The correct answer is always option 0, and later questions carry a
    lower proportion-correct value (i.e. they count as harder).
    """
    step = 1 / length
    bank = []
    for index in range(length):
        prompt = f"{index+1} -> What is {index} + {index}?"
        choices = [str(index + offset) for offset in range(index, index + 4)]
        # The first option (index 0) is always the correct one.
        # p-value shrinks as the question index grows, capped at 0.99.
        p_value = min(1 - index * step, 0.99)
        bank.append(Question(prompt, choices, 0, p_value))
    return bank

# Compute the b parameter for the test from the proportion of correct responses (difficulty)
def get_b_from_difficulty(difficulty, avg_theta=0):
    """Convert a classical proportion-correct difficulty into an IRT b value.

    An item answered correctly by proportion ``difficulty`` of a population
    with mean ability ``avg_theta`` maps to b = avg_theta - logit(difficulty).

    Args:
        difficulty: Proportion of correct responses, strictly in (0, 1).
        avg_theta: Mean ability of the reference population (default 0).

    Returns:
        The item difficulty ``b`` on the latent-trait (theta) scale.

    Raises:
        ValueError: If ``difficulty`` is not strictly between 0 and 1.
    """
    if not 0 < difficulty < 1:
        raise ValueError("difficulty must be strictly between 0 and 1")
    # Bug fix: the original used log1p(p / (1 - p)), which equals -log(1 - p)
    # and is not the logit. The standard p-value -> b conversion uses the
    # logit, log(p / (1 - p)), so that p = 0.5 maps to b = avg_theta.
    return avg_theta - log(difficulty / (1 - difficulty))

def get_item_pool1(question_bank):
    """Build a 4-parameter item table (as a DataFrame) from a question bank."""
    size = len(question_bank)
    difficulties = [get_b_from_difficulty(q.difficulty) for q in question_bank]
    return pd.DataFrame({
        "ids": list(range(size)),
        "a": [1.0] * size,  # Discrimination: Ignore
        "b": difficulties,  # Difficulty
        "c": [0] * size,  # Guessing: Ignore
        "d": [1.0] * size,  # presumably the upper asymptote — confirm in adaptivetesting docs
    })

# Script setup: build the 10-question debugging bank and wrap it in the
# library's ItemPool, keyed by question index (ids 0..9).
test_bank = generate_debugging_bank(10)
item_pool1 = ItemPool.load_from_dataframe(get_item_pool1(test_bank))

# Interactive CAT session: Bayes modal ability estimation with a standard
# normal prior; simulation=False so responses come from a real participant
# via get_response (overridden below).
adaptive_test1 = TestAssembler(
    item_pool=item_pool1,
    simulation_id="adaptive_test1",
    participant_id="test_user1",
    ability_estimator=BayesModal,
    estimator_args={"prior": NormalPrior(0, 1)},
    simulation=False
)

def get_response(item: TestItem):
    """Present the question for ``item`` and read the participant's answer.

    Looks up the question in ``test_bank`` by ``item.id`` (assumed to match
    the question's index in the bank), prints the prompt and options, then
    reads an option index from stdin.

    Args:
        item: The test item selected by the assembler.

    Returns:
        bool: True if the chosen option index equals the correct answer.
    """
    question = test_bank[item.id]
    print(f"Question: {question.content}")
    for idx, option in enumerate(question.options):
        print(f"{idx}: {option}")
    # Robustness fix: the original crashed with ValueError on non-numeric
    # input, aborting the whole test; re-prompt until an integer is entered.
    while True:
        try:
            user_answer = int(input("Your answer (index): "))
            break
        except ValueError:
            print("Please enter a number.")
    return user_answer == question.answer

# Override the default get_response method. It must return a
# boolean indicating whether the option chosen was the answer.
adaptive_test1.get_response = get_response

# Repeatedly run the test. run_test_once() takes care of selecting
# the next item and updating the ability score based on the
# parameters provided in initialization.
while True:
    adaptive_test1.run_test_once()
    # Administered items are removed from the pool; once it is empty the
    # test is over, so print the ability estimate recorded after each answer.
    if len(adaptive_test1.item_pool.test_items) == 0:
        print("No more questions available.")
        print([result.ability_estimation for result in adaptive_test1.test_results])
        break

Clone this wiki locally