Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
23 commits
Select commit Hold shift + click to select a range
d524d8e
Add core adaptive stimulus changes (WIP)
bboudaoud-nv Feb 6, 2023
7b698f0
Add null checking to playerCamera
bboudaoud-nv Feb 6, 2023
0c356ad
Check nextTrial/Block() return values
bboudaoud-nv Feb 6, 2023
017a727
Add skipping questions when session complete
bboudaoud-nv Feb 6, 2023
ca2754f
Fixes for empty tasks array (trials as tasks)
bboudaoud-nv Feb 14, 2023
663354d
Exception for empty trials with all adaptive tasks
bboudaoud-nv Feb 20, 2023
a25880f
Fix to always get logger trial parameters
bboudaoud-nv Feb 20, 2023
58786d4
Force update to db before adaptationCmd call
bboudaoud-nv Feb 21, 2023
d2b31bd
Generic windows platform version
bboudaoud-nv Feb 22, 2023
081a508
Support count for adaptive stimulus tasks
bboudaoud-nv Feb 22, 2023
480a827
Add adaptive stimulus sample and python libs
bboudaoud-nv Feb 22, 2023
17c061a
Update gitignore to ignore pylog.txt
bboudaoud-nv Feb 22, 2023
6d0d497
Quick fix to ensure highest index for task
bboudaoud-nv Feb 22, 2023
be56e7e
Comment and clean up scripts
bboudaoud-nv Feb 22, 2023
68d034e
Fix for a bug in how next step is determined
bboudaoud-nv Feb 22, 2023
ebaf762
Fix for order indexing issue
bboudaoud-nv Feb 23, 2023
ea9cf97
Added an intro session and some improved feedback
bboudaoud-nv Feb 23, 2023
a7db700
Add removeAdaptationConfig parameter
bboudaoud-nv Feb 28, 2023
790d89c
Fix empty row errors and flush lock-up
bboudaoud-nv Feb 28, 2023
3351c38
Make trial_order NULL for adaptive tasks
bboudaoud-nv Feb 28, 2023
49e00d8
Fix to work around ' appearing in config
bboudaoud-nv Feb 28, 2023
b28c502
Add use of task start time to filter results
bboudaoud-nv Feb 28, 2023
03420fe
Merge branch 'master' into AdaptiveStimulus
bboudaoud-nv Apr 13, 2023
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
# Don't check-in any of these data-files
/data-files/debugging_db.db
/data-files/g3d-license.txt
/data-files/log.txt
/data-files/*log.txt
/data-files/*.db
/data-files/*.csv
/data-files/*.pdn
Expand Down
78 changes: 78 additions & 0 deletions data-files/samples/adaptive_stimulus.Experiment.Any
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
{
description = "Adaptive Stimulus Example";

// This must be logged for the adaptation script to work!
trialParametersToLog: ["frameRate"];

// 1-hit fast firing weapon
weapon = {
firePeriod = 0.1;
damagePerSecond = 10;
};

sessions = [
{
id = "Intro";
description = "A session that introduces the conditions";

referenceTargetInitialFeedback = "This session familiarizes you with the frame rates used in this experiment";
sessionCompleteFeedback = "Introduction complete, please let the experimenter know if you could not notice a difference!";

// Single intro task
tasks = [
{
id = "intro";
trialOrders = [
{ order = ["60", "108"]; }
];
}
];

// Use 2 named trials to introduce conditions
trials = [
{
id = "60";
frameRate = 60;
trialSuccessFeedback = "This was an example of a 60Hz trial";
targetIds = ["moving", "moving", "moving"];
},
{
id = "108";
frameRate = 108;
trialSuccessFeedback = "This was an example of a 108Hz trial";
targetIds = ["moving", "moving", "moving"];
}
]
},
{
id = "AdaptFrameRate";
description = "An example session dynamically adapting frame rate";

referenceTargetInitialFeedback = "This session adapts frame rate based on your answers to questions\nRespond to each question to the best of your ability!";
taskFailureFeedback = "Successfully completed the task!"; // Since we do not provide a correct answer task will always "fail"

// The task array handles all creation of trials here (no trials array required)
tasks = [
{
id = "fr_adapt";
type = "adaptive"; // Required to indicate this is adaptive
count = 4;

// This command will be called to generate a new trials.Any file
adaptationCmd = "python samples/framerate_adapt_sample.py";
}
];
}
];

targets = [
{
"id": "moving",
"axisLocked": [ false, false, true ],
"destSpace": "player",
"motionChangePeriod": [ 2, 3 ],
"speed": [ 7, 10 ],
"visualSize": [ 0.05, 0.05 ]
}
];
}
11 changes: 11 additions & 0 deletions data-files/samples/adaptive_stimulus.Status.Any
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
{
currentUser = "Sample User";
sessions = ( "Intro", "AdaptFrameRate");
settingsVersion = 1;
users = (
{
id = "Sample User";
sessions = ( "Intro", "AdaptFrameRate");
} );

}
101 changes: 101 additions & 0 deletions data-files/samples/adaptive_stimulus/fpsci_results.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
import sqlite3 # Used for db queries

# Utility methods
def runQuery(db, query):
    """Execute *query* against *db* and return all result rows.

    db may be an open sqlite3.Connection or a path to a database file.
    When a path is given, the connection is opened here and closed before
    returning (previously it was leaked).
    """
    conn = sqlite3.connect(db) if type(db) is str else db
    try:
        cur = conn.cursor()
        cur.execute(query)
        return cur.fetchall()
    finally:
        # Only close connections we opened ourselves
        if conn is not db:
            conn.close()

def unpack_results(rows):
    """Collapse single-column result rows into bare values (in place).

    Rows with more than one column are left untouched; the (possibly
    modified) input list is returned for chaining.
    """
    if rows and len(rows[0]) == 1:
        for idx in range(len(rows)):
            rows[idx] = rows[idx][0]
    return rows

def runQueryAndUnpack(db, query):
    """Run *query* and flatten single-column rows into plain values."""
    rows = runQuery(db, query)
    return unpack_results(rows)

# Run a query that returns a single result (first value)
def runSingleResultQuery(db, query):
    """Return the first column of the first result row, or None if empty."""
    rows = runQuery(db, query)
    return rows[0][0] if rows else None

# User methods
def getLastResultsFilename(dir):
    """Return the most recently modified .db file in *dir*, or None if none."""
    import glob, os
    candidates = glob.glob(os.path.join(dir, '*.db'))
    if not candidates:
        return None
    # Newest by modification time
    return max(candidates, key=os.path.getmtime)

def getLastResultsDb(path):
    """Open and return the most recently modified results database in *path*.

    Raises FileNotFoundError when the directory contains no .db files,
    rather than passing None to sqlite3.connect (which fails with an
    opaque TypeError).
    """
    fname = getLastResultsFilename(path)
    if fname is None:
        raise FileNotFoundError(f'No .db results files found in "{path}"')
    return sqlite3.connect(fname)

def getUserFromDb(db):
    """Extract the subject/user id from a results database.

    Accepts either an open sqlite3.Connection (queried directly) or a
    results filename, whose second-to-last '_'-separated token is the
    user id. Any other type yields None.
    """
    if type(db) is sqlite3.Connection:
        return runSingleResultQuery(db, 'SELECT subject_id FROM Users LIMIT 1')
    elif type(db) is str:
        return db.split('_')[-2]

def getCurrentTaskId(db):
    """Return the task_id of the most recently recorded task (or None)."""
    query = 'SELECT task_id FROM Tasks ORDER BY rowid DESC LIMIT 1'
    return runSingleResultQuery(db, query)

def getCurrentTaskIndex(db, taskId=None):
    """Return the most recent task_index, optionally restricted to taskId."""
    query = 'SELECT task_index FROM Tasks '
    if taskId is not None:
        query += f'WHERE task_id is "{taskId}" '
    query += 'ORDER BY rowid DESC LIMIT 1'
    return runSingleResultQuery(db, query)

def getTaskStartTime(db, taskId=None, taskIndex=None):
    """Return the start_time of the most recent matching Tasks row (or None).

    Both filters are optional. Previously passing taskIndex without taskId
    produced invalid SQL (an 'AND' clause with no 'WHERE'); filter clauses
    are now assembled independently.
    """
    filters = []
    if taskId is not None: filters.append(f'task_id is "{taskId}"')
    if taskIndex is not None: filters.append(f'task_index is {taskIndex}')
    q = 'SELECT start_time FROM Tasks '
    if filters: q += 'WHERE ' + ' AND '.join(filters) + ' '
    q += 'ORDER BY rowid DESC LIMIT 1'
    return runSingleResultQuery(db, q)

def getTrialCountByTask(db, taskId=None, taskIndex=None):
    """Return trials_complete for the most recent matching Tasks row (0 if none).

    Both filters are optional. Previously passing taskIndex without taskId
    produced invalid SQL (an 'AND' clause with no 'WHERE'); filter clauses
    are now assembled independently.
    """
    filters = []
    if taskId is not None: filters.append(f'task_id is "{taskId}"')
    if taskIndex is not None: filters.append(f'task_index is {taskIndex}')
    q = 'SELECT trials_complete FROM Tasks '
    if filters: q += 'WHERE ' + ' AND '.join(filters) + ' '
    q += 'ORDER BY rowid DESC LIMIT 1'
    count = runSingleResultQuery(db, q)
    # No matching row means no trials recorded yet
    return 0 if count is None else count

def getTotalTrialCount(db):
    """Return the total number of rows in the Trials table."""
    # runQuery always returns a (possibly empty) list from fetchall(),
    # so the previous None check was dead code.
    return len(runQuery(db, 'SELECT rowid FROM Trials'))

def getLastQuestionResponses(db, n=1, taskId=None, taskIdx=None, startTime=None):
    """Return the last *n* question responses from Questions, newest first.

    With n == 1 a single scalar (or None) is returned; otherwise a list.
    A task id is required in order to filter on task index. Previously a
    startTime filter without a taskId produced invalid SQL (an 'AND'
    clause with no 'WHERE'); filter clauses are now assembled independently.
    """
    if taskId is None and taskIdx is not None:
        raise Exception("Task index was provided but no task id!")
    filters = []
    if taskId is not None: filters.append(f'task_id is "{taskId}"')
    if taskIdx is not None: filters.append(f'task_index is {taskIdx}')
    if startTime is not None: filters.append(f'time > "{startTime}"')
    q = 'SELECT response FROM Questions '
    if filters: q += 'WHERE ' + ' AND '.join(filters) + ' '
    q += f'ORDER BY rowid DESC LIMIT {n}'
    if n == 1: return runSingleResultQuery(db, q)
    return runQueryAndUnpack(db, q)

def getLastTrialIds(db, n=1, taskId=None, taskIndex=None):
    """Return the last *n* trial_id values from Trials, newest first.

    With n == 1 a single scalar (or None) is returned; otherwise a list.
    Previously passing taskIndex without taskId produced invalid SQL (an
    'AND' clause with no 'WHERE'); filter clauses are now assembled
    independently.
    """
    filters = []
    if taskId is not None: filters.append(f'task_id is "{taskId}"')
    if taskIndex is not None: filters.append(f'task_index is {taskIndex}')
    q = 'SELECT trial_id FROM Trials '
    if filters: q += 'WHERE ' + ' AND '.join(filters) + ' '
    q += f'ORDER BY rowid DESC LIMIT {n}'
    if n == 1: return runSingleResultQuery(db, q)
    return runQueryAndUnpack(db, q)

def getLastTrialParams(db, paramName, n=1, taskId=None, taskIndex=None):
    """Return the last *n* values of logged parameter *paramName*, newest first.

    paramName must be a column of the Trials table (i.e. listed in the
    experiment's trialParametersToLog). With n == 1 a single scalar (or
    None) is returned; otherwise a list. Previously passing taskIndex
    without taskId produced invalid SQL; filter clauses are now assembled
    independently.
    """
    # NOTE: paramName is interpolated as a column name and cannot be
    # parameterized — only pass trusted, config-derived names here.
    filters = []
    if taskId is not None: filters.append(f'task_id is "{taskId}"')
    if taskIndex is not None: filters.append(f'task_index is {taskIndex}')
    q = f'SELECT {paramName} FROM Trials '
    if filters: q += 'WHERE ' + ' AND '.join(filters) + ' '
    q += f'ORDER BY rowid DESC LIMIT {n}'
    if n == 1: return runSingleResultQuery(db, q)
    return runQueryAndUnpack(db, q)
62 changes: 62 additions & 0 deletions data-files/samples/adaptive_stimulus/fpsci_task_gen.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
VERBOSE = True # When True, extra sanity checks run and warnings/errors are printed to stdout

# An empty task (task complete signal) with optional progress value
def emptyTaskConfig(progress=None):
    """Return a task config with no trials, signalling the task is complete.

    progress, when provided (including 0), is forwarded as a 'progress' field.
    """
    result = {'trials': []}
    if progress is not None:
        result['progress'] = progress
    return result

# A task which sets a single parameter to an array of values and asks a question
def singleParamTaskConfig(param, values, targetIds, questions, progress=None, questionIdx=None, correctAnswer=None, description=None):
    """Build a task config that sweeps *param* over *values* with *questions*.

    Optional fields (progress, description, questionIndex, correctAnswer)
    are attached only when their argument is not None.
    """
    config = {
        'trials': singleParamTrialArray(param, values, targetIds),
        'questions': questions,
    }
    # Attach only the optional fields that were actually supplied
    optional = {
        'progress': progress,
        'description': description,
        'questionIndex': questionIdx,
        'correctAnswer': correctAnswer,
    }
    for key, value in optional.items():
        if value is not None:
            config[key] = value
    return config

# Note this method returns a trials array, not a complete task config!
def singleParamTrialArray(param, values, targetIds, randomizeOrder=False):
    """Build a trials array where each trial sets *param* to one of *values*.

    Every trial shares the same targetIds list. When randomizeOrder is True
    the values are emitted in a random order; a shuffled copy is used so the
    caller's list is no longer mutated as a side effect (previously
    random.shuffle modified it in place).
    """
    if VERBOSE:
        if type(targetIds) is not list: print(f'ERROR: Provided targetIds must be a list (is a {type(targetIds)})!')
        if len(values) == 0: print('ERROR: No values provided!')
    if randomizeOrder:
        import random
        values = random.sample(values, len(values))  # shuffled copy, input untouched
    trials = []
    for val in values:
        trials.append({
            'id': f'{val}',
            f'{param}': val,
            'targetIds': targetIds  # NOTE: all trials share this same list object
        })
    return trials

# Set the target ids for a specific trial (allows customization of targets on a per-trial basis)
def setTrialTargetIds(config, trialId, targetIds):
    """Replace the targetIds of the trial with id *trialId* in *config*.

    Raises TypeError when targetIds is not a list (with VERBOSE checks on)
    and ValueError when no trial matches. The original code used
    `raise "<string>"`, which itself raises TypeError("exceptions must
    derive from BaseException") instead of the intended message.
    """
    if type(targetIds) is not list and VERBOSE:
        raise TypeError(f'Provided targetIds must be a list (is a {type(targetIds)})!')
    # Find the trial in the config array and modify its targetIds field
    for trial in config['trials']:
        if trial['id'] == trialId:
            trial['targetIds'] = targetIds
            return
    raise ValueError(f'Could not find trial with id {trialId}!')

# Write this configuration to an any file (defaults to trials.Any)
def writeToAny(path, config, fname='trials.Any'):
    """Serialize *config* as indented JSON to a .Any file.

    When *path* is a directory, *fname* is appended to it. A warning is
    printed (if VERBOSE) when the destination does not end in 'Any'.
    """
    import os, json
    if os.path.isdir(path):
        path = os.path.join(path, fname)
    if not path.endswith('Any') and VERBOSE:
        print('WARNING: Results should be written to a ".Any" file!')
    with open(path, 'w') as out:
        json.dump(config, out, indent=4)

131 changes: 131 additions & 0 deletions data-files/samples/framerate_adapt_sample.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,131 @@
"""Sample adaptation script: adjusts trial frame rates between FPSci tasks.

FPSci runs this script via the task's adaptationCmd after each task
iteration. It opens the most recent results database, inspects the user's
"was this different?" responses, and writes a new trials.Any file with the
next pair of frame-rate conditions. Writing an empty trials array signals
that adaptation is complete.
"""
import adaptive_stimulus.fpsci_results as results
import adaptive_stimulus.fpsci_task_gen as taskgen
import numpy as np

# File paths
RESULTS_DIR = './results/' # Path to current results directory
CONFIG_DIR = './' # Path to output trials.Any file
OUTPUT_LOG = 'pylog.txt' # Path to write log of runtime to (for debug)

# Adaptation constants
BASE_FRAMERATE_HZ = 60 # The base frame rate to compare to
INITIAL_STEP_PERCENT = 80 # The amount of the base frame rate to step up/down (negative to step down)

# Stop conditions
ND_TO_END = 4 # The amount of answers of "these are the same condition" to terminate
MAX_QUESTIONS = 100 # An alternate end criteria (must be > ND_TO_END!)

# The (single) question configuration for our experiment (could be a list)
QUESTION = [
    {
        'type': 'MultipleChoice',
        'prompt': 'Was this trial different than the previous one?',
        'options': ['yes', 'no'],
        'optionKeys': ['y', 'n'],
    }
]

# Targets to use for all trials (referenced from experiment config)
TARGETS = ["moving", "moving", "moving"]

# Redirect print output to log file
import sys
orig_stdout = sys.stdout
f = open(OUTPUT_LOG, 'a+') # Open in append mode (keep previous results)
sys.stdout = f

# Trap case for when configuration above is incorrect
if MAX_QUESTIONS < ND_TO_END:
    # NOTE(review): message says ND_END but the constant is named ND_TO_END
    print("ERROR: Must have ND_END < MAX_QUESTIONS!")
    exit()

# Get results file(name) and print it to console (i.e. the log file)
from datetime import datetime
fname = results.getLastResultsFilename(RESULTS_DIR)
print(f'\nReading {fname} @ {datetime.now()}...')

# Open results database
db = results.getLastResultsDb(RESULTS_DIR)

# 2 ways to get the task id (hard code or query database)
TASK_ID = 'fr_adapt' # Hard-coding this requires keeping it in-sync with the config file, but avoids edge-cases
# TASK_ID = results.getCurrentTaskId(db) # (Alternative) read whatever the most recent task_id is from the experiment config file (should be written by FPSci)
# if TASK_ID is None: print('WARNING: Did not find a valid task_id in the database!')

TASK_INDEX = results.getCurrentTaskIndex(db, TASK_ID)
if TASK_INDEX is None:
    # First run of this task: no Tasks row exists yet
    print(f'WARNING: Did not find a valid task_index for task {TASK_ID}, using 0 instead!')
    TASK_INDEX = 0
print(f'Using task id: "{TASK_ID}" and index: {TASK_INDEX}')

# Print trial status (informational only)
total_trials = results.getTotalTrialCount(db)
task_trial_cnt = results.getTrialCountByTask(db, TASK_ID)
idx_trial_cnt = results.getTrialCountByTask(db, TASK_ID, TASK_INDEX)
print(f'Results database contains a total of {total_trials} trial(s). {task_trial_cnt} ({idx_trial_cnt}) for this task (index)!')

# Use time to filter out only questions that come from our current task iteration
START_TIME = results.getTaskStartTime(db, TASK_ID, TASK_INDEX)
print(f'Filtering responses using task start time of: {START_TIME}')

# Get answers for questions from our task (used for generating next task)
answers = results.getLastQuestionResponses(db, MAX_QUESTIONS, TASK_ID, TASK_INDEX, START_TIME)
print(f'Got {len(answers)} responses from results file...')

# Check various termination conditions
done = False # Flag indicating the task is complete
if(len(answers) >= MAX_QUESTIONS):
    # We have asked too many questions (forced end w/o convergence)
    print('Termination criteria reached: maximum question count!')
    done = True
elif len(answers) >= ND_TO_END:
    # Check whether the most recent ND_TO_END responses were all "no difference"
    done = True
    for a in answers[:ND_TO_END]: # answers are ordered newest-first
        # If any answer is "yes there is a difference" we are not done
        if 'yes' in a.lower(): done = False; break
    if done: print(f'Termination criteria reached: {ND_TO_END} consectuive "no difference" responses!')

# Create an empty task config (signals end of adaptation if unpopulated)
config = taskgen.emptyTaskConfig()

# Update the next set of trials based on previous responses
if not done:
    if(len(answers) == 0): # First time running this task (create initial conditions)
        print(f"No responses from task {TASK_ID} creating first set...")
        otherRate = BASE_FRAMERATE_HZ * (1.0+INITIAL_STEP_PERCENT/100.0)
        rates = [BASE_FRAMERATE_HZ, otherRate]
    else: # At least 1 previous trial has been run
        # Try to get the last 2 rates from the results database
        try: lastRates = results.getLastTrialParams(db, 'frameRate', 2, TASK_ID, TASK_INDEX) # Get last 2 frame rates from trials table
        # NOTE(review): bare except — this also masks failures unrelated to a missing frameRate column
        except:
            print('ERROR: Could not get "frameRate" parameter from previous trials, make sure it is in your experiment/session trialParamsToLog!')
            exit() # Cannot recover from this condition (don't know previous frame rates)...
        lastRates = [float(r) for r in lastRates] # Convert to float
        # NOTE(review): assumes at least 2 prior trials exist — lastRates[1] would raise IndexError otherwise
        print(f"Found last trial rates of {lastRates[0]} and {lastRates[1]} Hz")

        #TODO: Replace this logic in the future (code decides how many answers were "yes I can tell a difference" required to adapt)
        answerCount = min(len(answers), ND_TO_END)
        # Fraction of the most recent answers that report "no difference"
        ndRatio = sum([1 for a in answers[:answerCount] if 'no' in a.lower()]) / answerCount
        print(f'{100*ndRatio}% of last {answerCount} responses were "no difference"...')

        if ndRatio >= 0.5: # Step away from the baseline
            print('Mostly "no difference": keep last levels')
            rates = lastRates
        else: # Step towards the baseline
            print('Mostly report a difference: move towards baseline')
            # Halve the gap: midpoint of the last two rates vs the baseline
            rates = [np.average(lastRates), BASE_FRAMERATE_HZ]

    print(f'Next rates will be {rates}Hz...')

    # Generate task (w/ shuffled condition order)
    import random; random.shuffle(rates)
    config = taskgen.singleParamTaskConfig('frameRate', rates, TARGETS, QUESTION, correctAnswer='Yes')

# Write task config to trials.Any file
taskgen.writeToAny(CONFIG_DIR, config)
print('Wrote config to trials.Any!')

# Restore original stdout
sys.stdout = orig_stdout
f.close()
Loading