-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdisplay_resources.py
More file actions
288 lines (248 loc) · 13.4 KB
/
display_resources.py
File metadata and controls
288 lines (248 loc) · 13.4 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
from random import shuffle
from pathlib import *
from psychopy import core, event, gui, visual
from psychopy.constants import (PLAYING, PAUSED, FINISHED, STOPPED,
NOT_STARTED)
from config import *
# Ideally we should minimze imports from here, but there is some overlap between what's on screen and what TPL needs to know.
from eye_tracking_resources import finish_display, record_event
# These need to be updated throughout the experiment, to send messages about what is being shown on screen
# Module-level display-tracking state, read/written by switch_displays() below.
current_display = None  # name of the display currently on screen (None = no display yet)
current_display_start_time = None  # recorder timestamp for when current_display appeared
# A helper method that calls another method specific to the set input type
def check_for_input_on_images(mouse, images, prev_mouse_location):
    """Dispatch to the click- or tap-detection helper based on USER_INPUT_DEVICE.

    Returns (selected_image, prev_mouse_location); selected_image is None when
    no image received input on this check. Quits the experiment if the input
    device is misconfigured.
    """
    if USER_INPUT_DEVICE == 'mouse':
        return _check_for_click_on_images(mouse, images, prev_mouse_location)
    if USER_INPUT_DEVICE == 'touch':
        return _check_for_tap_on_images(mouse, images, prev_mouse_location)
    print("Error: User input device is not set to a valid value (mouse or touch). Quitting...")
    core.quit()
# Given a list of images, returns the one that is being clicked (or None)
# Hold for the duration of the click - so that when this function ends, the click is over
def _check_for_click_on_images(mouse, images, prev_mouse_location):
clicked_image = None
for image in images:
if mouse.isPressedIn(image):
clicked_image = image
while any(mouse.getPressed()):
pass
# Not necessary, but keeps things consistent with the use of checkForTap
prev_mouse_location = mouse.getPos()
return clicked_image, prev_mouse_location
# Checks for a single tap on an image
# Does this by asking: has the "mouse" moved? (= yes if a tap was received)
# And: If so, is the "mouse" within the image?
def _check_for_tap_on_images(mouse, images, prev_mouse_location):
tapped_image = None
mouse_location = mouse.getPos()
# If the mouse moved... (check x and y coords)
if not(mouse_location[0] == prev_mouse_location[0] and mouse_location[1] == prev_mouse_location[1]):
# If the mouse is within one of the images...
for image in images:
if image.contains(mouse):
tapped_image = image
prev_mouse_location = mouse_location # Update for the next check
return tapped_image, prev_mouse_location
# A helper method that calls another method specific to the set input type
def check_for_input_anywhere(mouse, prev_mouse_location):
    """Dispatch to the click- or tap-anywhere helper based on USER_INPUT_DEVICE.

    Returns (input_received, prev_mouse_location). Quits the experiment if the
    input device is misconfigured.
    """
    if USER_INPUT_DEVICE == 'mouse':
        return _check_for_click_anywhere(mouse, prev_mouse_location)
    if USER_INPUT_DEVICE == 'touch':
        return _check_for_tap_anywhere(mouse, prev_mouse_location)
    print("Error: User input device is not set to a valid value (mouse or touch). Quitting...")
    core.quit()
def _check_for_click_anywhere(mouse, prev_mouse_location):
clicked = False
if any(mouse.getPressed()):
clicked = True
prev_mouse_location = mouse.getPos()
while any(mouse.getPressed()): # Wait for the click to end before proceeding
pass
return clicked, prev_mouse_location
def _check_for_tap_anywhere(mouse, prev_mouse_location):
tapped = False
mouse_location = mouse.getPos()
# If the mouse moved... (check x and y coords)
if not(mouse_location[0] == prev_mouse_location[0] and mouse_location[1] == prev_mouse_location[1]):
tapped = True
prev_mouse_location = mouse_location
return tapped, prev_mouse_location
def clear_clicks_and_events(mouse):
    """Flush the global event queue and reset any pending mouse clicks."""
    event.clearEvents()
    mouse.clickReset()
def display_blank_screen(recorder, main_window):
    """Flip to an empty window, logging the display switch when eye tracking is on."""
    if EYETRACKING_ON:
        switch_displays('blank', recorder)
    main_window.flip()
# Displays a buffer screen with given text, and only proceeds once the user clicks/taps/etc on screen
def display_buffer_screen(recorder, main_window, mouse, buffer_text, quit_function):
    """Show buffer_text and block until the user clicks/taps anywhere on screen."""
    display_text_screen(recorder, main_window, buffer_text, "buffer")
    got_input = False
    last_location = mouse.getPos()
    while not got_input:
        # Still allow quitting via the keyboard shortcut while we wait
        listen_for_quit(quit_function)
        got_input, last_location = check_for_input_anywhere(mouse, last_location)
# Displays a fixation cross on the screen
def display_fixation_cross_screen(recorder, main_window):
    """Draw a bold black '+' at screen centre and flip it onto the window."""
    if EYETRACKING_ON:
        switch_displays('fixation_cross', recorder)
    cross = visual.TextStim(
        main_window,
        text = '+',
        pos = (0.0, 0.0),
        bold = True,
        height = WINDOW_HEIGHT / 10,
        color = "black")
    cross.draw()
    main_window.flip()
def display_stimuli_screen(recorder, main_window, stimuli_list):
    """Draw every stimulus in stimuli_list, then flip them onto the screen."""
    if EYETRACKING_ON:
        switch_displays('stimuli', recorder)
    for item in stimuli_list:
        item.draw()
    main_window.flip()
# Displays a screen with given text (how to proceed from this screen is not a part of this function!)
# displayName can be None (if this is not a display for the recording process, e.g. the quitting screen)
def display_text_screen(recorder, main_window, text_to_display, display_name):
    """Render text_to_display centred on screen; display_name labels it for the recorder."""
    if EYETRACKING_ON:
        switch_displays(display_name, recorder)
    text_stim = visual.TextStim(
        main_window,
        text = text_to_display,
        pos = (0.0, 0.0),
        height = WINDOW_HEIGHT / 20,
        wrapWidth = WINDOW_WIDTH,
        color = "black")
    text_stim.draw()
    main_window.flip()
# Displays a dialog box to get subject number
def display_subj_ID_dialog():
    """Prompt for a subject ID via a dialog box and return it as an int.

    Quits the experiment cleanly if the dialog is cancelled or the entry is
    not an integer (previously either case crashed with an unhandled
    exception, since gui_box.OK was never checked and int() was unguarded).
    """
    gui_box = gui.Dlg()
    gui_box.addField("Subject ID:")
    gui_box.show()
    if not gui_box.OK:  # Experimenter cancelled the dialog
        core.quit()
    try:
        return int(gui_box.data[0])
    except (IndexError, TypeError, ValueError):
        print("Error: Subject ID must be an integer. Quitting...")
        core.quit()
# Get the images to be displayed for the given trial.
def get_images(image_file_names, image_size, checkmark_size, repeat_icon_size, main_window):
    """Build the PsychoPy stimuli for one trial.

    Returns (images, checkmarks, repeat_icon, selection_box):
    - images: one ImageStim per entry in image_file_names (loaded from visualStims/)
    - checkmarks: one checkmark ImageStim per image - same icon, but each needs
      its own object so they can be positioned independently later
    - repeat_icon: the replay-audio button
    - selection_box: a green outline rectangle sized to match the images
    """
    # Hoist the directory lookups out of the loop
    stim_dir = Path.cwd() / "visualStims"
    icon_dir = Path.cwd() / "icons"
    images = []
    checkmarks = []
    # Iterate the names directly instead of indexing with range(len(...))
    for file_name in image_file_names:
        images.append(visual.ImageStim(win = main_window, image = stim_dir / str(file_name), units = "pix", size = image_size, name = file_name))
        checkmarks.append(visual.ImageStim(win = main_window, image = icon_dir / "checkmark.png", units = "pix", size = checkmark_size))
    repeat_icon = visual.ImageStim(win = main_window, image = icon_dir / "repeat.png", units = "pix", size = repeat_icon_size)
    selection_box = visual.Rect(win = main_window, lineWidth = 2.5, lineColor = "#7AC043", fillColor = None, units = "pix", size = image_size)
    return images, checkmarks, repeat_icon, selection_box
def handle_input_on_stimulus(selected_image, images, checkmarks, selection_box, repeat_icon, trial_clock, clicks, recorder, main_window):
    """Record a selection on a stimulus image and redraw the screen with feedback.

    Appends [role_name, trial_duration] to clicks, positions the selection box
    over the chosen image, redraws all stimuli plus the box and the image's
    checkmark, and returns that checkmark.

    Raises ValueError if selected_image is None or not one of the trial images.
    (Previously this used `assert(selected_image != None)`, which is stripped
    under `python -O`, and `pic`/`checkmark` could be referenced unbound if
    selected_image was not found in images.)
    """
    if selected_image is None:
        raise ValueError("handle_input_on_stimulus called without a selected image")
    number_of_images = len(images)
    # Role names match their column order in the experimental items input file
    NAME_LISTS = {
        1: ["agent"],
        2: ["agent", "patient"],
        3: ["agent", "patient", "distractor"],
        4: ["agent", "patient", "distractorA", "distractorB"],
    }
    assert number_of_images in NAME_LISTS
    image_names = NAME_LISTS[number_of_images]
    # Figure out which image was selected
    trial_duration = trial_clock.getTime()
    selection_box.setPos(selected_image.pos)
    # list.index raises ValueError if selected_image isn't a trial image,
    # which also guarantees pic/checkmark below are always bound
    selected_index = images.index(selected_image)
    pic = image_names[selected_index]
    checkmark = checkmarks[selected_index]
    clicks.append([pic, trial_duration])
    # Re-draw to include the selection box and checkmark
    display_stimuli_screen(recorder, main_window, images + [repeat_icon, selection_box, checkmark])
    return checkmark
# Listens for a keyboard shortcut that tells us to quit the experiment - if detected, it runs the given quit routine
def listen_for_quit(quit_function):
    """Run quit_function if the escape key has been pressed since the last check."""
    if 'escape' in event.getKeys():
        quit_function()
def listen_for_repeat(repeat_icon, prev_mouse_location, audio, trial_clock, clicks, mouse, recorder):
    """Replay the audio when the repeat icon receives input, and log the replay.

    Returns the updated prev_mouse_location for the caller's next input check.
    """
    pressed, prev_mouse_location = check_for_input_on_images(mouse, [repeat_icon], prev_mouse_location)
    if pressed:
        if EYETRACKING_ON:
            record_event(recorder, "RepeatPressed")
        play_sound(audio, recorder)
        # Record the replay as a response, tagged with the time into the trial
        clicks.append(["replay", trial_clock.getTime()])
    return prev_mouse_location
def play_sound(audio, recorder):
    """Start playing the audio stimulus unless it is already playing.

    The "AudioStart" eye-tracking event is recorded only when playback actually
    starts; previously it was logged even when the call was a no-op because the
    audio was already playing, which made the recording misleading.
    """
    if audio.status != PLAYING:
        if EYETRACKING_ON:
            record_event(recorder, "AudioStart")
        audio.play()
# Note that setPos defines the position of the image's /centre/, and screen positions are determined based on the /centre/ of the screen being (0,0)
def set_image_positions(image_size, checkmark_size, images, checkmarks, repeat_icon, IMAGE_OFFSET_FROM_EDGE):
    """Randomly assign screen positions to the trial images and their checkmarks.

    The repeat icon always goes in the bottom-left corner. The set of candidate
    slots depends on the number of images (1-4), and slot order is shuffled on
    every call. Images and checkmarks are repositioned in place.

    Returns (images, checkmarks, repeat_icon, image_position_info_to_print),
    where the last item maps each image's name to its human-readable slot label
    (e.g. "left_top") for logging.
    """
    number_of_images = len(images)
    # The repeat button's position is always the same, no randomization needed
    buffer_size = min(WINDOW_WIDTH, WINDOW_HEIGHT) / 15
    repeat_icon.setPos([-WINDOW_WIDTH / 2 + buffer_size, -WINDOW_HEIGHT / 2 + buffer_size])
    # Calculate positions for the images relative to the window
    # NOTE: assumes image_size is a single scalar (square images) - the same
    # value is used for both the x and y spacing below.
    x_spacing = (WINDOW_WIDTH / 2) - (image_size / 2) #i.e. distance from centre of screen to centre of image in order for the image to be against one side of the screen
    y_spacing = (WINDOW_HEIGHT / 2) - (image_size / 2)
    left = -x_spacing + IMAGE_OFFSET_FROM_EDGE
    right = x_spacing - IMAGE_OFFSET_FROM_EDGE
    bottom = -y_spacing + IMAGE_OFFSET_FROM_EDGE
    top = y_spacing - IMAGE_OFFSET_FROM_EDGE
    centre = 0
    # Position the checkmarks just above/below the image. This offset should be added/subtracted from the corresponding image's position.
    checkmark_offset = image_size / 2 + checkmark_size / 2
    # Using human-readable names for the positions.
    # Horizontally: left, centre, right
    # Vertically: top, centre, bottom
    POSITION_COORDINATES = {"left_top": [left, top], "left_centre": [left, centre], "left_bottom": [left, bottom], "centre_top": [centre, top], "centre_centre": [centre, centre], "centre_bottom": [centre, bottom], "right_top": [right, top], "right_centre": [right, centre], "right_bottom": [right, bottom]}
    # Determine the image positions we'll be using, based on the # of images
    ONE_POSITION = ["centre_centre"]
    TWO_POSITIONS = ["left_centre", "right_centre"]
    THREE_POSITIONS = ["left_top", "right_top", "centre_bottom"]
    FOUR_POSITIONS = ["left_top", "right_top", "left_bottom", "right_bottom"]
    POSITION_LISTS = {
        1: ONE_POSITION,
        2: TWO_POSITIONS,
        3: THREE_POSITIONS,
        4: FOUR_POSITIONS
    }
    assert number_of_images in POSITION_LISTS.keys()
    image_positions = POSITION_LISTS[number_of_images]
    # Now randomize the order of positions, and apply them to the images (and their corresponding checkmark)
    # (the lists are rebuilt on every call, so shuffling in place is safe)
    shuffle(image_positions)
    image_position_info_to_print = {}
    for image, checkmark, image_position in zip(images, checkmarks, image_positions):
        current_pos_coords = POSITION_COORDINATES[image_position]
        image.setPos(current_pos_coords)
        image_position_info_to_print.update({image.name: image_position})
        # Put the check below the image UNLESS the image is already at the bottom of the screen, in which case put the check above the image
        checkmark.setPos([current_pos_coords[0], current_pos_coords[1] - checkmark_offset if current_pos_coords[1] != bottom else current_pos_coords[1] + checkmark_offset])
    return images, checkmarks, repeat_icon, image_position_info_to_print
# Either the new or current display can be None, indicating this is the last or first display
# If new_display_name = None, we only finish with the previous display, we don't add a new one
# If current_display = None, we do not finish any previous display (because we're saying there isn't one!), we merely note the start time of this new screen
# If both are None, we are neither beginning nor finishing with a screen (i.e. this is a dummy call - we don't want TPL to know about this display at all)
def switch_displays(new_display_name, recorder):
    """Update the module-level display-tracking state on a screen change.

    Finishes the previous display (if any) via finish_display, and records
    the start timestamp for the new one. Uses `is None` identity checks
    (PEP 8) rather than `== None`.
    """
    global current_display
    global current_display_start_time
    # Store the start time for the new display (and finish with the old one, if there was one).
    if current_display is None and new_display_name is None:  # No switch at all
        pass
    elif current_display is None:  # No previous display, but we're starting a new one
        current_display_start_time = int((recorder.get_time_stamp())['timestamp'])
    else:  # We are finishing with a previous display
        current_display_start_time = finish_display(current_display_start_time, current_display, recorder)
    current_display = new_display_name  # Update the current display