
Yulia Shevtsova
Publications affiliated with the MEG Center
2025
Shevtsova, Yulia G.; Yashin, Artem S.; Shishkin, Sergei L.; Vasilyev, Anatoly N. (2025). Disentangling High-Paced Alternating I/O in Gaze-Based Interaction. IEEE Access, 13, 81948-81966. https://doi.org/10.1109/access.2025.3568650
@article{Shevtsova2025,
title = {Disentangling High-Paced Alternating I/O in Gaze-Based Interaction},
author = {Yulia G. Shevtsova and Artem S. Yashin and Sergei L. Shishkin and Anatoly N. Vasilyev},
url = {https://megmoscow.ru/wp-content/uploads/pubs/10.1109_access.2025.3568650.pdf},
doi = {10.1109/access.2025.3568650},
issn = {2169-3536},
year = {2025},
date = {2025-05-09},
urldate = {2025-05-09},
journal = {IEEE Access},
volume = {13},
pages = {81948-81966},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
abstract = {Gaze-based input to machines utilizes the ability of eye-gaze to serve as a user’s “output.” However, gaze should also support information flow in the opposite direction, namely, “input” to the user’s visual system from a machine’s output. The two functions can be easily separated in some tasks, like eye typing, but more complex scenarios typically require users to perform additional actions to avoid misinterpreting their intent. In this study, we modeled a free-behavior interaction with rapid transitions between visual search, decision-making, and gaze-based input operations through an engaging game called EyeLines. When playing the game, 15 volunteers selected screen objects using a 500 ms dwell time without additional actions for intention confirmation. By applying machine learning algorithms to gaze features and action context information, we achieved a threefold reduction in false positives, improved the quality of in-game decisions, and increased participant satisfaction with system ergonomics. To our knowledge, this is the first study that demonstrates the effectiveness of machine learning applied to gaze features in enhancing gaze-based interaction within visually challenging environments.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
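The 2025 paper above describes applying machine learning to gaze features and action-context information to suppress false dwell-time selections. The following is an illustrative sketch only, not the authors' implementation: the features, labels, and data are hypothetical placeholders, shown in Python with scikit-learn.

# Illustrative sketch: filtering 500 ms dwell-time selections with a classifier
# trained on gaze features plus an action-context flag. All features, labels,
# and data below are hypothetical placeholders, not taken from the paper.
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
n = 1000
# Hypothetical per-dwell features: prior fixation duration, gaze dispersion
# on the target, pupil-size change, and a context flag (e.g. whether the
# dwell occurred right after a completed game move).
X = rng.normal(size=(n, 4))
# Hypothetical labels: 1 = intentional selection, 0 = spurious dwell.
y = (X[:, 0] + 0.5 * X[:, 3] + rng.normal(scale=0.5, size=n) > 0).astype(int)

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = RandomForestClassifier(n_estimators=200, random_state=0).fit(X_train, y_train)

# Reject a selection unless the classifier is sufficiently confident: one
# simple way to trade accepted selections for fewer false positives.
proba = clf.predict_proba(X_test)[:, 1]
accepted = proba > 0.6
false_positives = ((y_test == 0) & accepted).sum()
print("false positives among accepted selections:", false_positives)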
2024
Yashin, Artem S.; Vasilyev, Anatoly N.; Shevtsova, Yulia G.; Shishkin, Sergei L. (2024). Can Quasi-Movements be Used as a Model of the BCI Based on Attempted Movements? 2024 IEEE International Conference on Systems, Man, and Cybernetics (SMC). Sarawak, Malaysia, October 6-10, 2024. 2028-2033. https://doi.org/10.1109/smc54092.2024.10831475
@conference{Yashin2024d,
title = {Can Quasi-Movements be Used as a Model of the BCI Based on Attempted Movements?},
author = {Artem S. Yashin and Anatoly N. Vasilyev and Yulia G. Shevtsova and Sergei L. Shishkin},
doi = {10.1109/smc54092.2024.10831475},
year = {2024},
date = {2024-10-06},
urldate = {2024-10-06},
booktitle = {2024 IEEE International Conference on Systems, Man, and Cybernetics (SMC)},
pages = {2028-2033},
address = {Sarawak, Malaysia, October 6-10, 2024},
abstract = {Brain-computer interfaces (BCIs) based on motor imagery (imagined movements, IM) are among the most common BCIs for the rehabilitation of paralyzed patients. However, it is possible that attempted movements (AM) would be a more effective alternative to IM. Unlike IM, AM are difficult to study outside of clinical practice. Nikulin et al. (2008) suggest that quasi-movements (QM) could help model AM in healthy participants without immobilizing interventions. QM result from the amplitude reduction of an overt movement, which leads to the practical absence of electromyography (EMG) response. The performance of QM may have features that may distance QM from AM. Here, we examined the compatibility of QM with a saccade task, which modelled visual interaction with the outside world during the practical use of a BCI. In a study involving 24 volunteers, we used electroencephalography (EEG), EMG, and conducted an extensive survey of the participants. We expected that, compared to IM, QM in the dual-task condition would be easier and less tiring and would be accompanied by greater event-related desynchronization (ERD) of the sensorimotor rhythms. Our hypotheses were based on the assumption that like AM and unlike IM, QM is a more external task, and so is more compatible with the saccade task. We reproduced the effect of greater ERD for QM in the dual-task condition but did not find any significant difference in difficulty or tediousness between QM and IM. Nevertheless, the survey data gave us important insights into the challenges participants faced when performing QM. Despite EMG values similar to IM, the feeling of muscle tension experienced by the participants correlated with mean EMG values. The main challenge in performing QM by the participants was to make movements without an amplitude. Performing QM conflicted with the illusion of movement that was supposed to accompany them: without proprioceptive feedback, participants doubted the reality of QM. Our results can be used to improve the procedure of QM training, which should bring them closer to genuine attempts of movements in the eyes of participants.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
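The 2024 paper above compares event-related desynchronization (ERD) of sensorimotor rhythms between conditions. As a minimal sketch only, ERD can be expressed as the percentage power change in a task window relative to a pre-task reference window; the band limits, window lengths, and data below are generic assumptions, not the paper's analysis parameters.

# Illustrative sketch: Pfurtscheller-style ERD of a sensorimotor rhythm,
# computed as the percentage band-power change relative to a baseline window.
# Sampling rate, band, windows, and data are assumptions for illustration.
import numpy as np
from scipy.signal import welch

fs = 500                                               # sampling rate, Hz (assumed)
t = np.arange(0, 4, 1 / fs)
eeg = np.random.default_rng(1).normal(size=t.size)     # stand-in for one EEG channel

def band_power(x, fs, band=(8, 13)):
    """Mean power spectral density in a frequency band (here, the mu band)."""
    f, psd = welch(x, fs=fs, nperseg=fs)
    mask = (f >= band[0]) & (f <= band[1])
    return psd[mask].mean()

reference = band_power(eeg[: 2 * fs], fs)   # pre-task baseline window
activity = band_power(eeg[2 * fs :], fs)    # window during the (quasi-)movement
erd_percent = (activity - reference) / reference * 100  # negative = desynchronization
print(f"ERD: {erd_percent:.1f} %")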
2023
Shevtsova, Yulia G.; Vasilyev, Anatoly N.; Shishkin, Sergei L. (2023). Machine Learning for Gaze-Based Selection: Performance Assessment Without Explicit Labeling. HCI International 2023 – Late Breaking Papers. HCII 2023. Lecture Notes in Computer Science, volume 14054. Springer Nature Switzerland. 311-322. https://doi.org/10.1007/978-3-031-48038-6_19
@conference{Shevtsova2023,
title = {Machine Learning for Gaze-Based Selection: Performance Assessment Without Explicit Labeling},
author = {Yulia G. Shevtsova and Anatoly N. Vasilyev and Sergei L. Shishkin},
doi = {10.1007/978-3-031-48038-6_19},
isbn = {9783031480386},
year = {2023},
date = {2023-11-25},
urldate = {2023-11-25},
booktitle = {HCI International 2023 – Late Breaking Papers. HCII 2023. Lecture Notes in Computer Science, volume 14054},
pages = {311-322},
publisher = {Springer Nature Switzerland},
abstract = {Gaze-based interaction typically requires certain actions to confirm selections, which often makes interaction less convenient. Recently, effective identification of the user’s intention to make a gaze-based selection was demonstrated by Isomoto et al. (2022) using machine learning applied to gaze behavior features. However, a certain bias could appear in that study since the participants were requested to report their intentions during the interaction experiment. Here, we applied several classification algorithms (linear discriminant analysis, RBF and linear support vector machines, and random forest) to gaze features characterizing selections made in a freely played gaze-controlled game, in which moves were made by sequences of gaze-based selections and their gaze-based confirmations, without separately reporting the correctness of the selections. Intention to select was successfully predicted by each of the classifiers using features collected before the selection.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
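The 2023 paper above compares several classifier families on gaze features collected before a selection. The sketch below only illustrates how such a comparison could be set up with scikit-learn; the features, labels, and data are synthetic placeholders, not the study's dataset or pipeline.

# Illustrative sketch: comparing the classifier families named in the abstract
# (LDA, linear and RBF SVMs, random forest) on pre-selection gaze features.
# Features and data are synthetic placeholders, not from the study.
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score

rng = np.random.default_rng(42)
X = rng.normal(size=(600, 6))                                               # hypothetical gaze features
y = (X[:, 0] - X[:, 2] + rng.normal(scale=0.7, size=600) > 0).astype(int)   # 1 = intends to select

models = {
    "LDA": LinearDiscriminantAnalysis(),
    "linear SVM": make_pipeline(StandardScaler(), SVC(kernel="linear")),
    "RBF SVM": make_pipeline(StandardScaler(), SVC(kernel="rbf")),
    "random forest": RandomForestClassifier(n_estimators=200, random_state=0),
}
for name, model in models.items():
    auc = cross_val_score(model, X, y, cv=5, scoring="roc_auc").mean()
    print(f"{name}: ROC AUC = {auc:.2f}")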