
Shevtsova Julia
Junior researcher
Publications:
2024
2.
Yashin, Artem S.; Vasilyev, Anatoly N.; Shevtsova, Yulia G.; Shishkin, Sergei L. (2024). Can Quasi-Movements be Used as a Model of the BCI Based on Attempted Movements? 2024 IEEE International Conference on Systems, Man, and Cybernetics (SMC). Sarawak, Malaysia, October 6-10, 2024. 2028-2033. https://doi.org/10.1109/smc54092.2024.10831475
@comment{Cleaned auto-export: @conference -> @inproceedings (canonical type),
  pages en-dash, conference dates removed from address (already in date field),
  names in "Last, First" form, acronyms brace-protected, OCR errors in the
  abstract fixed (BCls -> BCIs, 1M -> IM).}
@inproceedings{Yashin2024d,
  title     = {Can Quasi-Movements be Used as a Model of the {BCI} Based on Attempted Movements?},
  author    = {Yashin, Artem S. and Vasilyev, Anatoly N. and Shevtsova, Yulia G. and Shishkin, Sergei L.},
  doi       = {10.1109/smc54092.2024.10831475},
  year      = {2024},
  date      = {2024-10-06},
  urldate   = {2024-10-06},
  booktitle = {2024 {IEEE} International Conference on Systems, Man, and Cybernetics ({SMC})},
  pages     = {2028--2033},
  address   = {Sarawak, Malaysia},
  abstract  = {Brain-computer interfaces (BCIs) based on motor imagery (imagined movements, IM) are among the most common BCIs for the rehabilitation of paralyzed patients. However, it is possible that attempted movements (AM) would be a more effective alternative to IM. Unlike IM, AM are difficult to study outside of clinical practice. Nikulin et al. (2008) suggest that quasi-movements (QM) could help model AM in healthy participants without immobilizing interventions. QM result from the amplitude reduction of an overt movement, which leads to the practical absence of electromyography (EMG) response. The performance of QM may have features that may distance QM from AM. Here, we examined the compatibility of QM with a saccade task, which modelled visual interaction with the outside world during the practical use of a BCI. In a study involving 24 volunteers, we used electroencephalography (EEG), EMG, and conducted an extensive survey of the participants. We expected that, compared to IM, QM in the dual-task condition would be easier and less tiring and would be accompanied by greater event-related desynchronization (ERD) of the sensorimotor rhythms. Our hypotheses were based on the assumption that like AM and unlike IM, QM is a more external task, and so is more compatible with the saccade task. We reproduced the effect of greater ERD for QM in the dual-task condition but did not find any significant difference between the difficulty or tediousness of QM and IM. Nevertheless, the survey data gave us important insights into the challenges participants faced when performing QM. Despite EMG values similar to IM, the feeling of muscle tension experienced by the participants correlated with mean EMG values. The main challenge in performing QM by the participants was to make movements without an amplitude. Performing QM conflicted with the illusion of movement that was supposed to accompany them: without proprioceptive feedback, participants doubt the reality of QM.
Our results can be used to improve the procedure of QM training, which should bring them closer to genuine attempts of movements in the eyes of participants.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Brain-computer interfaces (BCIs) based on motor imagery (imagined movements, IM) are among the most common BCIs for the rehabilitation of paralyzed patients. However, it is possible that attempted movements (AM) would be a more effective alternative to IM. Unlike IM, AM are difficult to study outside of clinical practice. Nikulin et al. (2008) suggest that quasi-movements (QM) could help model AM in healthy participants without immobilizing interventions. QM result from the amplitude reduction of an overt movement, which leads to the practical absence of electromyography (EMG) response. The performance of QM may have features that may distance QM from AM. Here, we examined the compatibility of QM with a saccade task, which modelled visual interaction with the outside world during the practical use of a BCI. In a study involving 24 volunteers, we used electroencephalography (EEG), EMG, and conducted an extensive survey of the participants. We expected that, compared to IM, QM in the dual-task condition would be easier and less tiring and would be accompanied by greater event-related desynchronization (ERD) of the sensorimotor rhythms. Our hypotheses were based on the assumption that like AM and unlike IM, QM is a more external task, and so is more compatible with the saccade task. We reproduced the effect of greater ERD for QM in the dual-task condition but did not find any significant difference between the difficulty or tediousness of QM and IM. Nevertheless, the survey data gave us important insights into the challenges participants faced when performing QM. Despite EMG values similar to IM, the feeling of muscle tension experienced by the participants correlated with mean EMG values. The main challenge in performing QM by the participants was to make movements without an amplitude. Performing QM conflicted with the illusion of movement that was supposed to accompany them: without proprioceptive feedback, participants doubt the reality of QM. 
Our results can be used to improve the procedure of QM training, which should bring them closer to genuine attempts of movements in the eyes of participants.
2023
1.
Shevtsova, Yulia G.; Vasilyev, Anatoly N.; Shishkin, Sergei L. (2023). Machine Learning for Gaze-Based Selection: Performance Assessment Without Explicit Labeling. HCI International 2023 – Late Breaking Papers. HCII 2023. Lecture Notes in Computer Science, volume 14054. Springer Nature Switzerland. 311-322. https://doi.org/10.1007/978-3-031-48038-6_19
@comment{Cleaned auto-export: @conference -> @inproceedings (canonical type),
  pages en-dash, LNCS series/volume split out of booktitle into the proper
  series/volume fields, names in "Last, First" form, acronyms brace-protected.}
@inproceedings{Shevtsova2023,
  title     = {Machine Learning for Gaze-Based Selection: Performance Assessment Without Explicit Labeling},
  author    = {Shevtsova, Yulia G. and Vasilyev, Anatoly N. and Shishkin, Sergei L.},
  doi       = {10.1007/978-3-031-48038-6_19},
  isbn      = {9783031480386},
  year      = {2023},
  date      = {2023-11-25},
  urldate   = {2023-11-25},
  booktitle = {{HCI} International 2023 – Late Breaking Papers. {HCII} 2023},
  series    = {Lecture Notes in Computer Science},
  volume    = {14054},
  pages     = {311--322},
  publisher = {Springer Nature Switzerland},
  abstract  = {Gaze-based interaction typically requires certain actions to confirm selections, which often makes interaction less convenient. Recently, effective identification of the user’s intention to make a gaze-based selection was demonstrated by Isomoto et al. (2022) using machine learning applied to gaze behavior features. However, a certain bias could appear in that study since the participants were requested to report their intentions during the interaction experiment. Here, we applied several classification algorithms (linear discriminant analysis, RBF and linear support vector machines, and random forest) to gaze features characterizing selections made in a freely played gaze-controlled game, in which moves were made by sequences of gaze-based selections and their gaze-based confirmations, without separate reporting the correctness of the selection. Intention to select was successfully predicted by each of the classifiers using features collected before the selection.},
  keywords  = {},
  pubstate  = {published},
  tppubtype = {conference}
}
Gaze-based interaction typically requires certain actions to confirm selections, which often makes interaction less convenient. Recently, effective identification of the user’s intention to make a gaze-based selection was demonstrated by Isomoto et al. (2022) using machine learning applied to gaze behavior features. However, a certain bias could appear in that study since the participants were requested to report their intentions during the interaction experiment. Here, we applied several classification algorithms (linear discriminant analysis, RBF and linear support vector machines, and random forest) to gaze features characterizing selections made in a freely played gaze-controlled game, in which moves were made by sequences of gaze-based selections and their gaze-based confirmations, without separate reporting the correctness of the selection. Intention to select was successfully predicted by each of the classifiers using features collected before the selection.