@inproceedings{97a10e413e884d099c3aa66438091654,
title = "Challenges of Self-Supervised Learning for Unified, Multi-Modal, Multi-Task Transformer Models",
abstract = "The recent success of multi-modal multi-task transformer models combined with their ability to learn in a scalable self-supervised fashion has presented evidence that omnipotent models trained with heterogeneous data and tasks are within the realms of possibility. This paper presents several research questions and impediments related towards the training of generalized transformer architectures.",
keywords = "multi-modal, multi-task, self-supervised learning",
author = "Graham Annett and Tim Andersen and Robert Annett",
note = "Publisher Copyright: {\textcopyright} 2022 IEEE.; 2022 International Conference on Computational Science and Computational Intelligence, CSCI 2022 ; Conference date: 14-12-2022 Through 16-12-2022",
year = "2022",
doi = "10.1109/CSCI58124.2022.00056",
language = "English",
series = "Proceedings - 2022 International Conference on Computational Science and Computational Intelligence, CSCI 2022",
publisher = "Institute of Electrical and Electronics Engineers Inc.",
pages = "293--297",
booktitle = "Proceedings - 2022 International Conference on Computational Science and Computational Intelligence, CSCI 2022",
}