@inproceedings{7ba208da257d46fa9da322a512148ed7,
title = "FlutPIM: A Look-up Table-based Processing in Memory Architecture with Floating-point Computation Support for Deep Learning Applications",
abstract = "Processing-in-Memory (PIM) has shown great potential for a wide range of data-driven applications, especially Deep Learning and AI. However, it is a challenge to facilitate the computational sophistication of a standard processor (i.e., CPU or GPU) within the limited scope of a memory chip without contributing significant circuit overheads. To address this challenge, we propose a programmable LUT-based area-efficient PIM architecture capable of performing various low-precision floating-point (FP) computations using a novel LUT-oriented operand-decomposition technique. We incorporate a large number of such compact computational units within the memory banks to achieve parallel processing capabilities up to 4x higher than state-of-the-art FP-capable PIM. Additionally, we adopt a highly optimized low-precision FP format that maximizes computational performance with minimal compromise of computational precision, especially for Deep Learning applications. The overall result is 17% higher throughput and 8-20x higher compute bandwidth/bank compared to the state-of-the-art in in-memory acceleration.",
keywords = "DRAM, Deep Learning, floating point, processing in memory",
author = "Sutradhar, {Purab Ranjan} and Sathwika Bavikadi and Mark Indovina and {Pudukotai Dinakarrao}, {Sai Manoj} and Amlan Ganguly",
note = "Publisher Copyright: {\textcopyright} 2023 ACM; 33rd Great Lakes Symposium on VLSI, GLSVLSI 2023; Conference date: 05-06-2023 through 07-06-2023",
year = "2023",
doi = "10.1145/3583781.3590313",
language = "American English",
series = "Proceedings of the ACM Great Lakes Symposium on VLSI, GLSVLSI",
pages = "207--211",
booktitle = "GLSVLSI '23",
}