@inproceedings{stamant2014general-purpose,
  author    = {{St. Amant}, Ren{\'e}e and Yazdanbakhsh, Amir and Park, Jongse and Thwaites, Bradley and Esmaeilzadeh, Hadi and Hassibi, Arjang and Ceze, Luis and Burger, Doug},
  title     = {General-Purpose Code Acceleration with Limited-Precision Analog Computation},
  booktitle = {2014 International Symposium on Computer Architecture},
  year      = {2014},
  month     = jun,
  abstract  = {As improvements in per-transistor speed and energy efficiency diminish, radical departures from conventional approaches are becoming critical to improving the performance and energy efficiency of general-purpose processors. We propose a solution---from circuit to compiler---that enables general-purpose use of limited-precision, analog hardware to accelerate ``approximable'' code---code that can tolerate imprecise execution. We utilize an algorithmic transformation that automatically converts approximable regions of code from a von Neumann model to an ``analog'' neural model. We outline the challenges of taking an analog approach, including restricted-range value encoding, limited precision in computation, circuit inaccuracies, noise, and constraints on supported topologies. We address these limitations with a combination of circuit techniques, a hardware/software interface, neural-network training techniques, and compiler support. Analog neural acceleration provides whole application speedup of 3.7$\times$ and energy savings of 6.3$\times$ with quality loss less than 10\% for all except one benchmark. These results show that using limited-precision analog circuits for code acceleration, through a neural approach, is both feasible and beneficial over a range of approximation-tolerant, emerging applications including financial analysis, signal processing, robotics, {3D} gaming, compression, and image processing.},
  url       = {http://approjects.co.za/?big=en-us/research/publication/general-purpose-code-acceleration-with-limited-precision-analog-computation/},
}