{ "id": "2407.04803", "version": "v1", "published": "2024-07-05T18:21:17.000Z", "updated": "2024-07-05T18:21:17.000Z", "title": "The Impact of Quantization and Pruning on Deep Reinforcement Learning Models", "authors": [ "Heng Lu", "Mehdi Alemi", "Reza Rawassizadeh" ], "categories": [ "cs.LG", "cs.AI" ], "abstract": "Deep reinforcement learning (DRL) has achieved remarkable success across various domains, such as video games, robotics, and, recently, large language models. However, the computational costs and memory requirements of DRL models often limit their deployment in resource-constrained environments. The challenge underscores the urgent need to explore neural network compression methods to make RDL models more practical and broadly applicable. Our study investigates the impact of two prominent compression methods, quantization and pruning on DRL models. We examine how these techniques influence four performance factors: average return, memory, inference time, and battery utilization across various DRL algorithms and environments. Despite the decrease in model size, we identify that these compression techniques generally do not improve the energy efficiency of DRL models, but the model size decreases. We provide insights into the trade-offs between model compression and DRL performance, offering guidelines for deploying efficient DRL models in resource-constrained settings.", "revisions": [ { "version": "v1", "updated": "2024-07-05T18:21:17.000Z" } ], "analyses": { "keywords": [ "deep reinforcement learning models", "quantization", "neural network compression methods", "deploying efficient drl models", "large language models" ], "note": { "typesetting": "TeX", "pages": 0, "language": "en", "license": "arXiv", "status": "editable" } } }