@inproceedings{zhang2024,
  title     = {Shrinking {POMCP}: A Framework for Real-Time {UAV} Search and Rescue},
  author    = {Zhang, Yunuo and Luo, Baiting and Mukhopadhyay, Ayan and Stojcsics, Daniel and Elenius, Daniel and Roy, Anirban and Jha, Susmit and Maroti, Miklos and Koutsoukos, Xenofon and Karsai, Gabor and Dubey, Abhishek},
  year      = {2024},
  booktitle = {2024 International Conference on Assured Autonomy (ICAA)},
  pages     = {48--57},
  doi       = {10.1109/ICAA64256.2024.00016},
  keywords  = {Three-dimensional displays;Navigation;Markov decision processes;Urban areas;Probabilistic logic;Real-time systems;Trajectory;Maintenance;Time factors;Optimization;Search and Rescue;POMDP;MCTS},
}
2024
- Y. Zhang, B. Luo, A. Mukhopadhyay, D. Stojcsics, D. Elenius, A. Roy, S. Jha, M. Maroti, X. Koutsoukos, G. Karsai, and A. Dubey, Shrinking POMCP: A Framework for Real-Time UAV Search and Rescue, in 2024 International Conference on Assured Autonomy (ICAA), 2024, pp. 48–57.
- R. Sen, A. Sivagnanam, A. Laszka, A. Mukhopadhyay, and A. Dubey, Grid-Aware Charging and Operational Optimization for Mixed-Fleet Public Transit, in 2024 IEEE 27th International Conference on Intelligent Transportation Systems (ITSC), 2024.
@inproceedings{rishavITSC2024,
  title     = {Grid-Aware Charging and Operational Optimization for Mixed-Fleet Public Transit},
  author    = {Sen, Rishav and Sivagnanam, Amutheezan and Laszka, Aron and Mukhopadhyay, Ayan and Dubey, Abhishek},
  year      = {2024},
  booktitle = {2024 IEEE 27th International Conference on Intelligent Transportation Systems (ITSC)},
  keywords  = {Mixed transit fleet, electrification, dynamic pricing, hierarchical MILP},
}
The rapid growth of urban populations and the increasing need for sustainable transportation solutions have prompted a shift towards electric buses in public transit systems. However, the effective management of mixed fleets consisting of both electric and diesel buses poses significant operational challenges. One major challenge is coping with dynamic electricity pricing, where charging costs vary throughout the day. Transit agencies must optimize charging assignments in response to such dynamism while accounting for secondary considerations such as seating constraints. This paper presents a comprehensive mixed-integer linear programming (MILP) model to address these challenges by jointly optimizing charging schedules and trip assignments for mixed (electric and diesel bus) fleets while considering factors such as dynamic electricity pricing, vehicle capacity, and route constraints. We address the potential computational intractability of the MILP formulation, which can arise even with relatively small fleets, by employing a hierarchical approach tailored to the fleet composition. By using real-world data from the city of Chattanooga, Tennessee, USA, we show that our approach can result in significant savings in the operating costs of the mixed transit fleets.
- Z. An, H. Baier, A. Dubey, A. Mukhopadhyay, and M. Ma, Enabling MCTS Explainability for Sequential Planning Through Computation Tree Logic, ECAI 2024 - 27th European Conference on Artificial Intelligence. 2024.
@inproceedings{an2024enablingmctsexplainabilitysequential,
  title         = {Enabling {MCTS} Explainability for Sequential Planning Through Computation Tree Logic},
  author        = {An, Ziyan and Baier, Hendrik and Dubey, Abhishek and Mukhopadhyay, Ayan and Ma, Meiyi},
  year          = {2024},
  booktitle     = {{ECAI} 2024 - 27th European Conference on Artificial Intelligence},
  location      = {Santiago de Compostela, Spain},
  eprint        = {2407.10820},
  archiveprefix = {arXiv},
  primaryclass  = {cs.AI},
  url           = {https://arxiv.org/abs/2407.10820},
}
Monte Carlo tree search (MCTS) is one of the most capable online search algorithms for sequential planning tasks, with significant applications in areas such as resource allocation and transit planning. Despite its strong performance in real-world deployment, the inherent complexity of MCTS makes it challenging to understand for users without technical background. This paper considers the use of MCTS in transportation routing services, where the algorithm is integrated to develop optimized route plans. These plans are required to meet a range of constraints and requirements simultaneously, further complicating the task of explaining the algorithm’s operation in real-world contexts. To address this critical research gap, we introduce a novel computation tree logic-based explainer for MCTS. Our framework begins by taking user-defined requirements and translating them into rigorous logic specifications through the use of language templates. Then, our explainer incorporates a logic verification and quantitative evaluation module that validates the states and actions traversed by the MCTS algorithm. The outcomes of this analysis are then rendered into human-readable descriptive text using a second set of language templates. The user satisfaction of our approach was assessed through a survey with 82 participants. The results indicated that our explanatory approach significantly outperforms other baselines in user preference.
- A. Sivagnanam, A. Pettet, H. Lee, A. Mukhopadhyay, A. Dubey, and A. Laszka, Multi-Agent Reinforcement Learning with Hierarchical Coordination for Emergency Responder Stationing, in Proceedings of the 41st International Conference on Machine Learning (ICML), 2024.
@inproceedings{sivagnanam2024,
  title     = {Multi-Agent Reinforcement Learning with Hierarchical Coordination for Emergency Responder Stationing},
  author    = {Sivagnanam, Amutheezan and Pettet, Ava and Lee, Hunter and Mukhopadhyay, Ayan and Dubey, Abhishek and Laszka, Aron},
  year      = {2024},
  booktitle = {Proceedings of the 41st International Conference on Machine Learning (ICML)},
  location  = {Vienna, Austria},
  publisher = {JMLR.org},
  series    = {ICML'24},
}
An emergency responder management (ERM) system dispatches responders, such as ambulances, when it receives requests for medical aid. ERM systems can also proactively reposition responders between predesignated waiting locations to cover any gaps that arise due to the prior dispatch of responders or significant changes in the distribution of anticipated requests. Optimal repositioning is computationally challenging due to the exponential number of ways to allocate responders between locations and the uncertainty in future requests. The state-of-the-art approach in proactive repositioning is a hierarchical approach based on spatial decomposition and online Monte Carlo tree search, which may require minutes of computation for each decision in a domain where seconds can save lives. We address the issue of long decision times by introducing a novel reinforcement learning (RL) approach, based on the same hierarchical decomposition, but replacing online search with learning. To address the computational challenges posed by large, variable-dimensional, and discrete state and action spaces, we propose: (1) actor-critic based agents that incorporate transformers to handle variable-dimensional states and actions, (2) projections to fixed-dimensional observations to handle complex states, and (3) combinatorial techniques to map continuous actions to discrete allocations. We evaluate our approach using real-world data from two U.S. cities, Nashville, TN and Seattle, WA. Our experiments show that compared to the state of the art, our approach reduces computation time per decision by three orders of magnitude, while also slightly reducing average ambulance response time by 5 seconds.
- S. Pavia, D. Rogers, A. Sivagnanam, M. Wilbur, D. Edirimanna, Y. Kim, P. Pugliese, S. Samaranayake, A. Laszka, A. Mukhopadhyay, and A. Dubey, Deploying Mobility-On-Demand for All by Optimizing Paratransit Services, International Joint Conference on Artificial Intelligence (IJCAI), 2024.
@inproceedings{paviaIJCAI24AISG,
  title     = {Deploying Mobility-On-Demand for All by Optimizing Paratransit Services},
  author    = {Pavia, Sophie and Rogers, David and Sivagnanam, Amutheezan and Wilbur, Michael and Edirimanna, Danushka and Kim, Youngseo and Pugliese, Philip and Samaranayake, Samitha and Laszka, Aron and Mukhopadhyay, Ayan and Dubey, Abhishek},
  year      = {2024},
  booktitle = {International Joint Conference on Artificial Intelligence (IJCAI)},
}
- S. Pavia, D. Rogers, A. Sivagnanam, M. Wilbur, D. Edirimanna, Y. Kim, A. Mukhopadhyay, P. Pugliese, S. Samaranayake, A. Laszka, and A. Dubey, SmartTransit.AI: A Dynamic Paratransit and Microtransit Application, International Joint Conference on Artificial Intelligence (IJCAI), 2024.
@inproceedings{paviaIJCAI24demo,
  title     = {{SmartTransit.AI}: A Dynamic Paratransit and Microtransit Application},
  author    = {Pavia, Sophie and Rogers, David and Sivagnanam, Amutheezan and Wilbur, Michael and Edirimanna, Danushka and Kim, Youngseo and Mukhopadhyay, Ayan and Pugliese, Philip and Samaranayake, Samitha and Laszka, Aron and Dubey, Abhishek},
  year      = {2024},
  booktitle = {International Joint Conference on Artificial Intelligence (IJCAI)},
}
- S. Gupta, A. Khanna, J. P. Talusan, A. Said, D. Freudberg, A. Mukhopadhyay, and A. Dubey, A Graph Neural Network Framework for Imbalanced Bus Ridership Forecasting, in 2024 IEEE International Conference on Smart Computing (SMARTCOMP), 2024.
@inproceedings{samir2024smartcomp,
  title     = {A Graph Neural Network Framework for Imbalanced Bus Ridership Forecasting},
  author    = {Gupta, Samir and Khanna, Agrima and Talusan, Jose Paolo and Said, Anwar and Freudberg, Dan and Mukhopadhyay, Ayan and Dubey, Abhishek},
  year      = {2024},
  month     = jun,
  booktitle = {2024 IEEE International Conference on Smart Computing (SMARTCOMP)},
}
Public transit systems are paramount in lowering carbon emissions and reducing urban congestion for environmental sustainability. However, overcrowding has adverse effects on the quality of service, passenger experience, and overall efficiency of public transit causing a decline in the usage of public transit systems. Therefore, it is crucial to identify and forecast potential windows of overcrowding to improve passenger experience and encourage higher ridership. Predicting ridership is a complex task, due to the inherent noise of collected data and the sparsity of overcrowding events. Existing studies in predicting public transit ridership consider only a static depiction of bus networks. We address these issues by first applying a data processing pipeline that cleans noisy data and engineers several features for training. Then, we address sparsity by converting the network to a dynamic graph and using a graph convolutional network, incorporating temporal, spatial, and auto-regressive features, to learn generalizable patterns for each route. Finally, since conventional loss functions like categorical cross-entropy have limitations in addressing class imbalance inherent in ridership data, our proposed approach uses focal loss to refine the prediction focus on less frequent yet task-critical overcrowding instances. Our experiments, using real-world data from our partner agency, show that the proposed approach outperforms existing state-of-the-art baselines in terms of accuracy and robustness.
- J. P. Talusan, R. Sen, A. K. Ava Pettet, Y. Suzue, L. Pedersen, A. Mukhopadhyay, and A. Dubey, OPTIMUS: Discrete Event Simulator for Vehicle-to-Building Charging Optimization, in 2024 IEEE International Conference on Smart Computing (SMARTCOMP), 2024.
@inproceedings{talusan2024smartcomp,
  title         = {{OPTIMUS}: Discrete Event Simulator for Vehicle-to-Building Charging Optimization},
  author        = {Talusan, Jose Paolo and Sen, Rishav and Ava Pettet, Aaron Kandel and Suzue, Yoshinori and Pedersen, Liam and Mukhopadhyay, Ayan and Dubey, Abhishek},
  year          = {2024},
  month         = jun,
  booktitle     = {2024 IEEE International Conference on Smart Computing (SMARTCOMP)},
  internal-note = {NOTE(review): the name "Ava Pettet, Aaron Kandel" parses as one person (surname "Ava Pettet") and looks garbled -- possibly two authors, "Pettet, Ava" and "Kandel, Aaron"; verify against the published paper},
}
The increasing popularity of electronic vehicles has spurred a demand for EV charging infrastructure. In the United States alone, over 160,000 public and private charging ports have been installed. This has stoked fear of potential grid issues in the future. Meanwhile, companies, specifically building owners are also seeing the opportunity to leverage EV batteries as energy stores to serve as buffers against the electric grid. The main idea is to influence and control charging behavior to provide a certain level of energy resiliency and demand responsiveness to the building from grid events while ensuring that they meet the demands of EV users. However, managing and co-optimizing energy requirements of EVs and cost-saving measures of building owners is a difficult task. First, user behavior and grid uncertainty contribute greatly to the potential effectiveness of different policies. Second, different charger configurations can have drastically different effects on the cost. Therefore, we propose a complete end-to-end discrete event simulator for vehicle-to-building charging optimization. This software is aimed at building owners and EV manufacturers such as Nissan, looking to deploy their charging stations with state-of-the-art optimization algorithms. We provide a complete solution that allows the owners to train, evaluate, introduce uncertainty, and benchmark policies on their datasets. Lastly, we discuss the potential for extending our work with other vehicle-to-grid deployments.
- J. P. Talusan, C. Han, A. Mukhopadhyay, A. Laszka, D. Freudberg, and A. Dubey, An Online Approach to Solving Public Transit Stationing and Dispatch Problem, in Proceedings of the ACM/IEEE 15th International Conference on Cyber-Physical Systems (ICCPS), New York, NY, USA, 2024.
@inproceedings{talusan2024ICCPS,
  title     = {An Online Approach to Solving Public Transit Stationing and Dispatch Problem},
  author    = {Talusan, Jose Paolo and Han, Chaeeun and Mukhopadhyay, Ayan and Laszka, Aron and Freudberg, Dan and Dubey, Abhishek},
  year      = {2024},
  booktitle = {Proceedings of the ACM/IEEE 15th International Conference on Cyber-Physical Systems (ICCPS)},
  location  = {Hong Kong, China},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  series    = {ICCPS '24},
  numpages  = {10},
}
Public bus transit systems provide critical transportation services for large sections of modern communities. On-time performance and maintaining the reliable quality of service is therefore very important. Unfortunately, disruptions caused by overcrowding, vehicular failures, and road accidents often lead to service performance degradation. Though transit agencies keep a limited number of vehicles in reserve and dispatch them to relieve the affected routes during disruptions, the procedure is often ad-hoc and has to rely on human experience and intuition to allocate resources (vehicles) to affected trips under uncertainty. In this paper, we describe a principled approach using non-myopic sequential decision procedures to solve the problem and decide (a) if it is advantageous to anticipate problems and proactively station transit buses near areas with high-likelihood of disruptions and (b) decide if and which vehicle to dispatch to a particular problem. Our approach was developed in partnership with the Metropolitan Transportation Authority for a mid-sized city in the USA and models the system as a semi-Markov decision problem (solved as a Monte-Carlo tree search procedure) and shows that it is possible to obtain an answer to these two coupled decision problems in a way that maximizes the overall reward (number of people served). We sample many possible futures from generative models, each is assigned to a tree and processed using root parallelization. We validate our approach using 3 years of data from our partner agency. Our experiments show that the proposed framework serves 2% more passengers while reducing deadhead miles by 40%.
- C. Han, J. P. Talusan, D. Freudberg, A. Mukhopadhyay, A. Dubey, and A. Laszka, Forecasting and Mitigating Disruptions in Public Bus Transit Services, in Proceedings of the 23rd Conference on Autonomous Agents and MultiAgent Systems, AAMAS 2024, Auckland, New Zealand, Richland, SC, 2024.
@inproceedings{talusan2024AAMAS,
  title     = {Forecasting and Mitigating Disruptions in Public Bus Transit Services},
  author    = {Han, Chaeeun and Talusan, Jose Paolo and Freudberg, Dan and Mukhopadhyay, Ayan and Dubey, Abhishek and Laszka, Aron},
  year      = {2024},
  booktitle = {Proceedings of the 23rd Conference on Autonomous Agents and MultiAgent Systems, {AAMAS} 2024, Auckland, New Zealand},
  location  = {Auckland, New Zealand},
  publisher = {International Foundation for Autonomous Agents and Multiagent Systems},
  address   = {Richland, SC},
  series    = {AAMAS '24},
  numpages  = {9},
  keywords  = {Public transportation, Data-driven optimization, Disruption forecasting, Simulation, Metaheuristic optimization},
}
Public transportation systems often suffer from unexpected fluctuations in demand and disruptions, such as mechanical failures and medical emergencies. These fluctuations and disruptions lead to delays and overcrowding, which are detrimental to the passengers’ experience and to the overall performance of the transit service. To proactively mitigate such events, many transit agencies station substitute (reserve) vehicles throughout their service areas, which they can dispatch to augment or replace vehicles on routes that suffer overcrowding or disruption. However, determining the optimal locations where substitute vehicles should be stationed is a challenging problem due to the inherent randomness of disruptions and due to the combinatorial nature of selecting locations across a city. In collaboration with the transit agency of a mid-size U.S. city, we address this problem by introducing data-driven statistical and machine-learning models for forecasting disruptions and an effective randomized local-search algorithm for selecting locations where substitute vehicles are to be stationed. Our research demonstrates promising results in proactive disruption management, offering a practical and easily implementable solution for transit agencies to enhance the reliability of their services. Our results resonate beyond mere operational efficiency—by advancing proactive strategies, our approach fosters more resilient and accessible public transportation, contributing to equitable urban mobility and ultimately benefiting the communities that rely on public transportation the most.
- A. Pettet, Y. Zhang, B. Luo, K. Wray, H. Baier, A. Laszka, A. Dubey, and A. Mukhopadhyay, Decision Making in Non-Stationary Environments with Policy-Augmented Search, Proceedings of the 23rd Conference on Autonomous Agents and MultiAgent Systems, AAMAS 2024, Auckland, New Zealand. 2024.
@inproceedings{pettet2024decision,
  title     = {Decision Making in Non-Stationary Environments with Policy-Augmented Search},
  author    = {Pettet, Ava and Zhang, Yunuo and Luo, Baiting and Wray, Kyle and Baier, Hendrik and Laszka, Aron and Dubey, Abhishek and Mukhopadhyay, Ayan},
  year      = {2024},
  booktitle = {Proceedings of the 23rd Conference on Autonomous Agents and MultiAgent Systems, {AAMAS} 2024, Auckland, New Zealand},
  location  = {Auckland, New Zealand},
  numpages  = {9},
}
Sequential decision-making under uncertainty is present in many important problems. Two popular approaches for tackling such problems are reinforcement learning and online search (e.g., Monte Carlo tree search). While the former learns a policy by interacting with the environment (typically done before execution), the latter uses a generative model of the environment to sample promising action trajectories at decision time. Decision-making is particularly challenging in non-stationary environments, where the environment in which an agent operates can change over time. Both approaches have shortcomings in such settings – on the one hand, policies learned before execution become stale when the environment changes and relearning takes both time and computational effort. Online search, on the other hand, can return sub-optimal actions when there are limitations on allowed runtime. In this paper, we introduce Policy-Augmented Monte Carlo tree search (PA-MCTS), which combines action-value estimates from an out-of-date policy with an online search using an up-to-date model of the environment. We prove theoretical results showing conditions under which PA-MCTS selects the one-step optimal action and also bound the error accrued while following PA-MCTS as a policy. We compare and contrast our approach with AlphaZero, another hybrid planning approach, and Deep Q Learning on several OpenAI Gym environments. Through extensive experiments, we show that under non-stationary settings with limited time constraints, PA-MCTS outperforms these baselines.
- B. Luo, Y. Zhang, A. Mukhopadhyay, and A. Dubey, Act as You Learn: Adaptive Decision-Making in Non-Stationary Markov Decision Processes, in Proceedings of the 23rd Conference on Autonomous Agents and MultiAgent Systems, AAMAS 2024, Auckland, New Zealand, 2024.
@inproceedings{baiting2024AAMAS,
  title     = {Act as You Learn: Adaptive Decision-Making in Non-Stationary Markov Decision Processes},
  author    = {Luo, Baiting and Zhang, Yunuo and Mukhopadhyay, Ayan and Dubey, Abhishek},
  year      = {2024},
  booktitle = {Proceedings of the 23rd Conference on Autonomous Agents and MultiAgent Systems, {AAMAS} 2024, Auckland, New Zealand},
  location  = {Auckland, New Zealand},
  numpages  = {9},
}
A fundamental (and largely open) challenge in sequential decision-making is dealing with non-stationary environments, where exogenous environmental conditions change over time. Such problems are traditionally modeled as non-stationary Markov decision processes (NSMDP), which can account for a non-stationary environmental distribution during planning. However, existing approaches for decision-making in NSMDPs have two major shortcomings: first, they assume that the updated environmental dynamics at the current time are known (although future dynamics can change); and second, planning is largely pessimistic, i.e., the agent acts “safely” to account for the non-stationary evolution of the environment. We argue that both these assumptions are invalid in practice—updated environmental conditions are rarely known, and as the agent interacts with the environment, it can learn about the updated dynamics and avoid being pessimistic, at least in states whose dynamics it is confident about. We present a heuristic search algorithm called Adaptive Monte Carlo Tree Search (ADA-MCTS) that addresses these challenges. We show that the agent can learn the updated dynamics of the environment over time and then act as it learns, i.e., if the agent is in a region of the state space about which it has updated knowledge, it can avoid being pessimistic. To quantify “updated knowledge,” we disintegrate the aleatoric and epistemic uncertainty in the agent’s updated belief and show how the agent can use these estimates for decision-making. We compare the proposed approach with the multiple state-of-the-art approaches in decision-making across multiple well-established open-source problems and empirically show that our approach is faster and highly adaptive without sacrificing safety.
2023
- Y. Senarath, A. Mukhopadhyay, H. Purohit, and A. Dubey, Designing a Human-Centered AI Tool for Proactive Incident Detection Using Crowdsourced Data Sources to Support Emergency Response, Digit. Gov.: Res. Pract., Nov. 2023.
@article{yasas2023ACM,
  title     = {Designing a Human-Centered {AI} Tool for Proactive Incident Detection Using Crowdsourced Data Sources to Support Emergency Response},
  author    = {Senarath, Yasas and Mukhopadhyay, Ayan and Purohit, Hemant and Dubey, Abhishek},
  year      = {2023},
  month     = nov,
  journal   = {Digit. Gov.: Res. Pract.},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  doi       = {10.1145/3633784},
  note      = {Just Accepted},
  keywords  = {Human-centered AI Tool, Crowdsourcing, Emergency Response, Incident Detection},
}
Time of incident reporting is a critical aspect of emergency response. However, the conventional approaches to receiving incident reports have time delays. Non-traditional sources such as crowdsourced data present an opportunity to detect incidents proactively. However, detecting incidents from such data streams is challenging due to inherent noise and data uncertainty. Naively maximizing detection accuracy can compromise spatial-temporal localization of inferred incidents, hindering response efforts. This paper presents a novel human-centered AI tool to address the above challenges. We demonstrate how crowdsourced data can aid incident detection while acknowledging associated challenges. We use an existing CROME framework to facilitate training and selection of best incident detection models, based on parameters suited for deployment. The human-centered AI tool provides a visual interface for exploring various measures to analyze the models for the practitioner’s needs, which could help the practitioners select the best model for their situation. Moreover, in this study, we illustrate the tool usage by comparing different models for incident detection. The experiments demonstrate that the CNN-based incident detection method can detect incidents significantly better than various alternative modeling approaches. In summary, this research demonstrates a promising application of human-centered AI tools for incident detection to support emergency response agencies.
- F. Tiausas, K. Yasumoto, J. P. Talusan, H. Yamana, H. Yamaguchi, S. Bhattacharjee, A. Dubey, and S. K. Das, HPRoP: Hierarchical Privacy-Preserving Route Planning for Smart Cities, ACM Trans. Cyber-Phys. Syst., Jun. 2023.
@article{talusan2023tcps2,
  title         = {{HPRoP}: Hierarchical Privacy-Preserving Route Planning for Smart Cities},
  author        = {Tiausas, Francis and Yasumoto, Keiichi and Talusan, Jose Paolo and Yamana, Hayato and Yamaguchi, Hirozumi and Bhattacharjee, Shameek and Dubey, Abhishek and Das, Sajal K.},
  year          = {2023},
  month         = jun,
  journal       = {ACM Trans. Cyber-Phys. Syst.},
  publisher     = {Association for Computing Machinery},
  address       = {New York, NY, USA},
  doi           = {10.1145/3603381},
  issn          = {2378-962X},
  note          = {Just Accepted},
  keywords      = {Security and privacy, Privacy-preserving protocols, Domain-specific security and privacy architectures},
  internal-note = {NOTE(review): this DOI is identical to the one in talusan2023tcps1 (a different paper) -- one of the two is likely a copy-paste error; verify against the ACM DL},
}
Route Planning Systems (RPS) are a core component of autonomous personal transport systems essential for safe and efficient navigation of dynamic urban environments with the support of edge-based smart city infrastructure, but they also raise concerns about user route privacy in the context of both privately-owned and commercial vehicles. Numerous high profile data breaches in recent years have fortunately motivated research on privacy-preserving RPS, but most of them are rendered impractical by greatly increased communication and processing overhead. We address this by proposing an approach called Hierarchical Privacy-Preserving Route Planning (HPRoP) which divides and distributes the route planning task across multiple levels, and protects locations along the entire route. This is done by combining Inertial Flow partitioning, Private Information Retrieval (PIR), and Edge Computing techniques with our novel route planning heuristic algorithm. Normalized metrics were also formulated to quantify the privacy of the source/destination points (endpoint location privacy) and the route itself (route privacy). Evaluation on a simulated road network showed that HPRoP reliably produces routes differing only by ≤20% in length from optimal shortest paths, with completion times within 25 seconds which is reasonable for a PIR-based approach. On top of this, more than half of the produced routes achieved near-optimal endpoint location privacy (≈1.0) and good route privacy (≥0.8).
- M. J. Islam, J. P. Talusan, S. Bhattacharjee, F. Tiausas, A. Dubey, K. Yasumoto, and S. K. Das, Scalable Pythagorean Mean Based Incident Detection in Smart Transportation Systems, ACM Trans. Cyber-Phys. Syst., Jun. 2023.
@article{talusan2023tcps1,
  title     = {Scalable Pythagorean Mean Based Incident Detection in Smart Transportation Systems},
  author    = {Islam, Md. Jaminur and Talusan, Jose Paolo and Bhattacharjee, Shameek and Tiausas, Francis and Dubey, Abhishek and Yasumoto, Keiichi and Das, Sajal K.},
  year      = {2023},
  month     = jun,
  journal   = {ACM Trans. Cyber-Phys. Syst.},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  doi       = {10.1145/3603381},
  issn      = {2378-962X},
  note      = {Just Accepted},
  keywords  = {Incident Detection, Weakly Unsupervised Learning, Graph Algorithms, Approximation Algorithm, Regression, Smart Transportation, Cluster Analysis, Anomaly Detection},
}
Modern smart cities need smart transportation solutions to quickly detect various traffic emergencies and incidents in the city to avoid cascading traffic disruptions. To materialize this, roadside units and ambient transportation sensors are being deployed to collect speed data that enables the monitoring of traffic conditions on each road segment. In this paper, we first propose a scalable data-driven anomaly-based traffic incident detection framework for a city-scale smart transportation system. Specifically, we propose an incremental region growing approximation algorithm for optimal Spatio-temporal clustering of road segments and their data; such that road segments are strategically divided into highly correlated clusters. The highly correlated clusters enable identifying a Pythagorean Mean-based invariant as an anomaly detection metric that is highly stable under no incidents but shows a deviation in the presence of incidents. We learn the bounds of the invariants in a robust manner such that anomaly detection can generalize to unseen events, even when learning from real noisy data. Second, using cluster-level detection, we propose a folded Gaussian classifier to pinpoint the particular segment in a cluster where the incident happened in an automated manner. We perform extensive experimental validation using mobility data collected from four cities in Tennessee, compare with the state-of-the-art ML methods, to prove that our method can detect incidents within each cluster in real-time and outperforms known ML methods.
- S. Pavia, J. C. M. Mori, A. Sharma, P. Pugliese, A. Dubey, S. Samaranayake, and A. Mukhopadhyay, Designing Equitable Transit Networks, ACM Conference on Equity and Access in Algorithms, Mechanisms, and Optimization (Poster) (EAAMO). 2023.
@misc{pavia2023designing,
  title         = {Designing Equitable Transit Networks},
  author        = {Pavia, Sophie and Mori, J. Carlos Martinez and Sharma, Aryaman and Pugliese, Philip and Dubey, Abhishek and Samaranayake, Samitha and Mukhopadhyay, Ayan},
  year          = {2023},
  howpublished  = {Poster at the ACM Conference on Equity and Access in Algorithms, Mechanisms, and Optimization (EAAMO)},
  eprint        = {2212.12007},
  archiveprefix = {arXiv},
  url           = {https://arxiv.org/abs/2212.12007},
}
- S. Pavia, J. C. M. Mori, A. Sharma, P. Pugliese, A. Dubey, S. Samaranayake, and A. Mukhopadhyay, Designing Equitable Transit Networks, INFORMS Transportation and Logistics Society Conference (extended abstract) (TSL). 2023.
@misc{pavia2023designing_abstract,
  title        = {Designing Equitable Transit Networks},
  author       = {Pavia, Sophie and Mori, J. Carlos Martinez and Sharma, Aryaman and Pugliese, Philip and Dubey, Abhishek and Samaranayake, Samitha and Mukhopadhyay, Ayan},
  year         = {2023},
  howpublished = {Extended abstract at the INFORMS Transportation and Logistics Society Conference (TSL)},
}
- B. Luo, S. Ramakrishna, A. Pettet, C. Kuhn, G. Karsai, and A. Mukhopadhyay, Dynamic Simplex: Balancing Safety and Performance in Autonomous Cyber Physical Systems, in Proceedings of the ACM/IEEE 14th International Conference on Cyber-Physical Systems (with CPS-IoT Week 2023), New York, NY, USA, 2023, pp. 177–186.
@inproceedings{baiting2023iccps,
  title     = {Dynamic Simplex: Balancing Safety and Performance in Autonomous Cyber Physical Systems},
  author    = {Luo, Baiting and Ramakrishna, Shreyas and Pettet, Ava and Kuhn, Christopher and Karsai, Gabor and Mukhopadhyay, Ayan},
  year      = {2023},
  booktitle = {Proceedings of the ACM/IEEE 14th International Conference on Cyber-Physical Systems (with CPS-IoT Week 2023)},
  location  = {San Antonio, TX, USA},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  series    = {ICCPS '23},
  pages     = {177--186},
  doi       = {10.1145/3576841.3585934},
  isbn      = {9798400700361},
  url       = {https://doi.org/10.1145/3576841.3585934},
  numpages  = {10},
}
Learning Enabled Components (LEC) have greatly assisted cyber-physical systems in achieving higher levels of autonomy. However, LEC’s susceptibility to dynamic and uncertain operating conditions is a critical challenge for the safety of these systems. Redundant controller architectures have been widely adopted for safety assurance in such contexts. These architectures augment LEC "performant" controllers that are difficult to verify with "safety" controllers and the decision logic to switch between them. While these architectures ensure safety, we point out two limitations. First, they are trained offline to learn a conservative policy of always selecting a controller that maintains the system’s safety, which limits the system’s adaptability to dynamic and non-stationary environments. Second, they do not support reverse switching from the safety controller to the performant controller, even when the threat to safety is no longer present. To address these limitations, we propose a dynamic simplex strategy with an online controller switching logic that allows two-way switching. We consider switching as a sequential decision-making problem and model it as a semi-Markov decision process. We leverage a combination of a myopic selector using surrogate models (for the forward switch) and a non-myopic planner (for the reverse switch) to balance safety and performance. We evaluate this approach using an autonomous vehicle case study in the CARLA simulator using different driving conditions, locations, and component failures. We show that the proposed approach results in fewer collisions and higher performance than state-of-the-art alternatives.
- M. Wilbur, M. Coursey, P. Koirala, Z. Al-Quran, P. Pugliese, and A. Dubey, Mobility-On-Demand Transportation: A System for Microtransit and Paratransit Operations, in Proceedings of the ACM/IEEE 14th International Conference on Cyber-Physical Systems (with CPS-IoT Week 2023), New York, NY, USA, 2023, pp. 260–261.
@inproceedings{wilbur2023mobility,
  title     = {Mobility-On-Demand Transportation: A System for Microtransit and Paratransit Operations},
  author    = {Wilbur, Michael and Coursey, Maxime and Koirala, Pravesh and Al-Quran, Zakariyya and Pugliese, Philip and Dubey, Abhishek},
  year      = {2023},
  booktitle = {Proceedings of the ACM/IEEE 14th International Conference on Cyber-Physical Systems (with CPS-IoT Week 2023)},
  location  = {San Antonio, TX, USA},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  series    = {ICCPS '23},
  pages     = {260--261},
  doi       = {10.1145/3576841.3589625},
  isbn      = {9798400700361},
  url       = {https://doi.org/10.1145/3576841.3589625},
  numpages  = {2},
  keywords  = {ridepooling, software, mobility-on-demand, transit operations},
}
New rideshare and shared-mobility services have transformed urban mobility in recent years. Therefore, transit agencies are looking for ways to adapt to this rapidly changing environment. In this space, ridepooling has the potential to improve efficiency and reduce costs by allowing users to share rides in high-capacity vehicles and vans. Most transit agencies already operate various ridepooling services including microtransit and paratransit. However, the objectives and constraints for implementing these services vary greatly between agencies. This brings multiple challenges. First, off-the-shelf ridepooling formulations must be adapted for real-world conditions and constraints. Second, the lack of modular and reusable software makes it hard to implement and evaluate new ridepooling algorithms and approaches in real-world settings. Therefore, we propose an on-demand transportation scheduling software for microtransit and paratransit services. This software is aimed at transit agencies looking to incorporate state-of-the-art rideshare and ridepooling algorithms in their everyday operations. We provide management software for dispatchers and mobile applications for drivers and users. Lastly, we discuss the challenges in adapting state-of-the-art methods to real-world operations.
- A. Zulqarnain, S. Gupta, J. P. Talusan, P. Pugliese, A. Mukhopadhyay, and A. Dubey, Addressing APC Data Sparsity in Predicting Occupancy and Delay of Transit Buses: A Multitask Learning Approach, in 2023 IEEE International Conference on Smart Computing (SMARTCOMP), 2023.
@inproceedings{Zulqarnain2023,
  title     = {Addressing APC Data Sparsity in Predicting Occupancy and Delay of Transit Buses: A Multitask Learning Approach},
  author    = {Zulqarnain, Ammar and Gupta, Samir and Talusan, Jose Paolo and Pugliese, Philip and Mukhopadhyay, Ayan and Dubey, Abhishek},
  year      = {2023},
  booktitle = {2023 IEEE International Conference on Smart Computing (SMARTCOMP)},
}
Public transit is a vital mode of transportation in urban areas, and its efficiency is crucial for the daily commute of millions of people. To improve the reliability and predictability of transit systems, researchers have developed separate single-task learning models to predict the occupancy and delay of buses at the stop or route level. However, these models provide a narrow view of delay and occupancy at each stop and do not account for the correlation between the two. We propose a novel approach that leverages broader generalizable patterns governing delay and occupancy for improved prediction. We introduce a multitask learning toolchain that takes into account General Transit Feed Specification feeds, Automatic Passenger Counter data, and contextual information such as temporal and spatial information. The toolchain predicts transit delay and occupancy at the stop level, improving the accuracy of the predictions of these two features of a trip given sparse and noisy data. We also show that our toolchain can adapt to fewer samples of new transit data once it has been trained on previous routes/trips as compared to state-of-the-art methods. Finally, we use actual data from Chattanooga, Tennessee, to validate our approach. We compare our approach against the state-of-the-art methods and we show that treating occupancy and delay as related problems improves the accuracy of the predictions. We show that our approach improves delay prediction significantly by as much as 6% in F1 scores while producing equivalent or better results for occupancy.
- J. Buckelew, S. Basumallik, V. Sivaramakrishnan, A. Mukhopadhyay, A. K. Srivastava, and A. Dubey, Synchrophasor Data Event Detection using Unsupervised Wavelet Convolutional Autoencoders, in 2023 IEEE International Conference on Smart Computing (SMARTCOMP), 2023.
@inproceedings{Buckelew2023,
  title     = {Synchrophasor Data Event Detection using Unsupervised Wavelet Convolutional Autoencoders},
  author    = {Buckelew, Jacob and Basumallik, Sagnik and Sivaramakrishnan, Vasavi and Mukhopadhyay, Ayan and Srivastava, Anurag K. and Dubey, Abhishek},
  year      = {2023},
  booktitle = {2023 IEEE International Conference on Smart Computing (SMARTCOMP)},
}
- Y. Kim, D. Edirimanna, M. Wilbur, P. Pugliese, A. Laszka, A. Dubey, and S. Samaranayake, Rolling Horizon based Temporal Decomposition for the Offline Pickup and Delivery Problem with Time Windows, in Proceedings of the 37th AAAI Conference on Artificial Intelligence (AAAI-23), 2023.
@inproceedings{youngseo2023,
  title     = {Rolling Horizon based Temporal Decomposition for the Offline Pickup and Delivery Problem with Time Windows},
  author    = {Kim, Youngseo and Edirimanna, Danushka and Wilbur, Michael and Pugliese, Philip and Laszka, Aron and Dubey, Abhishek and Samaranayake, Samitha},
  year      = {2023},
  booktitle = {Proceedings of the 37th AAAI Conference on Artificial Intelligence (AAAI-23)},
  tag       = {ai4cps,transit},
}
The offline pickup and delivery problem with time windows (PDPTW) is a classical combinatorial optimization problem in the transportation community, which has proven to be very challenging computationally. Due to the complexity of the problem, practical problem instances can be solved only via heuristics, which trade-off solution quality for computational tractability. Among the various heuristics, a common strategy is problem decomposition, that is, the reduction of a largescale problem into a collection of smaller sub-problems, with spatial and temporal decompositions being two natural approaches. While spatial decomposition has been successful in certain settings, effective temporal decomposition has been challenging due to the difficulty of stitching together the sub-problem solutions across the decomposition boundaries. In this work, we introduce a novel temporal decomposition scheme for solving a class of PDPTWs that have narrow time windows, for which it is able to provide both fast and highquality solutions. We utilize techniques that have been popularized recently in the context of online dial-a-ride problems along with the general idea of rolling horizon optimization. To the best of our knowledge, this is the first attempt to solve offline PDPTWs using such an approach. To show the performance and scalability of our framework, we use the optimization of paratransit services as a motivating example. Due to the lack of benchmark solvers similar to ours (i.e., temporal decomposition with an online solver), we compare our results with an offline heuristic algorithm using Google OR-Tools. In smaller problem instances (with an average of 129 requests per instance), the baseline approach is as competitive as our framework. 
However, in larger problem instances (approximately 2,500 requests per instance), our framework is more scalable and can provide good solutions to problem instances of varying degrees of difficulty, while the baseline algorithm often fails to find a feasible solution within comparable compute times.
- S. U. Kadir, S. Majumder, A. Srivastava, A. Chhokra, H. Neema, A. Dubey, and A. Laszka, Reinforcement Learning based Proactive Control for Enabling Power Grid Resilience to Wildfire, IEEE Transactions on Industrial Informatics, pp. 1–11, 2023.
@article{proactivewildfire,
  title   = {Reinforcement Learning based Proactive Control for Enabling Power Grid Resilience to Wildfire},
  author  = {Kadir, Salah Uddin and Majumder, Subir and Srivastava, A. and Chhokra, A. and Neema, H. and Dubey, A. and Laszka, A.},
  journal = {IEEE Transactions on Industrial Informatics},
  year    = {2023},
  pages   = {1--11},
  doi     = {10.1109/TII.2023.3263500},
}
- M. Wilbur, A. Ayman, A. Sivagnanam, A. Ouyang, V. Poon, R. Kabir, A. Vadali, P. Pugliese, D. Freudberg, A. Laszka, and A. Dubey, Impact of COVID-19 on Public Transit Accessibility and Ridership, Transportation Research Record, vol. 2677, no. 4, pp. 531–546, 2023.
@article{wilbur2022_trr,
  title   = {Impact of COVID-19 on Public Transit Accessibility and Ridership},
  author  = {Wilbur, Michael and Ayman, Afiya and Sivagnanam, Amutheezan and Ouyang, Anna and Poon, Vincent and Kabir, Riyan and Vadali, Abhiram and Pugliese, Philip and Freudberg, Daniel and Laszka, Aron and Dubey, Abhishek},
  year    = {2023},
  journal = {Transportation Research Record},
  volume  = {2677},
  number  = {4},
  pages   = {531--546},
  doi     = {10.1177/03611981231160531},
}
COVID-19 has radically transformed urban travel behavior throughout the world. Agencies have had to provide adequate service while navigating a rapidly changing environment with reduced revenue. As COVID-19-related restrictions are lifted, transit agencies are concerned about their ability to adapt to changes in ridership behavior and public transit usage. To aid their becoming more adaptive to sudden or persistent shifts in ridership, we addressed three questions: To what degree has COVID-19 affected fixed-line public transit ridership and what is the relationship between reduced demand and vehicle trips? How has COVID-19 changed ridership patterns and are they expected to persist after restrictions are lifted? Are there disparities in ridership changes across socioeconomic groups and mobility-impaired riders? Focusing on Nashville and Chattanooga, TN, ridership demand and vehicle trips were compared with anonymized mobile location data to study the relationship between mobility patterns and transit usage. Correlation analysis and multiple linear regression were used to investigate the relationship between socioeconomic indicators and changes in transit ridership, and an analysis of changes in paratransit demand before and during COVID-19. Ridership initially dropped by 66% and 65% over the first month of the pandemic for Nashville and Chattanooga, respectively. Cellular mobility patterns in Chattanooga indicated that foot traffic recovered to a greater degree than transit ridership between mid-April and the last week in June, 2020. Education-level had a statistically significant impact on changes in fixed-line bus transit, and the distribution of changes in demand for paratransit services were similar to those of fixed-line bus transit.
2022
- G. Karsai, A. Coglio, and A. Dubey, Model-Based Intent-Driven Adaptive Software (MIDAS), Vanderbilt University, 2022.
@techreport{karsai2022model,
  title       = {Model-Based Intent-Driven Adaptive Software (MIDAS)},
  author      = {Karsai, Gabor and Coglio, Alessandro and Dubey, Abhishek},
  year        = {2022},
  institution = {Vanderbilt University},
}
The increasing complexity of software systems makes the rapid propagation of requirement changes into the design and implementation code very problematic. The goal of the Intent-Driven Adaptive Software program was to develop technologies that assist developers in making changes to requirements and automatically propagating those changes to the design and implementation of software systems. The Model-based Intent-Driven Adaptive software project developed a vision for a comprehensive technology to achieve this goal by developing and implementing two components of that vision: a program specification and synthesis tool, and a domain-specific language and generators for the rapid configuration and adaptation of service-based architectures. These two results can serve as a foundation for the future implementation of the vision.
- B. McLoughlin, S. Bhandari, E. Henrick, E. Hotchkiss, M. Jha, S. Jiang, E. Kern, L. Marston, C. Vanags, C. Snyder, and others, A modular approach for integrating data science concepts into multiple undergraduate STEM+ C courses, in 2022 ASEE Annual Conference & Exposition, 2022.
@inproceedings{mcloughlin2022modular,
  title     = {A modular approach for integrating data science concepts into multiple undergraduate STEM+ C courses},
  author    = {McLoughlin, Brendan and Bhandari, Sambridhi and Henrick, Erin and Hotchkiss, Erin and Jha, Manoj and Jiang, Steven and Kern, Emily and Marston, Landon and Vanags, Christopher and Snyder, Caitlin and others},
  year      = {2022},
  booktitle = {2022 ASEE Annual Conference \& Exposition},
  url       = {https://peer.asee.org/a-modular-approach-for-integrating-data-science-concepts-into-multiple-undergraduate-stem-c-courses},
}
- A. Coglio, E. McCarthy, S. Westfold, D. Balasubramanian, A. Dubey, and G. Karsai, Syntheto: A Surface Language for APT and ACL2, Electronic Proceedings in Theoretical Computer Science, vol. 359, pp. 151–167, May 2022.
@article{Coglio_2022,
  title     = {Syntheto: A Surface Language for {APT} and {ACL}2},
  author    = {Coglio, Alessandro and McCarthy, Eric and Westfold, Stephen and Balasubramanian, Daniel and Dubey, Abhishek and Karsai, Gabor},
  journal   = {Electronic Proceedings in Theoretical Computer Science},
  year      = {2022},
  month     = may,
  volume    = {359},
  pages     = {151--167},
  publisher = {Open Publishing Association},
  doi       = {10.4204/eptcs.359.13},
  url       = {https://doi.org/10.4204/eptcs.359.13},
  preprint  = {https://arxiv.org/abs/2205.11706},
}
Syntheto is a surface language for carrying out formally verified program synthesis by transformational refinement in ACL2 using the APT toolkit. Syntheto aims at providing more familiarity and automation, in order to make this technology more widely usable. Syntheto is a strongly statically typed functional language that includes both executable and non-executable constructs, including facilities to state and prove theorems and facilities to apply proof-generating transformations. Syntheto is integrated into an IDE with a notebook-style, interactive interface that translates Syntheto to ACL2 definitions and APT transformation invocations, and back-translates the prover’s results to Syntheto; the bidirectional translation happens behind the scenes, with the user interacting solely with Syntheto.
- S. Pavia, J. C. M. Mori, A. Sharma, P. Pugliese, A. Dubey, S. Samaranayake, and A. Mukhopadhyay, Designing Equitable Transit Networks. arXiv, 2022.
@misc{sophiefairtransit2022arxiv,
  title     = {Designing Equitable Transit Networks},
  author    = {Pavia, Sophie and Mori, J. Carlos Martinez and Sharma, Aryaman and Pugliese, Philip and Dubey, Abhishek and Samaranayake, Samitha and Mukhopadhyay, Ayan},
  year      = {2022},
  publisher = {arXiv},
  doi       = {10.48550/ARXIV.2212.12007},
  url       = {https://arxiv.org/abs/2212.12007},
  preprint  = {https://arxiv.org/abs/2212.12007},
  keywords  = {Computers and Society (cs.CY), FOS: Computer and information sciences},
  copyright = {arXiv.org perpetual, non-exclusive license},
}
Public transit is an essential infrastructure enabling access to employment, healthcare, education, and recreational facilities. While accessibility to transit is important in general, some sections of the population depend critically on transit. However, existing public transit is often not designed equitably, and often, equity is only considered as an additional objective post hoc, which hampers systemic changes. We present a formulation for transit network design that considers different notions of equity and welfare explicitly. We study the interaction between network design and various concepts of equity and present trade-offs and results based on real-world data from a large metropolitan area in the United States of America.
- A. Ayman, J. Martinez, P. Pugliese, A. Dubey, and A. Laszka, Neural Architecture and Feature Search for Predicting the Ridership of Public Transportation Routes, in 8th IEEE International Conference on Smart Computing (SMARTCOMP), 2022.
@inproceedings{ayman2022neural,
  title     = {Neural Architecture and Feature Search for Predicting the Ridership of Public Transportation Routes},
  author    = {Ayman, Afiya and Martinez, Juan and Pugliese, Philip and Dubey, Abhishek and Laszka, Aron},
  year      = {2022},
  month     = jun,
  booktitle = {8th IEEE International Conference on Smart Computing (SMARTCOMP)},
}
Accurately predicting the ridership of public-transit routes provides substantial benefits to both transit agencies, who can dispatch additional vehicles proactively before the vehicles that serve a route become crowded, and to passengers, who can avoid crowded vehicles based on publicly available predictions. The spread of the coronavirus disease has further elevated the importance of ridership prediction as crowded vehicles now present not only an inconvenience but also a public-health risk. At the same time, accurately predicting ridership has become more challenging due to evolving ridership patterns, which may make all data except for the most recent records stale. One promising approach for improving prediction accuracy is to fine-tune the hyper-parameters of machine-learning models for each transit route based on the characteristics of the particular route, such as the number of records. However, manually designing a machine-learning model for each route is a labor-intensive process, which may require experts to spend a significant amount of their valuable time. To help experts with designing machine-learning models, we propose a neural-architecture and feature search approach, which optimizes the architecture and features of a deep neural network for predicting the ridership of a public-transit route. Our approach is based on a randomized local hyper-parameter search, which minimizes both prediction error as well as the complexity of the model. We evaluate our approach on real-world ridership data provided by the public transit agency of Chattanooga, TN, and we demonstrate that training neural networks whose architectures and features are optimized for each route provides significantly better performance than training neural networks whose architectures and features are generic.
- R. Sen, T. Tran, S. Khaleghian, M. Sartipi, H. Neema, and A. Dubey, BTE-Sim: Fast simulation environment for public transportation, 2022 IEEE International Conference on Big Data, 2022.
@inproceedings{sen2022,
  title     = {BTE-Sim: Fast simulation environment for public transportation},
  author    = {Sen, Rishav and Tran, Toan and Khaleghian, Seyedmehdi and Sartipi, Mina and Neema, Himanshu and Dubey, Abhishek},
  year      = {2022},
  booktitle = {2022 IEEE International Conference on Big Data},
  publisher = {IEEE},
  numpages  = {8},
  keywords  = {public transit, fast traffic simulation, model integration, data processing, road speed calibration},
}
The public commute is essential to all urban centers and is an efficient and environment-friendly way to travel. Transit systems must become more accessible and user-friendly. Since public transit is majorly designed statically, with very few improvements coming over time, it can get stagnated, unable to update itself with changing population trends. To better understand transportation demands and make them more usable, efficient, and demographic-focused, we propose a fast, multi-layered transit simulation that primarily focuses on public transit simulation (BTE-Sim). BTE-Sim is designed based on the population demand, existing traffic conditions, and the road networks that exist in a region. The system is versatile, with the ability to run different configurations of the existing transit routes, or inculcate any new changes that may seem necessary, or even in extreme cases, new transit network design as well. In all situations, it can compare multiple transit networks and provide evaluation metrics for them. It provides detailed data on each transit vehicle, the trips it performs, its on-time performance and other necessary factors. Its highlighting feature is the considerably low computation time it requires to perform all these tasks and provide consistently reliable results.
- J. P. Talusan, A. Mukhopadhyay, D. Freudberg, and A. Dubey, On Designing Day Ahead and Same Day Ridership Level Prediction Models for City-Scale Transit Networks Using Noisy APC Data, in 2022 IEEE International Conference on Big Data (Big Data), Los Alamitos, CA, USA, 2022, pp. 5598–5606.
@inproceedings{talusan2022apc,
  title     = {On Designing Day Ahead and Same Day Ridership Level Prediction Models for City-Scale Transit Networks Using Noisy APC Data},
  author    = {Talusan, Jose Paolo and Mukhopadhyay, Ayan and Freudberg, Dan and Dubey, Abhishek},
  year      = {2022},
  month     = dec,
  booktitle = {2022 IEEE International Conference on Big Data (Big Data)},
  pages     = {5598--5606},
  keywords  = {training;schedules;statistical analysis;stochastic processes;predictive models;big data;data models},
  doi       = {10.1109/BigData55660.2022.10020390},
  url       = {https://doi.ieeecomputersociety.org/10.1109/BigData55660.2022.10020390},
  publisher = {IEEE Computer Society},
  address   = {Los Alamitos, CA, USA},
}
The ability to accurately predict public transit ridership demand benefits passengers and transit agencies. Agencies will be able to reallocate buses to handle under or over-utilized bus routes, improving resource utilization, and passengers will be able to adjust and plan their schedules to avoid overcrowded buses and maintain a certain level of comfort. However, accurately predicting occupancy is a non-trivial task. Various reasons such as heterogeneity, evolving ridership patterns, exogenous events like weather, and other stochastic variables, make the task much more challenging. With the progress of big data, transit authorities now have access to real-time passenger occupancy information for their vehicles. The amount of data generated is staggering. While there is no shortage in data, it must still be cleaned, processed, augmented, and merged before any useful information can be generated. In this paper, we propose the use and fusion of data from multiple sources, cleaned, processed, and merged together, for use in training machine learning models to predict transit ridership. We use data that spans a 2-year period (2020-2022) incorporating transit, weather, traffic, and calendar data. The resulting data, which equates to 17 million observations, is used to train separate models for the trip and stop level prediction. We evaluate our approach on real-world transit data provided by the public transit agency of Nashville, TN. We demonstrate that the trip level model based on Xgboost and the stop level model based on LSTM outperform the baseline statistical model across the entire transit service day.
- G. Pettet, H. Baxter, S. Vazirizade, H. Purohit, M. Ma, A. Mukhopadhyay, and A. Dubey, Designing Decision Support Systems for Emergency Response: Challenges and Opportunities, in 2022 Workshop on Cyber Physical Systems for Emergency Response (CPS-ER), Los Alamitos, CA, USA, 2022, pp. 30–35.
@inproceedings{pettet2022designing,
  title     = {Designing Decision Support Systems for Emergency Response: Challenges and Opportunities},
  author    = {Pettet, G. and Baxter, H. and Vazirizade, S. and Purohit, H. and Ma, M. and Mukhopadhyay, A. and Dubey, A.},
  year      = {2022},
  month     = may,
  booktitle = {2022 Workshop on Cyber Physical Systems for Emergency Response (CPS-ER)},
  pages     = {30--35},
  keywords  = {decision support systems;road accidents;uncertainty;decision making;medical services;emergency services;hazards},
  doi       = {10.1109/CPS-ER56134.2022.00012},
  url       = {https://doi.ieeecomputersociety.org/10.1109/CPS-ER56134.2022.00012},
  publisher = {IEEE Computer Society},
  address   = {Los Alamitos, CA, USA},
}
Designing effective emergency response management (ERM) systems to respond to incidents such as road accidents is a major problem faced by communities. In addition to responding to frequent incidents each day (about 240 million emergency medical services calls and over 5 million road accidents in the US each year), these systems also support response during natural hazards. Recently, there has been a consistent interest in building decision support and optimization tools that can help emergency responders provide more efficient and effective response. This includes a number of principled subsystems that implement early incident detection, incident likelihood forecasting and strategic resource allocation and dispatch policies. In this paper, we highlight the key challenges and provide an overview of the approach developed by our team in collaboration with our community partners.
- S. Eisele, M. Wilbur, T. Eghtesad, K. Silvergold, F. Eisele, A. Mukhopadhyay, A. Laszka, and A. Dubey, Decentralized Computation Market for Stream Processing Applications, in 2022 IEEE International Conference on Cloud Engineering (IC2E), Pacific Grove, CA, USA, 2022.
@inproceedings{eisele2022Decentralized,
  title     = {Decentralized Computation Market for Stream Processing Applications},
  author    = {Eisele, Scott and Wilbur, Michael and Eghtesad, Taha and Silvergold, Kevin and Eisele, Fred and Mukhopadhyay, Ayan and Laszka, Aron and Dubey, Abhishek},
  year      = {2022},
  month     = oct,
  booktitle = {2022 IEEE International Conference on Cloud Engineering (IC2E)},
  publisher = {IEEE Computer Society},
  address   = {Pacific Grove, CA, USA},
}
While cloud computing is the current standard for outsourcing computation, it can be prohibitively expensive for cities and infrastructure operators to deploy services. At the same time, there are underutilized computing resources within cities and local edge-computing deployments. Using these slack resources may enable significantly lower pricing than comparable cloud computing; such resources would incur minimal marginal expenditure since their deployment and operation are mostly sunk costs. However, there are challenges associated with using these resources. First, they are not effectively aggregated or provisioned. Second, there is a lack of trust between customers and suppliers of computing resources, given that they are distinct stakeholders and behave according to their own interests. Third, delays in processing inputs may diminish the value of the applications. To resolve these challenges, we introduce an architecture combining a distributed trusted computing mechanism, such as a blockchain, with an efficient messaging system like Apache Pulsar. Using this architecture, we design a decentralized computation market where customers and suppliers make offers to deploy and host applications. The proposed architecture can be realized using any trusted computing mechanism that supports smart contracts, and any messaging framework with the necessary features. This combination ensures that the market is robust without incurring the input processing delays that limit other blockchain based solutions. We evaluate the market protocol using game-theoretic analysis to show that deviation from the protocol is discouraged. Finally, we assess the performance of a prototype implementation based on experiments with a streaming computer-vision application.
- R. Sen, A. K. Bharati, S. Khaleghian, M. Ghosal, M. Wilbur, T. Tran, P. Pugliese, M. Sartipi, H. Neema, and A. Dubey, E-Transit-Bench: Simulation Platform for Analyzing Electric Public Transit Bus Fleet Operations, in Proceedings of the Thirteenth ACM International Conference on Future Energy Systems, New York, NY, USA, 2022, pp. 532–541.
@inproceedings{rishav2022eEnergy,
  title     = {E-Transit-Bench: Simulation Platform for Analyzing Electric Public Transit Bus Fleet Operations},
  author    = {Sen, Rishav and Bharati, Alok Kumar and Khaleghian, Seyedmehdi and Ghosal, Malini and Wilbur, Michael and Tran, Toan and Pugliese, Philip and Sartipi, Mina and Neema, Himanshu and Dubey, Abhishek},
  year      = {2022},
  booktitle = {Proceedings of the Thirteenth ACM International Conference on Future Energy Systems},
  pages     = {532--541},
  numpages  = {10},
  isbn      = {9781450393973},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  doi       = {10.1145/3538637.3539586},
  url       = {https://doi.org/10.1145/3538637.3539586},
  keywords  = {model-integration, cyber-physical systems, co-simulation, powergrid simulation, traffic simulation},
  location  = {Virtual Event},
  series    = {e-Energy '22},
}
When electrified transit systems make grid aware choices, improved social welfare is achieved by reducing grid stress, reducing system loss, and minimizing power quality issues. Electrifying a transit fleet has numerous challenges like non-availability of buses during charging, varying charging costs and so on, that are related to the electric grid behavior. However, transit systems do not have access to the information about the co-evolution of the grid’s power flow and therefore cannot account for the power grid’s needs in its day-to-day operation. In this paper we propose a framework of transportation-grid co-simulation, analyzing the spatio-temporal interaction between the transit operations with electric buses and the power distribution grid. Real-world data for a day’s traffic from Chattanooga city’s transit system is simulated in SUMO and integrated with a realistic distribution grid simulation (using GridLAB-D) to understand the grid impact due to transit electrification. Charging information is obtained from the transportation simulation to feed into grid simulation to assess the impact of charging. We also discuss the impact to the grid with higher degree of transit electrification that further necessitates such an integrated transportation-grid co-simulation to operate the integrated system optimally. Our future work includes extending the platform for optimizing the charging and trip assignment operations.
- Z. Kang, A. Mukhopadhyay, A. Gokhale, S. Wen, and A. Dubey, Traffic Anomaly Detection Via Conditional Normalizing Flow, in 2022 IEEE 25th International Conference on Intelligent Transportation Systems (ITSC), 2022, pp. 2563–2570.
@inproceedings{kang2022generative,
  author    = {Kang, Zhuangwei and Mukhopadhyay, Ayan and Gokhale, Aniruddha and Wen, Shijie and Dubey, Abhishek},
  title     = {Traffic Anomaly Detection Via Conditional Normalizing Flow},
  booktitle = {2022 IEEE 25th International Conference on Intelligent Transportation Systems (ITSC)},
  year      = {2022},
  pages     = {2563--2570},
  doi       = {10.1109/ITSC55140.2022.9922061}
}
Traffic congestion anomaly detection is of paramount importance in intelligent traffic systems. The goals of transportation agencies are two-fold: to monitor the general traffic conditions in the area of interest and to locate road segments under abnormal congestion states. Modeling congestion patterns can achieve these goals for citywide roadways, which amounts to learning the distribution of multivariate time series (MTS). However, existing works are either not scalable or unable to capture the spatial-temporal information in MTS simultaneously. To this end, we propose a principled and comprehensive framework consisting of a data-driven generative approach that can perform tractable density estimation for detecting traffic anomalies. Our approach first clusters segments in the feature space and then uses conditional normalizing flow to identify anomalous temporal snapshots at the cluster level in an unsupervised setting. Then, we identify anomalies at the segment level by using a kernel density estimator on the anomalous cluster. Extensive experiments on synthetic datasets show that our approach significantly outperforms several state-of-the-art congestion anomaly detection and diagnosis methods in terms of Recall and F1-Score. We also use the generative model to sample labeled data, which can train classifiers in a supervised setting, alleviating the lack of labeled data for anomaly detection in sparse settings.
- V. Nair, K. Prakash, M. Wilbur, A. Taneja, C. Namblard, O. Adeyemo, A. Dubey, A. Adereni, M. Tambe, and A. Mukhopadhyay, ADVISER: AI-Driven Vaccination Intervention Optimiser for Increasing Vaccine Uptake in Nigeria, in 31st International Joint Conference on Artificial Intelligence (IJCAI), 2022.
@inproceedings{ijcai22Ayan,
  title     = {{ADVISER}: {AI-Driven} Vaccination Intervention Optimiser for Increasing Vaccine Uptake in {Nigeria}},
  author    = {Nair, Vineet and Prakash, Kritika and Wilbur, Michael and Taneja, Aparna and Namblard, Corinne and Adeyemo, Oyindamola and Dubey, Abhishek and Adereni, Abiodun and Tambe, Milind and Mukhopadhyay, Ayan},
  booktitle = {31st International Joint Conference on Artificial Intelligence (IJCAI)},
  year      = {2022},
  month     = jul,
  doi       = {10.48550/arXiv.2204.13663},
  url       = {https://arxiv.org/abs/2204.13663}
}
More than 5 million children under five years die from largely preventable or treatable medical conditions every year, with an overwhelmingly large proportion of deaths occurring in under-developed countries with low vaccination uptake. One of the United Nations’ sustainable development goals (SDG 3) aims to end preventable deaths of newborns and children under five years of age. We focus on Nigeria, where the rate of infant mortality is appalling. We collaborate with HelpMum, a large non-profit organization in Nigeria to design and optimize the allocation of heterogeneous health interventions under uncertainty to increase vaccination uptake, the first such collaboration in Nigeria. Our framework, ADVISER: AI-Driven Vaccination Intervention Optimiser, is based on an integer linear program that seeks to maximize the cumulative probability of successful vaccination. Our optimization formulation is intractable in practice. We present a heuristic approach that enables us to solve the problem for real-world use-cases. We also present theoretical bounds for the heuristic method. Finally, we show that the proposed approach outperforms baseline methods in terms of vaccination uptake through experimental evaluation. HelpMum is currently planning a pilot program based on our approach to be deployed in the largest city of Nigeria, which would be the first deployment of an AI-driven vaccination uptake program in the country and hopefully, pave the way for other data-driven programs to improve health outcomes in Nigeria.
- A. Sivagnanam, S. U. Kadir, A. Mukhopadhyay, P. Pugliese, A. Dubey, S. Samaranayake, and A. Laszka, Offline Vehicle Routing Problem with Online Bookings: A Novel Problem Formulation with Applications to Paratransit, in 31st International Joint Conference on Artificial Intelligence (IJCAI), 2022.
@inproceedings{sivagnanam2022offline,
  title     = {Offline Vehicle Routing Problem with Online Bookings: A Novel Problem Formulation with Applications to Paratransit},
  author    = {Sivagnanam, Amutheezan and Kadir, Salah Uddin and Mukhopadhyay, Ayan and Pugliese, Philip and Dubey, Abhishek and Samaranayake, Samitha and Laszka, Aron},
  booktitle = {31st International Joint Conference on Artificial Intelligence (IJCAI)},
  year      = {2022},
  month     = jul,
  preprint  = {https://arxiv.org/abs/2204.11992}
}
Vehicle routing problems (VRPs) can be divided into two major categories: offline VRPs, which consider a given set of trip requests to be served, and online VRPs, which consider requests as they arrive in real-time. Based on discussions with public transit agencies, we identify a real-world problem that is not addressed by existing formulations: booking trips with flexible pickup windows (e.g., 3 hours) in advance (e.g., the day before) and confirming tight pickup windows (e.g., 30 minutes) at the time of booking. Such a service model is often required in paratransit service settings, where passengers typically book trips for the next day over the phone. To address this gap between offline and online problems, we introduce a novel formulation, the offline vehicle routing problem with online bookings. This problem is very challenging computationally since it faces the complexity of considering large sets of requests—similar to offline VRPs—but must abide by strict constraints on running time—similar to online VRPs. To solve this problem, we propose a novel computational approach, which combines an anytime algorithm with a learning-based policy for real-time decisions. Based on a paratransit dataset obtained from our partner transit agency, we demonstrate that our novel formulation and computational approach lead to significantly better outcomes in this service setting than existing algorithms.
- S. Ramakrishna, B. Luo, C. B. Kuhn, G. Karsai, and A. Dubey, ANTI-CARLA: An Adversarial Testing Framework for Autonomous Vehicles in CARLA, in 2022 IEEE 25th International Conference on Intelligent Transportation Systems (ITSC), 2022, pp. 2620–2627.
@inproceedings{ramakrishna2022anticarla,
  author    = {Ramakrishna, Shreyas and Luo, Baiting and Kuhn, Christopher B. and Karsai, Gabor and Dubey, Abhishek},
  title     = {{ANTI-CARLA}: An Adversarial Testing Framework for Autonomous Vehicles in {CARLA}},
  booktitle = {2022 IEEE 25th International Conference on Intelligent Transportation Systems (ITSC)},
  year      = {2022},
  pages     = {2620--2627},
  doi       = {10.1109/ITSC55140.2022.9921776}
}
Despite recent advances in autonomous driving systems, accidents such as the fatal Uber crash in 2018 show these systems are still susceptible to edge cases. Such systems need to be thoroughly tested and validated before being deployed in the real world to avoid such events. Testing in open-world scenarios can be difficult, time-consuming, and expensive. These challenges can be addressed by using driving simulators such as CARLA instead. A key part of such tests is adversarial testing, in which the goal is to find scenarios that lead to failures of the given system. While several independent efforts in adversarial testing have been made, a well-established testing framework that enables adaptive stress testing has yet to be made available for CARLA. We therefore propose ANTI-CARLA, an adversarial testing framework in CARLA. The operating conditions in which a given system should be tested are specified in a scenario description language. The framework offers an adversarial search mechanism that searches for operating conditions that will fail the tested system. In this way, ANTI-CARLA extends the CARLA simulator with the capability of performing adversarial testing on any given driving pipeline. We use ANTI-CARLA to test the driving pipeline trained with Learning By Cheating (LBC) approach. The simulation results demonstrate that ANTI-CARLA can effectively and automatically find a range of failure cases despite LBC reaching an accuracy of 100% in the CARLA benchmark.
- G. Pettet, A. Mukhopadhyay, and A. Dubey, Decision Making in Non-Stationary Environments with Policy-Augmented Monte Carlo Tree Search. 2022.
@misc{pettet2022decision,
  author        = {Pettet, Geoffrey and Mukhopadhyay, Ayan and Dubey, Abhishek},
  title         = {Decision Making in Non-Stationary Environments with Policy-Augmented Monte Carlo Tree Search},
  year          = {2022},
  eprint        = {2202.13003},
  archiveprefix = {arXiv},
  primaryclass  = {cs.AI},
  preprint      = {https://arxiv.org/abs/2202.13003}
}
Decision-making under uncertainty (DMU) is present in many important problems. An open challenge is DMU in non-stationary environments, where the dynamics of the environment can change over time. Reinforcement Learning (RL), a popular approach for DMU problems, learns a policy by interacting with a model of the environment offline. Unfortunately, if the environment changes, the policy can become stale and take sub-optimal actions, and relearning the policy for the updated environment takes time and computational effort. An alternative is online planning approaches such as Monte Carlo Tree Search (MCTS), which perform their computation at decision time. Given the current environment, MCTS plans using high-fidelity models to determine promising action trajectories. These models can be updated as soon as environmental changes are detected to immediately incorporate them into decision making. However, MCTS’s convergence can be slow for domains with large state-action spaces. In this paper, we present a novel hybrid decision-making approach that combines the strengths of RL and planning while mitigating their weaknesses. Our approach, called Policy Augmented MCTS (PA-MCTS), integrates a policy’s action-value estimates into MCTS, using the estimates to seed the action trajectories favored by the search. We hypothesize that PA-MCTS will converge more quickly than standard MCTS while making better decisions than the policy can make on its own when faced with nonstationary environments. We test our hypothesis by comparing PA-MCTS with pure MCTS and an RL agent applied to the classical CartPole environment. We find that PA-MCTS can achieve higher cumulative rewards than the policy in isolation under several environmental shifts while converging in significantly fewer iterations than pure MCTS.
- B. Potteiger, A. Dubey, F. Cai, X. Koutsoukos, and Z. Zhang, Moving target defense for the security and resilience of mixed time and event triggered cyber-physical systems, Journal of Systems Architecture, p. 102420, 2022.
@article{POTTEIGER2022102420,
  title    = {Moving target defense for the security and resilience of mixed time and event triggered cyber-physical systems},
  author   = {Potteiger, Bradley and Dubey, Abhishek and Cai, Feiyang and Koutsoukos, Xenofon and Zhang, Zhenkai},
  journal  = {Journal of Systems Architecture},
  year     = {2022},
  pages    = {102420},
  issn     = {1383-7621},
  doi      = {10.1016/j.sysarc.2022.102420},
  url      = {https://www.sciencedirect.com/science/article/pii/S1383762122000212},
  keywords = {Moving target defense, Time triggered, Event triggered, Cyber-physical systems}
}
Memory corruption attacks such as code injection, code reuse, and non-control data attacks have become widely popular for compromising safety-critical Cyber-Physical Systems (CPS). Moving target defense (MTD) techniques such as instruction set randomization (ISR), address space randomization (ASR), and data space randomization (DSR) can be used to protect systems against such attacks. CPS often use time-triggered architectures to guarantee predictable and reliable operation. MTD techniques can cause time delays with unpredictable behavior. To protect CPS against memory corruption attacks, MTD techniques can be implemented in a mixed time and event-triggered architecture that provides capabilities for maintaining safety and availability during an attack. This paper presents a mixed time and event-triggered MTD security approach based on the ARINC 653 architecture that provides predictable and reliable operation during normal operation and rapid detection and reconfiguration upon detection of attacks. We leverage a hardware-in-the-loop testbed and an advanced emergency braking system (AEBS) case study to show the effectiveness of our approach.
- S. Ramakrishna, H. Jin, A. Dubey, and A. Ramamurthy, Automating Pattern Selection for Assurance Case Development for Cyber-Physical Systems, in Computer Safety, Reliability, and Security, Cham, 2022, pp. 82–96.
@inproceedings{ramakrishna2022assurance,
  author    = {Ramakrishna, Shreyas and Jin, Hyunjee and Dubey, Abhishek and Ramamurthy, Arun},
  editor    = {Trapp, Mario and Saglietti, Francesca and Spisl{\"a}nder, Marc and Bitsch, Friedemann},
  title     = {Automating Pattern Selection for Assurance Case Development for Cyber-Physical Systems},
  booktitle = {Computer Safety, Reliability, and Security},
  year      = {2022},
  publisher = {Springer International Publishing},
  address   = {Cham},
  pages     = {82--96},
  isbn      = {978-3-031-14835-4}
}
Assurance Cases are increasingly being required for regulatory acceptance of Cyber-Physical Systems. However, the ever-increasing complexity of these systems has made the assurance cases development complex, labor-intensive and time-consuming. Assurance case fragments called patterns are used to handle the complexity. The state-of-the-art approach has been to manually select generic patterns from online catalogs, instantiate them with system-specific information, and assemble them into an assurance case. While there has been some work in automating the instantiation and assembly, a less researched area is the automation of the pattern selection process, which takes a considerable amount of the assurance case development time. To close this automation gap, we have developed an automated pattern selection workflow that handles the selection problem as a coverage problem, intending to find the smallest set of patterns that can cover the available system artifacts. For this, we utilize the ontology graphs of the system artifacts and the patterns and perform graph analytics. The selected patterns are fed into an external instantiation function to develop an assurance case. Then, they are evaluated for coverage using two coverage metrics. An illustrative autonomous vehicle example is provided, demonstrating the utility of the proposed workflow in developing an assurance case with reduced efforts and time compared to the manual development alternative.
- J. Islam, J. P. Talusan, S. Bhattacharjee, F. Tiausas, S. M. Vazirizade, A. Dubey, K. Yasumoto, and S. Das, Anomaly based Incident Detection in Large Scale Smart Transportation Systems, in ACM/IEEE 13th International Conference on Cyber-Physical Systems (ICCPS), 2022.
@inproceedings{jp2022,
  author    = {Islam, Jaminur and Talusan, Jose Paolo and Bhattacharjee, Shameek and Tiausas, Francis and Vazirizade, Sayyed Mohsen and Dubey, Abhishek and Yasumoto, Keiichi and Das, Sajal},
  title     = {Anomaly based Incident Detection in Large Scale Smart Transportation Systems},
  booktitle = {ACM/IEEE 13th International Conference on Cyber-Physical Systems (ICCPS)},
  publisher = {IEEE},
  year      = {2022},
  month     = apr
}
Modern smart cities are focusing on smart transportation solutions to detect and mitigate the effects of various traffic incidents in the city. To materialize this, roadside units and ambient transportation sensors are being deployed to collect vehicular data that provides real-time traffic monitoring. In this paper, we first propose a real-time data-driven anomaly-based traffic incident detection framework for a city-scale smart transportation system. Specifically, we propose an incremental region growing approximation algorithm for optimal Spatio-temporal clustering of road segments and their data; such that road segments are strategically divided into highly correlated clusters. The highly correlated clusters enable identifying a Pythagorean Mean-based invariant as an anomaly detection metric that is highly stable under no incidents but shows a deviation in the presence of incidents. We learn the bounds of the invariants in a robust manner such that anomaly detection can generalize to unseen events, even when learning from real noisy data. We perform extensive experimental validation using mobility data collected from the City of Nashville, Tennessee, and prove that the method can detect incidents within each cluster in real-time.
- M. Wilbur, S. Kadir, Y. Kim, G. Pettet, A. Mukhopadhyay, P. Pugliese, S. Samaranayake, A. Laszka, and A. Dubey, An Online Approach to Solve the Dynamic Vehicle Routing Problem with Stochastic Trip Requests for Paratransit Services, in ACM/IEEE 13th International Conference on Cyber-Physical Systems (ICCPS), 2022.
@inproceedings{wilbur2022,
  author    = {Wilbur, Michael and Kadir, Salah and Kim, Youngseo and Pettet, Geoffrey and Mukhopadhyay, Ayan and Pugliese, Philip and Samaranayake, Samitha and Laszka, Aron and Dubey, Abhishek},
  title     = {An Online Approach to Solve the Dynamic Vehicle Routing Problem with Stochastic Trip Requests for Paratransit Services},
  booktitle = {ACM/IEEE 13th International Conference on Cyber-Physical Systems (ICCPS)},
  publisher = {IEEE},
  year      = {2022},
  month     = apr
}
Many transit agencies operating paratransit and microtransit services have to respond to trip requests that arrive in real-time, which entails solving hard combinatorial and sequential decision-making problems under uncertainty. To avoid decisions that lead to significant inefficiency in the long term, vehicles should be allocated to requests by optimizing a non-myopic utility function or by batching requests together and optimizing a myopic utility function. While the former approach is typically offline, the latter can be performed online. We point out two major issues with such approaches when applied to paratransit services in practice. First, it is difficult to batch paratransit requests together as they are temporally sparse. Second, the environment in which transit agencies operate changes dynamically (e.g., traffic conditions can change over time), causing the estimates that are learned offline to become stale. To address these challenges, we propose a fully online approach to solve the dynamic vehicle routing problem (DVRP) with time windows and stochastic trip requests that is robust to changing environmental dynamics by construction. We focus on scenarios where requests are relatively sparse—our problem is motivated by applications to paratransit services. We formulate DVRP as a Markov decision process and use Monte Carlo tree search to compute near-optimal actions for any given state. Accounting for stochastic requests while optimizing a non-myopic utility function is computationally challenging; indeed, the action space for such a problem is intractably large in practice. To tackle the large action space, we leverage the structure of the problem to design heuristics that can sample promising actions for the tree search. Our experiments using real-world data from our partner agency show that the proposed approach outperforms existing state-of-the-art approaches both in terms of performance and robustness.
- G. Pettet, A. Mukhopadhyay, M. J. Kochenderfer, and A. Dubey, Hierarchical Planning for Dynamic Resource Allocation in Smart and Connected Communities, ACM Trans. Cyber-Phys. Syst., vol. 6, no. 4, Nov. 2022.
@article{pettet2021hierarchical,
  author     = {Pettet, Geoffrey and Mukhopadhyay, Ayan and Kochenderfer, Mykel J. and Dubey, Abhishek},
  title      = {Hierarchical Planning for Dynamic Resource Allocation in Smart and Connected Communities},
  journal    = {ACM Trans. Cyber-Phys. Syst.},
  year       = {2022},
  issue_date = {October 2022},
  month      = nov,
  volume     = {6},
  number     = {4},
  articleno  = {32},
  numpages   = {26},
  issn       = {2378-962X},
  publisher  = {Association for Computing Machinery},
  address    = {New York, NY, USA},
  doi        = {10.1145/3502869},
  url        = {https://doi.org/10.1145/3502869},
  preprint   = {https://arxiv.org/abs/2107.01292},
  keywords   = {planning under uncertainty, semi-Markov decision process, large-scale CPS, hierarchical planning, Dynamic resource allocation}
}
Resource allocation under uncertainty is a classic problem in city-scale cyber-physical systems. Consider emergency response, where urban planners and first responders optimize the location of ambulances to minimize expected response times to incidents such as road accidents. Typically, such problems involve sequential decision making under uncertainty and can be modeled as Markov (or semi-Markov) decision processes. The goal of the decision maker is to learn a mapping from states to actions that can maximize expected rewards. While online, offline, and decentralized approaches have been proposed to tackle such problems, scalability remains a challenge for real world use cases. We present a general approach to hierarchical planning that leverages structure in city level CPS problems for resource allocation. We use emergency response as a case study and show how a large resource allocation problem can be split into smaller problems. We then use Monte Carlo planning for solving the smaller problems and managing the interaction between them. Finally, we use data from Nashville, Tennessee, a major metropolitan area in the United States, to validate our approach. Our experiments show that the proposed approach outperforms state-of-the-art approaches used in the field of emergency response.
- A. Mukhopadhyay, G. Pettet, S. M. Vazirizade, D. Lu, A. Jaimes, S. E. Said, H. Baroud, Y. Vorobeychik, M. Kochenderfer, and A. Dubey, A Review of Incident Prediction, Resource Allocation, and Dispatch Models for Emergency Management, Accident Analysis & Prevention, vol. 165, p. 106501, 2022.
@article{mukhopadhyay2021review,
  title    = {A Review of Incident Prediction, Resource Allocation, and Dispatch Models for Emergency Management},
  author   = {Mukhopadhyay, Ayan and Pettet, Geoffrey and Vazirizade, Sayyed Mohsen and Lu, Di and Jaimes, Alejandro and Said, Said El and Baroud, Hiba and Vorobeychik, Yevgeniy and Kochenderfer, Mykel and Dubey, Abhishek},
  journal  = {Accident Analysis \& Prevention},
  year     = {2022},
  volume   = {165},
  pages    = {106501},
  issn     = {0001-4575},
  doi      = {10.1016/j.aap.2021.106501},
  url      = {https://www.sciencedirect.com/science/article/pii/S0001457521005327},
  preprint = {https://arxiv.org/abs/2006.04200},
  keywords = {Resource allocation for smart cities, Incident prediction, Computer aided dispatch, Decision making under uncertainty, Accident analysis, Emergency response}
}
In the last fifty years, researchers have developed statistical, data-driven, analytical, and algorithmic approaches for designing and improving emergency response management (ERM) systems. The problem has been noted as inherently difficult and constitutes spatio-temporal decision making under uncertainty, which has been addressed in the literature with varying assumptions and approaches. This survey provides a detailed review of these approaches, focusing on the key challenges and issues regarding four sub-processes: (a) incident prediction, (b) incident detection, (c) resource allocation, and (c) computer-aided dispatch for emergency response. We highlight the strengths and weaknesses of prior work in this domain and explore the similarities and differences between different modeling paradigms. We conclude by illustrating open challenges and opportunities for future research in this complex domain.
- S. Ramakrishna, Z. Rahiminasab, G. Karsai, A. Easwaran, and A. Dubey, Efficient Out-of-Distribution Detection Using Latent Space of β-VAE for Cyber-Physical Systems, ACM Trans. Cyber-Phys. Syst., vol. 6, no. 2, Apr. 2022.
@article{ramakrishna2022tcps,
  author     = {Ramakrishna, Shreyas and Rahiminasab, Zahra and Karsai, Gabor and Easwaran, Arvind and Dubey, Abhishek},
  title      = {Efficient Out-of-Distribution Detection Using Latent Space of {$\beta$}-{VAE} for {Cyber-Physical} Systems},
  journal    = {ACM Trans. Cyber-Phys. Syst.},
  year       = {2022},
  issue_date = {April 2022},
  month      = apr,
  volume     = {6},
  number     = {2},
  articleno  = {15},
  numpages   = {34},
  issn       = {2378-962X},
  publisher  = {Association for Computing Machinery},
  address    = {New York, NY, USA},
  doi        = {10.1145/3491243},
  url        = {https://doi.org/10.1145/3491243},
  preprint   = {https://arxiv.org/abs/2108.11800},
  keywords   = {{$\beta$}-variational autoencoders, out-of-distribution, Cyber-physical systems, mutual information gap, disentanglement, deep neural networks}
}
Deep Neural Networks are actively being used in the design of autonomous Cyber-Physical Systems (CPSs). The advantage of these models is their ability to handle high-dimensional state-space and learn compact surrogate representations of the operational state spaces. However, the problem is that the sampled observations used for training the model may never cover the entire state space of the physical environment, and as a result, the system will likely operate in conditions that do not belong to the training distribution. These conditions that do not belong to training distribution are referred to as Out-of-Distribution (OOD). Detecting OOD conditions at runtime is critical for the safety of CPS. In addition, it is also desirable to identify the context or the feature(s) that are the source of OOD to select an appropriate control action to mitigate the consequences that may arise because of the OOD condition. In this article, we study this problem as a multi-labeled time series OOD detection problem over images, where the OOD is defined both sequentially across short time windows (change points) as well as across the training data distribution. A common approach to solving this problem is the use of multi-chained one-class classifiers. However, this approach is expensive for CPSs that have limited computational resources and require short inference times. Our contribution is an approach to design and train a single β-Variational Autoencoder detector with a partially disentangled latent space sensitive to variations in image features. We use the feature sensitive latent variables in the latent space to detect OOD images and identify the most likely feature(s) responsible for the OOD. We demonstrate our approach using an Autonomous Vehicle in the CARLA simulator and a real-world automotive dataset called nuImages.
- S. Ramakrishna, B. Luo, Y. Barve, G. Karsai, and A. Dubey, Risk-Aware Scene Sampling for Dynamic Assurance of Autonomous Systems, in 2022 IEEE International Conference on Assured Autonomy (ICAA) (ICAA’22), virtual, Puerto Rico, 2022.
@inproceedings{ICAA2022,
  author    = {Ramakrishna, Shreyas and Luo, Baiting and Barve, Yogesh and Karsai, Gabor and Dubey, Abhishek},
  title     = {{Risk-Aware} Scene Sampling for Dynamic Assurance of Autonomous Systems},
  booktitle = {2022 IEEE International Conference on Assured Autonomy (ICAA) (ICAA'22)},
  address   = {virtual, Puerto Rico},
  year      = {2022},
  month     = mar,
  days      = {22},
  tag       = {ai4cps},
  keywords  = {Cyber-Physical Systems; Dynamic Assurance; Dynamic Risk; High-Risk Scenes; Bow-Tie Diagram; Hazards}
}
Autonomous Cyber-Physical Systems must often operate under uncertainties like sensor degradation and distribution shifts in the operating environment, thus increasing operational risk. Dynamic Assurance of these systems requires augmenting runtime safety components like out-of-distribution detectors and risk estimators. Designing these safety components requires labeled data from failure conditions and risky corner cases that fail the system. However, collecting real-world data of these high-risk scenes can be expensive and sometimes not possible. To address this, there are several scenario description languages with sampling capability for generating synthetic data from simulators to replicate the scenes that are not possible in the real world. Most often, simple search-based techniques like random search and grid search are used as samplers. But we point out three limitations in using these techniques. First, they are passive samplers, which do not use the feedback of previous results in the sampling process. Second, the variables to be sampled may have constraints that need to be applied. Third, they do not balance the tradeoff between exploration and exploitation, which we hypothesize is needed for better coverage of the search space. We present a scene generation workflow with two samplers called Random Neighborhood Search (RNS) and Guided Bayesian Optimization (GBO). These samplers extend the conventional random search and Bayesian Optimization search with the limitation points. We demonstrate our approach using an Autonomous Vehicle case study in CARLA simulation. To evaluate our samplers, we compared them against the baselines of random search, grid search, and Halton sequence search.
2021
- Y. Senarath, A. Mukhopadhyay, S. Vazirizade, H. Purohit, S. Nannapaneni, and A. Dubey, Practitioner-Centric Approach for Early Incident Detection Using Crowdsourced Data for Emergency Services, in 21st IEEE International Conference on Data Mining (ICDM 2021), 2021.
@inproceedings{ICDM_2021,
  author    = {Senarath, Yasas and Mukhopadhyay, Ayan and Vazirizade, Sayyed and Purohit, Hemant and Nannapaneni, Saideep and Dubey, Abhishek},
  title     = {Practitioner-Centric Approach for Early Incident Detection Using Crowdsourced Data for Emergency Services},
  booktitle = {21st IEEE International Conference on Data Mining (ICDM 2021)},
  year      = {2021},
  tag       = {ai4cps,incident}
}
Emergency response is highly dependent on the time of incident reporting. Unfortunately, the traditional approach to receiving incident reports (e.g., calling 911 in the USA) has time delays. Crowdsourcing platforms such as Waze provide an opportunity for early identification of incidents. However, detecting incidents from crowdsourced data streams is difficult due to the challenges of noise and uncertainty associated with such data. Further, simply optimizing over detection accuracy can compromise spatial-temporal localization of the inference, thereby making such approaches infeasible for real-world deployment. This paper presents a novel problem formulation and solution approach for practitioner-centered incident detection using crowdsourced data by using emergency response management as a case-study. The proposed approach CROME (Crowdsourced Multi-objective Event Detection) quantifies the relationship between the performance metrics of incident classification (e.g., F1 score) and the requirements of model practitioners (e.g., 1 km. radius for incident detection). First, we show how crowdsourced reports, ground-truth historical data, and other relevant determinants such as traffic and weather can be used together in a Convolutional Neural Network (CNN) architecture for early detection of emergency incidents. Then, we use a Pareto optimization-based approach to optimize the output of the CNN in tandem with practitioner-centric parameters to balance detection accuracy and spatial-temporal localization. Finally, we demonstrate the applicability of this approach using crowdsourced data from Waze and traffic accident reports from Nashville, TN, USA. Our experiments demonstrate that the proposed approach outperforms existing approaches in incident detection while simultaneously optimizing the needs for realworld deployment and usability.
- H. Baroud, A. Dubey, S. M. Vazirizade, and others, Collaborative Research Project to Coordinate the Data from the CRASH Predictive Analytics Program Between TDOT and TDOSHS, Tennessee. Department of Transportation, 2021.
@techreport{dot_61069_DS1,
  author      = {Baroud, Hiba and Dubey, Abhishek and Vazirizade, Sayyed Mohsen and others},
  title       = {Collaborative Research Project to Coordinate the Data from the CRASH Predictive Analytics Program Between TDOT and TDOSHS},
  institution = {Tennessee. Department of Transportation},
  year        = {2021}
}
- S. Singla, A. Mukhopadhyay, M. Wilbur, T. Diao, V. Gajjewar, A. Eldawy, M. Kochenderfer, R. Shachter, and A. Dubey, WildfireDB: An Open-Source Dataset Connecting Wildfire Spread with Relevant Determinants, in 35th Conference on Neural Information Processing Systems (NeurIPS 2021) Track on Datasets and Benchmarks, 2021.
@inproceedings{wildfiredb2021,
  author    = {Singla, Samriddhi and Mukhopadhyay, Ayan and Wilbur, Michael and Diao, Tina and Gajjewar, Vinayak and Eldawy, Ahmed and Kochenderfer, Mykel and Shachter, Ross and Dubey, Abhishek},
  title     = {{WildfireDB}: An Open-Source Dataset Connecting Wildfire Spread with Relevant Determinants},
  booktitle = {35th Conference on Neural Information Processing Systems (NeurIPS 2021) Track on Datasets and Benchmarks},
  year      = {2021},
  tag       = {ai4cps,incident}
}
Modeling fire spread is critical in fire risk management. Creating data-driven models to forecast spread remains challenging due to the lack of comprehensive data sources that relate fires with relevant covariates. We present the first comprehensive and open-source dataset that relates historical fire data with relevant covariates such as weather, vegetation, and topography. Our dataset, named WildfireDB, contains over 17 million data points that capture how fires spread in the continental USA in the last decade. In this paper, we describe the algorithmic approach used to create and integrate the data, describe the dataset, and present benchmark results regarding data-driven models that can be learned to forecast the spread of wildfires.
- M. Wilbur, A. Mukhopadhyay, S. Vazirizade, P. Pugliese, A. Laszka, and A. Dubey, Energy and Emission Prediction for Mixed-Vehicle Transit Fleets Using Multi-Task and Inductive Transfer Learning, in Joint European Conference on Machine Learning and Knowledge Discovery in Databases, 2021.
@inproceedings{ecml2021,
  author    = {Wilbur, Michael and Mukhopadhyay, Ayan and Vazirizade, Sayyed and Pugliese, Philip and Laszka, Aron and Dubey, Abhishek},
  title     = {Energy and Emission Prediction for Mixed-Vehicle Transit Fleets Using Multi-Task and Inductive Transfer Learning},
  booktitle = {Joint European Conference on Machine Learning and Knowledge Discovery in Databases},
  year      = {2021},
  tag       = {ai4cps,transit}
}
Public transit agencies are focused on making their fixed-line bus systems more energy efficient by introducing electric (EV) and hybrid (HV) vehicles to their fleets. However, because of the high upfront cost of these vehicles, most agencies are tasked with managing a mixed-fleet of internal combustion vehicles (ICEVs), EVs, and HVs. In managing mixed-fleets, agencies require accurate predictions of energy use for optimizing the assignment of vehicles to transit routes, scheduling charging, and ensuring that emission standards are met. The current state-of-the-art is to develop separate neural network models to predict energy consumption for each vehicle class. Although different vehicle classes’ energy consumption depends on a varied set of covariates, we hypothesize that there are broader generalizable patterns that govern energy consumption and emissions. In this paper, we seek to extract these patterns to aid learning to address two problems faced by transit agencies. First, in the case of a transit agency which operates many ICEVs, HVs, and EVs, we use multi-task learning (MTL) to improve accuracy of forecasting energy consumption. Second, in the case where there is a significant variation in vehicles in each category, we use inductive transfer learning (ITL) to improve predictive accuracy for vehicle class models with insufficient data. As this work is to be deployed by our partner agency, we also provide an online pipeline for joining the various sensor streams for fixed-line transit energy prediction. We find that our approach outperforms vehicle-specific baselines in both the MTL and ITL settings.
- R. Sun, R. Gui, H. Neema, Y. Chen, J. Ugirumurera, J. Severino, P. Pugliese, A. Laszka, and A. Dubey, Transit-Gym: A Simulation and Evaluation Engine for Analysis of Bus Transit Systems, in Preprint at Arxiv. Accepted at IEEE SmartComp., 2021.
@inproceedings{sun2021transitgym,
  author        = {Sun, Ruixiao and Gui, Rongze and Neema, Himanshu and Chen, Yuche and Ugirumurera, Juliette and Severino, Joseph and Pugliese, Philip and Laszka, Aron and Dubey, Abhishek},
  title         = {Transit-Gym: A Simulation and Evaluation Engine for Analysis of Bus Transit Systems},
  booktitle     = {Preprint at Arxiv. Accepted at IEEE SmartComp.},
  year          = {2021},
  eprint        = {2107.00105},
  archiveprefix = {arXiv},
  primaryclass  = {eess.SY},
  preprint      = {https://arxiv.org/abs/2107.00105},
  tag           = {transit}
}
Public transit is central to cultivating equitable communities. Meanwhile, the novel coronavirus disease COVID-19 and associated social restrictions have radically transformed ridership behavior in urban areas. Perhaps the most concerning aspect of the COVID-19 pandemic is that low-income and historically marginalized groups are not only the most susceptible to economic shifts but are also most reliant on public transportation. As revenue decreases, transit agencies are tasked with providing adequate public transportation services in an increasingly hostile economic environment. Transit agencies therefore have two primary concerns. First, how has COVID-19 impacted ridership and what is the new post-COVID normal? Second, how has ridership varied spatio-temporally and between socio-economic groups? In this work we provide a data-driven analysis of COVID-19’s effect on public transit operations and identify temporal variation in ridership change. We then combine spatial distributions of ridership decline with local economic data to identify variation between socio-economic groups. We find that in Nashville and Chattanooga, TN, fixed-line bus ridership dropped by 66.9% and 65.1% from 2019 baselines before stabilizing at 48.4% and 42.8% declines respectively. The largest declines were during morning and evening commute time. Additionally, there was a significant difference in ridership decline between the highest-income areas and lowest-income areas (77% vs 58%) in Nashville.
- S. M. Vazirizade, A. Mukhopadhyay, G. Pettet, S. E. Said, H. Baroud, and A. Dubey, Learning Incident Prediction Models Over Large Geographical Areas for Emergency Response Systems, 2021.
@misc{vazirizade2021learning,
  title         = {Learning Incident Prediction Models Over Large Geographical Areas for Emergency Response Systems},
  author        = {Vazirizade, Sayyed Mohsen and Mukhopadhyay, Ayan and Pettet, Geoffrey and Said, Said El and Baroud, Hiba and Dubey, Abhishek},
  year          = {2021},
  eprint        = {2106.08307},
  archiveprefix = {arXiv},
  primaryclass  = {cs.LG},
  preprint      = {https://arxiv.org/abs/2106.08307},
  tag           = {ai4cps,incident}
}
Principled decision making in emergency response management necessitates the use of statistical models that predict the spatial-temporal likelihood of incident occurrence. These statistical models are then used for proactive stationing which allocates first responders across the spatial area in order to reduce overall response time. Traditional methods that simply aggregate past incidents over space and time fail to make useful short-term predictions when the spatial region is large and focused on fine-grained spatial entities like interstate highway networks. This is partially due to the sparsity of incidents with respect to the area in consideration. Further, accidents are affected by several covariates, and collecting, cleaning, and managing multiple streams of data from various sources is challenging for large spatial areas. In this paper, we highlight how this problem is being solved for the state of Tennessee, a state in the USA with a total area of over 100,000 sq. km. Our pipeline, based on a combination of synthetic resampling, non-spatial clustering, and learning from data can efficiently forecast the spatial and temporal dynamics of accident occurrence, even under sparse conditions. In the paper, we describe our pipeline that uses data related to roadway geometry, weather, historical accidents, and real-time traffic congestion to aid accident forecasting. To understand how our forecasting model can affect allocation and dispatch, we improve upon a classical resource allocation approach. Experimental results show that our approach can significantly reduce response times in the field in comparison with current approaches followed by first responders.
- R. Sandoval, C. Van Geffen, M. Wilbur, B. Hall, A. Dubey, W. Barbour, and D. B. Work, Data driven methods for effective micromobility parking, Transportation Research Interdisciplinary Perspectives, 2021.
@article{sandoval2021data,
  author  = {Sandoval, Ricardo and Van Geffen, Caleb and Wilbur, Michael and Hall, Brandon and Dubey, Abhishek and Barbour, William and Work, Daniel B.},
  title   = {Data driven methods for effective micromobility parking},
  journal = {Transportation Research Interdisciplinary Perspectives},
  year    = {2021},
  tag     = {transit}
}
- R. Sun, Y. Chen, A. Dubey, and P. Pugliese, Hybrid electric buses fuel consumption prediction based on real-world driving data, Transportation Research Part D: Transport and Environment, vol. 91, p. 102637, 2021.
@article{SUN2021102637,
  author   = {Sun, Ruixiao and Chen, Yuche and Dubey, Abhishek and Pugliese, Philip},
  title    = {Hybrid electric buses fuel consumption prediction based on real-world driving data},
  journal  = {Transportation Research Part D: Transport and Environment},
  volume   = {91},
  pages    = {102637},
  year     = {2021},
  issn     = {1361-9209},
  doi      = {10.1016/j.trd.2020.102637},
  url      = {https://www.sciencedirect.com/science/article/pii/S1361920920308221},
  tag      = {transit},
  keywords = {Hybrid diesel transit bus, Artificial neural network, Fuel consumption prediction}
}
Estimating fuel consumption by hybrid diesel buses is challenging due to its diversified operations and driving cycles. In this study, long-term transit bus monitoring data were utilized to empirically compare fuel consumption of diesel and hybrid buses under various driving conditions. Artificial neural network (ANN) based high-fidelity microscopic (1 Hz) and mesoscopic (5–60 min) fuel consumption models were developed for hybrid buses. The microscopic model contained 1 Hz driving, grade, and environment variables. The mesoscopic model aggregated 1 Hz data into 5 to 60-minute traffic pattern factors and predicted average fuel consumption over its duration. The prediction results show mean absolute percentage errors of 1–2% for microscopic models and 5–8% for mesoscopic models. The data were partitioned by different driving speeds, vehicle engine demand, and road grade to investigate their impacts on prediction performance.
- A. Chhokra, C. Barreto, A. Dubey, G. Karsai, and X. Koutsoukos, Power-Attack: A comprehensive tool-chain for modeling and simulating attacks in power systems, in 9th Workshop on Modeling and Simulation of Cyber-Physical Energy Systems, MSCPES@CPSIoTWeek, 2021.
@inproceedings{ajay2021powerattack,
  author    = {Chhokra, Ajay and Barreto, Carlos and Dubey, Abhishek and Karsai, Gabor and Koutsoukos, Xenofon},
  title     = {Power-Attack: A comprehensive tool-chain for modeling and simulating attacks in power systems},
  booktitle = {9th Workshop on Modeling and Simulation of Cyber-Physical Energy Systems, MSCPES@CPSIoTWeek},
  year      = {2021},
  category  = {workshop},
  keywords  = {power grid},
  project   = {cps-reliability},
  tag       = {platform,power}
}
Due to the increased deployment of novel communication, control and protection functions, the grid has become vulnerable to a variety of attacks. Designing robust machine learning based attack detection and mitigation algorithms require large amounts of data that rely heavily on a representative environment, where different attacks can be simulated. This paper presents a comprehensive tool-chain for modeling and simulating attacks in power systems. The paper makes the following contributions, first, we present a probabilistic domain specific language to define multiple attack scenarios and simulation configuration parameters. Secondly, we extend the PyPower-dynamics simulator with protection system components to simulate cyber attacks in control and protection layers of power system. In the end, we demonstrate multiple attack scenarios with a case study based on IEEE 39 bus system.
- H. M. Mustafa, M. Bariya, K. S. Sajan, A. Chhokra, A. Srivastava, A. Dubey, A. von Meier, and G. Biswas, RT-METER: A Real-Time, Multi-Layer Cyber–Power Testbed for Resiliency Analysis, in 9th Workshop on Modeling and Simulation of Cyber-Physical Energy Systems, MSCPES@CPSIoTWeek, 2021.
@inproceedings{rtmeter2021,
  author    = {Mustafa, Hussain M. and Bariya, Mohini and Sajan, K.S. and Chhokra, Ajay and Srivastava, Anurag and Dubey, Abhishek and von Meier, Alexandra and Biswas, Gautam},
  title     = {RT-METER: A Real-Time, Multi-Layer Cyber–Power Testbed for Resiliency Analysis},
  booktitle = {9th Workshop on Modeling and Simulation of Cyber-Physical Energy Systems, MSCPES@CPSIoTWeek},
  year      = {2021},
  category  = {workshop},
  keywords  = {power grid},
  project   = {cps-reliability},
  tag       = {platform,power}
}
In this work, we present a Real-Time, Multi-layer cybEr–power TestbEd for the Resiliency analysis (RT-METER) to support power grid operation and planning. The developed cyber-power testbed provides a mechanism for end-to-end validation of advanced tools for cyber-power grid monitoring, control, and planning. By integrating a host of features across three core layers—physical power system, communication network, and monitoring/control center with advanced tools—the testbed allows for the simulation of rich and varied cyber-power grid scenarios and the generation of realistic sensor, system, and network data. Developing advanced tools to assist operators during complex and challenging scenarios is essential for the successful operation of the future grid. We detail a suite of algorithmic tools validated using the developed testbed and realistic grid data.
- S. Basak, S. Sengupta, S.-J. Wen, and A. Dubey, Spatio-temporal AI inference engine for estimating hard disk reliability, Pervasive and Mobile Computing, vol. 70, p. 101283, 2021.
@article{BASAK2021101283,
  author   = {Basak, Sanchita and Sengupta, Saptarshi and Wen, Shi-Jie and Dubey, Abhishek},
  title    = {Spatio-temporal AI inference engine for estimating hard disk reliability},
  journal  = {Pervasive and Mobile Computing},
  volume   = {70},
  pages    = {101283},
  year     = {2021},
  issn     = {1574-1192},
  doi      = {10.1016/j.pmcj.2020.101283},
  url      = {http://www.sciencedirect.com/science/article/pii/S1574119220301231},
  tag      = {ai4cps, platform},
  keywords = {Remaining useful life, Long short term memory, Prognostics, Predictive health maintenance, Hierarchical clustering}
}
This paper focuses on building a spatio-temporal AI inference engine for estimating hard disk reliability. Most electronic systems such as hard disks routinely collect such reliability parameters in the field to monitor the health of the system. Changes in parameters as a function of time are monitored and any observed changes are compared with the known failure signatures. If the trajectory of the measured data matches that of a failure signature, operators are alerted to take corrective action. However, the interest of the operators lies in being able to identify the failures before they occur. The state of the art methodology including our prior work is to train machine learning models on temporal sequence data capturing the variations across multiple features and using it to predict the remaining useful life of the devices. However, as we show in this paper temporal prediction capability alone is not sufficient and can lead to low precision and the uncertainty around the prediction is very large. This is primarily due to the non-uniform progression of feature patterns over time. Our hypothesis is that the accuracy can be improved if we combine the temporal prediction methods with a spatial analysis that compares the value of key SMART features of the devices across similar model in a fixed time window (unlike the temporal method which uses the data from a single device and a much larger historical window). In this paper, we first describe both temporal and spatial approaches, describe the methods to select various hyperparameters, and then show a workflow to combine these two methodologies and provide comparative results. Our results illustrate that the average precision of temporal methods using long-short temporal memory networks to predict impending failures in the next ten days was 84 percent. To improve precision, we use the set of disks identified as potential failures and start applying spatial anomaly detection methods on those disks. 
This helps us remove the false positives from the temporal prediction results and provide a tighter bound on the set of disks with impending failure.
- Y. Chen, G. Wu, R. Sun, A. Dubey, A. Laszka, and P. Pugliese, A Review and Outlook of Energy Consumption Estimation Models for Electric Vehicles, Society of Automotive Engineers (SAE) International Journal of Sustainable Transportation, Energy, Environment, & Policy, 2021.
@article{yuchesae2021,
  author  = {Chen, Yuche and Wu, Guoyuan and Sun, Ruixiao and Dubey, Abhishek and Laszka, Aron and Pugliese, Philip},
  title   = {A Review and Outlook of Energy Consumption Estimation Models for Electric Vehicles},
  journal = {Society of Automotive Engineers (SAE) International Journal of Sustainable Transportation, Energy, Environment, \& Policy},
  year    = {2021},
  tag     = {transit}
}
Electric vehicles (EVs) are critical to the transition to a low-carbon transportation system. The successful adoption of EVs heavily depends on energy consumption models that can accurately and reliably estimate electricity consumption. This paper reviews the state of the art of EV energy consumption models, aiming to provide guidance for future development of EV applications. We summarize influential variables of EV energy consumption in four categories: vehicle component, vehicle dynamics, traffic, and environment-related factors. We classify and discuss EV energy consumption models in terms of modeling scale (microscopic vs. macroscopic) and methodology (data-driven vs. rule-based). Our review shows trends of increasing macroscopic models that can be used to estimate trip-level EV energy consumption and increasing data-driven models that utilize machine learning technologies to estimate EV energy consumption based on a large volume of real-world data. We identify research gaps for EV energy consumption models, including the development of energy estimation models for modes other than personal vehicles (e.g., electric buses, trucks, and nonroad vehicles), energy estimation models that are suitable for applications related to vehicle-to-grid integration, and multiscale energy estimation models as a holistic modeling approach.
- Z. Kang, Y. D. Barve, S. Bao, A. Dubey, and A. Gokhale, Poster Abstract: Configuration Tuning for Distributed IoT Message Systems Using Deep Reinforcement Learning, in International Conference on Internet-of-Things Design and Implementation (IoTDI), 2021.
@inproceedings{iotdi21kang,
  author    = {Kang, Zhuangwei and Barve, Yogesh D. and Bao, Shunxing and Dubey, Abhishek and Gokhale, Aniruddha},
  title     = {Poster Abstract: Configuration Tuning for Distributed IoT Message Systems Using Deep Reinforcement Learning},
  booktitle = {International Conference on Internet-of-Things Design and Implementation (IoTDI)},
  year      = {2021},
  tag       = {platform}
}
Distributed messaging systems (DMSs) are often equipped with a large number of configurable parameters that enable users to define application run-time behaviors and information dissemination rules. However, the resulting high-dimensional configuration space makes it difficult for users to determine the best configuration that can maximize application QoS under a variety of operational conditions. This poster introduces a novel, automatic knob tuning framework called DMSConfig. DMSConfig explores the configuration space by interacting with a data-driven environment prediction model (a DMS simulator), which eliminates the prohibitive cost of conducting online interactions with the production environment. DMSConfig employs the deep deterministic policy gradient (DDPG) method and a custom reward mechanism to learn and make configuration decisions based on predicted DMS states and performance. Our initial experimental results, conducted on a single-broker Kafka cluster, show that DMSConfig significantly outperforms the default configuration and has better adaptability to CPU and bandwidth-limited environments. We also confirm that DMSConfig produces fewer violations of latency constraints than three prevalent parameter tuning tools.
- F. Tiausas, J. P. Talusan, Y. Ishimaki, H. Yamana, H. Yamaguchi, S. Bhattacharjee, A. Dubey, K. Yasumoto, and S. K. Das, User-centric Distributed Route Planning in Smart Cities based on Multi-objective Optimization, in 2021 IEEE International Conference on Smart Computing (SMARTCOMP), 2021, pp. 77–82.
@inproceedings{jp21,
  author    = {Tiausas, Francis and Talusan, Jose Paolo and Ishimaki, Yu and Yamana, Hayato and Yamaguchi, Hirozumi and Bhattacharjee, Shameek and Dubey, Abhishek and Yasumoto, Keiichi and Das, Sajal K.},
  title     = {User-centric Distributed Route Planning in Smart Cities based on Multi-objective Optimization},
  booktitle = {2021 IEEE International Conference on Smart Computing (SMARTCOMP)},
  year      = {2021},
  pages     = {77--82},
  doi       = {10.1109/SMARTCOMP52413.2021.00031},
  tag       = {transit}
}
The realization of edge-based cyber-physical systems (CPS) poses important challenges in terms of performance, robustness, security, etc. This paper examines a novel approach to providing a user-centric adaptive route planning service over a network of Road Side Units (RSUs) in smart cities. The key idea is to adaptively select routing task parameters such as privacy-cloaked area sizes and number of retained intersections to balance processing time, privacy protection level, and route accuracy for privacy-augmented distributed route search while also handling per-query user preferences. This is formulated as an optimization problem with a set of parameters giving the best result for a set of queries given system constraints. Processing Throughput, Privacy Protection, and Travel Time Accuracy were developed as the objective functions to be balanced. A Multi-Objective Genetic Algorithm based technique (NSGA-II) is applied to recover a feasible solution. The performance of this approach was then evaluated using traffic data from Osaka, Japan. Results show good performance of the approach in balancing the aforementioned objectives based on user preferences.
- M. Burruss, S. Ramakrishna, and A. Dubey, Deep-RBF Networks for Anomaly Detection in Automotive Cyber-Physical Systems, in 2021 IEEE International Conference on Smart Computing (SMARTCOMP), 2021, pp. 55–60.
@inproceedings{matthew21,
  author    = {Burruss, Matthew and Ramakrishna, Shreyas and Dubey, Abhishek},
  title     = {Deep-RBF Networks for Anomaly Detection in Automotive Cyber-Physical Systems},
  booktitle = {2021 IEEE International Conference on Smart Computing (SMARTCOMP)},
  year      = {2021},
  pages     = {55--60},
  doi       = {10.1109/SMARTCOMP52413.2021.00028},
  tag       = {ai4cps}
}
Deep Neural Networks (DNNs) are popularly used for implementing autonomy related tasks in automotive Cyber-Physical Systems (CPSs). However, these networks have been shown to make erroneous predictions to anomalous inputs, which manifests either due to Out-of-Distribution (OOD) data or adversarial attacks. To detect these anomalies, a separate DNN called assurance monitor is often trained and used in parallel to the controller DNN, increasing the resource burden and latency. We hypothesize that a single network that can perform controller predictions and anomaly detection is necessary to reduce the resource requirements. Deep-Radial Basis Function (RBF) networks provide a rejection class alongside the class predictions, which can be utilized for detecting anomalies at runtime. However, the use of RBF activation functions limits the applicability of these networks to only classification tasks. In this paper, we show how the deep-RBF network can be used for detecting anomalies in CPS regression tasks such as continuous steering predictions. Further, we design deep-RBF networks using popular DNNs such as NVIDIA DAVE-II, and ResNet20, and then use the resulting rejection class for detecting adversarial attacks such as a physical attack and data poison attack. Finally, we evaluate these attacks and the trained deep-RBF networks using a hardware CPS testbed called DeepNNCar and a real-world German Traffic Sign Benchmark (GTSB) dataset. Our results show that the deep-RBF networks can robustly detect these attacks in a short time without additional resource requirements.
- J. Martinez, A. M. A. Ayman, M. Wilbur, P. Pugliese, D. Freudberg, A. Laszka, and A. Dubey, Predicting Public Transportation Load to Estimate the Probability of Social Distancing Violations, in Proceedings of the Workshop on AI for Urban Mobility at the 35th AAAI Conference on Artificial Intelligence (AAAI-21), 2021.
@inproceedings{juan21,
  title     = {Predicting Public Transportation Load to Estimate the Probability of Social Distancing Violations},
  author    = {Martinez, Juan and Mukhopadhyay, Ayan and Ayman, Afiya and Wilbur, Michael and Pugliese, Philip and Freudberg, Dan and Laszka, Aron and Dubey, Abhishek},
  booktitle = {Proceedings of the Workshop on AI for Urban Mobility at the 35th AAAI Conference on Artificial Intelligence (AAAI-21)},
  year      = {2021},
  tag       = {transit}
}
Public transit agencies struggle to maintain transit accessibility with reduced resources, unreliable ridership data, reduced vehicle capacities due to social distancing, and reduced services due to driver unavailability. In collaboration with transit agencies from two large metropolitan areas in the USA, we are designing novel approaches for addressing the aforementioned challenges by collecting accurate real-time ridership data, providing guidance to commuters, and performing operational optimization for public transit. We estimate ridership data using historical automated passenger counting data, conditional on a set of relevant determinants. Accurate ridership forecasting is essential to optimize the public transit schedule, which is necessary to improve current fixed lines with on-demand transit. Also, passenger crowding has been a problem for public transportation since it deteriorates passengers’ wellbeing and satisfaction. During the COVID-19 pandemic, passenger crowding has gained importance since it represents a risk for social distancing violations. Therefore, we are creating optimization models to ensure that social distancing norms can be adequately followed while ensuring that the total demand for transit is met. We will then use accurate forecasts for operational optimization that includes (a) proactive fixed-line schedule optimization based on predicted demand, (b) dispatch of on-demand micro-transit, prioritizing at-risk populations, and (c) allocation of vehicles to transit and cargo trips, considering exigent vehicle maintenance requirements (i.e., disinfection). Finally, this paper presents some initial results from our project regarding the estimation of ridership in public transit.
- M. Wilbur, P. Pugliese, A. Laszka, and A. Dubey, Efficient Data Management for Intelligent Urban Mobility Systems, in Proceedings of the Workshop on AI for Urban Mobility at the 35th AAAI Conference on Artificial Intelligence (AAAI-21), 2021.
@inproceedings{wilbur21,
  author    = {Wilbur, Michael and Pugliese, Philip and Laszka, Aron and Dubey, Abhishek},
  title     = {Efficient Data Management for Intelligent Urban Mobility Systems},
  booktitle = {Proceedings of the Workshop on AI for Urban Mobility at the 35th AAAI Conference on Artificial Intelligence (AAAI-21)},
  year      = {2021},
  tag       = {ai4cps,transit}
}
Modern intelligent urban mobility applications are underpinned by large-scale, multivariate, spatiotemporal data streams. Working with this data presents unique challenges of data management, processing and presentation that is often overlooked by researchers. Therefore, in this work we present an integrated data management and processing framework for intelligent urban mobility systems currently in use by our partner transit agencies. We discuss the available data sources and outline our cloud-centric data management and stream processing architecture built upon open-source publish-subscribe and NoSQL data stores. We then describe our data-integrity monitoring methods. We then present a set of visualization dashboards designed for our transit agency partners. Lastly, we discuss how these tools are currently being used for AI-driven urban mobility applications that use these tools.
- A. Sivagnanam, A. Ayman, M. Wilbur, P. Pugliese, A. Dubey, and A. Laszka, Minimizing Energy Use of Mixed-Fleet Public Transit for Fixed-Route Service, in Proceedings of the 35th AAAI Conference on Artificial Intelligence (AAAI-21), 2021.
@inproceedings{aaai21,
  author    = {Sivagnanam, Amutheezan and Ayman, Afiya and Wilbur, Michael and Pugliese, Philip and Dubey, Abhishek and Laszka, Aron},
  title     = {Minimizing Energy Use of Mixed-Fleet Public Transit for Fixed-Route Service},
  booktitle = {Proceedings of the 35th AAAI Conference on Artificial Intelligence (AAAI-21)},
  year      = {2021},
  tag       = {ai4cps,transit}
}
Affordable public transit services are crucial for communities since they enable residents to access employment, education, and other services. Unfortunately, transit services that provide wide coverage tend to suffer from relatively low utilization, which results in high fuel usage per passenger per mile, leading to high operating costs and environmental impact. Electric vehicles (EVs) can reduce energy costs and environmental impact, but most public transit agencies have to employ them in combination with conventional, internal-combustion engine vehicles due to the high upfront costs of EVs. To make the best use of such a mixed fleet of vehicles, transit agencies need to optimize route assignments and charging schedules, which presents a challenging problem for large transit networks. We introduce a novel problem formulation to minimize fuel and electricity use by assigning vehicles to transit trips and scheduling them for charging, while serving an existing fixed-route transit schedule. We present an integer program for optimal assignment and scheduling, and we propose polynomial-time heuristic and meta-heuristic algorithms for larger networks. We evaluate our algorithms on the public transit service of Chattanooga, TN using operational data collected from transit vehicles. Our results show that the proposed algorithms are scalable and can reduce energy use and, hence, environmental impact and operational costs. For Chattanooga, the proposed algorithms can save $145,635 in energy costs and 576.7 metric tons of CO2 emission annually.
- G. Pettet, A. Mukhopadhyay, M. Kochenderfer, and A. Dubey, Hierarchical Planning for Resource Allocation in Emergency Response Systems, in Proceedings of the 12th ACM/IEEE International Conference on Cyber-Physical Systems, ICCPS 2021, Nashville, TN, USA, 2021.
@inproceedings{iccps2021,
  author    = {Pettet, Geoffrey and Mukhopadhyay, Ayan and Kochenderfer, Mykel and Dubey, Abhishek},
  title     = {Hierarchical Planning for Resource Allocation in Emergency Response Systems},
  booktitle = {Proceedings of the 12th {ACM/IEEE} International Conference on Cyber-Physical Systems, {ICCPS} 2021, Nashville, TN, USA},
  year      = {2021},
  tag       = {ai4cps,decentralization,incident},
  keywords  = {emergency},
  project   = {smart-cities,smart-emergency-response}
}
A classical problem in city-scale cyber-physical systems (CPS) is resource allocation under uncertainty. Spatial-temporal allocation of resources is optimized to allocate electric scooters across urban areas, place charging stations for vehicles, and design efficient on-demand transit. Typically, such problems are modeled as Markov (or semi-Markov) decision processes. While online, offline, and decentralized methodologies have been used to tackle such problems, none of the approaches scale well for large-scale decision problems. We create a general approach to hierarchical planning that leverages structure in city-level CPS problems to tackle resource allocation under uncertainty. We use emergency response as a case study and show how a large resource allocation problem can be split into smaller problems. We then create a principled framework for solving the smaller problems and tackling the interaction between them. Finally, we use real-world data from a major metropolitan area in the United States to validate our approach. Our experiments show that the proposed approach outperforms state-of-the-art approaches used in the field of emergency response.
- S. Eisele, T. Eghtesad, K. Campanelli, P. Agrawal, A. Laszka, and A. Dubey, Safe and Private Forward-Trading Platform for Transactive Microgrids, ACM Trans. Cyber-Phys. Syst., vol. 5, no. 1, Jan. 2021.
@article{eisele2020Safe,
  author     = {Eisele, Scott and Eghtesad, Taha and Campanelli, Keegan and Agrawal, Prakhar and Laszka, Aron and Dubey, Abhishek},
  title      = {Safe and Private Forward-Trading Platform for Transactive Microgrids},
  year       = {2021},
  issue_date = {January 2021},
  publisher  = {Association for Computing Machinery},
  address    = {New York, NY, USA},
  volume     = {5},
  number     = {1},
  tag        = {decentralization, power},
  issn       = {2378-962X},
  doi        = {10.1145/3403711},
  journal    = {ACM Trans. Cyber-Phys. Syst.},
  month      = jan,
  articleno  = {8},
  numpages   = {29},
  keywords   = {privacy, cyber-physical system, decentralized application, smart contract, transactive energy, Smart grid, distributed ledger, blockchain}
}
Transactive microgrids have emerged as a transformative solution for the problems faced by distribution system operators due to an increase in the use of distributed energy resources and rapid growth in renewable energy generation. Transactive microgrids are tightly coupled cyber and physical systems, which require resilient and robust financial markets where transactions can be submitted and cleared, while ensuring that erroneous or malicious transactions cannot destabilize the grid. In this paper, we introduce TRANSAX, a novel decentralized platform for transactive microgrids. TRANSAX enables participants to trade in an energy futures market, which improves efficiency by finding feasible matches for energy trades, reducing the load on the distribution system operator. TRANSAX provides privacy to participants by anonymizing their trading activity using a distributed mixing service, while also enforcing constraints that limit trading activity based on safety requirements, such as keeping power flow below line capacity. We show that TRANSAX can satisfy the seemingly conflicting requirements of efficiency, safety, and privacy, and we demonstrate its performance using simulation results.
- C. Hartsell, S. Ramakrishna, A. Dubey, D. Stojcsics, N. Mahadevan, and G. Karsai, ReSonAte: A Runtime Risk Assessment Framework for Autonomous Systems, in 16th International Symposium on Software Engineering for Adaptive and Self-Managing Systems, SEAMS 2021, 2021.
@inproceedings{resonate2021,
  author    = {Hartsell, Charles and Ramakrishna, Shreyas and Dubey, Abhishek and Stojcsics, Daniel and Mahadevan, Nag and Karsai, Gabor},
  title     = {ReSonAte: A Runtime Risk Assessment Framework for Autonomous Systems},
  booktitle = {16th {International} Symposium on Software Engineering for Adaptive and Self-Managing Systems, {SEAMS} 2021},
  year      = {2021},
  tag       = {ai4cps},
  category  = {selectiveconference},
  project   = {cps-middleware,cps-reliability}
}
Autonomous Cyber-Physical Systems (CPSs) are often required to handle uncertainties and self-manage the system operation in response to problems and increasing risk in the operating paradigm. This risk may arise due to distribution shifts, environmental context, or failure of software or hardware components. Traditional techniques for risk assessment focus on design-time techniques such as hazard analysis, risk reduction, and assurance cases among others. However, these static, design time techniques do not consider the dynamic contexts and failures the systems face at runtime. We hypothesize that this requires a dynamic assurance approach that computes the likelihood of unsafe conditions or system failures considering the safety requirements, assumptions made at design time, past failures in a given operating context, and the likelihood of system component failures. We introduce the ReSonAte dynamic risk estimation framework for autonomous systems. ReSonAte reasons over Bow-Tie Diagrams (BTDs), which capture information about hazard propagation paths and control strategies. Our innovation is the extension of the BTD formalism with attributes for modeling the conditional relationships with the state of the system and environment. We also describe a technique for estimating these conditional relationships and equations for estimating risk-based on the state of the system and environment. To help with this process, we provide a scenario modeling procedure that can use the prior distributions of the scenes and threat conditions to generate the data required for estimating the conditional relationships. To improve scalability and reduce the amount of data required, this process considers each control strategy in isolation and composes several single-variate distributions into one complete multi-variate distribution for the control strategy in question. 
Lastly, we describe the effectiveness of our approach using two separate autonomous system simulations: CARLA and an unmanned underwater vehicle.
- Y. Zhang, Y. Chen, R. Sun, A. Dubey, and P. Pugliese, A Data Partitioning-based Artificial Neural Network Model to Estimate Real-driving Energy Consumption of Electric Buses, Transportation Research Board 100th Annual Meeting, 2021.
@inproceedings{yucheTRB21,
  author    = {Zhang, Yunteng and Chen, Yuche and Sun, Ruixiao and Dubey, Abhishek and Pugliese, Philip},
  title     = {A Data Partitioning-based Artificial Neural Network Model to Estimate Real-driving Energy Consumption of Electric Buses},
  booktitle = {Transportation Research Board 100th Annual Meeting},
  year      = {2021}
}
Reliable and accurate estimation of electric bus energy consumption is critical for electric bus operation and planning. But energy prediction for electric buses is challenging because of diversified driving cycles of transit services. We propose to establish a data-partition based artificial neural network model to predict energy consumption of electric buses at microscopic level and link level. The purpose of data partitioning is to separate data into charging and discharging modes and then develop most efficient prediction for each mode. We utilize a long-term transit operation and energy consumption monitoring dataset from Chattanooga, TN to train and test our neural network models. The microscopic model estimates energy consumption of electric bus at 1Hz frequency based on instantaneous driving and road environment data. The prediction errors of the micro model range between 8% and 15% on various instantaneous power demand (vehicle specific power) bins. The link-level model is to predict average energy consumption rate based on aggregated traffic pattern parameters derived from instantaneous driving data at second level. The prediction errors of link-level model are around 15% on various average speed, temperature and road grade conditions. The validation results demonstrate our models’ capability to capture impacts of driving, meteorology and road grade on electric bus energy consumption at different temporal and spatial resolution.
2020
- G. Pettet, M. Ghosal, S. Mahserejian, S. Davis, S. Sridhar, A. Dubey, and M. Meyer, A Decision Support Framework for Grid-Aware Electric Bus Charge Scheduling, in 2020 IEEE Power & Energy Society Innovative Smart Grid Technologies Conference (ISGT), 2020.
@inproceedings{pettetisgt2020,
  author       = {Pettet, Geoffrey and Ghosal, Malini and Mahserejian, Shant and Davis, Sarah and Sridhar, Siddharth and Dubey, Abhishek and Meyer, Michael},
  title        = {A Decision Support Framework for Grid-Aware Electric Bus Charge Scheduling},
  booktitle    = {2020 IEEE Power \& Energy Society Innovative Smart Grid Technologies Conference (ISGT)},
  year         = {2020},
  organization = {IEEE},
  tag          = {ai4cps,power}
}
While there are many advantages to electric public transit vehicles, they also pose new challenges for fleet operators. One key challenge is defining a charge scheduling policy that minimizes operating costs and power grid disruptions while maintaining schedule adherence. An uncoordinated policy could result in buses running out of charge before completing their trip, while a grid agnostic policy might incur higher energy costs or cause an adverse impact on the grid’s distribution system. We present a grid aware decision-theoretic framework for electric bus charge scheduling that accounts for energy price and grid load. The framework co-simulates models for traffic (Simulation of Urban Mobility) and the electric grid (GridLAB-D), which are used by a decision-theoretic planner to evaluate charging decisions with regard to their long-term effect on grid reliability and cost. We evaluated the framework on a simulation of Richland, WA’s bus and grid network, and found that it could save over $100k per year on operating costs for the city compared to greedy methods.
- K. Sajan, M. Bariya, S. Basak, A. K. Srivastava, A. Dubey, A. von Meier, and G. Biswas, Realistic Synchrophasor Data Generation for Anomaly Detection and Event Classification, in 8th Workshop on Modeling and Simulation of Cyber-Physical Energy Systems, MSCPES@CPSIoTWeek, 2020.
@inproceedings{basak2020mscpes,
  author    = {Sajan, Kaduvettykunnal and Bariya, Mohini and Basak, Sanchita and Srivastava, Anurag K. and Dubey, Abhishek and von Meier, Alexandra and Biswas, Gautam},
  title     = {Realistic Synchrophasor Data Generation for Anomaly Detection and Event Classification},
  booktitle = {8th Workshop on Modeling and Simulation of Cyber-Physical Energy Systems, MSCPES@CPSIoTWeek},
  year      = {2020},
  category  = {workshop},
  keywords  = {transactive},
  project   = {cps-reliability},
  tag       = {platform,power}
}
The push to automate and digitize the electric grid has led to widespread installation of Phasor Measurement Units (PMUs) for improved real-time wide-area system monitoring and control. Nevertheless, transforming large volumes of high-resolution PMU measurements into actionable insights remains challenging. A central challenge is creating flexible and scalable online anomaly detection in PMU data streams. PMU data can hold multiple types of anomalies arising in the physical system or the cyber system (measurements and communication networks). Increasing the grid situational awareness for noisy measurement data and Bad Data (BD) anomalies has become more and more significant. A number of machine learning, data analytics and physics-based algorithms have been developed for anomaly detection, but need to be validated with realistic synchrophasor data. Access to field data is very challenging due to confidentiality and security reasons. This paper presents a method for generating realistic synchrophasor data for the given synthetic network as well as event and bad data detection and classification algorithms. The developed algorithms include Bayesian and change-point techniques to identify anomalies, a statistical approach for event localization and multi-step clustering approach for event classification. Developed algorithms have been validated with satisfactory results for multiple examples of power system events including faults and load/generator/capacitor variations/switching for an IEEE test system. A set of synchrophasor data will be made available publicly for other researchers.
- Z. Kang, R. Canady, A. Dubey, A. Gokhale, S. Shekhar, and M. Sedlacek, A Study of Publish/Subscribe Middleware Under Different IoT Traffic Conditions, in Proceedings of the 7th Workshop on Middleware and Applications for the Internet of Things, M4IoT@Middleware, 2020.
@inproceedings{m4iot2020,
  author    = {Kang, Zhuangwei and Canady, Robert and Dubey, Abhishek and Gokhale, Aniruddha and Shekhar, Shashank and Sedlacek, Matous},
  title     = {A Study of Publish/Subscribe Middleware Under Different IoT Traffic Conditions},
  booktitle = {Proceedings of the 7th Workshop on Middleware and Applications for the Internet of Things, M4IoT@Middleware},
  tag       = {platform},
  year      = {2020}
}
Publish/Subscribe (pub/sub) semantics are critical for IoT applications due to their loosely coupled nature. Although OMG DDS, MQTT, and ZeroMQ are mature pub/sub solutions used for IoT, prior studies show that their performance varies significantly under different load conditions and QoS configurations, which makes middleware selection and configuration decisions hard. Moreover, the load conditions and role of QoS settings in prior comparison studies are not comprehensive and well-documented. To address these limitations, we (1) propose a set of performance-related properties for pub/sub middleware and investigate their support in DDS, MQTT, and ZeroMQ; (2) perform systematic experiments under three representative, lab-based real-world IoT use cases; and (3) improve DDS performance by applying three of our proposed QoS properties. Empirical results show that DDS has the most thorough QoS support, and more reliable performance in most scenarios. In addition, its Multicast, TurboMode, and AutoThrottle QoS policies can effectively improve DDS performance in terms of throughput and latency.
- A. Ayman, A. Sivagnanam, M. Wilbur, P. Pugliese, A. Dubey, and A. Laszka, Data-Driven Prediction and Optimization of Energy Use for Transit Fleets of Electric and ICE Vehicles, ACM Transactions on Internet Technology, 2020.
@article{aymantoit2020,
  author  = {Ayman, Afiya and Sivagnanam, Amutheezan and Wilbur, Michael and Pugliese, Philip and Dubey, Abhishek and Laszka, Aron},
  title   = {Data-Driven Prediction and Optimization of Energy Use for Transit Fleets of Electric and ICE Vehicles},
  journal = {ACM Transactions on Internet Technology},
  year    = {2020},
  tag     = {ai4cps,transit}
}
Due to the high upfront cost of electric vehicles, many public transit agencies can afford only mixed fleets of internal-combustion and electric vehicles. Optimizing the operation of such mixed fleets is challenging because it requires accurate trip-level predictions of electricity and fuel use as well as efficient algorithms for assigning vehicles to transit routes. We present a novel framework for the data-driven prediction of trip-level energy use for mixed-vehicle transit fleets and for the optimization of vehicle assignments, which we evaluate using data collected from the bus fleet of CARTA, the public transit agency of Chattanooga, TN. We first introduce a data collection, storage, and processing framework for system-level and high-frequency vehicle-level transit data, including domain-specific data cleansing methods. We train and evaluate machine learning models for energy prediction, demonstrating that deep neural networks attain the highest accuracy. Based on these predictions, we formulate the problem of minimizing energy use through assigning vehicles to fixed-route transit trips. We propose an optimal integer program as well as efficient heuristic and meta-heuristic algorithms, demonstrating the scalability and performance of these algorithms numerically using the transit network of CARTA.
- J. P. V. Talusan, M. Wilbur, A. Dubey, and K. Yasumoto, Route Planning Through Distributed Computing by Road Side Units, IEEE Access, vol. 8, pp. 176134–176148, 2020.
@article{wilburaccess2020,
  author  = {{Talusan}, J. P. V. and {Wilbur}, M. and {Dubey}, A. and {Yasumoto}, K.},
  journal = {IEEE Access},
  title   = {Route Planning Through Distributed Computing by Road Side Units},
  year    = {2020},
  tag     = {decentralization,transit},
  volume  = {8},
  pages   = {176134--176148}
}
Cities are embracing data-intensive applications to maximize their constrained transportation networks. Platforms such as Google offer route planning services to mitigate the effect of traffic congestion. These use remote servers that require an Internet connection, which exposes data to increased risk of network failures and latency issues. Edge computing, an alternative to centralized architectures, offers computational power at the edge that could be used for similar services. Road side units (RSU), Internet of Things (IoT) devices within a city, offer an opportunity to offload computation to the edge. To provide an environment for processing on RSUs, we introduce RSU-Edge, a distributed edge computing system for RSUs. We design and develop a decentralized route planning service over RSU-Edge. In the service, the city is divided into grids and assigned an RSU. Users send trip queries to the service and obtain routes. For maximum accuracy, tasks must be allocated to optimal RSUs. However, this overloads RSUs, increasing delay. To reduce delays, tasks may be reallocated from overloaded RSUs to its neighbors. The distance between the optimal and actual allocation causes accuracy loss due to stale data. The problem is identifying the most efficient allocation of tasks such that response constraints are met while maintaining acceptable accuracy. We created the system and present an analysis of a case study in Nashville, Tennessee that shows the effect of our algorithm on route accuracy and query response, given varying neighbor levels. We find that our system can respond to 1000 queries up to 57.17% faster, with only a model accuracy loss of 5.57% to 7.25% compared to using only optimal grid allocation.
- T. Bapty, A. Dubey, and J. Sztipanovits, Cyber-Physical Vulnerability Analysis of IoT Applications Using Multi-Modeling, in Modeling and Design of Secure Internet of Things, John Wiley & Sons, Ltd, 2020, pp. 161–184.
@inbook{baptydubeyjanos2020,
  author    = {Bapty, Ted and Dubey, Abhishek and Sztipanovits, Janos},
  publisher = {John Wiley \& Sons, Ltd},
  isbn      = {9781119593386},
  title     = {Cyber-Physical Vulnerability Analysis of IoT Applications Using Multi-Modeling},
  booktitle = {Modeling and Design of Secure Internet of Things},
  chapter   = {8},
  pages     = {161--184},
  tag       = {platform},
  doi       = {10.1002/9781119593386.ch8},
  url       = {https://onlinelibrary.wiley.com/doi/abs/10.1002/9781119593386.ch8},
  eprint    = {https://onlinelibrary.wiley.com/doi/pdf/10.1002/9781119593386.ch8},
  year      = {2020},
  keywords  = {energy injection, home automation system, IoT-based cyber-physical systems, low-level physical vulnerabilities, multi-modeling approach, vulnerability analysis}
}
Summary Using the Smart Home as a use case, we examine the vulnerabilities in the system across the technologies used in its implementation. A typical smart home will contain a variety of sensors, actuators (e.g. for opening doors), communication links, storage devices, video cameras, network interfaces, and control units. Each of these physical components and subsystems must be secure in order for the overall system to be secure. Typical security analysis focuses on the defined interfaces of the system: network security via firewalls, communications encryption, and authentication at terminals. Unfortunately, many of these devices in the Internet of Things (IoT) space are susceptible to physical attacks via electromagnetic energy, or other sound/heat energy. Properly designed electromagnetic (EM) waveforms can access a range of vulnerabilities, providing unanticipated entry points into the system. In this chapter, we discuss a multi-modeling methodology for analyzing cyber-physical vulnerabilities, assessing the system across geometry, electronic, and behavioral domains. A home automation system is used as an example, showing a methodology for assessing vulnerabilities in hardware. The example exploits the use of EM energy injection. A multi-modeling of the system captures the geometric structure of the hardware with links to behavioral models. Low-energy EM pathways are discovered that may impact system behavior. Computation is minimized by applying analysis of EM effects only at behavior-critical inputs and outputs. The chapter also discusses a methodology for system-level impact analysis. The final conclusion is that susceptibility to physical layer presents many attack surfaces, due to a large number of heterogeneous IoT devices, mandating consideration of the physical dimensions to vulnerability analysis and risk mitigation.
- H. Tu, Y. Du, H. Yu, A. Dubey, S. Lukic, and G. Karsai, Resilient Information Architecture Platform for the Smart Grid: A Novel Open-Source Platform for Microgrid Control, IEEE Transactions on Industrial Electronics, vol. 67, no. 11, pp. 9393–9404, 2020.
@article{riaps2020,
  author  = {{Tu}, H. and {Du}, Y. and {Yu}, H. and {Dubey}, Abhishek and {Lukic}, S. and {Karsai}, G.},
  journal = {IEEE Transactions on Industrial Electronics},
  title   = {Resilient Information Architecture Platform for the Smart Grid: A Novel Open-Source Platform for Microgrid Control},
  year    = {2020},
  volume  = {67},
  tag     = {platform},
  number  = {11},
  pages   = {9393--9404}
}
Microgrids are seen as an effective way to achieve reliable, resilient, and efficient operation of the power distribution system. Core functions of the microgrid control system are defined by the IEEE Standard 2030.7; however, the algorithms that realize these functions are not standardized, and are a topic of research. Furthermore, the corresponding controller hardware, operating system, and communication system to implement these functions vary significantly from one implementation to the next. In this article, we introduce an open-source platform, resilient information architecture platform for the smart grid (RIAPS), ideally suited for implementing and deploying distributed microgrid control algorithms. RIAPS provides a design-time tool suite for development and deployment of distributed microgrid control algorithms. With support from a number of run-time platform services, developed algorithms can be easily implemented and deployed into real microgrids. To demonstrate the unique features of RIAPS, we propose and implement a distributed microgrid secondary control algorithm capable of synchronized and proportional compensation of voltage unbalance using distributed generators. Test results show the effectiveness of the proposed control and the salient features of the RIAPS platform.
- M. Wilbur, A. Ayman, A. Ouyang, V. Poon, R. Kabir, A. Vadali, P. Pugliese, D. Freudberg, A. Laszka, and A. Dubey, Impact of COVID-19 on Public Transit Accessibility and Ridership, in Preprint at Arxiv, 2020.
@misc{wilbur2020impact,
  author        = {Wilbur, Michael and Ayman, Afiya and Ouyang, Anna and Poon, Vincent and Kabir, Riyan and Vadali, Abhiram and Pugliese, Philip and Freudberg, Daniel and Laszka, Aron and Dubey, Abhishek},
  title         = {Impact of COVID-19 on Public Transit Accessibility and Ridership},
  howpublished  = {arXiv preprint},
  year          = {2020},
  tag           = {ai4cps,transit},
  archiveprefix = {arXiv},
  eprint        = {2008.02413},
  preprint      = {https://arxiv.org/abs/2008.02413},
  primaryclass  = {physics.soc-ph}
}
Public transit is central to cultivating equitable communities. Meanwhile, the novel coronavirus disease COVID-19 and associated social restrictions has radically transformed ridership behavior in urban areas. Perhaps the most concerning aspect of the COVID-19 pandemic is that low-income and historically marginalized groups are not only the most susceptible to economic shifts but are also most reliant on public transportation. As revenue decreases, transit agencies are tasked with providing adequate public transportation services in an increasingly hostile economic environment. Transit agencies therefore have two primary concerns. First, how has COVID-19 impacted ridership and what is the new post-COVID normal? Second, how has ridership varied spatio-temporally and between socio-economic groups? In this work we provide a data-driven analysis of COVID-19’s affect on public transit operations and identify temporal variation in ridership change. We then combine spatial distributions of ridership decline with local economic data to identify variation between socio-economic groups. We find that in Nashville and Chattanooga, TN, fixed-line bus ridership dropped by 66.9% and 65.1% from 2019 baselines before stabilizing at 48.4% and 42.8% declines respectively. The largest declines were during morning and evening commute time. Additionally, there was a significant difference in ridership decline between the highest-income areas and lowest-income areas (77% vs 58%) in Nashville.
- S. Nannapaneni, S. Mahadevan, A. Dubey, and Y.-T. T. Lee, Online monitoring and control of a cyber-physical manufacturing process under uncertainty, Journal of Intelligent Manufacturing, pp. 1–16, 2020.
@article{nannapaneni2020online,
  author    = {Nannapaneni, Saideep and Mahadevan, Sankaran and Dubey, Abhishek and Lee, Yung-Tsun Tina},
  title     = {Online monitoring and control of a cyber-physical manufacturing process under uncertainty},
  journal   = {Journal of Intelligent Manufacturing},
  year      = {2020},
  pages     = {1--16},
  tag       = {platform},
  doi       = {10.1007/s10845-020-01609-7},
  publisher = {Springer}
}
Recent technological advancements in computing, sensing and communication have led to the development of cyber-physical manufacturing processes, where a computing subsystem monitors the manufacturing process performance in real-time by analyzing sensor data and implements the necessary control to improve the product quality. This paper develops a predictive control framework where control actions are implemented after predicting the state of the manufacturing process or product quality at a future time using process models. In a cyber-physical manufacturing process, the product quality predictions may be affected by uncertainty sources from the computing subsystem (resource and communication uncertainty), manufacturing process (input uncertainty, process variability and modeling errors), and sensors (measurement uncertainty). In addition, due to the continuous interactions between the computing subsystem and the manufacturing process, these uncertainty sources may aggregate and compound over time. In some cases, some process parameters needed for model predictions may not be precisely known and may need to be derived from real time sensor data. This paper develops a dynamic Bayesian network approach, which enables the aggregation of multiple uncertainty sources, parameter estimation and robust prediction for online control. As the number of process parameters increase, their estimation using sensor data in real-time can be computationally expensive. To facilitate real-time analysis, variance-based global sensitivity analysis is used for dimension reduction. The proposed methodology of online monitoring and control under uncertainty, and dimension reduction, are illustrated for a cyber-physical turning process.
- B. Potteiger, F. Cai, A. Dubey, X. Koutsoukos, and Z. Zhang, Security in Mixed Time and Event Triggered Cyber-Physical Systems using Moving Target Defense, in 2020 IEEE 23rd International Symposium on Real-Time Distributed Computing (ISORC), 2020, pp. 89–97.
@inproceedings{Potteiger2020,
  author    = {{Potteiger}, B. and {Cai}, F. and {Dubey}, A. and {Koutsoukos}, X. and {Zhang}, Z.},
  title     = {Security in Mixed Time and Event Triggered Cyber-Physical Systems using Moving Target Defense},
  booktitle = {2020 IEEE 23rd International Symposium on Real-Time Distributed Computing (ISORC)},
  year      = {2020},
  tag       = {platform},
  pages     = {89--97},
  doi       = {10.1109/ISORC49007.2020.00022}
}
Memory corruption attacks such as code injection, code reuse, and non-control data attacks have become widely popular for compromising safety-critical Cyber-Physical Systems (CPS). Moving target defense (MTD) techniques such as instruction set randomization (ISR), address space randomization (ASR), and data space randomization (DSR) can be used to protect systems against such attacks. CPS often use time-triggered architectures to guarantee predictable and reliable operation. MTD techniques can cause time delays with unpredictable behavior. To protect CPS against memory corruption attacks, MTD techniques can be implemented in a mixed time and event-triggered architecture that provides capabilities for maintaining safety and availability during an attack. This paper presents a mixed time and event-triggered MTD security approach based on the ARINC 653 architecture that provides predictable and reliable operation during normal operation and rapid detection and reconfiguration upon detection of attacks. We leverage a hardware-in-the-loop testbed and an advanced emergency braking system (AEBS) case study to show the effectiveness of our approach.
- W. Barbour, M. Wilbur, R. Sandoval, A. Dubey, and D. B. Work, Streaming computation algorithms for spatiotemporal micromobility service availability, in 2020 IEEE Workshop on Design Automation for CPS and IoT (DESTION), 2020, pp. 32–38.
@inproceedings{barbour2020,
  author    = {{Barbour}, W. and {Wilbur}, M. and {Sandoval}, R. and {Dubey}, A. and {Work}, D. B.},
  title     = {Streaming computation algorithms for spatiotemporal micromobility service availability},
  booktitle = {2020 IEEE Workshop on Design Automation for CPS and IoT (DESTION)},
  year      = {2020},
  tag       = {transit},
  pages     = {32--38},
  doi       = {10.1109/DESTION50928.2020.00012}
}
Location-based services and fleet management are important components of modern smart cities. However, statistical analysis with large-scale spatiotemporal data in real-time is computationally challenging and can necessitate compromise in accuracy or problem simplification. The main contribution of this work is the presentation of a stream processing approach for real-time monitoring of resource equity in spatially-aware micromobility fleets. The approach makes localized updates to resource availability as needed, instead of batch computation of availability at regular update intervals. We find that the stream processing approach can compute, on average, 62 resource availability updates in the same execution time as a single batch computation. This advantage in processing time makes continuous real-time stream processing equivalent to a batch computation performed every 15 minutes, in terms of algorithm execution time. Since the stream processing approach considers every update to the fleet in real-time, resource availability is always up-to-date and there is no compromise in terms of accuracy.
- A. Bhattacharjee, A. D. Chhokra, H. Sun, S. Shekhar, A. Gokhale, G. Karsai, and A. Dubey, Deep-Edge: An Efficient Framework for Deep Learning Model Update on Heterogeneous Edge, in 2020 IEEE 4th International Conference on Fog and Edge Computing (ICFEC), 2020.
@inproceedings{Bhattacharjee_2020,
  author    = {Bhattacharjee, Anirban and Chhokra, Ajay Dev and Sun, Hongyang and Shekhar, Shashank and Gokhale, Aniruddha and Karsai, Gabor and Dubey, Abhishek},
  title     = {{Deep-Edge}: An Efficient Framework for Deep Learning Model Update on Heterogeneous Edge},
  booktitle = {2020 IEEE 4th International Conference on Fog and Edge Computing (ICFEC)},
  year      = {2020},
  tag       = {platform},
  month     = may,
  publisher = {IEEE},
  doi       = {10.1109/ICFEC50348.2020.00016},
  isbn      = {9781728173054}
}
Deep Learning (DL) model-based AI services are increasingly offered in a variety of predictive analytics services such as computer vision, natural language processing, speech recognition. However, the quality of the DL models can degrade over time due to changes in the input data distribution, thereby requiring periodic model updates. Although cloud data-centers can meet the computational requirements of the resource-intensive and time-consuming model update task, transferring data from the edge devices to the cloud incurs a significant cost in terms of network bandwidth and are prone to data privacy issues. With the advent of GPU-enabled edge devices, the DL model update can be performed at the edge in a distributed manner using multiple connected edge devices. However, efficiently utilizing the edge resources for the model update is a hard problem due to the heterogeneity among the edge devices and the resource interference caused by the co-location of the DL model update task with latency-critical tasks running in the background. To overcome these challenges, we present Deep-Edge, a load- and interference-aware, fault-tolerant resource management framework for performing model update at the edge that uses distributed training. This paper makes the following contributions. First, it provides a unified framework for monitoring, profiling, and deploying the DL model update tasks on heterogeneous edge devices. Second, it presents a scheduler that reduces the total re-training time by appropriately selecting the edge devices and distributing data among them such that no latency-critical applications experience deadline violations. Finally, we present empirical results to validate the efficacy of the framework using a real-world DL model update case-study based on the Caltech dataset and an edge AI cluster testbed.
- C. Hartsell, N. Mahadevan, H. Nine, T. Bapty, A. Dubey, and G. Karsai, Workflow Automation for Cyber Physical System Development Processes, in 2020 IEEE Workshop on Design Automation for CPS and IoT (DESTION), 2020.
@inproceedings{Hartsell_2020,
  author    = {Hartsell, Charles and Mahadevan, Nagabhushan and Nine, Harmon and Bapty, Ted and Dubey, Abhishek and Karsai, Gabor},
  title     = {Workflow Automation for Cyber Physical System Development Processes},
  booktitle = {2020 IEEE Workshop on Design Automation for CPS and IoT (DESTION)},
  year      = {2020},
  tag       = {ai4cps},
  month     = apr,
  publisher = {IEEE},
  doi       = {10.1109/DESTION50928.2020.00007},
  isbn      = {9781728199948}
}
Development of Cyber Physical Systems (CPSs) requires close interaction between developers with expertise in many domains to achieve ever-increasing demands for improved performance, reduced cost, and more system autonomy. Each engineering discipline commonly relies on domain-specific modeling languages, and analysis and execution of these models is often automated with appropriate tooling. However, integration between these heterogeneous models and tools is often lacking, and most of the burden for inter-operation of these tools is placed on system developers. To address this problem, we introduce a workflow modeling language for the automation of complex CPS development processes and implement a platform for execution of these models in the Assurance-based Learning-enabled CPS (ALC) Toolchain. Several illustrative examples are provided which show how these workflow models are able to automate many time-consuming integration tasks previously performed manually by system developers.
- Y. Chen, G. Wu, R. Sun, A. Dubey, A. Laszka, and P. Pugliese, A Review and Outlook of Energy Consumption Estimation Models for Electric Vehicles, in Preprint at arXiv, 2020.
@inproceedings{chen2020review,
  author        = {Chen, Yuche and Wu, Guoyuan and Sun, Ruixiao and Dubey, Abhishek and Laszka, Aron and Pugliese, Philip},
  title         = {A Review and Outlook of Energy Consumption Estimation Models for Electric Vehicles},
  booktitle     = {Preprint at {arXiv}},
  year          = {2020},
  tag           = {transit},
  archiveprefix = {arXiv},
  eprint        = {2003.12873},
  preprint      = {https://arxiv.org/abs/2003.12873},
  primaryclass  = {eess.SY}
}
Electric vehicles (EVs) are critical to the transition to a low-carbon transportation system. The successful adoption of EVs heavily depends on energy consumption models that can accurately and reliably estimate electricity consumption. This paper reviews the state-of-the-art of EV energy consumption models, aiming to provide guidance for future development of EV applications. We summarize influential variables of EV energy consumption into four categories: vehicle component, vehicle dynamics, traffic and environment related factors. We classify and discuss EV energy consumption models in terms of modeling scale (microscopic vs. macroscopic) and methodology (data-driven vs. rule-based). Our review shows trends of increasing macroscopic models that can be used to estimate trip-level EV energy consumption and increasing data-driven models that utilized machine learning technologies to estimate EV energy consumption based on large volume real-world data. We identify research gaps for EV energy consumption models, including the development of energy estimation models for modes other than personal vehicles (e.g., electric buses, electric trucks, and electric non-road vehicles); the development of energy estimation models that are suitable for applications related to vehicle-to-grid integration; and the development of multi-scale energy estimation models as a holistic modeling approach.
- P. Ghosh, S. Eisele, A. Dubey, M. Metelko, I. Madari, P. Volgyesi, and G. Karsai, Designing a decentralized fault-tolerant software framework for smart grids and its applications, Journal of Systems Architecture, vol. 109, p. 101759, 2020.
@article{GHOSH2020101759,
  author   = {Ghosh, Purboday and Eisele, Scott and Dubey, Abhishek and Metelko, Mary and Madari, Istvan and Volgyesi, Peter and Karsai, Gabor},
  title    = {Designing a decentralized fault-tolerant software framework for smart grids and its applications},
  journal  = {Journal of Systems Architecture},
  year     = {2020},
  volume   = {109},
  pages    = {101759},
  tag      = {platform},
  issn     = {1383-7621},
  doi      = {10.1016/j.sysarc.2020.101759},
  keywords = {Component, Fault tolerance, Distributed systems, Smart grid},
  url      = {http://www.sciencedirect.com/science/article/pii/S1383762120300539}
}
The vision of the ‘Smart Grid’ anticipates a distributed real-time embedded system that implements various monitoring and control functions. As the reliability of the power grid is critical to modern society, the software supporting the grid must support fault tolerance and resilience of the resulting cyber-physical system. This paper describes the fault-tolerance features of a software framework called Resilient Information Architecture Platform for Smart Grid (RIAPS). The framework supports various mechanisms for fault detection and mitigation and works in concert with the applications that implement the grid-specific functions. The paper discusses the design philosophy for and the implementation of the fault tolerance features and presents an application example to show how it can be used to build highly resilient systems.
- S. Ramakrishna, C. Hartsell, A. Dubey, P. Pal, and G. Karsai, A Methodology for Automating Assurance Case Generation, in Thirteenth International Tools and Methods of Competitive Engineering Symposium (TMCE 2020), 2020.
@inproceedings{ramakrishna2020methodology,
  author        = {Ramakrishna, Shreyas and Hartsell, Charles and Dubey, Abhishek and Pal, Partha and Karsai, Gabor},
  title         = {A Methodology for Automating Assurance Case Generation},
  booktitle     = {Thirteenth International Tools and Methods of Competitive Engineering Symposium (TMCE 2020)},
  year          = {2020},
  tag           = {ai4cps},
  archiveprefix = {arXiv},
  eprint        = {2003.05388},
  preprint      = {https://arxiv.org/abs/2003.05388},
  primaryclass  = {cs.RO}
}
Safety Case has become an integral component for safety-certification in various Cyber Physical System domains including automotive, aviation, medical devices, and military. The certification processes for these systems are stringent and require robust safety assurance arguments and substantial evidence backing. Despite the strict requirements, current practices still rely on manual methods that are brittle, do not have a systematic approach or thorough consideration of sound arguments. In addition, stringent certification requirements and ever-increasing system complexity make ad-hoc, manual assurance case generation (ACG) inefficient, time consuming, and expensive. To improve the current state of practice, we introduce a structured ACG tool which uses system design artifacts, accumulated evidence, and developer expertise to construct a safety case and evaluate it in an automated manner. We also illustrate the applicability of the ACG tool on a remote-control car testbed case study.
- A. Chhokra, N. Mahadevan, A. Dubey, and G. Karsai, Qualitative fault modeling in safety critical Cyber Physical Systems, in 12th System Analysis and Modelling Conference, 2020.
@inproceedings{chhokrasam2020,
  author    = {Chhokra, Ajay and Mahadevan, Nagabhushan and Dubey, Abhishek and Karsai, Gabor},
  title     = {Qualitative fault modeling in safety critical Cyber Physical Systems},
  booktitle = {12th System Analysis and Modelling Conference},
  year      = {2020},
  tag       = {platform}
}
One of the key requirements for designing safety critical cyber physical systems (CPS) is to ensure resiliency. Typically, the cyber sub-system in a CPS is empowered with protection devices that quickly detect and isolate faulty components to avoid failures. However, these protection devices can have internal faults that can cause cascading failures, leading to system collapse. Thus, to guarantee the resiliency of the system, it is necessary to identify the root cause(s) of a given system disturbance to take appropriate control actions. Correct failure diagnosis in such systems depends upon an integrated fault model of the system that captures the effect of faults in CPS as well as nominal and faulty operation of protection devices, sensors, and actuators. In this paper, we propose a novel graph based qualitative fault modeling formalism for CPS, called Temporal Causal Diagrams (TCDs), that allow system designers to effectively represent faults and their effects in both physical and cyber sub-systems. The paper also discusses in detail the fault propagation and execution semantics of a TCD model by translating to timed automata and thus allowing an efficient means to quickly analyze, validate and verify the fault model. In the end, we show the efficacy of the modeling approach with the help of a case study from an energy system.
- H. Tu, S. Lukic, A. Dubey, and G. Karsai, An LSTM-Based Online Prediction Method for Building Electric Load During COVID-19, in Annual Conference of the PHM Society, 2020.
@inproceedings{haophm2020,
  author    = {Tu, Hao and Lukic, Srdjan and Dubey, Abhishek and Karsai, Gabor},
  title     = {An LSTM-Based Online Prediction Method for Building Electric Load During COVID-19},
  booktitle = {Annual Conference of the PHM Society},
  year      = {2020},
  tag       = {ai4cps,power}
}
Accurate prediction of electric load is critical to optimally controlling and operating buildings. It provides the opportunities to reduce building energy consumption and to implement advanced functionalities such as demand response in the context of smart grid. However, buildings are nonstationary and it is important to consider the underlying concept changes that will affect the load pattern. In this paper we present an online learning method for predicting building electric load during concept changes such as COVID-19. The proposed method is based on online Long Short-Term Memory (LSTM) recurrent neural network. To speed up the learning process during concept changes and improve prediction accuracy, an ensemble of multiple models with different learning rates is used. The learning rates are updated in real time to best adapt to the new concept while maintaining the learned information for the prediction.
- S. Eisele, T. Eghtesad, N. Troutman, A. Laszka, and A. Dubey, Mechanisms for Outsourcing Computation via a Decentralized Market, in 14TH ACM International Conference on Distributed and Event Based Systems, 2020.
@inproceedings{eisele2020mechanisms,
  author    = {Eisele, Scott and Eghtesad, Taha and Troutman, Nicholas and Laszka, Aron and Dubey, Abhishek},
  title     = {Mechanisms for Outsourcing Computation via a Decentralized Market},
  booktitle = {14th ACM International Conference on Distributed and Event Based Systems},
  year      = {2020},
  tag       = {platform,decentralization},
  keywords  = {transactive},
  category  = {selectiveconference}
}
As the number of personal computing and IoT devices grows rapidly, so does the amount of computational power that is available at the edge. Since many of these devices are often idle, there is a vast amount of computational power that is currently untapped, and which could be used for outsourcing computation. Existing solutions for harnessing this power, such as volunteer computing (e.g., BOINC), are centralized platforms in which a single organization or company can control participation and pricing. By contrast, an open market of computational resources, where resource owners and resource users trade directly with each other, could lead to greater participation and more competitive pricing. To provide an open market, we introduce MODiCuM, a decentralized system for outsourcing computation. MODiCuM deters participants from misbehaving-which is a key problem in decentralized systems-by resolving disputes via dedicated mediators and by imposing enforceable fines. However, unlike other decentralized outsourcing solutions, MODiCuM minimizes computational overhead since it does not require global trust in mediation results. We provide analytical results proving that MODiCuM can deter misbehavior, and we evaluate the overhead of MODiCuM using experimental results based on an implementation of our platform.
- S. Eisele, C. Barreto, A. Dubey, X. Koutsoukos, T. Eghtesad, A. Laszka, and A. Mavridou, Blockchains for Transactive Energy Systems: Opportunities, Challenges, and Approaches, IEEE Computer, 2020.
@article{eisele2020Blockchains,
  author  = {Eisele, Scott and Barreto, Carlos and Dubey, Abhishek and Koutsoukos, Xenofon and Eghtesad, Taha and Laszka, Aron and Mavridou, Anastasia},
  title   = {Blockchains for Transactive Energy Systems: Opportunities, Challenges, and Approaches},
  journal = {IEEE Computer},
  year    = {2020},
  tag     = {platform,decentralization,power}
}
The emergence of blockchains and smart contracts have renewed interest in electrical cyber-physical systems, especially in the area of transactive energy systems. However, despite recent advances, there remain significant challenges that impede the practical adoption of blockchains in transactive energy systems, which include implementing complex market mechanisms in smart contracts, ensuring safety of the power system, and protecting residential consumers’ privacy. To address these challenges, we present TRANSAX, a blockchain-based transactive energy system that provides an efficient, safe, and privacy-preserving market built on smart contracts. Implementation and deployment of TRANSAX in a verifiably correct and efficient way is based on VeriSolid, a framework for the correct-by-construction development of smart contracts, and RIAPS, a middleware for resilient distributed power systems
- M. Wilbur, C. Samal, J. P. Talusan, K. Yasumoto, and A. Dubey, Time-dependent Decentralized Routing using Federated Learning, in 2020 IEEE 23rd International Symposium on Real-Time Distributed Computing (ISORC), 2020.
@inproceedings{wilbur2020decentralized,
  author       = {Wilbur, Michael and Samal, Chinmaya and Talusan, Jose Paolo and Yasumoto, Keiichi and Dubey, Abhishek},
  title        = {Time-dependent Decentralized Routing using Federated Learning},
  booktitle    = {2020 IEEE 23rd International Symposium on Real-Time Distributed Computing (ISORC)},
  year         = {2020},
  tag          = {decentralization,transit},
  organization = {IEEE}
}
Recent advancements in cloud computing have driven rapid development in data-intensive smart city applications by providing near real time processing and storage scalability. This has resulted in efficient centralized route planning services such as Google Maps, upon which millions of users rely. Route planning algorithms have progressed in line with the cloud environments in which they run. Current state of the art solutions assume a shared memory model, hence deployment is limited to multiprocessing environments in data centers. By centralizing these services, latency has become the limiting parameter in the technologies of the future, such as autonomous cars. Additionally, these services require access to outside networks, raising availability concerns in disaster scenarios. Therefore, this paper provides a decentralized route planning approach for private fog networks. We leverage recent advances in federated learning to collaboratively learn shared prediction models online and investigate our approach with a simulated case study from a mid-size U.S. city.
- C. Barreto, T. Eghtesad, S. Eisele, A. Laszka, A. Dubey, and X. Koutsoukos, Cyber-Attacks and Mitigation in Blockchain Based Transactive Energy Systems, in 3rd IEEE International Conference on Industrial Cyber-Physical Systems (ICPS 2020), 2020.
@inproceedings{barretocyber2020,
  author    = {Barreto, Carlos and Eghtesad, Taha and Eisele, Scott and Laszka, Aron and Dubey, Abhishek and Koutsoukos, Xenofon},
  title     = {Cyber-Attacks and Mitigation in Blockchain Based Transactive Energy Systems},
  booktitle = {3rd IEEE International Conference on Industrial Cyber-Physical Systems (ICPS 2020)},
  year      = {2020},
  category  = {selectiveconference},
  keywords  = {transactive},
  project   = {cps-reliability},
  tag       = {decentralization,power}
}
Power grids are undergoing major changes due to the rapid adoption of intermittent renewable energy resources and the increased availability of energy storage devices. These trends drive smart-grid operators to envision a future where peer-to-peer energy trading occurs within microgrids, leading to the development of Transactive Energy Systems. Blockchains have garnered significant interest from both academia and industry for their potential application in decentralized TES, in large part due to their high level of resilience. In this paper, we introduce a novel class of attacks against blockchain based TES, which target the gateways that connect market participants to the system. We introduce a general model of blockchain based TES and study multiple threat models and attack strategies. We also demonstrate the impact of these attacks using a testbed based on GridLAB-D and a private Ethereum network. Finally, we study how to mitigate these attacks.
- A. Ayman, M. Wilbur, A. Sivagnanam, P. Pugliese, A. Dubey, and A. Laszka, Data-Driven Prediction of Route-Level Energy Use for Mixed-Vehicle Transit Fleets, in 2020 IEEE International Conference on Smart Computing (SMARTCOMP) (SMARTCOMP 2020), Bologna, Italy, 2020.
@inproceedings{Lasz2006Data,
  author    = {Ayman, Afiya and Wilbur, Michael and Sivagnanam, Amutheezan and Pugliese, Philip and Dubey, Abhishek and Laszka, Aron},
  title     = {{Data-Driven} Prediction of {Route-Level} Energy Use for {Mixed-Vehicle} Transit Fleets},
  booktitle = {2020 IEEE International Conference on Smart Computing (SMARTCOMP 2020)},
  address   = {Bologna, Italy},
  tag       = {ai4cps,transit},
  days      = {21},
  month     = jun,
  year      = {2020},
  keywords  = {data-driven prediction; electric vehicle; public transit; on-board diagnostics data; deep learning; traffic data}
}
Due to increasing concerns about environmental impact, operating costs, and energy security, public transit agencies are seeking to reduce their fuel use by employing electric vehicles (EVs). However, because of the high upfront cost of EVs, most agencies can afford only mixed fleets of internal-combustion and electric vehicles. Making the best use of these mixed fleets presents a challenge for agencies since optimizing the assignment of vehicles to transit routes, scheduling charging, etc. require accurate predictions of electricity and fuel use. Recent advances in sensor-based technologies, data analytics, and machine learning enable remedying this situation; however, to the best of our knowledge, there exists no framework that would integrate all relevant data into a route-level prediction model for public transit. In this paper, we present a novel framework for the data-driven prediction of route-level energy use for mixed-vehicle transit fleets, which we evaluate using data collected from the bus fleet of CARTA, the public transit authority of Chattanooga, TN. We present a data collection and storage framework, which we use to capture system-level data, including traffic and weather conditions, and high-frequency vehicle-level data, including location traces, fuel or electricity use, etc. We present domain-specific methods and algorithms for integrating and cleansing data from various sources, including street and elevation maps. Finally, we train and evaluate machine learning models, including deep neural networks, decision trees, and linear regression, on our integrated dataset. Our results show that neural networks provide accurate estimates, while other models can help us discover relations between energy use and factors such as road and weather conditions.
- S. Ramakrishna, C. Hartsell, M. P. Burruss, G. Karsai, and A. Dubey, Dynamic-weighted simplex strategy for learning enabled cyber physical systems, Journal of Systems Architecture, vol. 111, p. 101760, 2020.
@article{ramakrishna2020dynamic,
  author   = {Ramakrishna, Shreyas and Hartsell, Charles and Burruss, Matthew P. and Karsai, Gabor and Dubey, Abhishek},
  title    = {Dynamic-weighted simplex strategy for learning enabled cyber physical systems},
  journal  = {Journal of Systems Architecture},
  volume   = {111},
  pages    = {101760},
  year     = {2020},
  tag      = {ai4cps},
  issn     = {1383-7621},
  doi      = {10.1016/j.sysarc.2020.101760},
  url      = {https://www.sciencedirect.com/science/article/pii/S1383762120300540},
  keywords = {Convolutional Neural Networks, Learning Enabled Components, Reinforcement Learning, Simplex Architecture}
}
Cyber Physical Systems (CPS) have increasingly started using Learning Enabled Components (LECs) for performing perception-based control tasks. The simple design approach, and their capability to continuously learn has led to their widespread use in different autonomous applications. Despite their simplicity and impressive capabilities, these components are difficult to assure, which makes their use challenging. The problem of assuring CPS with untrusted controllers has been achieved using the Simplex Architecture. This architecture integrates the system to be assured with a safe controller and provides a decision logic to switch between the decisions of these controllers. However, the key challenges in using the Simplex Architecture are: (1) designing an effective decision logic, and (2) sudden transitions between controller decisions lead to inconsistent system performance. To address these research challenges, we make three key contributions: (1) dynamic-weighted simplex strategy – we introduce “weighted simplex strategy” as the weighted ensemble extension of the classical Simplex Architecture. We then provide a reinforcement learning based mechanism to find dynamic ensemble weights, (2) middleware framework – we design a framework that allows the use of the dynamic-weighted simplex strategy, and provides a resource manager to monitor the computational resources, and (3) hardware testbed – we design a remote-controlled car testbed called DeepNNCar to test and demonstrate the aforementioned key concepts. Using the hardware, we show that the dynamic-weighted simplex strategy has 60% fewer out-of-track occurrences (soft constraint violations), while demonstrating higher optimized speed (performance) of 0.4 m/s during indoor driving than the original LEC driven system.
- V. Sundar, S. Ramakrishna, Z. Rahiminasab, A. Easwaran, and A. Dubey, Out-of-Distribution Detection in Multi-Label Datasets using Latent Space of β-VAE, in 2020 IEEE Security and Privacy Workshops (SPW), Los Alamitos, CA, USA, 2020, pp. 250–255.
@inproceedings{sundar2020detecting,
  author    = {Sundar, V. and Ramakrishna, S. and Rahiminasab, Z. and Easwaran, A. and Dubey, A.},
  title     = {Out-of-Distribution Detection in Multi-Label Datasets using Latent Space of {\beta}-VAE},
  booktitle = {2020 IEEE Security and Privacy Workshops (SPW)},
  year      = {2020},
  pages     = {250--255},
  keywords  = {training;support vector machines;object detection;security;task analysis;testing;meteorology},
  doi       = {10.1109/SPW50608.2020.00057},
  url       = {https://doi.ieeecomputersociety.org/10.1109/SPW50608.2020.00057},
  publisher = {IEEE Computer Society},
  address   = {Los Alamitos, CA, USA},
  tag       = {ai4cps},
  month     = may
}
- J. P. Talusan, M. Wilbur, A. Dubey, and K. Yasumoto, On Decentralized Route Planning Using the Road Side Units as Computing Resources, in 2020 IEEE International Conference on Fog Computing (ICFC), 2020.
@inproceedings{rsuicfc2020,
  author       = {Talusan, Jose Paolo and Wilbur, Michael and Dubey, Abhishek and Yasumoto, Keiichi},
  title        = {On Decentralized Route Planning Using the Road Side Units as Computing Resources},
  booktitle    = {2020 IEEE International Conference on Fog Computing (ICFC)},
  year         = {2020},
  tag          = {decentralization,transit},
  organization = {IEEE},
  category     = {selectiveconference},
  keywords     = {transit, middleware}
}
Residents in cities typically use third-party platforms such as Google Maps for route planning services. While providing near real-time processing, these state of the art centralized deployments are limited to multiprocessing environments in data centers. This raises privacy concerns, increases risk for critical data and causes vulnerability to network failure. In this paper, we propose to use decentralized road side units (RSU) (owned by the city) to perform route planning. We divide the city road network into grids, each assigned an RSU where traffic data is kept locally, increasing security and resiliency such that the system can perform even if some RSUs fail. Route generation is done in two steps. First, an optimal grid sequence is generated, prioritizing shortest path calculation accuracy but not RSU load. Second, we assign route planning tasks to the grids in the sequence. Keeping in mind RSU load and constraints, tasks can be allocated and executed in any non-optimal grid but with lower accuracy. We evaluate this system using Metropolitan Nashville road traffic data. We divided the area into 500 grids, configuring load and neighborhood sizes to meet delay constraints while maximizing model accuracy. The results show that there is a 30 percent decrease in processing time with a decrease in model accuracy of 99 percent to 92.3 percent, by simply increasing the search area to the optimal grid’s immediate neighborhood.
- S. Shekhar, A. Chhokra, H. Sun, A. Gokhale, A. Dubey, X. Koutsoukos, and G. Karsai, URMILA: Dynamically Trading-off Fog and Edge Resources for Performance and Mobility-Aware IoT Services, Journal of Systems Architecture, 2020.
@article{SHEKHAR2020101710,
  author   = {Shekhar, Shashank and Chhokra, Ajay and Sun, Hongyang and Gokhale, Aniruddha and Dubey, Abhishek and Koutsoukos, Xenofon and Karsai, Gabor},
  title    = {{URMILA}: Dynamically Trading-off Fog and Edge Resources for Performance and Mobility-Aware {IoT} Services},
  journal  = {Journal of Systems Architecture},
  year     = {2020},
  issn     = {1383-7621},
  tag      = {platform,transit},
  doi      = {10.1016/j.sysarc.2020.101710},
  keywords = {Fog/Edge Computing, User Mobility, Latency-sensitive IoT Services, Resource Management, middleware, performance},
  project  = {cps-middleware},
  url      = {http://www.sciencedirect.com/science/article/pii/S1383762120300047}
}
The fog/edge computing paradigm is increasingly being adopted to support a range of latency-sensitive IoT services due to its ability to assure the latency requirements of these services while supporting the elastic properties of cloud computing. IoT services that cater to user mobility, however, face a number of challenges in this context. First, since user mobility can incur wireless connectivity issues, executing these services entirely on edge resources, such as smartphones, will result in a rapid drain in the battery charge. In contrast, executing these services entirely on fog resources, such as cloudlets or micro data centers, will incur higher communication costs and increased latencies in the face of fluctuating wireless connectivity and signal strength. Second, a high degree of multi-tenancy on fog resources involving different IoT services can lead to performance interference issues due to resource contention. In order to address these challenges, this paper describes URMILA, which makes dynamic resource management decisions to achieve effective trade-offs between using the fog and edge resources yet ensuring that the latency requirements of the IoT services are met. We evaluate URMILA’s capabilities in the context of a real-world use case on an emulated but realistic IoT testbed.
- G. Pettet, A. Mukhopadhyay, M. Kochenderfer, Y. Vorobeychik, and A. Dubey, On Algorithmic Decision Procedures in Emergency Response Systems in Smart and Connected Communities, in Proceedings of the 19th Conference on Autonomous Agents and MultiAgent Systems, AAMAS 2020, Auckland, New Zealand, 2020.
@inproceedings{Pettet2020,
  author    = {Pettet, Geoffrey and Mukhopadhyay, Ayan and Kochenderfer, Mykel and Vorobeychik, Yevgeniy and Dubey, Abhishek},
  title     = {On Algorithmic Decision Procedures in Emergency Response Systems in Smart and Connected Communities},
  booktitle = {Proceedings of the 19th Conference on Autonomous Agents and MultiAgent Systems, {AAMAS} 2020, Auckland, New Zealand},
  year      = {2020},
  tag       = {ai4cps, decentralization,incident},
  category  = {selectiveconference},
  keywords  = {emergency, performance},
  project   = {smart-emergency-response,smart-cities},
  timestamp = {Wed, 17 Jan 2020 07:24:00 +0200}
}
Emergency Response Management (ERM) is a critical problem faced by communities across the globe. Despite its importance, it is common for ERM systems to follow myopic and straight-forward decision policies in the real world. Principled approaches to aid decision-making under uncertainty have been explored in this context but have failed to be accepted into real systems. We identify a key issue impeding their adoption — algorithmic approaches to emergency response focus on reactive, post-incident dispatching actions, i.e. optimally dispatching a responder after incidents occur. However, the critical nature of emergency response dictates that when an incident occurs, first responders always dispatch the closest available responder to the incident. We argue that the crucial period of planning for ERM systems is not post-incident, but between incidents. However, this is not a trivial planning problem - a major challenge with dynamically balancing the spatial distribution of responders is the complexity of the problem. An orthogonal problem in ERM systems is to plan under limited communication, which is particularly important in disaster scenarios that affect communication networks. We address both the problems by proposing two partially decentralized multi-agent planning algorithms that utilize heuristics and the structure of the dispatch problem. We evaluate our proposed approach using real-world data, and find that in several contexts, dynamic re-balancing the spatial distribution of emergency responders reduces both the average response time as well as its variance.
- S. Hasan, A. Dubey, G. Karsai, and X. Koutsoukos, A game-theoretic approach for power systems defense against dynamic cyber-attacks, International Journal of Electrical Power & Energy Systems, vol. 115, 2020.
@article{Hasan2020,
  author   = {Hasan, Saqib and Dubey, Abhishek and Karsai, Gabor and Koutsoukos, Xenofon},
  title    = {A game-theoretic approach for power systems defense against dynamic cyber-attacks},
  journal  = {International Journal of Electrical Power \& Energy Systems},
  year     = {2020},
  volume   = {115},
  issn     = {0142-0615},
  doi      = {10.1016/j.ijepes.2019.105432},
  file     = {:Hasan2020-A_Game_Theoretic_Approach_for_Power_Systems_Defense_against_Dynamic_Cyber_Attacks.pdf:PDF},
  keywords = {Cascading failures, Cyber-attack, Dynamic attack, Game theory, Resilience, Smart grid, Static attack, smartgrid, reliability},
  project  = {cps-reliability},
  tag      = {platform,power},
  url      = {http://www.sciencedirect.com/science/article/pii/S0142061519302807}
}
Technological advancements in today’s electrical grids give rise to new vulnerabilities and increase the potential attack surface for cyber-attacks that can severely affect the resilience of the grid. Cyber-attacks are increasing both in number as well as sophistication and these attacks can be strategically organized in chronological order (dynamic attacks), where they can be instantiated at different time instants. The chronological order of attacks enables us to uncover those attack combinations that can cause severe system damage but this concept remained unexplored due to the lack of dynamic attack models. Motivated by the idea, we consider a game-theoretic approach to design a new attacker-defender model for power systems. Here, the attacker can strategically identify the chronological order in which the critical substations and their protection assemblies can be attacked in order to maximize the overall system damage. However, the defender can intelligently identify the critical substations to protect such that the system damage can be minimized. We apply the developed algorithms to the IEEE-39 and 57 bus systems with finite attacker/defender budgets. Our results show the effectiveness of these models in improving the system resilience under dynamic attacks.
- W. Barbour, M. Wilbur, R. Sandoval, C. V. Geffen, B. Hall, A. Dubey, and D. Work, Data Driven Methods for Effective Micromobility Parking, in Proceedings of the Transportation Research Board Annual Meeting, 2020.
@inproceedings{micromobility2020,
  author    = {Barbour, William and Wilbur, Michael and Sandoval, Ricardo and Van Geffen, Caleb and Hall, Brandon and Dubey, Abhishek and Work, Dan},
  title     = {Data Driven Methods for Effective Micromobility Parking},
  booktitle = {Proceedings of the Transportation Research Board Annual Meeting},
  year      = {2020},
  category  = {selectiveconference},
  keywords  = {transit},
  tag       = {transit}
}
Proliferation of shared urban mobility devices (SUMDs), particularly dockless e-scooters, has created opportunities for users with efficient, short trips, but raised management challenges for cities and regulators in terms of safety, infrastructure, and parking. There is a need in some high-demand areas for dedicated parking locations for dockless e-scooters and other devices. We propose the use of data generated by SUMD trips for establishing locations of parking facilities and assessing their required capacity and anticipated utilization. The problem objective is: find locations for a given number of parking facilities that maximize the number of trips that could reasonably be ended and parked at these facilities. Posed another way, what is the minimum number and best locations of parking facilities needed to cover a desired portion of trips at these facilities? In order to determine parking locations, areas of high-density trip destination points are found using unsupervised machine learning algorithms. The dwell time of each device is used to estimate the number of devices parked in a location over time and the necessary capacity of the parking facility. The methodology is tested on a dataset of approximately 100,000 e-scooter trips at Vanderbilt University in Nashville, Tennessee, USA. We find DBSCAN to be the most effective algorithm at determining high-performing parking locations. A selection of 19 parking locations is enough to capture roughly 25 percent of all trips in the dataset. The vast majority of parking facilities found require a mean capacity of 6 scooters when sized for the 98th percentile observed demand.
- Y. Senarath, S. Nannapaneni, H. Purohit, and A. Dubey, Emergency Incident Detection from Crowdsourced Waze Data using Bayesian Information Fusion, in The 2020 IEEE/WIC/ACM International Joint Conference On Web Intelligence And Intelligent Agent Technology, 2020.
@inproceedings{senarath_emergency_2020,
  author     = {Senarath, Yasas and Nannapaneni, Saideep and Purohit, Hemant and Dubey, Abhishek},
  title      = {Emergency {Incident} {Detection} from {Crowdsourced} {Waze} {Data} using {Bayesian} {Information} {Fusion}},
  booktitle  = {The 2020 {IEEE}/{WIC}/{ACM} {International} {Joint} {Conference} {On} {Web} {Intelligence} {And} {Intelligent} {Agent} {Technology}},
  publisher  = {IEEE},
  month      = nov,
  year       = {2020},
  eprint     = {2011.05440},
  eprinttype = {arXiv},
  note       = {arXiv: 2011.05440},
  url        = {http://arxiv.org/abs/2011.05440},
  urldate    = {2021-01-31},
  copyright  = {All rights reserved},
  tag        = {incident},
  keywords   = {Computer Science - Artificial Intelligence, Computer Science - Social and Information Networks},
  annote     = {Comment: 8 pages, The 2020 IEEE/WIC/ACM International Joint Conference On Web Intelligence And Intelligent Agent Technology (WI-IAT '20)},
  file       = {arXiv Fulltext PDF:/Users/abhishek/Zotero/storage/B8WHQRUX/Senarath et al. - 2020 - Emergency Incident Detection from Crowdsourced Waz.pdf:application/pdf;arXiv.org Snapshot:/Users/abhishek/Zotero/storage/98PX572Y/2011.html:text/html}
}
The number of emergencies has increased over the years with the growth in urbanization. This pattern has overwhelmed the emergency services with limited resources and demands the optimization of response processes. It is partly due to the traditional ‘reactive’ approach of emergency services to collect data about incidents, where a source initiates a call to the emergency number (e.g., 911 in the U.S.), delaying and limiting the potentially optimal response. Crowdsourcing platforms such as Waze provide an opportunity to develop a rapid, ‘proactive’ approach to collect data about incidents through crowd-generated observational reports. However, the reliability of reporting sources and spatio-temporal uncertainty of the reported incidents challenge the design of such a proactive approach. Thus, this paper presents a novel method for emergency incident detection using noisy crowdsourced Waze data. We propose a principled computational framework based on Bayesian theory to model the uncertainty in the reliability of crowd-generated reports and their integration across space and time to detect incidents. Extensive experiments using data collected from Waze and the official reported incidents in Nashville, Tennessee in the U.S. show our method can outperform strong baselines for both F1-score and AUC. The application of this work provides an extensible framework to incorporate different noisy data sources for proactive incident detection to improve and optimize emergency response operations in our communities.
- A. Chhokra, S. Hasan, A. Dubey, and G. Karsai, A Binary Decision Diagram Based Cascade Prognostics Scheme For Power Systems, in 2020 American control conference, 2020.
@inproceedings{chokraACC2020,
  author       = {Chhokra, Ajay and Hasan, Saqib and Dubey, Abhishek and Karsai, Gabor},
  title        = {A Binary Decision Diagram Based Cascade Prognostics Scheme For Power Systems},
  booktitle    = {2020 American Control Conference},
  year         = {2020},
  organization = {IEEE},
  note         = {accepted for publication},
  category     = {selectiveconference},
  keywords     = {smartgrid},
  tag          = {platform,power}
}
2019
- R. M. Borromeo, L. Chen, A. Dubey, S. Roy, and S. Thirumuruganathan, On Benchmarking for Crowdsourcing and Future of Work Platforms, IEEE Data Eng. Bull., vol. 42, no. 4, pp. 46–54, 2019.
@article{FTWShonan2019,
  author    = {Borromeo, Ria Mae and Chen, Lei and Dubey, Abhishek and Roy, Sudeepa and Thirumuruganathan, Saravanan},
  title     = {On Benchmarking for Crowdsourcing and Future of Work Platforms},
  journal   = {{IEEE} Data Eng. Bull.},
  year      = {2019},
  volume    = {42},
  number    = {4},
  pages     = {46--54},
  url       = {http://sites.computer.org/debull/A19dec/p46.pdf},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/journals/debu/Borromeo0DRT19.bib},
  timestamp = {Tue, 21 Jul 2020 00:40:32 +0200}
}
- A. Laszka, A. Mavridou, S. Eisele, E. Statchtiari, and A. Dubey, VeriSolid for TRANSAX: Correct-by-Design Ethereum Smart Contracts for Energy Trading, in First International Summer School on Security and Privacy for Blockchains and Distributed Ledger Technologies, BDLT 2019, Vienna, Austria, 2019.
@inproceedings{LaszkaVerisolid2019,
  author    = {Laszka, Aron and Mavridou, Anastasia and Eisele, Scott and Statchtiari, Emmanouela and Dubey, Abhishek},
  title     = {{VeriSolid} for {TRANSAX}: Correct-by-Design {Ethereum} Smart Contracts for Energy Trading},
  booktitle = {First International Summer School on Security and Privacy for Blockchains and Distributed Ledger Technologies, BDLT 2019, Vienna, Austria},
  year      = {2019},
  month     = sep,
  category  = {workshop},
  file      = {:LaszkaVerisolid2019Poster.pdf:PDF},
  keywords  = {blockchain, transactive},
  project   = {cps-blockchains,transactive-energy},
  tag       = {platform,decentralization,power}
}
The adoption of blockchain based platforms is rising rapidly. Their popularity is explained by their ability to maintain a distributed public ledger, providing reliability, integrity, and auditability without a trusted entity. Recent platforms, e.g., Ethereum, also act as distributed computing platforms and enable the creation of smart contracts, i.e., software code that runs on the platform and automatically executes and enforces the terms of a contract. Since smart contracts can perform any computation, they allow the development of decentralized applications, whose execution is safeguarded by the security properties of the underlying platform. Due to their unique advantages, blockchain based platforms are envisioned to have a wide range of applications, ranging from financial to the Internet-of-Things. However, the trustworthiness of the platform guarantees only that a smart contract is executed correctly, not that the code of the contract is correct. In fact, a large number of contracts deployed in practice suffer from software vulnerabilities, which are often introduced due to the semantic gap between the assumptions that contract writers make about the underlying execution semantics and the actual semantics of smart contracts. A recent automated analysis of 19,336 smart contracts deployed in practice found that 8,333 of them suffered from at least one security issue. Although this study was based on smart contracts deployed on the public Ethereum blockchain, the analyzed security issues were largely platform agnostic. Security vulnerabilities in smart contracts present a serious issue for two main reasons. Firstly, smart-contract bugs cannot be patched. By design, once a contract is deployed, its functionality cannot be altered even by its creator. Secondly, once a faulty or malicious transaction is recorded, it cannot be removed from the blockchain (“code is law” principle).
The only way to roll back a transaction is by performing a hard fork of the blockchain, which requires consensus among the stakeholders and undermines the trustworthiness of the platform. In light of this, it is crucial to ensure that a smart contract is secure before deploying it and trusting it with significant amounts of cryptocurrency. To this end, we present the VeriSolid framework for the formal verification and generation of contracts that are specified using a transition-system based model with rigorous operational semantics. VeriSolid provides an end-to-end design framework, which combined with a Solidity code generator, allows the correct-by-design development of Ethereum smart contracts. To the best of our knowledge, VeriSolid is the first framework to promote a model-based, correctness-by-design approach for blockchain-based smart contracts. Properties established at any step of the VeriSolid design flow are preserved in the resulting smart contracts, guaranteeing their correctness. VeriSolid fully automates the process of verification and code generation, while enhancing usability by providing easy-to-use graphical editors for the specification of transition systems and natural-like language templates for the specification of formal properties. By performing verification early at design time, VeriSolid provides a cost-effective approach since fixing bugs later in the development process can be very expensive. Our verification approach can detect typical vulnerabilities, but it may also detect any violation of required properties. Since our tool applies verification at a high level, it can provide meaningful feedback to the developer when a property is not satisfied, which would be much harder to do at bytecode level. We present the application of VeriSolid on smart contracts used in Smart Energy Systems such as transactive energy platforms.
In particular, we used VeriSolid to design and generate the smart contract that serves as the core of the TRANSAX blockchain-based platform for trading energy futures. The designed smart contract allows energy producers and consumers to post offers for selling and buying energy. Since optimally matching selling offers with buying offers can be very expensive computationally, the contract relies on external solvers to compute and submit solutions to the matching problem, which are then checked by the contract. Using VeriSolid, we defined a set of safety properties and we were able to detect bugs after performing analysis with the NuSMV model checker.
- S. Basak, A. Dubey, and B. P. Leao, Analyzing the Cascading Effect of Traffic Congestion Using LSTM Networks, in IEEE Big Data, Los Angeles, Ca, 2019.
@inproceedings{basak2019bigdata,
  author    = {Basak, Sanchita and Dubey, Abhishek and Leao, Bruno P.},
  title     = {Analyzing the Cascading Effect of Traffic Congestion Using {LSTM} Networks},
  booktitle = {IEEE Big Data},
  year      = {2019},
  address   = {Los Angeles, CA},
  category  = {selectiveconference},
  keywords  = {reliability, transit},
  tag       = {ai4cps,incident,transit}
}
This paper presents a data-driven approach for predicting the propagation of traffic congestion at road segments as a function of the congestion in their neighboring segments. In the past, this problem has mostly been addressed by modelling the traffic congestion over some standard physical phenomenon through which it is difficult to capture all the modalities of such a dynamic and complex system. While other recent works have focused on applying a generalized data-driven technique on the whole network at once, they often ignore intersection characteristics. On the contrary, we propose a city-wide ensemble of intersection level connected LSTM models and propose mechanisms for identifying congestion events using the predictions from the networks. To reduce the search space of likely congestion sinks we use the likelihood of congestion propagation in neighboring road segments of a congestion source that we learn from the past historical data. We validated our congestion forecasting framework on the real world traffic data of Nashville, USA and identified the onset of congestion in each of the neighboring segments of any congestion source with an average precision of 0.9269 and an average recall of 0.9118 tested over ten congestion events.
- M. A. Walker, D. C. Schmidt, and A. Dubey, Chapter Six - Testing at scale of IoT blockchain applications, in Advances in Computers, vol. 115, Oreilly, 2019, pp. 155–179.
@inbook{Walker2019,
  author    = {Walker, Michael A. and Schmidt, Douglas C. and Dubey, Abhishek},
  title     = {Chapter Six - Testing at scale of {IoT} blockchain applications},
  booktitle = {Advances in Computers},
  publisher = {Elsevier},
  year      = {2019},
  volume    = {115},
  pages     = {155--179},
  doi       = {10.1016/bs.adcom.2019.07.008},
  url       = {https://doi.org/10.1016/bs.adcom.2019.07.008},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/journals/ac/WalkerSD19},
  file      = {:Walker2019-Chapter_Six_Testing_at_Scale_of_IoT_Blockchain_Applications.pdf:PDF},
  keywords  = {cps-blockchains, blockchain},
  project   = {cps-blockchains},
  tag       = {decentralization},
  timestamp = {Tue, 12 Nov 2019 00:00:00 +0100}
}
Due to the ever-increasing adaptation of Blockchain technologies in the private, public, and business domains, both the use of Distributed Systems and the increased demand for their reliability has exploded recently, especially with their desired integration with Internet-of-Things devices. This has resulted in a lot of work being done in the fields of distributed system analysis and design, specifically in the areas of blockchain smart contract design and formal verification. However, the focus on formal verification methodologies has meant that less attention has been given toward more traditional testing methodologies, such as unit testing and integration testing. This includes a lack of full support by most, if not all, the major blockchain implementations for testing at scale, except on fully public test networks. This has several drawbacks, such as: (1) The inability to do repeatable testing under identical scenarios, (2) reliance upon public mining of blocks, which introduces unreasonable amounts of delay for a test driven development scenario that a private network could reduce or eliminate, and (3) the inability to design scenarios where parts of the network go down. In this chapter we discuss design, testing methodologies, and tools to allow Testing at Scale of IoT Blockchain Applications.
- P. Zhang, D. C. Schmidt, J. White, and A. Dubey, Chapter Seven - Consensus mechanisms and information security technologies, in Advances in Computers, vol. 115, Oreilly, 2019, pp. 181–209.
@inbook{Zhang2019,
  author    = {Zhang, Peng and Schmidt, Douglas C. and White, Jules and Dubey, Abhishek},
  title     = {Chapter Seven - Consensus mechanisms and information security technologies},
  booktitle = {Advances in Computers},
  publisher = {Elsevier},
  year      = {2019},
  volume    = {115},
  pages     = {181--209},
  doi       = {10.1016/bs.adcom.2019.05.001},
  url       = {https://doi.org/10.1016/bs.adcom.2019.05.001},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/journals/ac/0034SWD19},
  file      = {:Zhang2019-Chapter_Seven-Consensus_mechanisms_and_information_security_technologies.pdf:PDF},
  keywords  = {blockchain},
  project   = {cps-blockchains},
  tag       = {decentralization},
  timestamp = {Tue, 12 Nov 2019 00:00:00 +0100}
}
Distributed Ledger Technology (DLT) helps maintain and distribute predefined types of information and data in a decentralized manner. It removes the reliance on a third-party intermediary, while securing information exchange and creating shared truth via transaction records that are hard to tamper with. The successful operation of DLT stems largely from two computer science technologies: consensus mechanisms and information security protocols. Consensus mechanisms, such as Proof of Work (PoW) and Raft, ensure that the DLT network collectively agrees on contents stored in the ledger. Information security protocols, such as encryption and hashing, protect data integrity and safeguard data against unauthorized access. The most popular incarnation of DLT has been used in cryptocurrencies, such as Bitcoin and Ethereum, through public blockchains, which requires the application of more robust consensus protocols across the entire network. An example is PoW, which has been employed by Bitcoin, but which is also highly energy inefficient. Other forms of DLT include consortium and private blockchains where networks are configured within federated entities or a single organization, in which case less energy intensive consensus protocols (such as Raft) would suffice. This chapter surveys existing consensus mechanisms and information security technologies used in DLT.
- F. Sun, A. Dubey, J. White, and A. Gokhale, Transit-hub: a smart public transportation decision support system with multi-timescale analytical services, Cluster Computing, vol. 22, no. Suppl 1, pp. 2239–2254, Jan. 2019.
@article{Sun2019,
  author    = {Sun, Fangzhou and Dubey, Abhishek and White, Jules and Gokhale, Aniruddha},
  title     = {Transit-hub: a smart public transportation decision support system with multi-timescale analytical services},
  journal   = {Cluster Computing},
  year      = {2019},
  volume    = {22},
  number    = {Suppl 1},
  pages     = {2239--2254},
  month     = jan,
  doi       = {10.1007/s10586-018-1708-z},
  url       = {https://doi.org/10.1007/s10586-018-1708-z},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/journals/cluster/SunDWG19},
  file      = {:Sun2019-Transit-hub_a_smart_public_transportation_decision_support_system_with_multi-timescale_analytical_services.pdf:PDF},
  keywords  = {transit},
  project   = {smart-cities,smart-transit},
  tag       = {transit},
  timestamp = {Wed, 21 Aug 2019 01:00:00 +0200}
}
Public transit is a critical component of a smart and connected community. As such, citizens expect and require accurate information about real-time arrival/departures of transportation assets. As transit agencies enable large-scale integration of real-time sensors and support back-end data-driven decision support systems, the dynamic data-driven applications systems (DDDAS) paradigm becomes a promising approach to make the system smarter by providing online model learning and multi-time scale analytics as part of the decision support system that is used in the DDDAS feedback loop. In this paper, we describe a system in use in Nashville and illustrate the analytic methods developed by our team. These methods use both historical as well as real-time streaming data for online bus arrival prediction. The historical data is used to build classifiers that enable us to create expected performance models as well as identify anomalies. These classifiers can be used to provide schedule adjustment feedback to the metro transit authority. We also show how these analytics services can be packaged into modular, distributed and resilient micro-services that can be deployed on both cloud back ends as well as edge computing resources.
- A. Dubey, G. Karsai, P. Völgyesi, M. Metelko, I. Madari, H. Tu, Y. Du, and S. Lukic, Device Access Abstractions for Resilient Information Architecture Platform for Smart Grid, Embedded Systems Letters, vol. 11, no. 2, pp. 34–37, 2019.
@article{Dubey2019,
  author    = {Dubey, Abhishek and Karsai, Gabor and V{\"{o}}lgyesi, P{\'{e}}ter and Metelko, Mary and Madari, Istv{\'{a}}n and Tu, Hao and Du, Yuhua and Lukic, Srdjan},
  title     = {Device Access Abstractions for Resilient Information Architecture Platform for Smart Grid},
  journal   = {{IEEE} Embedded Systems Letters},
  year      = {2019},
  volume    = {11},
  number    = {2},
  pages     = {34--37},
  doi       = {10.1109/LES.2018.2845854},
  url       = {https://doi.org/10.1109/LES.2018.2845854},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/journals/esl/DubeyKVMMTDL19},
  file      = {:Dubey2019-Device_Access_Abstractions_for_Resilient_Information_Architecture_Platform_for_Smart_Grid.pdf:PDF},
  keywords  = {middleware, smartgrid},
  project   = {cps-middleware},
  tag       = {platform,power},
  timestamp = {Fri, 05 Jul 2019 01:00:00 +0200}
}
This letter presents an overview of design mechanisms to abstract device access protocols in the resilient information architecture platform for smart grid, a middleware for developing distributed smart grid applications. These mechanisms are required to decouple the application functionality from the specifics of the device mechanisms built by the device vendors.
- A. Dubey and Garcı́a-Valls Marisol, Introduction to the special issue of the 16th ACM workshop on Adaptive and Reflective Middleware (ARM), Journal of Systems Architecture - Embedded Systems Design, vol. 97, p. 8, 2019.
@article{Dubey2019a,
  author    = {Dubey, Abhishek and Garc{\'{\i}}a{-}Valls, Marisol},
  title     = {Introduction to the special issue of the 16th {ACM} workshop on Adaptive and Reflective Middleware {(ARM)}},
  journal   = {Journal of Systems Architecture - Embedded Systems Design},
  year      = {2019},
  volume    = {97},
  pages     = {8},
  doi       = {10.1016/j.sysarc.2019.03.006},
  url       = {https://doi.org/10.1016/j.sysarc.2019.03.006},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/journals/jsa/DubeyG19},
  file      = {:Dubey2019a-Introduction_to_the_special_issue_of_the_16th_ACM_workshop_on_Adaptive_and_Reflective_Middleware_ARM.pdf:PDF},
  keywords  = {middleware},
  project   = {cps-middleware},
  timestamp = {Tue, 25 Jun 2019 01:00:00 +0200}
}
- S. Basak, F. Sun, S. Sengupta, and A. Dubey, Data-Driven Optimization of Public Transit Schedule, in Big Data Analytics - 7th International Conference, BDA 2019, Ahmedabad, India, 2019, pp. 265–284.
@inproceedings{Basak2019,
  author    = {Basak, Sanchita and Sun, Fangzhou and Sengupta, Saptarshi and Dubey, Abhishek},
  title     = {Data-Driven Optimization of Public Transit Schedule},
  booktitle = {Big Data Analytics - 7th International Conference, {BDA} 2019, Ahmedabad, India},
  year      = {2019},
  pages     = {265--284},
  doi       = {10.1007/978-3-030-37188-3_16},
  url       = {https://doi.org/10.1007/978-3-030-37188-3_16},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/bigda/BasakSSD19},
  category  = {selectiveconference},
  file      = {:Basak2019-Data_Driven_Optimization_of_Public_Transit_Schedule.pdf:PDF},
  keywords  = {transit},
  project   = {smart-cities,smart-transit},
  tag       = {ai4cps,transit},
  timestamp = {Fri, 13 Dec 2019 12:44:00 +0100}
}
Bus transit systems are the backbone of public transportation in the United States. An important indicator of the quality of service in such infrastructures is on-time performance at stops, with published transit schedules playing an integral role governing the level of success of the service. However there are relatively few optimization architectures leveraging stochastic search that focus on optimizing bus timetables with the objective of maximizing probability of bus arrivals at timepoints with delays within desired on-time ranges. In addition to this, there is a lack of substantial research considering monthly and seasonal variations of delay patterns integrated with such optimization strategies. To address these, this paper makes the following contributions to the corpus of studies on transit on-time performance optimization: (a) an unsupervised clustering mechanism is presented which groups months with similar seasonal delay patterns, (b) the problem is formulated as a single-objective optimization task and a greedy algorithm, a genetic algorithm (GA) as well as a particle swarm optimization (PSO) algorithm are employed to solve it, (c) a detailed discussion on empirical results comparing the algorithms are provided and sensitivity analysis on hyper-parameters of the heuristics are presented along with execution times, which will help practitioners looking at similar problems. The analyses conducted are insightful in the local context of improving public transit scheduling in the Nashville metro region as well as informative from a global perspective as an elaborate case study which builds upon the growing corpus of empirical studies using nature-inspired approaches to transit schedule optimization.
- C. Hartsell, N. Mahadevan, S. Ramakrishna, A. Dubey, T. Bapty, T. T. Johnson, X. D. Koutsoukos, J. Sztipanovits, and G. Karsai, Model-based design for CPS with learning-enabled components, in Proceedings of the Workshop on Design Automation for CPS and IoT, DESTION@CPSIoTWeek 2019, Montreal, QC, Canada, 2019, pp. 1–9.
@inproceedings{Hartsell2019,
  author    = {Hartsell, Charles and Mahadevan, Nagabhushan and Ramakrishna, Shreyas and Dubey, Abhishek and Bapty, Theodore and Johnson, Taylor T. and Koutsoukos, Xenofon D. and Sztipanovits, Janos and Karsai, Gabor},
  title     = {Model-based design for {CPS} with learning-enabled components},
  booktitle = {Proceedings of the Workshop on Design Automation for {CPS} and IoT, DESTION@CPSIoTWeek 2019, Montreal, QC, Canada},
  year      = {2019},
  pages     = {1--9},
  month     = apr,
  doi       = {10.1145/3313151.3313166},
  url       = {https://doi.org/10.1145/3313151.3313166},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/cpsweek/HartsellMRDBJKS19},
  category  = {workshop},
  file      = {:Hartsell2019-Model-based_design_for_CPS_with_learning-enabled_components.pdf:PDF},
  keywords  = {assurance},
  project   = {cps-autonomy},
  tag       = {ai4cps},
  timestamp = {Wed, 20 Nov 2019 00:00:00 +0100}
}
Recent advances in machine learning led to the appearance of Learning-Enabled Components (LECs) in Cyber-Physical Systems. LECs are being evaluated and used for various, complex functions including perception and control. However, very little tool support is available for design automation in such systems. This paper introduces an integrated toolchain that supports the architectural modeling of CPS with LECs, but also has extensive support for the engineering and integration of LECs, including support for training data collection, LEC training, LEC evaluation and verification, and system software deployment. Additionally, the toolsuite supports the modeling and analysis of safety cases - a critical part of the engineering process for mission and safety critical systems.
- Y. Zhang, S. Eisele, A. Dubey, A. Laszka, and A. K. Srivastava, Cyber-Physical Simulation Platform for Security Assessment of Transactive Energy Systems, in 7th Workshop on Modeling and Simulation of Cyber-Physical Energy Systems, MSCPES@CPSIoTWeek 2019, Montreal, QC, Canada, 2019, pp. 1–6.
@inproceedings{Zhang2019a,
  author    = {Zhang, Yue and Eisele, Scott and Dubey, Abhishek and Laszka, Aron and Srivastava, Anurag K.},
  title     = {Cyber-Physical Simulation Platform for Security Assessment of Transactive Energy Systems},
  booktitle = {7th Workshop on Modeling and Simulation of Cyber-Physical Energy Systems, MSCPES@CPSIoTWeek 2019, Montreal, QC, Canada},
  year      = {2019},
  pages     = {1--6},
  doi       = {10.1109/MSCPES.2019.8738802},
  url       = {https://doi.org/10.1109/MSCPES.2019.8738802},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/cpsweek/ZhangEDLS19},
  category  = {workshop},
  file      = {:Zhang2019a-Cyber_Physical_Simulation_Platform_for_Security_Assessment_of_Transactive_Energy_Systems.pdf:PDF},
  keywords  = {transactive},
  project   = {transactive-energy,cps-reliability},
  tag       = {platform,decentralization,power},
  timestamp = {Wed, 16 Oct 2019 14:14:56 +0200}
}
Transactive energy systems (TES) are emerging as a transformative solution for the problems that distribution system operators face due to an increase in the use of distributed energy resources and rapid growth in scalability of managing active distribution system (ADS). On the one hand, these changes pose a decentralized power system control problem, requiring strategic control to maintain reliability and resiliency for the community and for the utility. On the other hand, they require robust financial markets while allowing participation from diverse prosumers. To support the computing and flexibility requirements of TES while preserving privacy and security, distributed software platforms are required. In this paper, we enable the study and analysis of security concerns by developing Transactive Energy Security Simulation Testbed (TESST), a TES testbed for simulating various cyber attacks. In this work, the testbed is used for TES simulation with centralized clearing market, highlighting weaknesses in a centralized system. Additionally, we present a blockchain enabled decentralized market solution supported by distributed computing for TES, which on one hand can alleviate some of the problems that we identify, but on the other hand, may introduce newer issues. Future study of these differing paradigms is necessary and will continue as we develop our security simulation testbed.
- S. Nannapaneni and A. Dubey, Towards demand-oriented flexible rerouting of public transit under uncertainty, in Proceedings of the Fourth Workshop on International Science of Smart City Operations and Platforms Engineering, SCOPE@CPSIoTWeek 2019, Montreal, QC, Canada, 2019, pp. 35–40.
@inproceedings{Nannapaneni2019,
  author    = {Nannapaneni, Saideep and Dubey, Abhishek},
  title     = {Towards demand-oriented flexible rerouting of public transit under uncertainty},
  booktitle = {Proceedings of the Fourth Workshop on International Science of Smart City Operations and Platforms Engineering, SCOPE@CPSIoTWeek 2019, Montreal, QC, Canada},
  year      = {2019},
  pages     = {35--40},
  doi       = {10.1145/3313237.3313302},
  url       = {https://doi.org/10.1145/3313237.3313302},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/cpsweek/NannapaneniD19},
  category  = {workshop},
  file      = {:Nannapaneni2019-Towards_demand-oriented_flexible_rerouting_of_public_transit_under_uncertainty.pdf:PDF},
  keywords  = {transit},
  project   = {smart-transit,smart-cities},
  tag       = {transit},
  timestamp = {Tue, 10 Sep 2019 13:47:28 +0200},
}
This paper proposes a flexible rerouting strategy for the public transit to accommodate the spatio-temporal variation in the travel demand. Transit routes are typically static in nature, i.e., the buses serve well-defined routes; this results in people living away from the bus routes choosing alternate transit modes such as private automotive vehicles, resulting in ever-increasing traffic congestion. In the flex-transit mode, we reroute the buses to accommodate high travel demand areas away from the static routes considering its spatio-temporal variation. We perform clustering to identify several flex stops; these are stops not on the static routes, but with high travel demand around them. We divide the bus stops on the static routes into critical and non-critical bus stops; critical bus stops refer to transfer points, where people change bus routes to reach their destinations. In the existing static scheduling process, some slack time is provided at the end of each trip to account for any travel delays. Thus, the additional travel time incurred due to taking flexible routes is constrained to be less than the available slack time. We use the percent increase in travel demand to analyze the effectiveness of the rerouting process. The proposed methodology is demonstrated using real-world travel data for Route 7 operated by the Nashville Metropolitan Transit Authority (MTA).
- A. Mavridou, A. Laszka, E. Stachtiari, and A. Dubey, VeriSolid: Correct-by-Design Smart Contracts for Ethereum, in Financial Cryptography and Data Security - 23rd International Conference, FC 2019, Frigate Bay, St. Kitts and Nevis, Revised Selected Papers, 2019, pp. 446–465.
@inproceedings{Mavridou2019,
  author    = {Mavridou, Anastasia and Laszka, Aron and Stachtiari, Emmanouela and Dubey, Abhishek},
  title     = {{VeriSolid}: Correct-by-Design Smart Contracts for {Ethereum}},
  booktitle = {Financial Cryptography and Data Security - 23rd International Conference, {FC} 2019, Frigate Bay, St. Kitts and Nevis, Revised Selected Papers},
  year      = {2019},
  pages     = {446--465},
  doi       = {10.1007/978-3-030-32101-7_27},
  url       = {https://doi.org/10.1007/978-3-030-32101-7_27},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/fc/MavridouLSD19},
  category  = {selectiveconference},
  file      = {:Mavridou2019-VeriSolid_Correct_by_Design_Smart_Contracts_for_Ethereum.pdf:PDF},
  keywords  = {blockchain},
  project   = {cps-blockchains},
  tag       = {platform,decentralization},
  timestamp = {Mon, 14 Oct 2019 14:51:20 +0200},
}
The adoption of blockchain based distributed ledgers is growing fast due to their ability to provide reliability, integrity, and auditability without trusted entities. One of the key capabilities of these emerging platforms is the ability to create self-enforcing smart contracts. However, the development of smart contracts has proven to be error-prone in practice, and as a result, contracts deployed on public platforms are often riddled with security vulnerabilities. This issue is exacerbated by the design of these platforms, which forbids updating contract code and rolling back malicious transactions. In light of this, it is crucial to ensure that a smart contract is secure before deploying it and trusting it with significant amounts of cryptocurrency. To this end, we introduce the VeriSolid framework for the formal verification of contracts that are specified using a transition-system based model with rigorous operational semantics. Our model-based approach allows developers to reason about and verify contract behavior at a high level of abstraction. VeriSolid allows the generation of Solidity code from the verified models, which enables the correct-by-design development of smart contracts.
- A. Mukhopadhyay, G. Pettet, C. Samal, A. Dubey, and Y. Vorobeychik, An online decision-theoretic pipeline for responder dispatch, in Proceedings of the 10th ACM/IEEE International Conference on Cyber-Physical Systems, ICCPS 2019, Montreal, QC, Canada, 2019, pp. 185–196.
@inproceedings{Mukhopadhyay2019,
  author    = {Mukhopadhyay, Ayan and Pettet, Geoffrey and Samal, Chinmaya and Dubey, Abhishek and Vorobeychik, Yevgeniy},
  title     = {An online decision-theoretic pipeline for responder dispatch},
  booktitle = {Proceedings of the 10th {ACM/IEEE} International Conference on Cyber-Physical Systems, {ICCPS} 2019, Montreal, QC, Canada},
  year      = {2019},
  pages     = {185--196},
  doi       = {10.1145/3302509.3311055},
  url       = {https://doi.org/10.1145/3302509.3311055},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/iccps/MukhopadhyayPSD19},
  category  = {selectiveconference},
  file      = {:Mukhopadhyay2019-An_Online_Decision_Theoretic_Pipeline_for_Responder_Dispatch.pdf:PDF},
  keywords  = {emergency},
  project   = {smart-cities,smart-emergency-response},
  tag       = {ai4cps,incident},
  timestamp = {Sun, 07 Apr 2019 16:25:36 +0200},
}
The problem of dispatching emergency responders to service traffic accidents, fire, distress calls and crimes plagues urban areas across the globe. While such problems have been extensively looked at, most approaches are offline. Such methodologies fail to capture the dynamically changing environments under which critical emergency response occurs, and therefore, fail to be implemented in practice. Any holistic approach towards creating a pipeline for effective emergency response must also look at other challenges that it subsumes - predicting when and where incidents happen and understanding the changing environmental dynamics. We describe a system that collectively deals with all these problems in an online manner, meaning that the models get updated with streaming data sources. We highlight why such an approach is crucial to the effectiveness of emergency response, and present an algorithmic framework that can compute promising actions for a given decision-theoretic model for responder dispatch. We argue that carefully crafted heuristic measures can balance the trade-off between computational time and the quality of solutions achieved and highlight why such an approach is more scalable and tractable than traditional approaches. We also present an online mechanism for incident prediction, as well as an approach based on recurrent neural networks for learning and predicting environmental features that affect responder dispatch. We compare our methodology with prior state-of-the-art and existing dispatch strategies in the field, which show that our approach results in a reduction in response time with a drastic reduction in computational time.
- G. Pettet, A. Mukhopadhyay, C. Samal, A. Dubey, and Y. Vorobeychik, Incident management and analysis dashboard for fire departments: ICCPS demo, in Proceedings of the 10th ACM/IEEE International Conference on Cyber-Physical Systems, ICCPS 2019, Montreal, QC, Canada, 2019, pp. 336–337.
@inproceedings{Pettet2019,
  author    = {Pettet, Geoffrey and Mukhopadhyay, Ayan and Samal, Chinmaya and Dubey, Abhishek and Vorobeychik, Yevgeniy},
  title     = {Incident management and analysis dashboard for fire departments: {ICCPS} demo},
  booktitle = {Proceedings of the 10th {ACM/IEEE} International Conference on Cyber-Physical Systems, {ICCPS} 2019, Montreal, QC, Canada},
  year      = {2019},
  pages     = {336--337},
  doi       = {10.1145/3302509.3313329},
  url       = {https://doi.org/10.1145/3302509.3313329},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/iccps/PettetMSDV19},
  category  = {poster},
  file      = {:Pettet2019-Incident_management_and_analysis_dashboard_for_fire_departments_ICCPS_demo.pdf:PDF},
  keywords  = {emergency},
  project   = {smart-cities,smart-emergency-response},
  tag       = {incident},
  timestamp = {Sun, 07 Apr 2019 16:25:36 +0200},
}
This work presents a dashboard tool that helps emergency responders analyze and manage spatial-temporal incidents like crime and traffic accidents. It uses state-of-the-art statistical models to learn incident probabilities based on factors such as prior incidents, time and weather. The dashboard can then present historic and predicted incident distributions. It also allows responders to analyze how moving or adding depots (stations for emergency responders) affects average response times, and can make dispatching recommendations based on heuristics. Broadly, it is a one-stop tool that helps responders visualize historical data as well as plan for and respond to incidents.
- C. Hartsell, N. Mahadevan, S. Ramakrishna, A. Dubey, T. Bapty, and G. Karsai, A CPS toolchain for learning-based systems: demo abstract, in Proceedings of the 10th ACM/IEEE International Conference on Cyber-Physical Systems, ICCPS 2019, Montreal, QC, Canada, 2019, pp. 342–343.
@inproceedings{Hartsell2019a,
  author    = {Hartsell, Charles and Mahadevan, Nagabhushan and Ramakrishna, Shreyas and Dubey, Abhishek and Bapty, Theodore and Karsai, Gabor},
  title     = {A {CPS} toolchain for learning-based systems: demo abstract},
  booktitle = {Proceedings of the 10th {ACM/IEEE} International Conference on Cyber-Physical Systems, {ICCPS} 2019, Montreal, QC, Canada},
  year      = {2019},
  pages     = {342--343},
  doi       = {10.1145/3302509.3313332},
  url       = {https://doi.org/10.1145/3302509.3313332},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/iccps/HartsellMRDBK19},
  category  = {poster},
  file      = {:Hartsell2019a-A_CPS_Toolchain_for_Learning_Based_Systems_Demo_Abstract.pdf:PDF},
  keywords  = {assurance},
  project   = {cps-autonomy},
  tag       = {ai4cps},
  timestamp = {Sun, 07 Apr 2019 16:25:36 +0200},
}
Cyber-Physical Systems (CPS) are expected to perform tasks with ever-increasing levels of autonomy, often in highly uncertain environments. Traditional design techniques based on domain knowledge and analytical models are often unable to cope with epistemic uncertainties present in these systems. This challenge, combined with recent advances in machine learning, has led to the emergence of Learning-Enabled Components (LECs) in CPS. However, very little tool support is available for design automation of these systems. In this demonstration, we introduce an integrated toolchain for the development of CPS with LECs with support for architectural modeling, data collection, system software deployment, and LEC training, evaluation, and verification. Additionally, the toolchain supports the modeling and analysis of safety cases - a critical part of the engineering process for mission and safety critical systems.
- S. Shekhar, A. Chhokra, H. Sun, A. Gokhale, A. Dubey, and X. D. Koutsoukos, Supporting fog/edge-based cognitive assistance IoT services for the visually impaired: poster abstract, in Proceedings of the International Conference on Internet of Things Design and Implementation, IoTDI 2019, Montreal, QC, Canada, 2019, pp. 275–276.
@inproceedings{Shekhar2019,
  author    = {Shekhar, Shashank and Chhokra, Ajay and Sun, Hongyang and Gokhale, Aniruddha and Dubey, Abhishek and Koutsoukos, Xenofon D.},
  title     = {Supporting fog/edge-based cognitive assistance {IoT} services for the visually impaired: poster abstract},
  booktitle = {Proceedings of the International Conference on Internet of Things Design and Implementation, IoTDI 2019, Montreal, QC, Canada},
  year      = {2019},
  pages     = {275--276},
  doi       = {10.1145/3302505.3312592},
  url       = {https://doi.org/10.1145/3302505.3312592},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/iotdi/ShekharCSGDK19},
  category  = {poster},
  file      = {:Shekhar2019-Supporting_fog_edge-based_cognitive_assistance_IoT_services_for_the_visually_impaired_poster_abstract.pdf:PDF},
  keywords  = {middleware},
  project   = {cps-middleware,smart-cities},
  tag       = {platform,transit},
  timestamp = {Fri, 29 Mar 2019 00:00:00 +0100},
}
The fog/edge computing paradigm is increasingly being adopted to support a variety of latency-sensitive IoT services, such as cognitive assistance to the visually impaired, due to its ability to assure the latency requirements of these services while continuing to benefit from the elastic properties of cloud computing. However, user mobility in such applications imposes a new set of challenges that must be addressed before such applications can be deployed and benefit the society. This paper presents ongoing work on a dynamic resource management middleware called URMILA that addresses these concerns. URMILA ensures that the service remains available despite user mobility and ensuing wireless connectivity issues by opportunistically leveraging both fog and edge resources in such a way that the latency requirements of the service are met while ensuring longevity of the battery life on the edge devices. We present the design principles of URMILA’s capabilities and a real-world cognitive assistance application that we have built and are testing on an emulated but realistic IoT testbed.
- P. Ghosh, S. Eisele, A. Dubey, M. Metelko, I. Madari, P. Völgyesi, and G. Karsai, On the Design of Fault-Tolerance in a Decentralized Software Platform for Power Systems, in IEEE 22nd International Symposium on Real-Time Distributed Computing, ISORC 2019, Valencia, Spain, 2019, pp. 52–60.
@inproceedings{Ghosh2019,
  author    = {Ghosh, Purboday and Eisele, Scott and Dubey, Abhishek and Metelko, Mary and Madari, Istv{\'{a}}n and V{\"{o}}lgyesi, P{\'{e}}ter and Karsai, Gabor},
  title     = {On the Design of Fault-Tolerance in a Decentralized Software Platform for Power Systems},
  booktitle = {{IEEE} 22nd International Symposium on Real-Time Distributed Computing, {ISORC} 2019, Valencia, Spain},
  year      = {2019},
  pages     = {52--60},
  doi       = {10.1109/ISORC.2019.00018},
  url       = {https://doi.org/10.1109/ISORC.2019.00018},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/isorc/GhoshEDMMVK19},
  category  = {selectiveconference},
  file      = {:Ghosh2019-On_the_Design_of_Fault-Tolerance_in_a_Decentralized_Software_Platform_for_Power_Systems.pdf:PDF},
  keywords  = {middleware},
  project   = {cps-middleware,cps-reliability},
  tag       = {platform,decentralization,power},
  timestamp = {Wed, 16 Oct 2019 14:14:53 +0200},
}
The vision of the ‘Smart Grid’ assumes a distributed real-time embedded system that implements various monitoring and control functions. As the reliability of the power grid is critical to modern society, the software supporting the grid must support fault tolerance and resilience in the resulting cyber-physical system. This paper describes the fault-tolerance features of a software framework called Resilient Information Architecture Platform for Smart Grid (RIAPS). The framework supports various mechanisms for fault detection and mitigation and works in concert with the applications that implement the grid-specific functions. The paper discusses the design philosophy for and the implementation of the fault tolerance features and presents an application example to show how it can be used to build highly resilient systems.
- S. Eisele, P. Ghosh, K. Campanelli, A. Dubey, and G. Karsai, Demo: Transactive Energy Application with RIAPS, in IEEE 22nd International Symposium on Real-Time Distributed Computing, ISORC 2019, Valencia, Spain, May 7-9, 2019, 2019, pp. 85–86.
@inproceedings{Eisele2019,
  author    = {Eisele, Scott and Ghosh, Purboday and Campanelli, Keegan and Dubey, Abhishek and Karsai, Gabor},
  title     = {Demo: Transactive Energy Application with {RIAPS}},
  booktitle = {{IEEE} 22nd International Symposium on Real-Time Distributed Computing, {ISORC} 2019, Valencia, Spain, May 7-9, 2019},
  year      = {2019},
  pages     = {85--86},
  doi       = {10.1109/ISORC.2019.00024},
  url       = {https://doi.org/10.1109/ISORC.2019.00024},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/isorc/EiseleGCDK19},
  category  = {poster},
  file      = {:Eisele2019-Demo_Transactive_Energy_Application_with_RIAPS.pdf:PDF},
  keywords  = {transactive},
  project   = {transactive-energy},
  tag       = {decentralization,power},
  timestamp = {Wed, 16 Oct 2019 14:14:53 +0200},
}
The modern electric grid is a complex, decentralized cyber-physical system requiring higher-level control techniques to balance the demand and supply of energy to optimize the overall energy usage. The concept of Transactive Energy utilizes distributed system principles to address this challenge. In this demonstration we show the usage of the distributed application management platform RIAPS in the implementation of one such Transactive Energy approach to control elements of a power system, which runs as a simulation using the Gridlab-d simulation solver.
- M. P. Burruss, S. Ramakrishna, G. Karsai, and A. Dubey, DeepNNCar: A Testbed for Deploying and Testing Middleware Frameworks for Autonomous Robots, in IEEE 22nd International Symposium on Real-Time Distributed Computing, ISORC 2019, Valencia, Spain, May 7-9, 2019, 2019, pp. 87–88.
@inproceedings{Burruss2019,
  author    = {Burruss, Matthew P. and Ramakrishna, Shreyas and Karsai, Gabor and Dubey, Abhishek},
  title     = {{DeepNNCar}: {A} Testbed for Deploying and Testing Middleware Frameworks for Autonomous Robots},
  booktitle = {{IEEE} 22nd International Symposium on Real-Time Distributed Computing, {ISORC} 2019, Valencia, Spain, May 7-9, 2019},
  year      = {2019},
  pages     = {87--88},
  doi       = {10.1109/ISORC.2019.00025},
  url       = {https://doi.org/10.1109/ISORC.2019.00025},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/isorc/BurrussRKD19},
  category  = {poster},
  file      = {:Burruss2019-DeepNNCar_Testbed_for_Deploying_and_Testing_Middleware_Frameworks_for_Autonomous_Robots.pdf:PDF},
  keywords  = {assurance},
  project   = {cps-autonomy},
  tag       = {ai4cps},
  timestamp = {Wed, 16 Oct 2019 14:14:53 +0200},
}
This demo showcases the features of an adaptive middleware framework for resource constrained autonomous robots like DeepNNCar (Figure 1). These robots use Learning Enabled Components (LECs), trained with deep learning models to perform control actions. However, these LECs do not provide any safety guarantees and testing them is challenging. To overcome these challenges, we have developed an adaptive middleware framework that (1) augments the LEC with safety controllers that can use different weighted simplex strategies to improve the system's safety guarantees, and (2) includes a resource manager to monitor the resource parameters (temperature, CPU Utilization), and offload tasks at runtime. Using DeepNNCar we will demonstrate the framework and its capability to adaptively switch between the controllers and strategies based on its safety and speed performance.
- S. Ramakrishna, A. Dubey, M. P. Burruss, C. Hartsell, N. Mahadevan, S. Nannapaneni, A. Laszka, and G. Karsai, Augmenting Learning Components for Safety in Resource Constrained Autonomous Robots, in IEEE 22nd International Symposium on Real-Time Distributed Computing, ISORC 2019, Valencia, Spain, May 7-9, 2019, 2019, pp. 108–117.
@inproceedings{Ramakrishna2019,
  author    = {Ramakrishna, Shreyas and Dubey, Abhishek and Burruss, Matthew P. and Hartsell, Charles and Mahadevan, Nagabhushan and Nannapaneni, Saideep and Laszka, Aron and Karsai, Gabor},
  title     = {Augmenting Learning Components for Safety in Resource Constrained Autonomous Robots},
  booktitle = {{IEEE} 22nd International Symposium on Real-Time Distributed Computing, {ISORC} 2019, Valencia, Spain, May 7-9, 2019},
  year      = {2019},
  pages     = {108--117},
  doi       = {10.1109/ISORC.2019.00032},
  url       = {https://doi.org/10.1109/ISORC.2019.00032},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/isorc/RamakrishnaDBHM19},
  category  = {selectiveconference},
  file      = {:Ramakrishna2019-Augmenting_Learning_Components_for_Safety_in_Resource_Constrained_Autonomous_Robots.pdf:PDF},
  keywords  = {assurance},
  project   = {cps-autonomy},
  tag       = {ai4cps},
  timestamp = {Wed, 16 Oct 2019 14:14:53 +0200},
}
Learning enabled components (LECs) trained using data-driven algorithms are increasingly being used in autonomous robots commonly found in factories, hospitals, and educational laboratories. However, these LECs do not provide any safety guarantees, and testing them is challenging. In this paper, we introduce a framework that performs weighted simplex strategy based supervised safety control, resource management and confidence estimation of autonomous robots. Specifically, we describe two weighted simplex strategies: (a) simple weighted simplex strategy (SW-Simplex) that computes a weighted controller output by comparing the decisions between a safety supervisor and an LEC, and (b) a context-sensitive weighted simplex strategy (CSW-Simplex) that computes a context-aware weighted controller output. We use reinforcement learning to learn the contextual weights. We also introduce a system monitor that uses the current state information and a Bayesian network model learned from past data to estimate the probability of the robotic system staying in the safe working region. To aid resource constrained robots in performing complex computations of these weighted simplex strategies, we describe a resource manager that offloads tasks to available fog nodes. The paper also describes a hardware testbed called DeepNNCar, which is a low cost resource-constrained RC car, built to perform autonomous driving. Using the hardware, we show that both SW-Simplex and CSW-Simplex have 40% and 60% fewer safety violations, while demonstrating a higher optimized speed of around 0.40 m/s during indoor driving than the original system (using only LECs).
- S. Shekhar, A. Chhokra, H. Sun, A. Gokhale, A. Dubey, and X. D. Koutsoukos, URMILA: A Performance and Mobility-Aware Fog/Edge Resource Management Middleware, in IEEE 22nd International Symposium on Real-Time Distributed Computing, ISORC 2019, Valencia, Spain, May 7-9, 2019, 2019, pp. 118–125.
@inproceedings{Shekhar2019a,
  author    = {Shekhar, Shashank and Chhokra, Ajay and Sun, Hongyang and Gokhale, Aniruddha and Dubey, Abhishek and Koutsoukos, Xenofon D.},
  title     = {{URMILA:} {A} Performance and Mobility-Aware Fog/Edge Resource Management Middleware},
  booktitle = {{IEEE} 22nd International Symposium on Real-Time Distributed Computing, {ISORC} 2019, Valencia, Spain, May 7-9, 2019},
  year      = {2019},
  pages     = {118--125},
  doi       = {10.1109/ISORC.2019.00033},
  url       = {https://doi.org/10.1109/ISORC.2019.00033},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/isorc/ShekharCSGDK19},
  category  = {selectiveconference},
  file      = {:Shekhar2019a-URMILA_A_Performance_and_Mobility-Aware_Fog_Edge_Resource_Management_Middleware.pdf:PDF},
  keywords  = {middleware, performance},
  project   = {cps-middleware},
  timestamp = {Wed, 16 Oct 2019 14:14:53 +0200},
}
Fog/Edge computing is increasingly used to support a wide range of latency-sensitive Internet of Things (IoT) applications due to its elastic computing capabilities that are offered closer to the users. Despite this promise, IoT applications with user mobility face many challenges since offloading the application functionality from the edge to the fog may not always be feasible due to the intermittent connectivity to the fog, and could require application migration among fog nodes due to user mobility. Likewise, executing the applications exclusively on the edge may not be feasible due to resource constraints and battery drain. To address these challenges, this paper describes URMILA, a resource management middleware that makes effective tradeoffs between using fog and edge resources while ensuring that the latency requirements of the IoT applications are met. We evaluate URMILA in the context of a real-world use case on an emulated but realistic IoT testbed.
- T. Krentz, A. Dubey, and G. Karsai, Short Paper: Towards An Edge-Located Time-Series Database, in IEEE 22nd International Symposium on Real-Time Distributed Computing, ISORC 2019, Valencia, Spain, May 7-9, 2019, 2019, pp. 151–154.
@inproceedings{Krentz2019,
  author    = {Krentz, Timothy and Dubey, Abhishek and Karsai, Gabor},
  title     = {Short Paper: Towards An Edge-Located Time-Series Database},
  booktitle = {{IEEE} 22nd International Symposium on Real-Time Distributed Computing, {ISORC} 2019, Valencia, Spain, May 7-9, 2019},
  year      = {2019},
  pages     = {151--154},
  doi       = {10.1109/ISORC.2019.00037},
  url       = {https://doi.org/10.1109/ISORC.2019.00037},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/isorc/KrentzDK19},
  category  = {selectiveconference},
  file      = {:Krentz2019-Towards_An_Edge-Located_Time-Series_Database.pdf:PDF},
  keywords  = {middleware},
  project   = {cps-middleware},
  tag       = {platform},
  timestamp = {Wed, 16 Oct 2019 14:14:53 +0200},
}
Smart infrastructure demands resilient data storage, and emerging applications execute queries on this data over time. Typically, time-series databases serve these queries; however, cloud-based time-series storage can be prohibitively expensive. As smart devices proliferate, the amount of computing power and memory available in our connected infrastructure provides the opportunity to move resilient time-series data storage and analytics to the edge. This paper proposes time-series storage in a Distributed Hash Table (DHT), and a novel key-generation technique that provides time-indexed reads and writes for key-value pairs. Experimental results show this technique meets demands for smart infrastructure situations.
- G. Pettet, S. Sahoo, and A. Dubey, Towards an Adaptive Multi-Modal Traffic Analytics Framework at the Edge, in IEEE International Conference on Pervasive Computing and Communications Workshops, PerCom Workshops 2019, Kyoto, Japan, March 11-15, 2019, 2019, pp. 511–516.
@inproceedings{Pettet2019a,
  author    = {Pettet, Geoffrey and Sahoo, Saroj and Dubey, Abhishek},
  title     = {Towards an Adaptive Multi-Modal Traffic Analytics Framework at the Edge},
  booktitle = {{IEEE} International Conference on Pervasive Computing and Communications Workshops, PerCom Workshops 2019, Kyoto, Japan, March 11-15, 2019},
  year      = {2019},
  pages     = {511--516},
  doi       = {10.1109/PERCOMW.2019.8730577},
  url       = {https://doi.org/10.1109/PERCOMW.2019.8730577},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/percom/PettetSD19},
  category  = {workshop},
  file      = {:Pettet2019a-Towards_an_Adaptive_Multi-Modal_Traffic_Analytics_Framework_at_the_Edge.pdf:PDF},
  keywords  = {middleware, transit},
  project   = {cps-middleware,smart-transit,smart-cities},
  tag       = {platform,incident,transit},
  timestamp = {Wed, 16 Oct 2019 14:14:54 +0200},
}
The Internet of Things (IoT) requires distributed, large scale data collection via geographically distributed devices. While IoT devices typically send data to the cloud for processing, this is problematic for bandwidth constrained applications. Fog and edge computing (processing data near where it is gathered, and sending only results to the cloud) has become more popular, as it lowers network overhead and latency. Edge computing often uses devices with low computational capacity, therefore service frameworks and middleware are needed to efficiently compose services. While many frameworks use a top-down perspective, quality of service is an emergent property of the entire system and often requires a bottom up approach. We define services as multi-modal, allowing resource and performance tradeoffs. Different modes can be composed to meet an application’s high level goal, which is modeled as a function. We examine a case study for counting vehicle traffic through intersections in Nashville. We apply object detection and tracking to video of the intersection, which must be performed at the edge due to privacy and bandwidth constraints. We explore the hardware and software architectures, and identify the various modes. This paper lays the foundation to formulate the online optimization problem presented by the system which makes tradeoffs between the quantity of services and their quality constrained by available resources.
- C. Hartsell, N. Mahadevan, S. Ramakrishna, A. Dubey, T. Bapty, T. T. Johnson, X. D. Koutsoukos, J. Sztipanovits, and G. Karsai, CPS Design with Learning-Enabled Components: A Case Study, in Proceedings of the 30th International Workshop on Rapid System Prototyping, RSP 2019, New York, NY, USA, October 17-18, 2019, 2019, pp. 57–63.
@inproceedings{Hartsell2019b,
  author    = {Hartsell, Charles and Mahadevan, Nagabhushan and Ramakrishna, Shreyas and Dubey, Abhishek and Bapty, Theodore and Johnson, Taylor T. and Koutsoukos, Xenofon D. and Sztipanovits, Janos and Karsai, Gabor},
  title     = {{CPS} Design with Learning-Enabled Components: {A} Case Study},
  booktitle = {Proceedings of the 30th International Workshop on Rapid System Prototyping, {RSP} 2019, New York, NY, USA, October 17-18, 2019},
  year      = {2019},
  pages     = {57--63},
  doi       = {10.1145/3339985.3358491},
  url       = {https://doi.org/10.1145/3339985.3358491},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/rsp/HartsellMRDBJKS19},
  category  = {selectiveconference},
  file      = {:Hartsell2019b-CPS_Design_with_Learning-Enabled_Components_A_Case_Study.pdf:PDF},
  keywords  = {assurance},
  project   = {cps-autonomy},
  tag       = {ai4cps},
  timestamp = {Thu, 28 Nov 2019 12:43:50 +0100},
}
Cyber-Physical Systems (CPS) are used in many applications where they must perform complex tasks with a high degree of autonomy in uncertain environments. Traditional design flows based on domain knowledge and analytical models are often impractical for tasks such as perception, planning in uncertain environments, control with ill-defined objectives, etc. Machine learning based techniques have demonstrated good performance for such difficult tasks, leading to the introduction of Learning-Enabled Components (LEC) in CPS. Model based design techniques have been successful in the development of traditional CPS, and toolchains which apply these techniques to CPS with LECs are being actively developed. As LECs are critically dependent on training and data, one of the key challenges is to build design automation for them. In this paper, we examine the development of an autonomous Unmanned Underwater Vehicle (UUV) using the Assurance-based Learning-enabled Cyber-physical systems (ALC) Toolchain. Each stage of the development cycle is described including architectural modeling, data collection, LEC training, LEC evaluation and verification, and system-level assurance.
- J. P. Talusan, F. Tiausas, K. Yasumoto, M. Wilbur, G. Pettet, A. Dubey, and S. Bhattacharjee, Smart Transportation Delay and Resiliency Testbed Based on Information Flow of Things Middleware, in IEEE International Conference on Smart Computing, SMARTCOMP 2019, Washington, DC, USA, June 12-15, 2019, 2019, pp. 13–18.
@inproceedings{Talusan2019,
  author    = {Talusan, Jose Paolo and Tiausas, Francis and Yasumoto, Keiichi and Wilbur, Michael and Pettet, Geoffrey and Dubey, Abhishek and Bhattacharjee, Shameek},
  title     = {Smart Transportation Delay and Resiliency Testbed Based on {Information Flow of Things} Middleware},
  booktitle = {{IEEE} International Conference on Smart Computing, {SMARTCOMP} 2019, Washington, DC, USA, June 12-15, 2019},
  year      = {2019},
  pages     = {13--18},
  doi       = {10.1109/SMARTCOMP.2019.00022},
  url       = {https://doi.org/10.1109/SMARTCOMP.2019.00022},
  category  = {workshop},
  keywords  = {middleware, transit},
  tag       = {platform,incident,transit},
  project   = {cps-middleware,smart-transit},
  file      = {:Talusan2019-Smart_Transportation_Delay_and_Resiliency_Testbed_Based_on_Information_Flow_of_Things_Middleware.pdf:PDF},
  biburl    = {https://dblp.org/rec/bib/conf/smartcomp/TalusanTYWPDB19},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  timestamp = {Wed, 16 Oct 2019 14:14:54 +0200}
}
Edge and Fog computing paradigms are used to process big data generated by the increasing number of IoT devices. These paradigms have enabled cities to become smarter in various aspects via real-time data-driven applications. While these have addressed some flaws of cloud computing some challenges remain particularly in terms of privacy and security. We create a testbed based on a distributed processing platform called the Information flow of Things (IFoT) middleware. We briefly describe a decentralized traffic speed query and routing service implemented on this framework testbed. We configure the testbed to test countermeasure systems that aim to address the security challenges faced by prior paradigms. Using this testbed, we investigate a novel decentralized anomaly detection approach for time-sensitive distributed smart transportation systems.
- S. Basak, S. Sengupta, and A. Dubey, Mechanisms for Integrated Feature Normalization and Remaining Useful Life Estimation Using LSTMs Applied to Hard-Disks, in IEEE International Conference on Smart Computing, SMARTCOMP 2019, Washington, DC, USA, 2019, pp. 208–216.
@inproceedings{Basak2019a,
  author    = {Basak, Sanchita and Sengupta, Saptarshi and Dubey, Abhishek},
  title     = {Mechanisms for Integrated Feature Normalization and Remaining Useful Life Estimation Using {LSTMs} Applied to Hard-Disks},
  booktitle = {{IEEE} International Conference on Smart Computing, {SMARTCOMP} 2019, Washington, DC, USA},
  year      = {2019},
  month     = jun,
  pages     = {208--216},
  doi       = {10.1109/SMARTCOMP.2019.00055},
  url       = {https://doi.org/10.1109/SMARTCOMP.2019.00055},
  category  = {selectiveconference},
  keywords  = {reliability},
  tag       = {ai4cps},
  project   = {cps-reliability},
  file      = {:Basak2019a-Mechanisms_for_Integrated_Feature_Normalization_and_Remaining_Useful_Life_Estimation_Using_LSTMs_Applied_to_Hard-Disks.pdf:PDF},
  biburl    = {https://dblp.org/rec/bib/conf/smartcomp/BasakSD19},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  timestamp = {Wed, 16 Oct 2019 14:14:54 +0200}
}
In this paper we focus on application of data-driven methods for remaining useful life estimation in components where past failure data is not uniform across devices, i.e. there is a high variance in the minimum and maximum value of the key parameters. The system under study is the hard disks used in a computing cluster. The data used for analysis is provided by Backblaze as discussed later. In the article, we discuss the architecture of the long short term neural network used and describe the mechanisms to choose the various hyper-parameters. Further, we describe the challenges faced in extracting effective training sets from highly unorganized and class-imbalanced big data and establish methods for online predictions with extensive data pre-processing, feature extraction and validation through online simulation sets with unknown remaining useful lives of the hard disks. Our algorithm performs especially well in predicting RUL near the critical zone of a device approaching failure. With the proposed approach we are able to predict whether a disk is going to fail in the next ten days with an average precision of 0.8435. We also show that the architecture trained on a particular model is generalizable and transferable as it can be used to predict RUL for devices in other models from the same manufacturer.
- M. Wilbur, A. Dubey, B. Leão, and S. Bhattacharjee, A Decentralized Approach for Real Time Anomaly Detection in Transportation Networks, in IEEE International Conference on Smart Computing, SMARTCOMP 2019, Washington, DC, USA, 2019, pp. 274–282.
@inproceedings{Wilbur2019,
  author    = {Wilbur, Michael and Dubey, Abhishek and Le{\~{a}}o, Bruno and Bhattacharjee, Shameek},
  title     = {A Decentralized Approach for Real Time Anomaly Detection in Transportation Networks},
  booktitle = {{IEEE} International Conference on Smart Computing, {SMARTCOMP} 2019, Washington, DC, USA},
  year      = {2019},
  month     = jun,
  pages     = {274--282},
  doi       = {10.1109/SMARTCOMP.2019.00063},
  url       = {https://doi.org/10.1109/SMARTCOMP.2019.00063},
  category  = {selectiveconference},
  keywords  = {transit, reliability},
  tag       = {ai4cps,platform,decentralization,incident,transit},
  project   = {cps-reliability,smart-transit,smart-cities},
  file      = {:Wilbur2019-A_Decentralized_Approach_for_Real_Time_Anomaly_Detection_in_Transportation_Networks.pdf:PDF},
  biburl    = {https://dblp.org/rec/bib/conf/smartcomp/WilburDLB19},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  timestamp = {Wed, 16 Oct 2019 14:14:54 +0200}
}
- C. Samal, A. Dubey, and L. J. Ratliff, Mobilytics-Gym: A Simulation Framework for Analyzing Urban Mobility Decision Strategies, in IEEE International Conference on Smart Computing, SMARTCOMP 2019, Washington, DC, USA, 2019, pp. 283–291.
@inproceedings{Samal2019,
  author    = {Samal, Chinmaya and Dubey, Abhishek and Ratliff, Lillian J.},
  title     = {{Mobilytics-Gym}: {A} Simulation Framework for Analyzing Urban Mobility Decision Strategies},
  booktitle = {{IEEE} International Conference on Smart Computing, {SMARTCOMP} 2019, Washington, DC, USA},
  year      = {2019},
  month     = jun,
  pages     = {283--291},
  doi       = {10.1109/SMARTCOMP.2019.00064},
  url       = {https://doi.org/10.1109/SMARTCOMP.2019.00064},
  category  = {selectiveconference},
  keywords  = {transit},
  tag       = {transit},
  project   = {smart-transit,smart-cities},
  file      = {:Samal2019-Mobilytics-Gym_A_Simulation_Framework_for_Analyzing_Urban_Mobility_Decision_Strategies.pdf:PDF},
  biburl    = {https://dblp.org/rec/bib/conf/smartcomp/SamalDR19},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  timestamp = {Wed, 16 Oct 2019 14:14:54 +0200}
}
The rise in deep learning models in recent years has led to various innovative solutions for intelligent transportation technologies. Use of personal and on-demand mobility services puts a strain on the existing road network in a city. To mitigate this problem, city planners need a simulation framework to evaluate the effect of any incentive policy in nudging commuters towards alternate modes of travel, such as bike and car-share options. In this paper, we leverage MATSim, an agent-based simulation framework, to integrate agent preference models that capture the altruistic behavior of an agent in addition to their disutility proportional to the travel time and cost. These models are learned in a data-driven approach and can be used to evaluate the sensitivity of an agent to system-level disutility and monetary incentives given, e.g., by the transportation authority. This framework provides a standardized environment to evaluate the effectiveness of any particular incentive policy of a city, in nudging its residents towards alternate modes of transportation. We show the effectiveness of the approach and provide analysis using a case study from the Metropolitan Nashville area.
- H. Tu, Y. Du, H. Yu, A. Dubey, S. Lukic, and G. Karsai, Resilient Information Architecture Platform for the Smart Grid (RIAPS): A Novel Open-Source Platform for Microgrid Control, IEEE Transactions on Industrial Electronics, pp. 1–1, 2019.
@article{Tu2019,
  author   = {{Tu}, H. and {Du}, Y. and {Yu}, H. and Dubey, Abhishek and {Lukic}, S. and {Karsai}, G.},
  title    = {Resilient Information Architecture Platform for the Smart Grid ({RIAPS}): A Novel Open-Source Platform for Microgrid Control},
  journal  = {IEEE Transactions on Industrial Electronics},
  year     = {2019},
  pages    = {1--1},
  issn     = {1557-9948},
  doi      = {10.1109/TIE.2019.2952803},
  keywords = {smartgrid},
  tag      = {decentralization,power},
  project  = {cps-middleware,cps-reliability,smart-energy},
  file     = {:Tu2019-Resilient_Information_Architecture_Platform_for_the_Smart_Grid(RIAPS)_A_Novel_Open-Source_Platform_for_Microgrid_Control.pdf:PDF}
}
Microgrids are seen as an effective way to achieve reliable, resilient, and efficient operation of the power distribution system. Core functions of the microgrid control system are defined by the IEEE standard 2030.7; however, the algorithms that realize these functions are not standardized, and are a topic of research. Furthermore, the corresponding controller hardware, operating system, and communication system to implement these functions vary significantly from one implementation to the next. In this paper, we introduce an open-source platform, Resilient Information Architecture Platform for the Smart Grid (RIAPS), ideally suited for implementing and deploying distributed microgrid control algorithms. RIAPS provides a design-time tool suite for development and deployment of distributed microgrid control algorithms. With support from a number of run-time platform services, developed algorithms can be easily implemented and deployed into real microgrids. To demonstrate the unique features of RIAPS, we propose and implement a distributed microgrid secondary control algorithm capable of synchronized and proportional compensation of voltage unbalance using distributed generators. Test results show the effectiveness of the proposed control and the salient features of the RIAPS platform.
- S. Basak, A. Aman, A. Laszka, A. Dubey, and B. Leao, Data-Driven Detection of Anomalies and Cascading Failures in Traffic Networks, in Proceedings of the 11th Annual Conference of the Prognostics and Health Management Society (PHM), 2019.
@inproceedings{Basak2019b,
  author      = {Basak, Sanchita and Aman, Afiya and Laszka, Aron and Dubey, Abhishek and Leao, Bruno},
  title       = {Data-Driven Detection of Anomalies and Cascading Failures in Traffic Networks},
  booktitle   = {Proceedings of the 11th Annual Conference of the Prognostics and Health Management Society (PHM)},
  year        = {2019},
  month       = oct,
  doi         = {10.36001/phmconf.2019.v11i1.861},
  category    = {conference},
  keywords    = {transit, reliability},
  tag         = {ai4cps,transit},
  project     = {smart-transit,smart-cities,cps-reliability},
  attachments = {https://www.isis.vanderbilt.edu/sites/default/files/PHM_traffic_cascades_paper.pdf},
  file        = {:Basak2019b-Data_Driven_Detection_of_Anomalies_and_Cascading_Failures_in_Traffic_Networks.pdf:PDF}
}
Traffic networks are one of the most critical infrastructures for any community. The increasing integration of smart and connected sensors in traffic networks provides researchers with unique opportunities to study the dynamics of this critical community infrastructure. Our focus in this paper is on the failure dynamics of traffic networks. By failure, we mean in this domain the hindrance of the normal operation of a traffic network due to cyber anomalies or physical incidents that cause cascaded congestion throughout the network. We are specifically interested in analyzing the cascade effects of traffic congestion caused by physical incidents, focusing on developing mechanisms to isolate and identify the source of a congestion. To analyze failure propagation, it is crucial to develop (a) monitors that can identify an anomaly and (b) a model to capture the dynamics of anomaly propagation. In this paper, we use real traffic data from Nashville, TN to demonstrate a novel anomaly detector and a Timed Failure Propagation Graph based diagnostics mechanism. Our novelty lies in the ability to capture the spatial information and the interconnections of the traffic network as well as the use of recurrent neural network architectures to learn and predict the operation of a graph edge as a function of its immediate peers, including both incoming and outgoing branches. Our results show that our LSTM-based traffic-speed predictors attain an average mean squared error of $6.55 \times 10^{-4}$ on predicting normalized traffic speed, while Gaussian Process Regression based predictors attain a much higher average mean squared error of $1.78 \times 10^{-2}$. We are also able to detect anomalies with high precision and recall, resulting in an AUC (Area Under Curve) of 0.8507 for the precision-recall curve. To study physical traffic incidents, we augment the real data with simulated data generated using SUMO, a traffic simulator.
Finally, we analyzed the cascading effect of the congestion propagation by formulating the problem as a Timed Failure Propagation Graph, which led us in identifying the source of a failure/congestion accurately.
- A. Dubey, W. Emfinger, A. Gokhale, P. Kumar, D. McDermet, T. Bapty, and G. Karsai, Enabling Strong Isolation for Distributed Real-Time Applications in Edge Computing Scenarios, IEEE Aerospace and Electronic Systems Magazine, vol. 34, no. 7, pp. 32–45, Jul. 2019.
@article{Dubey2019c,
  author   = {Dubey, Abhishek and {Emfinger}, W. and {Gokhale}, A. and {Kumar}, P. and {McDermet}, D. and {Bapty}, T. and {Karsai}, G.},
  title    = {Enabling Strong Isolation for Distributed Real-Time Applications in Edge Computing Scenarios},
  journal  = {IEEE Aerospace and Electronic Systems Magazine},
  year     = {2019},
  volume   = {34},
  number   = {7},
  month    = jul,
  pages    = {32--45},
  issn     = {1557-959X},
  doi      = {10.1109/MAES.2019.2905921},
  keywords = {middleware},
  tag      = {platform},
  project  = {cps-middleware,cps-reliability},
  file     = {:Dubey2019c-Enabling_Strong_Isolation_for_Distributed_Real-Time_Applications_in_Edge_Computing_Scenarios.pdf:PDF}
}
Distributed coexisting applications found in the military and space domains, which operate over managed but shared computing resources at the edge require strong isolation from each other. The state of the art for computation sharing at the edge is traditionally based on Docker and similar pseudovirtualization features. Our team has been working on an end-to-end architecture that provides strong spatial and temporal isolation similar to what has become standard in avionics communities. In this paper, we describe an open-source extension to Linux that we have designed and implemented for our distributed real-time embedded managed systems (DREMS) architecture. The key concepts are the partitioning scheduler, strong security design, and a health management interface.
- A. Oruganti, S. Basak, F. Sun, H. Baroud, and A. Dubey, Modeling and Predicting the Cascading Effects of Delay in Transit Systems, in Transportation Research Board Annual Meeting, 2019.
@inproceedings{Oruganti2019,
  author      = {Oruganti, Aparna and Basak, Sanchita and Sun, Fangzhou and Baroud, Hiba and Dubey, Abhishek},
  title       = {Modeling and Predicting the Cascading Effects of Delay in Transit Systems},
  booktitle   = {Transportation Research Board Annual Meeting},
  year        = {2019},
  category    = {selectiveconference},
  keywords    = {transit},
  tag         = {transit},
  project     = {smart-transit,smart-cities},
  attachments = {https://www.isis.vanderbilt.edu/sites/default/files/final%20poster.pdf},
  file        = {:Oruganti2019-Modeling_and_Predicting_the_Cascading_Effects_of_Delay_in_Transit_Systems.pdf:PDF}
}
An effective real-time estimation of the travel time for vehicles, using AVL(Automatic Vehicle Locators) has added a new dimension to the smart city planning. In this paper, we used data collected over several months from a transit agency and show how this data can be potentially used to learn patterns of travel time during specially planned events like NFL (National Football League) games and music award ceremonies. The impact of NFL games along with consideration of other factors like weather, traffic condition, distance is discussed with their relative importance to the prediction of travel time. Statistical learning models are used to predict travel time and subsequently assess the cascading effects of delay. The model performance is determined based on its predictive accuracy according to the out-of-sample error. In addition, the models help identify the most significant variables that influence the delay in the transit system. In order to compare the actual and predicted travel time for days having special events, heat maps are generated showing the delay impacts in different time windows between two timepoint-segments in comparison to a non-game day. This work focuses on the prediction and visualization of the delay in the public transit system and the analysis of its cascading effects on the entire transportation network. According to the study results, we are able to explain more than 80% of the variance in the bus travel time at each segment and can make future travel predictions during planned events with an out-of-sample error of 2.0 minutes using information on the bus schedule, traffic, weather, and scheduled events. According to the variable importance analysis, traffic information is most significant in predicting the delay in the transit system.
2018
- M. García-Valls, A. Dubey, and V. J. Botti, Introducing the new paradigm of Social Dispersed Computing: Applications, Technologies and Challenges, Journal of Systems Architecture - Embedded Systems Design, vol. 91, pp. 83–102, 2018.
@article{GarciaValls2018,
  author    = {Garc{\'{\i}}a{-}Valls, Marisol and Dubey, Abhishek and Botti, Vicent J.},
  title     = {Introducing the new paradigm of {Social Dispersed Computing}: Applications, Technologies and Challenges},
  journal   = {Journal of Systems Architecture - Embedded Systems Design},
  year      = {2018},
  volume    = {91},
  pages     = {83--102},
  doi       = {10.1016/j.sysarc.2018.05.007},
  url       = {https://doi.org/10.1016/j.sysarc.2018.05.007},
  keywords  = {middleware},
  tag       = {platform,decentralization},
  project   = {cps-middleware},
  file      = {:Garcia-Valls2018-Introducing_the_new_paradigm_of_Social_Dispersed_Computing_Applications_Technologies_and_Challenges.pdf:PDF},
  biburl    = {https://dblp.org/rec/bib/journals/jsa/Garcia-VallsDB18},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  timestamp = {Mon, 16 Sep 2019 01:00:00 +0200}
}
If the last decade viewed computational services as a utility, then surely this decade has transformed computation into a commodity. Computation is now progressively integrated into the physical networks in a seamless way that enables cyber-physical systems (CPS) and the Internet of Things (IoT) to meet their latency requirements. Similar to the concept of “platform as a service” or “software as a service”, both cloudlets and fog computing have found their own use cases. Edge devices (that we call end or user devices for disambiguation) play the role of personal computers, dedicated to a user and to a set of correlated applications. In this new scenario, the boundaries between the network node, the sensor, and the actuator are blurring, driven primarily by the computation power of IoT nodes like single board computers and the smartphones. The bigger data generated in this type of network needs clever, scalable, and possibly decentralized computing solutions that can scale independently as required. Any node can be seen as part of a graph, with the capacity to serve as a computing or network router node, or both. Complex applications can possibly be distributed over this graph or network of nodes to improve the overall performance like the amount of data processed over time. In this paper, we identify this new computing paradigm that we call Social Dispersed Computing, analyzing key themes in it that includes a new outlook on its relation to agent based applications. We architect this new paradigm by providing supportive application examples that include next generation electrical energy distribution networks, next generation mobility services for transportation, and applications for distributed analysis and identification of non-recurring traffic congestion in cities.
The paper analyzes the existing computing paradigms (e.g., cloud, fog, edge, mobile edge, social, etc.), solving the ambiguity of their definitions; and analyzes and discusses the relevant foundational software technologies, the remaining challenges, and research opportunities.
- S. Pradhan, A. Dubey, S. Khare, S. Nannapaneni, A. S. Gokhale, S. Mahadevan, D. C. Schmidt, and M. Lehofer, CHARIOT: Goal-Driven Orchestration Middleware for Resilient IoT Systems, TCPS, vol. 2, no. 3, pp. 16:1–16:37, 2018.
@article{Pradhan2018, author = {Pradhan, Subhav and Dubey, Abhishek and Khare, Shweta and Nannapaneni, Saideep and Gokhale, Aniruddha S. and Mahadevan, Sankaran and Schmidt, Douglas C. and Lehofer, Martin}, title = {{CHARIOT:} Goal-Driven Orchestration Middleware for Resilient IoT Systems}, journal = {{TCPS}}, year = {2018}, volume = {2}, number = {3}, pages = {16:1--16:37}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/bib/journals/tcps/PradhanDKNGMSL18}, doi = {10.1145/3134844}, tag = {ai4cps,platform}, file = {:Pradhan2018-CHARIOT_Goal-Driven_Orchestration_Middleware_for_Resilient_IoT_Systems.pdf:PDF}, keywords = {reliability, middleware}, project = {cps-middleware,cps-reliability}, timestamp = {Wed, 21 Nov 2018 00:00:00 +0100}, url = {https://doi.org/10.1145/3134844} }
An emerging trend in Internet of Things (IoT) applications is to move the computation (cyber) closer to the source of the data (physical). This paradigm is often referred to as edge computing. If edge resources are pooled together they can be used as decentralized shared resources for IoT applications, providing increased capacity to scale up computations and minimize end-to-end latency. Managing applications on these edge resources is hard, however, due to their remote, distributed, and (possibly) dynamic nature, which necessitates autonomous management mechanisms that facilitate application deployment, failure avoidance, failure management, and incremental updates. To address these needs, we present CHARIOT, which is orchestration middleware capable of autonomously managing IoT systems consisting of edge resources and applications. CHARIOT implements a three-layer architecture. The topmost layer comprises a system description language, the middle layer comprises a persistent data storage layer and the corresponding schema to store system information, and the bottom layer comprises a management engine that uses information stored persistently to formulate constraints that encode system properties and requirements, thereby enabling the use of Satisfiability Modulo Theories (SMT) solvers to compute optimal system (re)configurations dynamically at runtime. This paper describes the structure and functionality of CHARIOT and evaluates its efficacy as the basis for a smart parking system case study that uses sensors to manage parking spaces.
- A. Laszka, S. Eisele, A. Dubey, G. Karsai, and K. Kvaternik, TRANSAX: A Blockchain-Based Decentralized Forward-Trading Energy Exchanged for Transactive Microgrids, in 24th IEEE International Conference on Parallel and Distributed Systems, ICPADS 2018, Singapore, December 11-13, 2018, 2018, pp. 918–927.
@inproceedings{Laszka2018,
  author    = {Laszka, Aron and Eisele, Scott and Dubey, Abhishek and Karsai, Gabor and Kvaternik, Karla},
  title     = {{TRANSAX:} {A} Blockchain-Based Decentralized Forward-Trading Energy Exchanged for Transactive Microgrids},
  booktitle = {24th {IEEE} International Conference on Parallel and Distributed Systems, {ICPADS} 2018, Singapore, December 11-13, 2018},
  year      = {2018},
  pages     = {918--927},
  doi       = {10.1109/PADSW.2018.8645001},
  url       = {https://doi.org/10.1109/PADSW.2018.8645001},
  category  = {selectiveconference},
  keywords  = {transactive, blockchain},
  tag       = {decentralization,power},
  project   = {transactive-energy,cps-blockchains},
  file      = {:Laszka2018-TRANSAX_A_Blockchain-Based_Decentralized_Forward-Trading_Energy_Exchanged_for_Transactive_Microgrids.pdf:PDF},
  biburl    = {https://dblp.org/rec/bib/conf/icpads/LaszkaEDKK18},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  timestamp = {Wed, 16 Oct 2019 14:14:56 +0200}
}
Power grids are undergoing major changes due to rapid growth in renewable energy and improvements in battery technology. Prompted by the increasing complexity of power systems, decentralized IoT solutions are emerging, which arrange local communities into transactive microgrids. The core functionality of these solutions is to provide mechanisms for matching producers with consumers while ensuring system safety. However, there are multiple challenges that these solutions still face: privacy, trust, and resilience. The privacy challenge arises because the time series of production and consumption data for each participant is sensitive and may be used to infer personal information. Trust is an issue because a producer or consumer can renege on the promised energy transfer. Providing resilience is challenging due to the possibility of failures in the infrastructure that is required to support these market based solutions. In this paper, we develop a rigorous solution for transactive microgrids that addresses all three challenges by providing an innovative combination of MILP solvers, smart contracts, and publish-subscribe middleware within a framework of a novel distributed application platform, called Resilient Information Architecture Platform for Smart Grid. Towards this purpose, we describe the key architectural concepts, including fault tolerance, and show the trade-off between market efficiency and resource requirements.
- S. Hasan, A. Ghafouri, A. Dubey, G. Karsai, and X. D. Koutsoukos, Vulnerability analysis of power systems based on cyber-attack and defense models, in 2018 IEEE Power & Energy Society Innovative Smart Grid Technologies Conference, ISGT 2018, Washington, DC, USA, February 19-22, 2018, 2018, pp. 1–5.
@inproceedings{Hasan2018,
  author    = {Hasan, Saqib and Ghafouri, Amin and Dubey, Abhishek and Karsai, Gabor and Koutsoukos, Xenofon D.},
  title     = {Vulnerability analysis of power systems based on cyber-attack and defense models},
  booktitle = {2018 {IEEE} Power {\&} Energy Society Innovative Smart Grid Technologies Conference, {ISGT} 2018, Washington, DC, USA, February 19-22, 2018},
  year      = {2018},
  pages     = {1--5},
  doi       = {10.1109/ISGT.2018.8403337},
  url       = {https://doi.org/10.1109/ISGT.2018.8403337},
  category  = {selectiveconference},
  keywords  = {smartgrid},
  tag       = {platform,power},
  project   = {cps-reliability},
  file      = {:Hasan2018-Vulnerability_analysis_of_power_systems_based_on_cyber-attack_and_defense_models.pdf:PDF},
  biburl    = {https://dblp.org/rec/bib/conf/isgt/HasanGDKK18},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  timestamp = {Wed, 16 Oct 2019 14:14:57 +0200}
}
Reliable operation of power systems is a primary challenge for the system operators. With the advancement in technology and grid automation, power systems are becoming more vulnerable to cyber-attacks. The main goal of adversaries is to take advantage of these vulnerabilities and destabilize the system. This paper describes a game-theoretic approach to attacker / defender modeling in power systems. In our models, the attacker can strategically identify the subset of substations that maximize damage when compromised. However, the defender can identify the critical subset of substations to protect in order to minimize the damage when an attacker launches a cyber-attack. The algorithms for these models are applied to the standard IEEE-14, 39, and 57 bus examples to identify the critical set of substations given an attacker and a defender budget.
- S. Eisele, A. Laszka, A. Mavridou, and A. Dubey, SolidWorx: A Resilient and Trustworthy Transactive Platform for Smart and Connected Communities, in IEEE International Conference on Internet of Things and Blockchains, 2018, pp. 1263–1272.
@inproceedings{Eisele2018,
  author    = {Eisele, Scott and Laszka, Aron and Mavridou, Anastasia and Dubey, Abhishek},
  title     = {{SolidWorx}: {A} Resilient and Trustworthy Transactive Platform for Smart and Connected Communities},
  booktitle = {{IEEE} International Conference on Internet of Things and Blockchains},
  year      = {2018},
  pages     = {1263--1272},
  doi       = {10.1109/Cybermatics_2018.2018.00221},
  url       = {https://doi.org/10.1109/Cybermatics_2018.2018.00221},
  category  = {selectiveconference},
  keywords  = {blockchain, transactive},
  tag       = {decentralization,power},
  project   = {cps-blockchains,transactive-energy},
  file      = {:Eisele2018-SolidWorx_A_Resilient_and_Trustworthy_Transactive_Platform_for_Smart_and_Connected_Communities.pdf:PDF},
  biburl    = {https://dblp.org/rec/bib/conf/ithings/EiseleLMD18},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  timestamp = {Wed, 16 Oct 2019 14:14:56 +0200}
}
Internet of Things and data sciences are fueling the development of innovative solutions for various applications in Smart and Connected Communities (SCC). These applications provide participants with the capability to exchange not only data but also resources, which raises the concerns of integrity, trust, and above all the need for fair and optimal solutions to the problem of resource allocation. This exchange of information and resources leads to a problem where the stakeholders of the system may have limited trust in each other. Thus, collaboratively reaching consensus on when, how, and who should access certain resources becomes problematic. This paper presents SolidWorx, a blockchain-based platform that provides key mechanisms required for arbitrating resource consumption across different SCC applications in a domain-agnostic manner. For example, it introduces and implements a hybrid-solver pattern, where complex optimization computation is handled off-blockchain while solution validation is performed by a smart contract. To ensure correctness, the smart contract of SolidWorx is generated and verified using a model-based approach.
- W. Barbour, C. Samal, S. Kuppa, A. Dubey, and D. B. Work, On the Data-Driven Prediction of Arrival Times for Freight Trains on U.S. Railroads, in 21st International Conference on Intelligent Transportation Systems, ITSC 2018, Maui, HI, USA, November 4-7, 2018, 2018, pp. 2289–2296.
@inproceedings{Barbour2018,
  author    = {Barbour, William and Samal, Chinmaya and Kuppa, Shankara and Dubey, Abhishek and Work, Daniel B.},
  title     = {On the Data-Driven Prediction of Arrival Times for Freight Trains on {U.S.} Railroads},
  booktitle = {21st International Conference on Intelligent Transportation Systems, {ITSC} 2018, Maui, HI, USA, November 4-7, 2018},
  year      = {2018},
  pages     = {2289--2296},
  doi       = {10.1109/ITSC.2018.8569406},
  url       = {https://doi.org/10.1109/ITSC.2018.8569406},
  category  = {selectiveconference},
  keywords  = {transit},
  tag       = {transit},
  project   = {smart-transit,cps-reliability,smart-cities},
  file      = {:Barbour2018-On_the_Data-Driven_Prediction_of_Arrival_Times_for_Freight_Trains_on_U.S._Railroads.pdf:PDF},
  biburl    = {https://dblp.org/rec/bib/conf/itsc/BarbourSKDW18},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  timestamp = {Wed, 16 Oct 2019 14:14:57 +0200}
}
The high capacity utilization and the pre-dominantly single-track network topology of freight railroads in the United States causes large variability and unpredictability of train arrival times. Predicting accurate estimated times of arrival (ETAs) is an important step for railroads to increase efficiency and automation, reduce costs, and enhance customer service. We propose using machine learning algorithms trained on historical railroad operational data to generate ETAs in real time. The machine learning framework is able to utilize the many data points produced by individual trains traversing a network track segment and generate periodic ETA predictions with a single model. In this work we compare the predictive performance of linear and non-linear support vector regression, random forest regression, and deep neural network models, tested on a section of the railroad in Tennessee, USA using over two years of historical data. Support vector regression and deep neural network models show similar results with maximum ETA error reduction of 26% over a statistical baseline predictor. The random forest models show over 60% error reduction compared to baseline at some points and average error reduction of 42%.
- F. Sun, A. Dubey, C. Samal, H. Baroud, and C. Kulkarni, Short-Term Transit Decision Support System Using Multi-task Deep Neural Networks, in 2018 IEEE International Conference on Smart Computing, SMARTCOMP 2018, Taormina, Sicily, Italy, June 18-20, 2018, 2018, pp. 155–162.
@inproceedings{Sun2018,
  author    = {Sun, Fangzhou and Dubey, Abhishek and Samal, Chinmaya and Baroud, Hiba and Kulkarni, Chetan},
  title     = {Short-Term Transit Decision Support System Using Multi-task Deep Neural Networks},
  booktitle = {2018 {IEEE} International Conference on Smart Computing, {SMARTCOMP} 2018, Taormina, Sicily, Italy, June 18-20, 2018},
  year      = {2018},
  pages     = {155--162},
  doi       = {10.1109/SMARTCOMP.2018.00086},
  url       = {https://doi.org/10.1109/SMARTCOMP.2018.00086},
  tag       = {ai4cps,transit},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/smartcomp/SunDSBK18},
  category  = {selectiveconference},
  file      = {:Sun2018-Short-Term_Transit_Decision_Support_System_Using_Multi-task_Deep_Neural_Networks.pdf:PDF},
  keywords  = {transit},
  project   = {smart-transit,cps-reliability,smart-cities},
  timestamp = {Wed, 16 Oct 2019 14:14:54 +0200}
}
Unpredictability is one of the top reasons that prevent people from using public transportation. To improve the on-time performance of transit systems, prior work focuses on updating schedule periodically in the long-term and providing arrival delay prediction in real-time. But when no real-time transit and traffic feed is available (e.g., one day ahead), there is a lack of effective contextual prediction mechanism that can give alerts of possible delay to commuters. In this paper, we propose a generic tool-chain that takes standard General Transit Feed Specification (GTFS) transit feeds and contextual information (recurring delay patterns before and after big events in the city and the contextual information such as scheduled events and forecasted weather conditions) as inputs and provides service alerts as output. Particularly, we utilize shared route segment networks and multi-task deep neural networks to solve the data sparsity and generalization issues. Experimental evaluation shows that the proposed toolchain is effective at predicting severe delay with a relatively high recall of 76% and F1 score of 55%.
- C. Samal, A. Dubey, and L. J. Ratliff, Mobilytics- An Extensible, Modular and Resilient Mobility Platform, in 2018 IEEE International Conference on Smart Computing, SMARTCOMP 2018, Taormina, Sicily, Italy, June 18-20, 2018, 2018, pp. 356–361.
@inproceedings{Samal2018,
  author    = {Samal, Chinmaya and Dubey, Abhishek and Ratliff, Lillian J.},
  title     = {Mobilytics- An Extensible, Modular and Resilient Mobility Platform},
  booktitle = {2018 {IEEE} International Conference on Smart Computing, {SMARTCOMP} 2018, Taormina, Sicily, Italy, June 18-20, 2018},
  year      = {2018},
  pages     = {356--361},
  doi       = {10.1109/SMARTCOMP.2018.00029},
  url       = {https://doi.org/10.1109/SMARTCOMP.2018.00029},
  tag       = {platform,transit},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/smartcomp/SamalDR18},
  category  = {selectiveconference},
  file      = {:Samal2018-Mobilytics-An_Extensible_Modular_and_Resilient_Mobility_Platform.pdf:PDF},
  keywords  = {transit},
  project   = {smart-transit,smart-cities},
  timestamp = {Wed, 16 Oct 2019 14:14:54 +0200}
}
Transportation management platforms provide communities the ability to integrate the available mobility options and localized transportation demand management policies. A central component of a transportation management platform is the mobility planning application. Given the societal relevance of these platforms, it is necessary to ensure that they operate resiliently. Modularity and extensibility are also critical properties that are required for manageability. Modularity allows to isolate faults easily. Extensibility enables update of policies and integration of new mobility modes or new routing algorithms. However, state of the art mobility planning applications like open trip planner, are monolithic applications, which makes it difficult to scale and modify them dynamically. This paper describes a microservices based modular multi-modal mobility platform Mobilytics, that integrates mobility providers, commuters, and community stakeholders. We describe our requirements, architecture, and discuss the resilience challenges, and how our platform functions properly in presence of failure. Conceivably, the patterns and principles manifested in our system can serve as guidelines for current and future practitioners in this field.
- C. Samal, L. Zheng, F. Sun, L. J. Ratliff, and A. Dubey, Towards a Socially Optimal Multi-Modal Routing Platform, CoRR, vol. abs/1802.10140, 2018.
@article{Samal2018a,
  author        = {Samal, Chinmaya and Zheng, Liyuan and Sun, Fangzhou and Ratliff, Lillian J. and Dubey, Abhishek},
  title         = {Towards a Socially Optimal Multi-Modal Routing Platform},
  journal       = {CoRR},
  year          = {2018},
  volume        = {abs/1802.10140},
  archiveprefix = {arXiv},
  eprint        = {1802.10140},
  url           = {http://arxiv.org/abs/1802.10140},
  tag           = {transit},
  bibsource     = {dblp computer science bibliography, https://dblp.org},
  biburl        = {https://dblp.org/rec/bib/journals/corr/abs-1802-10140},
  file          = {:Samal2018a-Towards_a_Socially_Optimal_Multi-Modal_Routing_Platform.pdf:PDF},
  keywords      = {transit},
  project       = {smart-transit,smart-cities},
  timestamp     = {Mon, 13 Aug 2018 01:00:00 +0200}
}
The increasing rate of urbanization has added pressure on the already constrained transportation networks in our communities. Ride-sharing platforms such as Uber and Lyft are becoming more commonplace, particularly in urban environments. While such services may be deemed more convenient than riding public transit due to their on-demand nature, reports show that they do not necessarily decrease the congestion in major cities. One of the key problems is that typically mobility decision support systems focus on individual utility and react only after congestion appears. In this paper, we propose socially considerate multi-modal routing algorithms that are proactive and consider, via predictions, the shared effect of riders on the overall efficacy of mobility services. We have adapted the MATSim simulator framework to incorporate the proposed algorithms and present a simulation analysis of a case study in Nashville, Tennessee that assesses the effects of our routing models on the traffic congestion for different levels of penetration and adoption of socially considerate routes. Our results indicate that even at a low penetration (social ratio), we are able to achieve an improvement in system-level performance.
- S. Basak, S. Sengupta, and A. Dubey, A Data-driven Prognostic Architecture for Online Monitoring of Hard Disks Using Deep LSTM Networks, CoRR, vol. abs/1810.08985, 2018.
@article{Basak2018,
  author        = {Basak, Sanchita and Sengupta, Saptarshi and Dubey, Abhishek},
  title         = {A Data-driven Prognostic Architecture for Online Monitoring of Hard Disks Using Deep {LSTM} Networks},
  journal       = {CoRR},
  year          = {2018},
  volume        = {abs/1810.08985},
  archiveprefix = {arXiv},
  eprint        = {1810.08985},
  url           = {http://arxiv.org/abs/1810.08985},
  tag           = {ai4cps},
  bibsource     = {dblp computer science bibliography, https://dblp.org},
  biburl        = {https://dblp.org/rec/bib/journals/corr/abs-1810-08985},
  file          = {:Basak2018-A_Data-driven_Prognostic_Architecture_for_Online_Monitoring_of_Hard_Disks_Using_Deep_LSTM_Networks.pdf:PDF},
  timestamp     = {Wed, 31 Oct 2018 00:00:00 +0100}
}
With the advent of pervasive cloud computing technologies, service reliability and availability are becoming major concerns, especially as we start to integrate cyber-physical systems with the cloud networks. A number of smart and connected community systems such as emergency response systems utilize cloud networks to analyze real-time data streams and provide context-sensitive decision support. Improving overall system reliability requires us to study all the aspects of the end-to-end of this distributed system, including the backend data servers. In this paper, we describe a bi-layered prognostic architecture for predicting the Remaining Useful Life (RUL) of components of backend servers, especially those that are subjected to degradation. We show that our architecture is especially good at predicting the remaining useful life of hard disks. A Deep LSTM Network is used as the backbone of this fast, data-driven decision framework and dynamically captures the pattern of the incoming data. In the article, we discuss the architecture of the neural network and describe the mechanisms to choose the various hyper-parameters. We describe the challenges faced in extracting effective training sets from highly unorganized and class-imbalanced big data and establish methods for online predictions with extensive data pre-processing, feature extraction and validation through test sets with unknown remaining useful lives of the hard disks. Our algorithm performs especially well in predicting RUL near the critical zone of a device approaching failure. The proposed architecture is able to predict whether a disk is going to fail in next ten days with an average precision of 0.8435. In the future, we will extend this architecture to learn and predict the RUL of the edge devices in the end-to-end distributed systems of smart communities, taking into consideration context-sensitive external features such as weather.
- H. Tu, Y. Du, H. Yu, S. Lukic, M. Metelko, P. Volgyesi, A. Dubey, and G. Karsai, A Hardware-in-the-Loop Real-Time Testbed for Microgrid Hierarchical Control, in 2018 IEEE Energy Conversion Congress and Exposition (ECCE), 2018, pp. 2053–2059.
@inproceedings{Tu2018,
  author    = {{Tu}, H. and {Du}, Y. and {Yu}, H. and {Lukic}, S. and {Metelko}, M. and {Volgyesi}, P. and Dubey, Abhishek and {Karsai}, G.},
  title     = {A Hardware-in-the-Loop Real-Time Testbed for Microgrid Hierarchical Control},
  booktitle = {2018 IEEE Energy Conversion Congress and Exposition (ECCE)},
  year      = {2018},
  pages     = {2053--2059},
  month     = sep,
  category  = {conference},
  doi       = {10.1109/ECCE.2018.8557737},
  file      = {:Tu2018-A_Hardware-in-the-Loop_Real-Time_Testbed_for_Microgrid_Hierarchical_Control.pdf:PDF},
  issn      = {2329-3721},
  keywords  = {smartgrid},
  project   = {cps-middleware,smart-energy},
  tag       = {platform,power}
}
To maintain a stable, flexible and economic operation of a microgrid, hierarchical control architecture consisting of primary, secondary and tertiary control is proposed. However, the differences in dynamics of microgrid, bandwidths of control levels and speed of communication channels make it difficult to comprehensively validate the performance of the hierarchical control schemes. In this paper we propose a hardware-in-the-loop real-time testbed for microgrid hierarchical control. The proposed testbed can be used to validate control performance under different microgrid operating modes (grid-tied or islanded), different primary control schemes (current or voltage mode) and different secondary control approaches (centralized or distributed). The integration of industry-grade hardware that runs primary and secondary control into the testbed allows for complete emulation of microgrid operation, and facilitates the study of the effects of measurement noise, sampling and communication delays.
- F. Sun, A. Dubey, C. Kulkarni, N. Mahadevan, and A. G. Luna, A data driven health monitoring approach to extending small sats mission, in Conference Proceedings, Annual Conference of The Prognostics And Health Management Society, 2018.
@inproceedings{Sun2018a,
  author    = {Sun, Fangzhou and Dubey, Abhishek and Kulkarni, Chetan and Mahadevan, Nagbhushan and Luna, Ali Guarneros},
  title     = {A data driven health monitoring approach to extending small sats mission},
  booktitle = {Conference Proceedings, Annual Conference of The Prognostics And Health Management Society},
  year      = {2018},
  tag       = {platform},
  category  = {conference},
  file      = {:Sun2018a-A_data_driven_health_monitoring_approach_to_extending_small_sats_mission.pdf:PDF},
  keywords  = {reliability},
  project   = {cps-reliability}
}
In the next coming years, the International Space Station (ISS) plans to launch several small-sat missions powered by lithium-ion battery packs. An extended version of such mission requires dependable, energy dense, and durable power sources as well as system health monitoring. Hence a good health estimation framework to increase mission success is absolutely necessary as the devices are subjected to high demand operating conditions. This paper describes a hierarchical architecture which combines data-driven anomaly detection methods with a fine-grained model-based diagnosis and prognostics architecture. At the core of the architecture is a distributed stack of deep neural network that detects and classifies the data traces from nearby satellites based on prior observations. Any identified anomaly is transmitted to the ground, which then uses model-based diagnosis and prognosis framework to make health state estimation. In parallel, periodically the data traces from the satellites are transported to the ground and analyzed using model-based techniques. This data is then used to train the neural networks, which are run from ground systems and periodically updated. The collaborative architecture enables quick data-driven inference on the satellite and more intensive analysis on the ground where often time and power consumption are not constrained. The current work demonstrates implementation of this architecture through an initial battery data set. In the future we propose to apply this framework to other electric and electronic components on-board the small satellites.
- Y. Du, H. Tu, S. Lukic, A. Dubey, and G. Karsai, Distributed Microgrid Synchronization Strategy Using a Novel Information Architecture Platform, in 2018 IEEE Energy Conversion Congress and Exposition (ECCE), 2018, pp. 2060–2066.
@inproceedings{Du2018,
  author    = {{Du}, Y. and {Tu}, H. and {Lukic}, S. and Dubey, Abhishek and {Karsai}, G.},
  title     = {Distributed Microgrid Synchronization Strategy Using a Novel Information Architecture Platform},
  booktitle = {2018 IEEE Energy Conversion Congress and Exposition (ECCE)},
  year      = {2018},
  pages     = {2060--2066},
  month     = sep,
  category  = {conference},
  doi       = {10.1109/ECCE.2018.8557695},
  file      = {:Du2018-Distributed_Microgrid_Synchronization_Strategy_Using_a_Novel_Information_Architecture_Platform.pdf:PDF},
  issn      = {2329-3721},
  keywords  = {smartgrid},
  project   = {cps-middleware,cps-reliability,smart-energy},
  tag       = {power}
}
To seamlessly reconnect an islanded microgrid to the main grid, voltage phasors on both sides of the point of common coupling need to be synchronized before the main relay closes. In this paper, a distributed control strategy is proposed for microgrid synchronization operation. The proposed controller design utilizes pinning-based consensus algorithm to avoid system single point of failure. It is able to actively track the main grid frequency, provide a good coordination between frequency and phase regulation and ensure all distributed generations in the system proportionally share the load. Implementation of such distributed algorithm in practice is difficult because it requires mitigation of both distributed computing and power system engineering challenges. In this paper, a novel software platform called RIAPS platform is presented that helps implementing the proposed distributed synchronization strategy in practical hardware controllers. The performance of the controllers is validated using a real-time controller hardware-in-the-loop microgrid testbed.
- H. Tu, Y. Du, H. Yu, S. Lukic, P. Volgyesi, M. Metelko, A. Dubey, and G. Karsai, An Adaptive Interleaving Algorithm for Multi-Converter Systems, in 2018 9th IEEE International Symposium on Power Electronics for Distributed Generation Systems (PEDG), 2018, pp. 1–7.
@inproceedings{Tu2018a,
  author    = {{Tu}, H. and {Du}, Y. and {Yu}, H. and {Lukic}, S. and {Volgyesi}, P. and {Metelko}, M. and Dubey, Abhishek and {Karsai}, G.},
  title     = {An Adaptive Interleaving Algorithm for Multi-Converter Systems},
  booktitle = {2018 9th IEEE International Symposium on Power Electronics for Distributed Generation Systems (PEDG)},
  year      = {2018},
  pages     = {1--7},
  month     = jun,
  category  = {conference},
  doi       = {10.1109/PEDG.2018.8447801},
  file      = {:Tu2018a-An_Adaptive_Interleaving_Algorithm_for_Multi-Converter_Systems.pdf:PDF},
  issn      = {2329-5767},
  keywords  = {smartgrid},
  project   = {cps-middleware,cps-reliability,smart-energy},
  tag       = {power}
}
To integrate DC distributed generation (DG) with micro-source into the existing AC grid, a DC distribution bus can be used to couple on-site photovoltaics (PV), battery energy storage systems (BESS), and DC loads. If the converters connected to the DC bus are interleaved, the DC bus capacitor size could be minimized. In this paper, we propose an interleaving algorithm for multi-converter systems to minimize the current harmonics at switching frequency on the DC bus. The proposed algorithm is implemented using Resilient Information Architecture Platform for Smart Grid (RIAPS) platform. Hardware-in-the-Loop (HIL) simulation results based on Opal-RT are presented to validate its performance. The influence of synchronization frequency on the proposed algorithm is also considered.
- S. Nannapaneni, S. Mahadevan, and A. Dubey, Real-Time Control of Cyber-Physical Manufacturing Process Under Uncertainty, in Proceedings of ASME 2018 13th International Manufacturing Science and Engineering Conference, 2018, vol. Volume 3: Manufacturing Equipment and Systems.
@inproceedings{Nannapaneni2018,
  author   = {Nannapaneni, Saideep and Mahadevan, Sankaran and Dubey, Abhishek},
  title    = {Real-Time Control of Cyber-Physical Manufacturing Process Under Uncertainty},
  booktitle = {Proceedings of ASME 2018 13th International Manufacturing Science and Engineering Conference},
  year     = {2018},
  volume   = {Volume 3: Manufacturing Equipment and Systems},
  series   = {International Manufacturing Science and Engineering Conference},
  month    = jun,
  note     = {V003T02A001},
  doi      = {10.1115/MSEC2018-6460},
  url      = {https://doi.org/10.1115/MSEC2018-6460},
  eprint   = {https://asmedigitalcollection.asme.org/MSEC/proceedings-pdf/MSEC2018/51371/V003T02A001/2520174/v003t02a001-msec2018-6460.pdf},
  tag      = {platform},
  category = {conference},
  keywords = {reliability},
  project  = {cps-reliability}
}
Modern manufacturing processes are increasingly becoming cyber-physical in nature, where a computational system monitors the system performance, provides real-time process control by analyzing sensor data collected regarding process and product characteristics, in order to increase the quality of the manufactured product. Such real-time process monitoring and control techniques are useful in precision and ultra-precision machining processes. However, the output product quality is affected by several uncertainty sources in various stages of the manufacturing process such as the sensor uncertainty, computational system uncertainty, control input uncertainty, and the variability in the manufacturing process. The computational system may be a single computing node or a distributed computing network; the latter scenario introduces additional uncertainty due to the communication between several computing nodes. Due to the continuous monitoring process, these uncertainty sources aggregate and compound over time, resulting in variations of product quality. Therefore, characterization of the various uncertainty sources and their impact on the product quality are necessary to increase the efficiency and productivity of the overall manufacturing process. To this end, this paper develops a two-level dynamic Bayesian network methodology, where the higher level captures the uncertainty in the sensors, control inputs, and the manufacturing process while the lower level captures the uncertainty in the communication between several computing nodes. In addition, we illustrate the use of a variance-based global sensitivity analysis approach for dimension reduction in a high-dimensional manufacturing process, in order to enable real-time analysis for process control. The proposed methodologies of process control under uncertainty and dimension reduction are illustrated for a cyber-physical turning process.
- S. Nannapaneni, A. Dubey, and S. Mahadevan, Automated aircraft separation safety assurance using Bayesian networks, in 2018 Aviation Technology, Integration, and Operations Conference, 2018, p. 3199.
@inproceedings{Nannapaneni2018a,
  author    = {Nannapaneni, Saideep and Dubey, Abhishek and Mahadevan, Sankaran},
  title     = {Automated aircraft separation safety assurance using Bayesian networks},
  booktitle = {2018 Aviation Technology, Integration, and Operations Conference},
  year      = {2018},
  pages     = {3199},
  category  = {conference},
  keywords  = {reliability},
  project   = {cps-reliability}
}
- Y. Du, H. Tu, S. Lukic, D. Lubkeman, A. Dubey, and G. Karsai, Development of a Controller Hardware-in-the-Loop Platform for Microgrid Distributed Control Applications, in 2018 IEEE Electronic Power Grid (eGrid), 2018, pp. 1–6.
@inproceedings{DuTu2018,
  author    = {{Du}, Y. and {Tu}, H. and {Lukic}, S. and {Lubkeman}, D. and Dubey, Abhishek and {Karsai}, G.},
  title     = {Development of a Controller Hardware-in-the-Loop Platform for Microgrid Distributed Control Applications},
  booktitle = {2018 IEEE Electronic Power Grid (eGrid)},
  year      = {2018},
  pages     = {1--6},
  month     = nov,
  category  = {selectiveconference},
  doi       = {10.1109/eGRID.2018.8598696},
  file      = {:DuTu2018-Development_of_a_Controller_Hardware-in-the-Loop_Platform_for_Microgrid_Distributed_Control_Applications.pdf:PDF},
  keywords  = {smartgrid},
  tag       = {power}
}
Microgrids (MGs) are ideally suited for distributed control solutions. However, implementation and validation of the developed distributed control algorithms are quite challenging. In this paper we propose a Controller Hardware-in-the-Loop (CHIL) platform for MG distributed control applications that satisfies the requirements of IEEE Std. 2030.7 for MG control systems. We describe two main features of the proposed platform: 1) a software platform that enables the implementation of control algorithms that have been developed analytically and 2) a real-time MG testbed that replicates practical MG operation environment by using real-time communication network and grid solutions. Implementation and validation of a distributed MG synchronization operation control strategy are used to demonstrate the performance of the proposed CHIL platform.
- Y. Du, H. Tu, S. Lukic, D. Lubkeman, A. Dubey, and G. Karsai, Resilient Information Architecture Platform for Smart Systems (RIAPS): Case Study for Distributed Apparent Power Control, in 2018 IEEE/PES Transmission and Distribution Conference and Exposition (T D), 2018, pp. 1–5.
@inproceedings{DuTu2018a,
  author    = {{Du}, Y. and {Tu}, H. and {Lukic}, S. and {Lubkeman}, D. and Dubey, Abhishek and {Karsai}, G.},
  title     = {Resilient Information Architecture Platform for Smart Systems (RIAPS): Case Study for Distributed Apparent Power Control},
  booktitle = {2018 IEEE/PES Transmission and Distribution Conference and Exposition (T D)},
  year      = {2018},
  pages     = {1--5},
  month     = apr,
  category  = {selectiveconference},
  doi       = {10.1109/TDC.2018.8440324},
  file      = {:DuTu2018a-Resilient_Information_Architecture_Platform_for_Smart_Systems_Case_Study_Distributed_Apparent_Power_Control.pdf:PDF},
  issn      = {2160-8563},
  keywords  = {middleware, smartgrid},
  tag       = {platform}
}
Maintaining voltage and frequency stability in an islanded microgrid is challenging, due to the low system inertia. In addition, islanded microgrids have limited generation capability, requiring that all DGs contribute proportionally to meet the system power consumption. This paper proposes a distributed control algorithm for optimal apparent power utilization in islanded microgrids. The developed algorithm improves system apparent power utilization by maintaining proportional power sharing among DGs. A decentralized platform called Resilient Information Architecture Platform for Smart Systems (RIAPS) is introduced that runs on processors embedded within the DGs. The proposed algorithm is fully implemented in RIAPS platform and validated on a real-time microgrid testbed.
- H. Purohit, S. Nannapaneni, A. Dubey, P. Karuna, and G. Biswas, Structured Summarization of Social Web for Smart Emergency Services by Uncertain Concept Graph, in 2018 IEEE International Science of Smart City Operations and Platforms Engineering in Partnership with Global City Teams Challenge (SCOPE-GCTC), 2018, pp. 30–35.
@inproceedings{Purohit2018,
  author    = {{Purohit}, H. and {Nannapaneni}, S. and Dubey, Abhishek and {Karuna}, P. and {Biswas}, G.},
  title     = {Structured Summarization of Social Web for Smart Emergency Services by Uncertain Concept Graph},
  booktitle = {2018 IEEE International Science of Smart City Operations and Platforms Engineering in Partnership with Global City Teams Challenge (SCOPE-GCTC)},
  year      = {2018},
  pages     = {30--35},
  month     = apr,
  category  = {workshop},
  doi       = {10.1109/SCOPE-GCTC.2018.00012},
  file      = {:Purohit2018-Structured_Summarization_of_Social_Web_for_Smart_Emergency_Services_by_Uncertain_Concept_Graph.pdf:PDF},
  keywords  = {emergency},
  tag       = {decentralization,incident}
}
The Web has empowered emergency services to enhance operations by collecting real-time information about incidents from diverse data sources such as social media. However, the high volume of unstructured data from the heterogeneous sources with varying degrees of veracity challenges the timely extraction and integration of relevant information to summarize the current situation. Existing work on event detection and summarization on social media relates to this challenge of timely extraction of information during an evolving event. However, it is limited in both integrating incomplete information from diverse sources and using the integrated information to dynamically infer knowledge representation of the situation that captures optimal actions (e.g., allocate available finite ambulances to incident regions). In this paper, we present a novel concept of an Uncertain Concept Graph (UCG) that is capable of representing dynamic knowledge of a disaster event from heterogeneous data sources, particularly for the regions of interest, and resources/services required. The information sources, incident regions, and resources (e.g., ambulances) are represented as nodes in UCG, while the edges represent the weighted relationships between these nodes. We then propose a solution for probabilistic edge inference between nodes in UCG. We model a novel optimization problem for the edge assignment between a service resource to a region node over time trajectory. The output of such structured summarization over time can be valuable for modeling event dynamics in the real world beyond emergency management, across different smart city operations such as transportation.
- A. Chhokra, A. Dubey, N. Mahadevan, S. Hasan, and G. Karsai, Diagnosis in Cyber-Physical Systems with Fault Protection Assemblies, in Diagnosability, Security and Safety of Hybrid Dynamic and Cyber-Physical Systems, M. Sayed-Mouchaweh, Ed. Cham: Springer International Publishing, 2018, pp. 201–225.
@inbook{Chhokra2018,
  author    = {Chhokra, Ajay and Dubey, Abhishek and Mahadevan, Nagabhushan and Hasan, Saqib and Karsai, Gabor},
  editor    = {Sayed-Mouchaweh, Moamar},
  title     = {Diagnosis in Cyber-Physical Systems with Fault Protection Assemblies},
  booktitle = {Diagnosability, Security and Safety of Hybrid Dynamic and Cyber-Physical Systems},
  chapter   = {Chapter 8},
  pages     = {201--225},
  publisher = {Springer International Publishing},
  address   = {Cham},
  year      = {2018},
  isbn      = {978-3-319-74962-4},
  doi       = {10.1007/978-3-319-74962-4_8},
  url       = {https://doi.org/10.1007/978-3-319-74962-4_8},
  file      = {:Chhokra2018-Diagnosis_In_Cyber-Physical_Systems_with_Fault_Protection_Assemblies.pdf:PDF},
  keywords  = {reliability, smartgrid},
  tag       = {platform,power}
}
Fault Protection Assemblies are used in cyber-physical systems for automated fault-isolation. These devices alter the mode of the system using locally available information in order to stop fault propagation. For example, in electrical networks relays and breakers isolate faults in order to arrest failure propagation and protect the healthy parts of the system. However, these assemblies themselves can have faults, which may inadvertently induce secondary failures. Often these secondary failures lead to cascade effects, which then lead to total system collapse. This behavior is often seen in electrical transmission systems where failures of relays and breakers may cause overloading and the disconnection of parts of an otherwise healthy system. In the past, we had developed a consistency based diagnosis approach for physical systems based on the temporal failure propagation graph. We now describe an extension that uses the concept of timed discrete event observers in combination with the timed failure propagation graphs to extend the hypothesis to include the possibility of failures in the fault protection units. Using a simulated power system case study, we show that the combined approach is able to diagnose faults in both the plant and the protection devices.
- A. Chhokra, A. Dubey, N. Mahadevan, G. Karsai, D. Balasubramanian, and S. Hasan, Hierarchical Reasoning about Faults in Cyber-Physical Energy Systems using Temporal Causal Diagrams, International Journal of Prognostics and Health Management, vol. 9, no. 1, Feb. 2018.
@article{Chhokra2018a,
  author      = {Chhokra, Ajay and Dubey, Abhishek and Mahadevan, Nagabhushan and Karsai, Gabor and Balasubramanian, Daniel and Hasan, Saqib},
  title       = {Hierarchical Reasoning about Faults in Cyber-Physical Energy Systems using Temporal Causal Diagrams},
  journal     = {International Journal of Prognostics and Health Management},
  year        = {2018},
  volume      = {9},
  number      = {1},
  month       = feb,
  url         = {https://www.phmsociety.org/node/2290},
  attachments = {https://www.isis.vanderbilt.edu/sites/default/files/ijphm_18_001_0.pdf},
  file        = {:Chhokra2018a-Hierarchical_Reasoning_about_Faults_in_Cyber-Physical_Energy_Systems_using_Temporal_Causal_Diagrams.pdf:PDF},
  keywords    = {reliability, smartgrid},
  tag         = {platform,power},
  type        = {Journal Article}
}
The resiliency and reliability of critical cyber physical systems like electrical power grids are of paramount importance. These systems are often equipped with specialized protection devices to detect anomalies and isolate faults in order to arrest failure propagation and protect the healthy parts of the system. However, due to the limited situational awareness and hidden failures the protection devices themselves, through their operation (or mis-operation) may cause overloading and the disconnection of parts of an otherwise healthy system. This can result in cascading failures that lead to a blackout. Diagnosis of failures in such systems is extremely challenging because of the need to account for faults in both the physical systems as well as the protection devices, and the failure-effect propagation across the system. Our approach for diagnosing such cyber-physical systems is based on the concept of Temporal Causal Diagrams (TCD-s) that capture the timed discrete models of protection devices and their interactions with a system failure propagation graph. In this paper we present a refinement of the TCD language with a layer of independent local observers that aid in diagnosis. We describe a hierarchical two-tier failure diagnosis approach and showcase the results for 4 different scenarios involving both cyber and physical faults in a standard Western System Coordinating Council (WSCC) 9 bus system.
- A. Laszka, A. Mavridou, and A. Dubey, Resilient and Trustworthy Transactive Platform for Smart and Connected Communities, in High Confidence Software and Systems Conference, 2018.
@inproceedings{DubeyHCSS2018,
  author    = {Laszka, Aron and Mavridou, Anastasia and Dubey, Abhishek},
  title     = {Resilient and Trustworthy Transactive Platform for Smart and Connected Communities},
  booktitle = {High Confidence Software and Systems Conference},
  year      = {2018},
  keywords  = {blockchain},
  project   = {cps-reliability},
  tag       = {platform,decentralization},
  timestamp = {Wed, 16 Oct 2019 14:14:54 +0200},
}
2017
- A. Mukhopadhyay, Y. Vorobeychik, A. Dubey, and G. Biswas, Prioritized Allocation of Emergency Responders based on a Continuous-Time Incident Prediction Model, in Proceedings of the 16th Conference on Autonomous Agents and MultiAgent Systems, AAMAS 2017, São Paulo, Brazil, May 8-12, 2017, 2017, pp. 168–177.
@inproceedings{Mukhopadhyay2017,
  author    = {Mukhopadhyay, Ayan and Vorobeychik, Yevgeniy and Dubey, Abhishek and Biswas, Gautam},
  title     = {Prioritized Allocation of Emergency Responders based on a Continuous-Time Incident Prediction Model},
  booktitle = {Proceedings of the 16th Conference on Autonomous Agents and MultiAgent Systems, {AAMAS} 2017, S{\~{a}}o Paulo, Brazil, May 8-12, 2017},
  year      = {2017},
  pages     = {168--177},
  tag       = {ai4cps,incident},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/atal/MukhopadhyayVDB17},
  category  = {selectiveconference},
  file      = {:Mukhopadhyay2017-Prioritized_Allocation_of_Emergency_Responders_based_on_a_Continuous-Time_Incident_Prediction_Model.pdf:PDF},
  keywords  = {emergency},
  project   = {smart-emergency-response,smart-cities},
  timestamp = {Wed, 27 Sep 2017 07:24:00 +0200},
  url       = {http://dl.acm.org/citation.cfm?id=3091154},
}
Efficient emergency response is a major concern in densely populated urban areas. Numerous techniques have been proposed to allocate emergency responders to optimize response times, coverage, and incident prevention. Effective response depends, in turn, on effective prediction of incidents occurring in space and time, a problem which has also received considerable prior attention. We formulate a non-linear mathematical program maximizing expected incident coverage, and propose a novel algorithmic framework for solving this problem. In order to aid the optimization problem, we propose a novel incident prediction mechanism. Prior art in incident prediction does not generally consider incident priorities which are crucial in optimal dispatch, and spatial modeling either considers each discretized area independently, or learns a homogeneous model. We bridge these gaps by learning a joint distribution of both incident arrival time and severity, with spatial heterogeneity captured using a hierarchical clustering approach. Moreover, our decomposition of the joint arrival and severity distributions allows us to independently learn the continuous-time arrival model, and subsequently use a multinomial logistic regression to capture severity, conditional on incident time. We use real traffic accident and response data from the urban area around Nashville, USA, to evaluate the proposed approach, showing that it significantly outperforms prior art as well as the real dispatch method currently in use.
- F. Sun, A. Dubey, and J. White, DxNAT - Deep neural networks for explaining non-recurring traffic congestion, in 2017 IEEE International Conference on Big Data, BigData 2017, Boston, MA, USA, December 11-14, 2017, 2017, pp. 2141–2150.
@inproceedings{Sun2017,
  author    = {Sun, Fangzhou and Dubey, Abhishek and White, Jules},
  title     = {{DxNAT} - Deep neural networks for explaining non-recurring traffic congestion},
  booktitle = {2017 {IEEE} International Conference on Big Data, BigData 2017, Boston, MA, USA, December 11-14, 2017},
  year      = {2017},
  pages     = {2141--2150},
  tag       = {ai4cps,transit},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/bigdataconf/SunDW17},
  category  = {selectiveconference},
  doi       = {10.1109/BigData.2017.8258162},
  file      = {:Sun2017-DxNAT-Deep_neural_networks_for_explaining_non-recurring_traffic_congestion.pdf:PDF},
  keywords  = {transit},
  project   = {smart-transit,smart-cities,cps-reliability},
  timestamp = {Wed, 16 Oct 2019 14:14:51 +0200},
  url       = {https://doi.org/10.1109/BigData.2017.8258162},
}
Non-recurring traffic congestion is caused by temporary disruptions, such as accidents, sports games, adverse weather, etc. We use data related to real-time traffic speed, jam factors (a traffic congestion indicator), and events collected over a year from Nashville, TN to train a multi-layered deep neural network. The traffic dataset contains over 900 million data records. The network is thereafter used to classify the real-time data and identify anomalous operations. Compared with traditional approaches of using statistical or machine learning techniques, our model reaches an accuracy of 98.73 percent when identifying traffic congestion caused by football games. Our approach first encodes the traffic across a region as a scaled image. After that the image data from different timestamps is fused with event- and time-related data. Then a crossover operator is used as a data augmentation method to generate training datasets with more balanced classes. Finally, we use the receiver operating characteristic (ROC) analysis to tune the sensitivity of the classifier. We present the analysis of the training time and the inference time separately.
- A. Ghafouri, A. Laszka, A. Dubey, and X. D. Koutsoukos, Optimal detection of faulty traffic sensors used in route planning, in Proceedings of the 2nd International Workshop on Science of Smart City Operations and Platforms Engineering, SCOPE@CPSWeek 2017, Pittsburgh, PA, USA, April 21, 2017, 2017, pp. 1–6.
@inproceedings{Ghafouri2017,
  author    = {Ghafouri, Amin and Laszka, Aron and Dubey, Abhishek and Koutsoukos, Xenofon D.},
  title     = {Optimal detection of faulty traffic sensors used in route planning},
  booktitle = {Proceedings of the 2nd International Workshop on Science of Smart City Operations and Platforms Engineering, SCOPE@CPSWeek 2017, Pittsburgh, PA, USA, April 21, 2017},
  year      = {2017},
  pages     = {1--6},
  tag       = {ai4cps,platform,incident,transit},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/cpsweek/GhafouriLDK17},
  category  = {workshop},
  doi       = {10.1145/3063386.3063767},
  file      = {:Ghafouri2017-Optimal_detection_of_faulty_traffic_sensors_used_in_route_planning.pdf:PDF},
  keywords  = {transit},
  project   = {cps-reliability,smart-transit,smart-cities},
  timestamp = {Tue, 06 Nov 2018 16:59:05 +0100},
  url       = {https://doi.org/10.1145/3063386.3063767},
}
In a smart city, real-time traffic sensors may be deployed for various applications, such as route planning. Unfortunately, sensors are prone to failures, which result in erroneous traffic data. Erroneous data can adversely affect applications such as route planning, and can cause increased travel time. To minimize the impact of sensor failures, we must detect them promptly and accurately. However, typical detection algorithms may lead to a large number of false positives (i.e., false alarms) and false negatives (i.e., missed detections), which can result in suboptimal route planning. In this paper, we devise an effective detector for identifying faulty traffic sensors using a prediction model based on Gaussian Processes. Further, we present an approach for computing the optimal parameters of the detector which minimize losses due to false-positive and false-negative errors. We also characterize critical sensors, whose failure can have high impact on the route planning application. Finally, we implement our method and evaluate it numerically using a real-world dataset and the route planning platform OpenTripPlanner.
- J. Tan, C. Kendrick, A. Dubey, and S. Rhee, Indicator frameworks, in Proceedings of the 2nd International Workshop on Science of Smart City Operations and Platforms Engineering, SCOPE@CPSWeek 2017, Pittsburgh, PA, USA, April 21, 2017, 2017, pp. 19–25.
@inproceedings{Tan2017,
  author    = {Tan, Joshua and Kendrick, Christine and Dubey, Abhishek and Rhee, Sokwoo},
  title     = {Indicator frameworks},
  booktitle = {Proceedings of the 2nd International Workshop on Science of Smart City Operations and Platforms Engineering, SCOPE@CPSWeek 2017, Pittsburgh, PA, USA, April 21, 2017},
  year      = {2017},
  pages     = {19--25},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/cpsweek/TanKDR17},
  category  = {workshop},
  doi       = {10.1145/3063386.3063762},
  file      = {:Tan2017-indicator_frameworks.pdf:PDF},
  project   = {smart-cities},
  timestamp = {Tue, 06 Nov 2018 16:59:05 +0100},
  url       = {https://doi.org/10.1145/3063386.3063762},
}
We develop a diagrammatic tool for constructing correlations between random variables, called an abstract indicator framework. Abstract indicator frameworks are modeled on operational (key performance) indicator frameworks as they are used in city planning and project governance, and give a rigorous, statistically-motivated process for constructing operational indicator frameworks.
- A. Chhokra, A. Kulkarni, S. Hasan, A. Dubey, N. Mahadevan, and G. Karsai, A Systematic Approach of Identifying Optimal Load Control Actions for Arresting Cascading Failures in Power Systems, in Proceedings of the 2nd Workshop on Cyber-Physical Security and Resilience in Smart Grids, SPSR-SG@CPSWeek 2017, Pittsburgh, PA, USA, April 21, 2017, 2017, pp. 41–46.
@inproceedings{Chhokra2017,
  author    = {Chhokra, Ajay and Kulkarni, Amogh and Hasan, Saqib and Dubey, Abhishek and Mahadevan, Nagabhushan and Karsai, Gabor},
  title     = {A Systematic Approach of Identifying Optimal Load Control Actions for Arresting Cascading Failures in Power Systems},
  booktitle = {Proceedings of the 2nd Workshop on Cyber-Physical Security and Resilience in Smart Grids, SPSR-SG@CPSWeek 2017, Pittsburgh, PA, USA, April 21, 2017},
  year      = {2017},
  tag       = {platform},
  pages     = {41--46},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/cpsweek/ChhokraKHDMK17},
  category  = {workshop},
  doi       = {10.1145/3055386.3055395},
  file      = {:Chhokra2017-A_Systematic_Approach_of_Identifying_Optimal_Load_Control_Actions_for_Arresting_Cascading_Failures_in_Power_Systems.pdf:PDF},
  keywords  = {reliability, smartgrid},
  project   = {cps-reliability},
  timestamp = {Tue, 06 Nov 2018 16:59:05 +0100},
  url       = {https://doi.org/10.1145/3055386.3055395},
}
Cascading outages in power networks cause blackouts which lead to huge economic and social consequences. The traditional form of load shedding is avoidable in many cases by identifying optimal load control actions. However, if there is a change in the system topology (adding or removing loads, lines etc), the calculations have to be performed again. This paper addresses this problem by providing a workflow that 1) generates system models from IEEE CDF specifications, 2) identifies a collection of blackout causing contingencies, 3) dynamically sets up an optimization problem, and 4) generates a table of mitigation strategies in terms of minimal load curtailment. We demonstrate the applicability of our proposed methodology by finding load curtailment actions for N-k contingencies (k = 1, 2, 3) in IEEE 14 Bus system.
- A. Dubey, G. Karsai, and S. Pradhan, Resilience at the edge in cyber-physical systems, in Second International Conference on Fog and Mobile Edge Computing, FMEC 2017, Valencia, Spain, May 8-11, 2017, 2017, pp. 139–146.
@inproceedings{Dubey2017,
  author    = {Dubey, Abhishek and Karsai, Gabor and Pradhan, Subhav},
  title     = {Resilience at the edge in cyber-physical systems},
  booktitle = {Second International Conference on Fog and Mobile Edge Computing, {FMEC} 2017, Valencia, Spain, May 8-11, 2017},
  year      = {2017},
  tag       = {platform},
  pages     = {139--146},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/fmec/DubeyKP17},
  category  = {selectiveconference},
  doi       = {10.1109/FMEC.2017.7946421},
  file      = {:Dubey2017-Resilience_at_the_edge_in_cyber-physical_systems.pdf:PDF},
  keywords  = {reliability},
  project   = {cps-reliability,cps-middleware},
  timestamp = {Wed, 16 Oct 2019 14:14:56 +0200},
  url       = {https://doi.org/10.1109/FMEC.2017.7946421},
}
As the number of low cost computing devices at the edge of communication network increase, there are greater opportunities to enable innovative capabilities, especially in cyber-physical systems. For example, micro-grid power systems can make use of computing capabilities at the edge of a Smart Grid to provide more robust and decentralized control. However, the downside to distributing intelligence to the edge away from the controlled environment of the data centers is the increased risk of failures. The paper introduces a framework for handling these challenges. The contribution of this framework is to support strategies to (a) tolerate the transient faults as they appear due to network fluctuations or node failures, and to (b) systematically reconfigure the application if the faults persist.
- S. Eisele, G. Pettet, A. Dubey, and G. Karsai, Towards an architecture for evaluating and analyzing decentralized Fog applications, in IEEE Fog World Congress, FWC 2017, Santa Clara, CA, USA, October 30 - Nov. 1, 2017, 2017, pp. 1–6.
@inproceedings{Eisele2017,
  author    = {Eisele, Scott and Pettet, Geoffrey and Dubey, Abhishek and Karsai, Gabor},
  title     = {Towards an architecture for evaluating and analyzing decentralized Fog applications},
  booktitle = {{IEEE} Fog World Congress, {FWC} 2017, Santa Clara, CA, USA, October 30 - Nov. 1, 2017},
  year      = {2017},
  tag       = {platform,decentralization},
  pages     = {1--6},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/fwc/EiselePDK17},
  category  = {workshop},
  doi       = {10.1109/FWC.2017.8368531},
  file      = {:Eisele2017-Towards_an_architecture_for_evaluating_and_analyzing_decentralized_Fog_applications.pdf:PDF},
  keywords  = {middleware},
  project   = {cps-reliability,cps-middleware},
  timestamp = {Wed, 16 Oct 2019 14:14:51 +0200},
  url       = {https://doi.org/10.1109/FWC.2017.8368531},
}
As the number of low cost computing devices at the edge of network increases, there are greater opportunities to enable novel, innovative capabilities, especially in decentralized cyber-physical systems. For example, in an urban setting, a set of networked, collaborating processors at the edge can be used to dynamically detect traffic densities via image processing and then use those densities to control the traffic flow by coordinating traffic light sequences, in a decentralized architecture. In this paper we describe a testbed and an application framework for such applications.
- A. Chhokra, S. Hasan, A. Dubey, N. Mahadevan, and G. Karsai, Diagnostics and prognostics using temporal causal models for cyber physical energy systems, in Proceedings of the 8th International Conference on Cyber-Physical Systems, ICCPS 2017, Pittsburgh, Pennsylvania, USA, April 18-20, 2017, 2017, p. 87.
@inproceedings{Chhokra2017a,
  author    = {Chhokra, Ajay and Hasan, Saqib and Dubey, Abhishek and Mahadevan, Nagabhushan and Karsai, Gabor},
  title     = {Diagnostics and prognostics using temporal causal models for cyber physical energy systems},
  booktitle = {Proceedings of the 8th International Conference on Cyber-Physical Systems, {ICCPS} 2017, Pittsburgh, Pennsylvania, USA, April 18-20, 2017},
  year      = {2017},
  tag       = {platform,power},
  pages     = {87},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/iccps/ChhokraHDMK17},
  category  = {poster},
  doi       = {10.1145/3055004.3064843},
  file      = {:Chhokra2017a-Diagnostics_and_prognostics_using_temporal_causal_models_for_cyber_physical_energy_systems.pdf:PDF},
  keywords  = {reliability, smartgrid},
  project   = {cps-reliability},
  timestamp = {Wed, 16 Oct 2019 14:14:57 +0200},
  url       = {https://doi.org/10.1145/3055004.3064843},
}
Reliable operation of cyber-physical systems such as power transmission and distribution systems is critical for the seamless functioning of a vibrant economy. These systems consist of tightly coupled physical (energy sources, transmission and distribution lines, and loads) and computational components (protection devices, energy management systems, etc.). The protection devices such as distance relays help in preventing failure propagation by isolating faulty physical components. However, these devices rely on hard thresholds and local information, often ignoring system-level effects introduced by the distributed control algorithms. This leads to scenarios wherein a local mitigation in a subsystem could trigger a larger fault cascade, possibly resulting in a blackout. Efficient models and tools that curtail such systematic failures by performing fault diagnosis and prognosis are therefore necessary.
- S. Eisele, A. Dubey, G. Karsai, and S. Lukic, Transactive energy demo with RIAPS platform, in Proceedings of the 8th International Conference on Cyber-Physical Systems, ICCPS 2017, Pittsburgh, Pennsylvania, USA, April 18-20, 2017, 2017, p. 91.
@inproceedings{Eisele2017a,
  author    = {Eisele, Scott and Dubey, Abhishek and Karsai, Gabor and Lukic, Srdjan},
  title     = {Transactive energy demo with {RIAPS} platform},
  booktitle = {Proceedings of the 8th International Conference on Cyber-Physical Systems, {ICCPS} 2017, Pittsburgh, Pennsylvania, USA, April 18-20, 2017},
  year      = {2017},
  pages     = {91},
  tag       = {decentralization,power},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/iccps/EiseleDKL17},
  category  = {poster},
  doi       = {10.1145/3055004.3064845},
  file      = {:Eisele2017a-Transactive_energy_demo_with_RIAPS_platform.pdf:PDF},
  keywords  = {transactive},
  project   = {cps-reliability,cps-middleware,transactive-energy},
  timestamp = {Wed, 16 Oct 2019 14:14:57 +0200},
  url       = {https://doi.org/10.1145/3055004.3064845},
}
This work presents a platform for decentralized distributed computing called Resilient Information Architecture for the Smart Grid (RIAPS) through a transactional energy and a traffic application.
- A. Laszka, A. Dubey, M. Walker, and D. C. Schmidt, Providing privacy, safety, and security in IoT-based transactive energy systems using distributed ledgers, in Proceedings of the Seventh International Conference on the Internet of Things, IOT 2017, Linz, Austria, October 22-25, 2017, 2017, pp. 13:1–13:8.
@inproceedings{Laszka2017,
  author    = {Laszka, Aron and Dubey, Abhishek and Walker, Michael and Schmidt, Douglas C.},
  title     = {Providing privacy, safety, and security in {IoT}-based transactive energy systems using distributed ledgers},
  booktitle = {Proceedings of the Seventh International Conference on the Internet of Things, {IOT} 2017, Linz, Austria, October 22-25, 2017},
  year      = {2017},
  pages     = {13:1--13:8},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/iot/LaszkaDWS17},
  category  = {selectiveconference},
  doi       = {10.1145/3131542.3131562},
  file      = {:Laszka2017-Providing_privacy_safety_and_security_in_IoT-based_transactive_energy_systems_using_distributed_ledgers.pdf:PDF},
  keywords  = {transactive, blockchain},
  project   = {cps-reliability,cps-blockchains,transactive-energy},
  tag       = {decentralization,power},
  timestamp = {Tue, 12 Nov 2019 00:00:00 +0100},
  url       = {https://doi.org/10.1145/3131542.3131562},
}
Power grids are undergoing major changes due to rapid growth in renewable energy resources and improvements in battery technology. While these changes enhance sustainability and efficiency, they also create significant management challenges as the complexity of power systems increases. To tackle these challenges, decentralized Internet-of-Things (IoT) solutions are emerging, which arrange local communities into transactive microgrids. Within a transactive microgrid, “prosumers” (i.e., consumers with energy generation and storage capabilities) can trade energy with each other, thereby smoothing the load on the main grid using local supply. It is hard, however, to provide security, safety, and privacy in a decentralized and transactive energy system. On the one hand, prosumers’ personal information must be protected from their trade partners and the system operator. On the other hand, the system must be protected from careless or malicious trading, which could destabilize the entire grid. This paper describes Privacy-preserving Energy Transactions (PETra), which is a secure and safe solution for transactive microgrids that enables consumers to trade energy without sacrificing their privacy. PETra builds on distributed ledgers, such as blockchains, and provides anonymity for communication, bidding, and trading.
- S. Hasan, A. Chhokra, A. Dubey, N. Mahadevan, G. Karsai, R. Jain, and S. Lukic, A simulation testbed for cascade analysis, in IEEE Power & Energy Society Innovative Smart Grid Technologies Conference, ISGT 2017, Washington, DC, USA, April 23-26, 2017, 2017, pp. 1–5.
@inproceedings{Hasan2017,
  author    = {Hasan, Saqib and Chhokra, Ajay and Dubey, Abhishek and Mahadevan, Nagabhushan and Karsai, Gabor and Jain, Rishabh and Lukic, Srdjan},
  title     = {A simulation testbed for cascade analysis},
  booktitle = {{IEEE} Power {\&} Energy Society Innovative Smart Grid Technologies Conference, {ISGT} 2017, Washington, DC, USA, April 23-26, 2017},
  year      = {2017},
  pages     = {1--5},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/isgt/HasanCDMKJL17},
  category  = {selectiveconference},
  doi       = {10.1109/ISGT.2017.8086080},
  file      = {:Hasan2017-A_simulation_testbed_for_cascade_analysis.pdf:PDF},
  keywords  = {smartgrid},
  project   = {cps-reliability},
  tag       = {platform,power},
  timestamp = {Wed, 16 Oct 2019 14:14:57 +0200},
  url       = {https://doi.org/10.1109/ISGT.2017.8086080},
}
Electrical power systems are heavily instrumented with protection assemblies (relays and breakers) that detect anomalies and arrest failure propagation. However, failures in these discrete protection devices could have inadvertent consequences, including cascading failures resulting in blackouts. This paper aims to model the behavior of these discrete protection devices in nominal and faulty conditions and apply it towards simulation and contingency analysis of cascading failures in power transmission systems. The behavior under fault conditions are used to identify and explain conditions for blackout evolution which are not otherwise obvious. The results are demonstrated using a standard IEEE-14 Bus System.
- S. P. Khare, J. Sallai, A. Dubey, and A. S. Gokhale, Short Paper: Towards Low-Cost Indoor Localization Using Edge Computing Resources, in 20th IEEE International Symposium on Real-Time Distributed Computing, ISORC 2017, Toronto, ON, Canada, May 16-18, 2017, 2017, pp. 28–31.
@inproceedings{Khare2017,
  author    = {Khare, Shweta Prabhat and Sallai, J{\'{a}}nos and Dubey, Abhishek and Gokhale, Aniruddha S.},
  title     = {Short Paper: Towards Low-Cost Indoor Localization Using Edge Computing Resources},
  booktitle = {20th {IEEE} International Symposium on Real-Time Distributed Computing, {ISORC} 2017, Toronto, ON, Canada, May 16-18, 2017},
  year      = {2017},
  tag       = {transit},
  pages     = {28--31},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/isorc/KhareSDG17},
  category  = {selectiveconference},
  doi       = {10.1109/ISORC.2017.23},
  file      = {:Khare2017-Short_Paper_Towards_Low-Cost_Indoor_Localization_Using_Edge_Computing_Resources.pdf:PDF},
  keywords  = {performance, middleware},
  project   = {cps-middleware},
  timestamp = {Wed, 16 Oct 2019 14:14:53 +0200},
  url       = {https://doi.org/10.1109/ISORC.2017.23},
}
Emerging smart services, such as indoor smart parking or patient monitoring and tracking in hospitals, incur a significant technical roadblock stemming primarily from a lack of cost-effective and easily deployable localization framework that impedes their widespread deployment. To address this concern, in this paper we present a low-cost, indoor localization and navigation system, which performs continuous and real-time processing of Bluetooth Low Energy (BLE) and IEEE 802.15.4a compliant Ultra-wideband (UWB) sensor data to localize and navigate the concerned entity to its desired location. Our approach depends upon fusing the two feature sets, using the UWB to calibrate the BLE localization mechanism.
- S. Eisele, I. Madari, A. Dubey, and G. Karsai, RIAPS: Resilient Information Architecture Platform for Decentralized Smart Systems, in 20th IEEE International Symposium on Real-Time Distributed Computing, ISORC 2017, Toronto, ON, Canada, May 16-18, 2017, 2017, pp. 125–132.
@inproceedings{Eisele2017b,
  author    = {Eisele, Scott and Madari, Istv{\'{a}}n and Dubey, Abhishek and Karsai, Gabor},
  title     = {{RIAPS:} Resilient Information Architecture Platform for Decentralized Smart Systems},
  booktitle = {20th {IEEE} International Symposium on Real-Time Distributed Computing, {ISORC} 2017, Toronto, ON, Canada, May 16-18, 2017},
  year      = {2017},
  pages     = {125--132},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/isorc/EiseleMDK17},
  category  = {selectiveconference},
  doi       = {10.1109/ISORC.2017.22},
  file      = {:Eisele2017b-RIAPS_Resilient_Information_Architecture_Platform_for_Decentralized_Smart_Systems.pdf:PDF},
  keywords  = {middleware},
  project   = {smart-transit,smart-cities},
  tag       = {platform,decentralization,power},
  timestamp = {Wed, 16 Oct 2019 14:14:53 +0200},
  url       = {https://doi.org/10.1109/ISORC.2017.22},
}
The emerging Fog Computing paradigm provides an additional computational layer that enables new capabilities in real-time data-driven applications. This is especially interesting in the domain of Smart Grid as the boundaries between traditional generation, distribution, and consumer roles are blurring. This is a reflection of the ongoing trend of intelligence distribution in Smart Systems. In this paper, we briefly describe a component-based decentralized software platform called Resilient Information Architecture Platform for Smart Systems (RIAPS) which provides an infrastructure for such systems. We briefly describe some initial applications built using this platform. Then, we focus on the design and integration choices for a resilient Discovery Manager service that is a critical component of this infrastructure. The service allows applications to discover each other, work collaboratively, and ensure the stability of the Smart System.
- J. Bergquist, A. Laszka, M. Sturm, and A. Dubey, On the design of communication and transaction anonymity in blockchain-based transactive microgrids, in Proceedings of the 1st Workshop on Scalable and Resilient Infrastructures for Distributed Ledgers, SERIAL@Middleware 2017, Las Vegas, NV, USA, December 11-15, 2017, 2017, pp. 3:1–3:6.
@inproceedings{Bergquist2017,
  author    = {Bergquist, Jonatan and Laszka, Aron and Sturm, Monika and Dubey, Abhishek},
  title     = {On the design of communication and transaction anonymity in blockchain-based transactive microgrids},
  booktitle = {Proceedings of the 1st Workshop on Scalable and Resilient Infrastructures for Distributed Ledgers, SERIAL@Middleware 2017, Las Vegas, NV, USA, December 11-15, 2017},
  year      = {2017},
  pages     = {3:1--3:6},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/middleware/BergquistLSD17},
  category  = {workshop},
  doi       = {10.1145/3152824.3152827},
  file      = {:Bergquist2017-On_the_design_of_communication_and_transaction_anonymity_in_blockchain-based_transactive_microgrids.pdf:PDF},
  keywords  = {transactive},
  project   = {transactive-energy,cps-middleware,cps-reliability},
  tag       = {decentralization,platform},
  timestamp = {Tue, 06 Nov 2018 16:57:13 +0100},
  url       = {https://doi.org/10.1145/3152824.3152827},
}
Transactive microgrids are emerging as a transformative solution for the problems faced by distribution system operators due to an increase in the use of distributed energy resources and a rapid acceleration in renewable energy generation, such as wind and solar power. Distributed ledgers have recently found widespread interest in this domain due to their ability to provide transactional integrity across decentralized computing nodes. However, the existing state of the art has not focused on the privacy preservation requirement of these energy systems – the transaction level data can provide much greater insights into a prosumer’s behavior compared to smart meter data. There are specific safety requirements in transactive microgrids to ensure the stability of the grid and to control the load. To fulfil these requirements, the distribution system operator needs transaction information from the grid, which poses a further challenge to the privacy goals. This problem is made worse by the requirement for off-blockchain communication in these networks. In this paper, we extend a recently developed trading workflow called PETra and describe our solution for communication and transactional anonymity.
- M. A. Walker, A. Dubey, A. Laszka, and D. C. Schmidt, PlaTIBART: a platform for transactive IoT blockchain applications with repeatable testing, in Proceedings of the 4th Workshop on Middleware and Applications for the Internet of Things, M4IoT@Middleware 2017, Las Vegas, NV, USA, December 11, 2017, 2017, pp. 17–22.
@inproceedings{Walker2017,
  author    = {Walker, Michael A. and Dubey, Abhishek and Laszka, Aron and Schmidt, Douglas C.},
  title     = {{PlaTIBART}: a platform for transactive {IoT} blockchain applications with repeatable testing},
  booktitle = {Proceedings of the 4th Workshop on Middleware and Applications for the Internet of Things, M4IoT@Middleware 2017, Las Vegas, NV, USA, December 11, 2017},
  year      = {2017},
  pages     = {17--22},
  tag       = {decentralization},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/middleware/WalkerDLS17},
  category  = {workshop},
  doi       = {10.1145/3152141.3152392},
  file      = {:Walker2017-PlaTIBART_a_platform_for_transactive_IoT_blockchain_applications_with_repeatable_testing.pdf:PDF},
  keywords  = {blockchain},
  project   = {transactive-energy,cps-middleware,cps-reliability},
  timestamp = {Tue, 06 Nov 2018 00:00:00 +0100},
  url       = {https://doi.org/10.1145/3152141.3152392},
}
With the advent of blockchain-enabled IoT applications, there is an increased need for related software patterns, middleware concepts, and testing practices to ensure adequate quality and productivity. IoT and blockchain each provide different design goals, concepts, and practices that must be integrated, including the distributed actor model and fault tolerance from IoT and transactive information integrity over untrustworthy sources from blockchain. Both IoT and blockchain are emerging technologies and both lack codified patterns and practices for development of applications when combined. This paper describes PlaTIBART, which is a platform for transactive IoT blockchain applications with repeatable testing that combines the Actor pattern (which is a commonly used model of computation in IoT) together with a custom Domain Specific Language (DSL) and test network management tools. We show how PlaTIBART has been applied to develop, test, and analyze fault-tolerant IoT blockchain applications.
- P. Völgyesi, A. Dubey, T. Krentz, I. Madari, M. Metelko, and G. Karsai, Time synchronization services for low-cost fog computing applications, in International Symposium on Rapid System Prototyping, RSP 2017, Shortening the Path from Specification to Prototype, October 19-20, 2017, Seoul, South Korea, 2017, pp. 57–63.
@inproceedings{Voelgyesi2017,
  author    = {V{\"{o}}lgyesi, P{\'{e}}ter and Dubey, Abhishek and Krentz, Timothy and Madari, Istv{\'{a}}n and Metelko, Mary and Karsai, Gabor},
  title     = {Time synchronization services for low-cost fog computing applications},
  booktitle = {International Symposium on Rapid System Prototyping, {RSP} 2017, Shortening the Path from Specification to Prototype, October 19-20, 2017, Seoul, South Korea},
  year      = {2017},
  pages     = {57--63},
  doi       = {10.1145/3130265.3130325},
  url       = {https://doi.org/10.1145/3130265.3130325},
  category  = {selectiveconference},
  keywords  = {middleware},
  project   = {cps-middleware,cps-reliability},
  tag       = {platform,decentralization},
  file      = {:Voelgyesi2017-Time_synchronization_services_for_low-cost_fog_computing_applications.pdf:PDF},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/rsp/VolgyesiDKMMK17},
  timestamp = {Tue, 06 Nov 2018 11:07:11 +0100}
}
This paper presents the time synchronization infrastructure for a low-cost run-time platform and application framework specifically targeting Smart Grid applications. Such distributed applications require the execution of reliable and accurate time-coordinated actions and observations both within islands of deployments and across geographically distant nodes. The time synchronization infrastructure is built on well-established technologies: GPS, NTP, PTP, PPS and Linux with real-time extensions, running on low-cost BeagleBone Black hardware nodes. We describe the architecture, implementation, instrumentation approach, performance results and present an example from the application domain. Also, we discuss an important finding on the effect of the Linux RT_PREEMPT real-time patch on the accuracy of the PPS subsystem and its use for GPS-based time references.
- C. Samal, F. Sun, and A. Dubey, SpeedPro: A Predictive Multi-Model Approach for Urban Traffic Speed Estimation, in 2017 IEEE International Conference on Smart Computing, SMARTCOMP 2017, Hong Kong, China, May 29-31, 2017, 2017, pp. 1–6.
@inproceedings{Samal2017,
  author    = {Samal, Chinmaya and Sun, Fangzhou and Dubey, Abhishek},
  title     = {{SpeedPro}: {A} Predictive Multi-Model Approach for Urban Traffic Speed Estimation},
  booktitle = {2017 {IEEE} International Conference on Smart Computing, {SMARTCOMP} 2017, Hong Kong, China, May 29-31, 2017},
  year      = {2017},
  pages     = {1--6},
  tag       = {ai4cps,transit},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/smartcomp/SamalSD17},
  category  = {workshop},
  doi       = {10.1109/SMARTCOMP.2017.7947048},
  file      = {:Samal2017-SpeedPro_A_Predictive_Multi-Model_Approach_for_Urban_Traffic_Speed_Estimation.pdf:PDF},
  keywords  = {transit},
  project   = {smart-transit,smart-cities},
  timestamp = {Wed, 16 Oct 2019 14:14:54 +0200},
  url       = {https://doi.org/10.1109/SMARTCOMP.2017.7947048}
}
Data generated by GPS-equipped probe vehicles, especially public transit vehicles, can be a reliable source for traffic speed estimation. Traditionally, this estimation is done by learning the parameters of a model that describes the relationship between the speed of the probe vehicle and the actual traffic speed. However, such approaches typically suffer from data sparsity issues. Furthermore, most state-of-the-art approaches do not consider the effect of weather and the driver of the probe vehicle on the parameters of the learned model. In this paper, we describe a multivariate predictive multi-model approach called SpeedPro that (a) first identifies similar clusters of operation from the historic data that includes the real-time position of the probe vehicle, the weather data, and anonymized driver identifier, and then (b) uses these different models to estimate the traffic speed in real-time as a function of current weather, driver and probe vehicle speed. When the real-time information is not available, our approach uses a different model that uses the historical weather and traffic information for estimation. Our results show that the model based purely on historical data is less accurate than the model that uses the real-time information.
- F. Sun, C. Samal, J. White, and A. Dubey, Unsupervised Mechanisms for Optimizing On-Time Performance of Fixed Schedule Transit Vehicles, in 2017 IEEE International Conference on Smart Computing, SMARTCOMP 2017, Hong Kong, China, May 29-31, 2017, 2017, pp. 1–8.
@inproceedings{Sun2017a,
  author    = {Sun, Fangzhou and Samal, Chinmaya and White, Jules and Dubey, Abhishek},
  title     = {Unsupervised Mechanisms for Optimizing On-Time Performance of Fixed Schedule Transit Vehicles},
  booktitle = {2017 {IEEE} International Conference on Smart Computing, {SMARTCOMP} 2017, Hong Kong, China, May 29-31, 2017},
  year      = {2017},
  pages     = {1--8},
  doi       = {10.1109/SMARTCOMP.2017.7947057},
  url       = {https://doi.org/10.1109/SMARTCOMP.2017.7947057},
  category  = {selectiveconference},
  keywords  = {transit},
  project   = {smart-transit,smart-cities},
  tag       = {ai4cps,transit},
  file      = {:Sun2017a-Unsupervised_Mechanisms_for_Optimizing_On-Time_Performance_of_Fixed_Schedule_Transit_Vehicles.pdf:PDF},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/smartcomp/SunSWD17},
  timestamp = {Wed, 16 Oct 2019 14:14:54 +0200}
}
The on-time arrival performance of vehicles at stops is a critical metric for both riders and city planners to evaluate the reliability of a transit system. However, it is a non-trivial task for transit agencies to adjust the existing bus schedule to optimize the on-time performance for the future. For example, severe weather conditions and special events in the city could slow down traffic and cause bus delay. Furthermore, the delay of previous trips may affect the initial departure time of consecutive trips and generate accumulated delay. In this paper, we formulate the problem as a single-objective optimization task with constraints and propose a greedy algorithm and a genetic algorithm to generate bus schedules at timepoints that improve the bus on-time performance at timepoints which is indicated by whether the arrival delay is within the desired range. We use the Nashville bus system as a case study and simulate the optimization performance using historical data. The comparative analysis of the results identifies that delay patterns change over time and reveals the efficiency of the greedy and genetic algorithms.
- S. Nannapaneni, A. Dubey, and S. Mahadevan, Performance evaluation of smart systems under uncertainty, in 2017 IEEE SmartWorld, 2017, pp. 1–8.
@inproceedings{Nannapaneni2017,
  author    = {Nannapaneni, Saideep and Dubey, Abhishek and Mahadevan, Sankaran},
  title     = {Performance evaluation of smart systems under uncertainty},
  booktitle = {2017 {IEEE} SmartWorld},
  year      = {2017},
  pages     = {1--8},
  doi       = {10.1109/UIC-ATC.2017.8397430},
  url       = {https://doi.org/10.1109/UIC-ATC.2017.8397430},
  category  = {selectiveconference},
  keywords  = {performance},
  project   = {cps-reliability},
  tag       = {platform},
  file      = {:Nannapaneni2017-Performance_evaluation_of_smart_systems_under_uncertainty.pdf:PDF},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/uic/NannapaneniDM17},
  timestamp = {Wed, 16 Oct 2019 14:14:50 +0200}
}
This paper develops a model-based framework for the quantification and propagation of multiple uncertainty sources affecting the performance of a smart system. A smart system, in general, performs sensing, control and actuation for proper functioning of a physical subsystem (also referred to as a plant). With strong feedback coupling between several subsystems, the uncertainty in the quantities of interest (QoI) amplifies over time. The coupling in a generic smart system occurs at two levels: (1) coupling between individual subsystems (plant, cyber, actuation, sensors), and (2) coupling between nodes in a distributed computational subsystem. In this paper, a coupled smart system is decoupled and considered as a feed-forward system over time and modeled using a two-level Dynamic Bayesian Network (DBN), one at each level of coupling (between subsystems and between nodes). A DBN can aggregate uncertainty from multiple sources within a time step and across time steps. The DBN associated with a smart system can be learned using available system models, physics models and data. The proposed methodology is demonstrated for the design of a smart indoor heating system (identification of sensors and a wireless network) within cost constraints that enables room-by-room temperature control. We observe that sensor uncertainty has a higher impact on the performance of the heating system compared to the uncertainty in the wireless network.
- G. Pettet, S. Nannapaneni, B. Stadnick, A. Dubey, and G. Biswas, Incident analysis and prediction using clustering and Bayesian network, in 2017 IEEE SmartWorld, 2017, pp. 1–8.
@inproceedings{Pettet2017,
  author    = {Pettet, Geoffrey and Nannapaneni, Saideep and Stadnick, Benjamin and Dubey, Abhishek and Biswas, Gautam},
  title     = {Incident analysis and prediction using clustering and {Bayesian} network},
  booktitle = {2017 {IEEE} SmartWorld},
  year      = {2017},
  tag       = {ai4cps,incident},
  pages     = {1--8},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/uic/PettetNSDB17},
  category  = {selectiveconference},
  doi       = {10.1109/UIC-ATC.2017.8397587},
  file      = {:Pettet2017-Incident_analysis_and_prediction_using_clustering_and_Bayesian_network.pdf:PDF},
  keywords  = {emergency},
  project   = {smart-emergency-response,smart-cities},
  timestamp = {Wed, 16 Oct 2019 14:14:50 +0200},
  url       = {https://doi.org/10.1109/UIC-ATC.2017.8397587}
}
Advances in data collection and storage infrastructure offer an unprecedented opportunity to integrate both data and emergency resources in a city into a dynamic learning system that can anticipate and rapidly respond to heterogeneous incidents. In this paper, we describe integration methods for spatio-temporal incident forecasting using previously collected vehicular accident data provided to us by the Nashville Fire Department. The literature provides several techniques that focus on analyzing features and predicting accidents for specific situations (specific intersections in a city, or certain segments of a freeway, for example), but these models break down when applied to a large, general area consisting of many road and intersection types and other factors like weather conditions. We use Similarity Based Agglomerative Clustering (SBAC) analysis to categorize incidents to account for these variables. Thereafter, we use survival analysis to learn the likelihood of incidents per cluster. The mapping of the clusters to the spatial locations is achieved using a Bayesian network. The prediction methods we have developed lay the foundation for future work on an optimal emergency vehicle allocation and dispatch system in Nashville.
- A. Dubey and Garcı́a-Valls Marisol, Eds., Proceedings of the 16th Workshop on Adaptive and Reflective Middleware, ARM@Middleware 2017, Las Vegas, NV, USA, December 11 - 15, 2017. ACM, 2017.
@proceedings{Dubey2017a,
  editor    = {Dubey, Abhishek and Garc{\'{\i}}a{-}Valls, Marisol},
  title     = {Proceedings of the 16th Workshop on Adaptive and Reflective Middleware, ARM@Middleware 2017, Las Vegas, NV, USA, December 11 - 15, 2017},
  year      = {2017},
  publisher = {{ACM}},
  isbn      = {978-1-4503-5168-3},
  url       = {http://dl.acm.org/citation.cfm?id=3152881},
  category  = {workshop},
  project   = {cps-middleware},
  file      = {:Dubey2017a-Proceedings_of_the_16th_Workshop_on_ARM.pdf:PDF},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/middleware/2017arm},
  timestamp = {Wed, 28 Feb 2018 00:00:00 +0100}
}
- K. Kvaternik, A. Laszka, M. Walker, D. C. Schmidt, M. Sturm, M. Lehofer, and A. Dubey, Privacy-Preserving Platform for Transactive Energy Systems, in preprint at arxiv, 2017, vol. abs/1709.09597.
@article{Kvaternik2017,
  author        = {Kvaternik, Karla and Laszka, Aron and Walker, Michael and Schmidt, Douglas C. and Sturm, Monika and Lehofer, Martin and Dubey, Abhishek},
  title         = {Privacy-Preserving Platform for Transactive Energy Systems},
  journal       = {CoRR},
  year          = {2017},
  volume        = {abs/1709.09597},
  archiveprefix = {arXiv},
  eprint        = {1709.09597},
  bibsource     = {dblp computer science bibliography, https://dblp.org},
  biburl        = {https://dblp.org/rec/bib/journals/corr/abs-1709-09597},
  file          = {:Kvaternik2017-Privacy_Preserving_Platform_for_Transactive_Energy_Systems.pdf:PDF},
  keywords      = {transactive},
  project       = {transactive-energy,smart-energy},
  tag           = {decentralization,power},
  timestamp     = {Tue, 12 Nov 2019 00:00:00 +0100},
  url           = {http://arxiv.org/abs/1709.09597}
}
Transactive energy systems (TES) are emerging as a transformative solution for the problems faced by distribution system operators due to an increase in the use of distributed energy resources and a rapid acceleration in renewable energy generation. These, on one hand, pose a decentralized power system controls problem, requiring strategic microgrid control to maintain stability for the community and for the utility. On the other hand, they require robust financial markets operating on distributed software platforms that preserve privacy. In this paper, we describe the implementation of a novel, blockchain-based transactive energy system. We outline the key requirements and motivation of this platform, describe the lessons learned, and provide a description of key architectural components of this system.
- A. Dubey, G. Karsai, A. Gokhale, W. Emfinger, and P. Kumar, Drems-os: An operating system for managed distributed real-time embedded systems, in 2017 6th International Conference on Space Mission Challenges for Information Technology (SMC-IT), 2017, pp. 114–119.
@inproceedings{Dubey2017b,
  author       = {Dubey, Abhishek and Karsai, Gabor and Gokhale, Aniruddha and Emfinger, William and Kumar, Pranav},
  title        = {{DREMS-OS}: An operating system for managed distributed real-time embedded systems},
  booktitle    = {2017 6th International Conference on Space Mission Challenges for Information Technology (SMC-IT)},
  year         = {2017},
  tag          = {platform},
  pages        = {114--119},
  organization = {IEEE},
  category     = {conference},
  file         = {:Dubey2017b-Drems-os_An_operating_system_for_managed_distributed_real-time_embedded_systems.pdf:PDF},
  keywords     = {middleware},
  project      = {cps-middleware}
}
Distributed real-time and embedded (DRE) systems executing mixed criticality task sets are increasingly being deployed in mobile and embedded cloud computing platforms, including space applications. These DRE systems must not only operate over a range of temporal and spatial scales, but also require stringent assurances for secure interactions between the system’s tasks without violating their individual timing constraints. To address these challenges, this paper describes a novel distributed operating system focusing on the scheduler design to support the mixed criticality task sets. Empirical results from experiments involving a case study of a cluster of satellites emulated in a laboratory testbed validate our claims.
- S. Hasan, A. Ghafouri, A. Dubey, G. Karsai, and X. Koutsoukos, Heuristics-based approach for identifying critical N-k contingencies in power systems, in 2017 Resilience Week (RWS), 2017, pp. 191–197.
@inproceedings{Hasan2017a,
  author    = {{Hasan}, S. and {Ghafouri}, A. and Dubey, Abhishek and {Karsai}, G. and {Koutsoukos}, X.},
  title     = {Heuristics-based approach for identifying critical {N-k} contingencies in power systems},
  booktitle = {2017 Resilience Week (RWS)},
  year      = {2017},
  pages     = {191--197},
  month     = sep,
  category  = {conference},
  doi       = {10.1109/RWEEK.2017.8088671},
  file      = {:Hasan2017a-Heuristics-based_approach_for_identifying_critical_N_k_contingencies_in_power_systems.pdf:PDF},
  keywords  = {smartgrid},
  project   = {cps-reliability,smart-energy},
  tag       = {platform,power}
}
Reliable operation of electrical power systems in the presence of multiple critical N - k contingencies is an important challenge for the system operators. Identifying all the possible N - k critical contingencies to design effective mitigation strategies is computationally infeasible due to the combinatorial explosion of the search space. This paper describes two heuristic algorithms based on the iterative pruning of the candidate contingency set to effectively and efficiently identify all the critical N - k contingencies resulting in system failure. These algorithms are applied to the standard IEEE-14 bus system, IEEE-39 bus system, and IEEE-57 bus system to identify multiple critical N - k contingencies.
- Y. Du, H. Tu, S. Lukic, D. Lubkeman, A. Dubey, and G. Karsai, Implementation of a distributed microgrid controller on the Resilient Information Architecture Platform for Smart Systems (RIAPS), in 2017 North American Power Symposium (NAPS), 2017, pp. 1–6.
@inproceedings{DuTu2017,
  author    = {{Du}, Y. and {Tu}, H. and {Lukic}, S. and {Lubkeman}, D. and Dubey, Abhishek and {Karsai}, G.},
  title     = {Implementation of a distributed microgrid controller on the Resilient Information Architecture Platform for Smart Systems ({RIAPS})},
  booktitle = {2017 North American Power Symposium (NAPS)},
  year      = {2017},
  pages     = {1--6},
  month     = sep,
  category  = {selectiveconference},
  doi       = {10.1109/NAPS.2017.8107305},
  file      = {:DuTu2017-Implementation_of_a_distributed_microgrid_controller_on_RIAPS.pdf:PDF},
  keywords  = {smartgrid},
  tag       = {power}
}
The formation of microgrids has been proposed as a solution to improve grid reliability, and enable smoother integration of renewables into the grid. Microgrids are sections of the grid that can operate in isolation from the main power system. Maintaining power balance within an islanded microgrid is a challenging task, due to the low system inertia, which requires complex control to maintain stable and optimized operation. Many studies have demonstrated feasible distributed microgrid controllers that can maintain microgrid stability in grid connected and islanded modes. However, there is little emphasis on how to implement these distributed algorithms on a computational platform that allows for fast and seamless deployment. This paper introduces a decentralized software platform called Resilient Information Architecture Platform for Smart Systems (RIAPS) that runs on processors embedded with the microgrid component. As an example, we describe the implementation of distributed microgrid secondary control and resynchronization algorithms on the RIAPS platform. The controller developed on the RIAPS platform is validated on a real-time microgrid testbed.
- S. Hasan, A. Dubey, A. Chhokra, N. Mahadevan, G. Karsai, and X. Koutsoukos, A modeling framework to integrate exogenous tools for identifying critical components in power systems, in 2017 Workshop on Modeling and Simulation of Cyber-Physical Energy Systems (MSCPES), 2017, pp. 1–6.
@inproceedings{Hasan2017b,
  author    = {{Hasan}, S. and Dubey, Abhishek and {Chhokra}, A. and {Mahadevan}, N. and {Karsai}, G. and {Koutsoukos}, X.},
  title     = {A modeling framework to integrate exogenous tools for identifying critical components in power systems},
  booktitle = {2017 Workshop on Modeling and Simulation of Cyber-Physical Energy Systems (MSCPES)},
  year      = {2017},
  pages     = {1--6},
  month     = apr,
  category  = {workshop},
  doi       = {10.1109/MSCPES.2017.8064540},
  file      = {:Hasan2017b-A_modeling_framework_to_integrate_exogenous_tools_for_identifying_critical_components_in_power_systems.pdf:PDF},
  keywords  = {smartgrid},
  tag       = {platform,power}
}
Cascading failures in electrical power systems are one of the major causes of concern for modern society as they result in huge socio-economic loss. Tools for analyzing these failures while considering different aspects of the system are typically very expensive. Thus, researchers tend to use multiple tools to perform various types of analysis on the same system model in order to understand the reasons for these failures in detail. Modeling a simple system in multiple platforms is a tedious, error prone and time consuming process. This paper describes a domain specific modeling language (DSML) for power systems. It identifies and captures the right abstractions for modeling components in different analysis tools. A framework is proposed that deals with system modeling using the developed DSML, identifying the type of analysis to be performed, choosing the appropriate tool(s) needed for the analysis from the tool-chain, transforming the model based on the required specifications of a particular tool and performing the analysis. A case study is done on the WSCC-9 Bus System, IEEE-14 Bus System and IEEE-39 Bus System to demonstrate the entire workflow of the framework in identifying critical components for power systems.
- S. Nannapaneni, S. Mahadevan, A. Dubey, D. Lechevalier, A. Narayanan, and S. Rachuri, Automated Uncertainty Quantification Through Information Fusion in Manufacturing Processes, Smart and Sustainable Manufacturing Systems, vol. 1, no. 1, pp. 153–177, 2017.
@article{Nannapaneni2017a,
  author   = {Nannapaneni, S. and Mahadevan, S. and Dubey, A. and Lechevalier, D. and Narayanan, A. and Rachuri, S.},
  title    = {Automated Uncertainty Quantification Through Information Fusion in Manufacturing Processes},
  journal  = {Smart and Sustainable Manufacturing Systems},
  year     = {2017},
  volume   = {1},
  number   = {1},
  pages    = {153--177},
  issn     = {2520-6478},
  file     = {:Nannapaneni2017a-Automated_Uncertainty_Quantification_through_Information_Fusion_in_Manufacturing_Processes.pdf:PDF},
  keywords = {performance},
  language = {eng}
}
Evaluation of key performance indicators (KPIs) such as energy consumption is essential for decision-making during the design and operation of smart manufacturing systems. The measurements of KPIs are strongly affected by several uncertainty sources such as input material uncertainty, the inherent variability in the manufacturing process, model uncertainty, and the uncertainty in the sensor measurements of operational data. A comprehensive understanding of the uncertainty sources and their effect on the KPIs is required to make the manufacturing processes more efficient. Towards this objective, this paper proposed an automated methodology to generate a hierarchical Bayesian network (HBN) for a manufacturing system from semantic system models, physics-based models, and available data in an automated manner, which can be used to perform uncertainty quantification (UQ) analysis. The semantic system model, which is a high-level model describing the system along with its parameters, is assumed to be available in the generic modeling environment (GME) platform. Apart from semantic description, physics-based models, if available, are assumed to be available in model libraries. The proposed methodology was divided into two tasks: (1) automated hierarchical Bayesian network construction using the semantic system model, available models and data, and (2) automated uncertainty quantification (UQ) analysis. A metamodel of an HBN was developed using the GME, along with a syntax representation for the associated conditional probability tables/distributions. The constructed HBN corresponding to a system was represented as an instance model of the HBN metamodel. On the metamodel, a model interpreter was written to be able to carry out the UQ analysis in an automated manner for any HBN instance model conforming to the HBN metamodel. The proposed methodologies are demonstrated using an injection molding process.
2016
- S. Pradhan, A. Dubey, T. Levendovszky, P. S. Kumar, W. Emfinger, D. Balasubramanian, W. Otte, and G. Karsai, Achieving resilience in distributed software systems via self-reconfiguration, Journal of Systems and Software, vol. 122, pp. 344–363, 2016.
@article{Pradhan2016,
  author    = {Pradhan, Subhav and Dubey, Abhishek and Levendovszky, Tihamer and Kumar, Pranav Srinivas and Emfinger, William and Balasubramanian, Daniel and Otte, William and Karsai, Gabor},
  title     = {Achieving resilience in distributed software systems via self-reconfiguration},
  journal   = {Journal of Systems and Software},
  year      = {2016},
  volume    = {122},
  tag       = {platform,ai4cps},
  pages     = {344--363},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/journals/jss/PradhanDLKEBOK16},
  doi       = {10.1016/j.jss.2016.05.038},
  file      = {:Pradhan2016-Achieving_resilience_in_distributed_software_systems_via_self-reconfiguration.pdf:PDF},
  keywords  = {reliability},
  project   = {cps-middleware,cps-reliability},
  timestamp = {Mon, 06 Nov 2017 00:00:00 +0100},
  url       = {https://doi.org/10.1016/j.jss.2016.05.038}
}
Improvements in mobile networking combined with the ubiquitous availability and adoption of low-cost development boards have enabled the vision of mobile platforms of Cyber-Physical Systems (CPS), such as fractionated spacecraft and UAV swarms. Computation and communication resources, sensors, and actuators that are shared among different applications characterize these systems. The cyber-physical nature of these systems means that physical environments can affect both the resource availability and software applications that depend on resource availability. While many application development and management challenges associated with such systems have been described in existing literature, resilient operation and execution have received less attention. This paper describes our work on improving runtime support for resilience in mobile CPS, with a special focus on our runtime infrastructure that provides autonomous resilience via self-reconfiguration. We also describe the interplay between this runtime infrastructure and our design-time tools, as the latter is used to statically determine the resilience properties of the former. Finally, we present a use case study to demonstrate and evaluate our design-time resilience analysis and runtime self-reconfiguration infrastructure.
- G. Martins, A. Moondra, A. Dubey, A. Bhattacharjee, and X. D. Koutsoukos, Computation and Communication Evaluation of an Authentication Mechanism for Time-Triggered Networked Control Systems, Sensors, vol. 16, no. 8, p. 1166, 2016.
@article{Martins2016,
  author    = {Martins, Gon{\c{c}}alo and Moondra, Arul and Dubey, Abhishek and Bhattacharjee, Anirban and Koutsoukos, Xenofon D.},
  title     = {Computation and Communication Evaluation of an Authentication Mechanism for Time-Triggered Networked Control Systems},
  journal   = {Sensors},
  year      = {2016},
  volume    = {16},
  number    = {8},
  pages     = {1166},
  doi       = {10.3390/s16081166},
  url       = {https://doi.org/10.3390/s16081166},
  keywords  = {reliability},
  project   = {cps-middleware,cps-reliability},
  tag       = {platform},
  file      = {:Martins2016-Computation_and_Communication_Evaluation_of an Authentication_Mechanism_for_Time-Triggered_Networked_Control_Systems.pdf:PDF},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/journals/sensors/MartinsMDBK16},
  timestamp = {Wed, 14 Nov 2018 00:00:00 +0100}
}
In modern networked control applications, confidentiality and integrity are important features to address in order to prevent against attacks. Moreover, network control systems are a fundamental part of the communication components of current cyber-physical systems (e.g., automotive communications). Many networked control systems employ Time-Triggered (TT) architectures that provide mechanisms enabling the exchange of precise and synchronous messages. TT systems have computation and communication constraints, and with the aim to enable secure communications in the network, it is important to evaluate the computational and communication overhead of implementing secure communication mechanisms. This paper presents a comprehensive analysis and evaluation of the effects of adding a Hash-based Message Authentication (HMAC) to TT networked control systems. The contributions of the paper include (1) the analysis and experimental validation of the communication overhead, as well as a scalability analysis that utilizes the experimental result for both wired and wireless platforms and (2) an experimental evaluation of the computational overhead of HMAC based on a kernel-level Linux implementation. An automotive application is used as an example, and the results show that it is feasible to implement a secure communication mechanism without interfering with the existing automotive controller execution times. The methods and results of the paper can be used for evaluating the performance impact of security mechanisms and, thus, for the design of secure wired and wireless TT networked control systems.
- A. Oruganti, F. Sun, H. Baroud, and A. Dubey, DelayRadar: A multivariate predictive model for transit systems, in 2016 IEEE International Conference on Big Data, BigData 2016, Washington DC, USA, December 5-8, 2016, 2016, pp. 1799–1806.
@inproceedings{Oruganti2016,
  author    = {Oruganti, Aparna and Sun, Fangzhou and Baroud, Hiba and Dubey, Abhishek},
  title     = {{DelayRadar}: {A} multivariate predictive model for transit systems},
  booktitle = {2016 {IEEE} International Conference on Big Data, BigData 2016, Washington DC, USA, December 5-8, 2016},
  year      = {2016},
  pages     = {1799--1806},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/bigdataconf/OrugantiSBD16},
  category  = {selectiveconference},
  doi       = {10.1109/BigData.2016.7840797},
  file      = {:Oruganti2016-DelayRadar_A_multivariate_predictive_model_for_transit_systems.pdf:PDF},
  keywords  = {transit},
  tag       = {transit},
  project   = {smart-transit,smart-cities},
  timestamp = {Wed, 16 Oct 2019 14:14:51 +0200},
  url       = {https://doi.org/10.1109/BigData.2016.7840797}
}
Effective public transit operations are one of the fundamental requirements for a modern community. Recently, a number of transit agencies have started integrating automated vehicle locators in their fleet, which provides a real-time estimate of the time of arrival. In this paper, we use the data collected over several months from one such transit system and show how this data can be potentially used to learn long term patterns of travel time. More specifically, we study the effect of weather and other factors such as traffic on the transit system delay. These models can later be used to understand the seasonal variations and to design adaptive and transient transit schedules. Towards this goal, we also propose an online architecture called DelayRadar. The novelty of DelayRadar lies in three aspects: (1) a data store that collects and integrates real-time and static data from multiple data sources, (2) a predictive statistical model that analyzes the data to make predictions on transit travel time, and (3) a decision making framework to develop an optimal transit schedule based on variable forecasts related to traffic, weather, and other impactful factors. This paper focuses on identifying the model with the best predictive accuracy to be used in DelayRadar. According to the preliminary study results, we are able to explain more than 70% of the variance in the bus travel time and we can make future travel predictions with an out-of-sample error of 4.8 minutes with information on the bus schedule, traffic, and weather.
- S. Pradhan, A. Dubey, S. Khare, F. Sun, J. Sallai, A. S. Gokhale, D. C. Schmidt, M. Lehofer, and M. Sturm, Poster Abstract: A Distributed and Resilient Platform for City-Scale Smart Systems, in IEEE/ACM Symposium on Edge Computing, SEC 2016, Washington, DC, USA, October 27-28, 2016, 2016, pp. 99–100.
@inproceedings{Pradhan2016a, author = {Pradhan, Subhav and Dubey, Abhishek and Khare, Shweta and Sun, Fangzhou and Sallai, J{\'{a}}nos and Gokhale, Aniruddha S. and Schmidt, Douglas C. and Lehofer, Martin and Sturm, Monika}, title = {Poster Abstract: {A} Distributed and Resilient Platform for City-Scale Smart Systems}, booktitle = {{IEEE/ACM} Symposium on Edge Computing, {SEC} 2016, Washington, DC, USA, October 27-28, 2016}, year = {2016}, tag = {platform}, pages = {99--100}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/bib/conf/edge/PradhanDKSSGSLS16}, category = {poster}, doi = {10.1109/SEC.2016.28}, file = {:Pradhan2016a-Poster_Abstract_A_Distributed_and_Resilient_Platform_for_City-Scale_Smart_Systems.pdf:PDF}, keywords = {middleware}, project = {cps-middleware,smart-cities}, timestamp = {Wed, 16 Oct 2019 14:14:56 +0200}, url = {https://doi.org/10.1109/SEC.2016.28} }
The advent of the Internet of Things (IoT) is driving several technological trends. The first trend is an increased level of integration between edge devices and commodity computers. This trend, in conjunction with low power-devices, energy harvesting, and improved battery technology, is enabling the next generation of information technology (IT) innovation: city-scale smart systems. These types of IoT systems can operate at multiple time-scales, ranging from closed-loop control requiring strict real-time decision and actuation to near real-time operation with humans-in-the-loop, as well as to long-term analysis, planning, and decision-making.
- W. Emfinger, A. Dubey, P. Völgyesi, J. Sallai, and G. Karsai, Demo Abstract: RIAPS - A Resilient Information Architecture Platform for Edge Computing, in IEEE/ACM Symposium on Edge Computing, SEC 2016, Washington, DC, USA, October 27-28, 2016, 2016, pp. 119–120.
@inproceedings{Emfinger2016, author = {Emfinger, William and Dubey, Abhishek and V{\"{o}}lgyesi, P{\'{e}}ter and Sallai, J{\'{a}}nos and Karsai, Gabor}, title = {Demo Abstract: {RIAPS} - {A} Resilient Information Architecture Platform for Edge Computing}, booktitle = {{IEEE/ACM} Symposium on Edge Computing, {SEC} 2016, Washington, DC, USA, October 27-28, 2016}, year = {2016}, pages = {119--120}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/bib/conf/edge/EmfingerDVSK16}, category = {poster}, doi = {10.1109/SEC.2016.23}, file = {:Emfinger2016-Demo_Abstract_RIAPS-A_Resilient_Information_Architecture_Platform_for_Edge_Computing.pdf:PDF}, keywords = {middleware}, project = {cps-middleware}, tag = {platform,decentralization,power}, timestamp = {Wed, 16 Oct 2019 14:14:56 +0200}, url = {https://doi.org/10.1109/SEC.2016.23} }
The emerging CPS/IoT ecosystem platforms such as Beaglebone Black, Raspberry Pi, Intel Edison and other edge devices such as SCALE, Paradrop are providing new capabilities for data collection, analysis and processing at the edge (also referred to as Fog Computing). This allows the dynamic composition of computing and communication networks that can be used to monitor and control the physical phenomena closer to the physical system. However, there are still a number of challenges that exist and must be resolved before we see wider applicability of these platforms for applications in safety-critical application domains such as Smart Grid and Traffic Control.
- A. Chhokra, A. Dubey, N. Mahadevan, and G. Karsai, Poster Abstract: Distributed Reasoning for Diagnosing Cascading Outages in Cyber Physical Energy Systems, in 7th ACM/IEEE International Conference on Cyber-Physical Systems, ICCPS 2016, Vienna, Austria, April 11-14, 2016, 2016, p. 33:1.
@inproceedings{Chhokra2016, author = {Chhokra, Ajay and Dubey, Abhishek and Mahadevan, Nagabhushan and Karsai, Gabor}, title = {Poster Abstract: Distributed Reasoning for Diagnosing Cascading Outages in Cyber Physical Energy Systems}, booktitle = {7th {ACM/IEEE} International Conference on Cyber-Physical Systems, {ICCPS} 2016, Vienna, Austria, April 11-14, 2016}, year = {2016}, pages = {33:1}, tag = {platform}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/bib/conf/iccps/ChhokraDMK16}, category = {poster}, doi = {10.1109/ICCPS.2016.7479113}, file = {:Chhokra2016-Poster_Abstract_Distributed_Reasoning_for_Diagnosing_Cascading_Outages_in_Cyber_Physical_Energy_Systems.pdf:PDF}, keywords = {smartgrid}, project = {cps-reliability}, timestamp = {Wed, 16 Oct 2019 14:14:57 +0200}, url = {https://doi.org/10.1109/ICCPS.2016.7479113} }
The power grid incorporates a number of protection elements such as distance relays that detect faults and prevent the propagation of failure effects from influencing the rest of the system. However, the decision of these protection elements is only influenced by local information in the form of bus voltage/current (V-I) samples. Due to lack of system wide perspective, erroneous settings, and latent failure modes, protection devices often mis-operate and cause cascading effects that ultimately lead to blackouts. Blackouts around the world have been triggered or worsened by circuit breakers tripping, including the blackout of 2003 in North America, where the secondary/remote protection relays incorrectly opened the breaker. Tools that aid the operators in finding the root cause of the problem on-line are required. However, high system complexity and the interdependencies between the cyber and physical elements of the system and the mis-operation of protection devices make the failure diagnosis a challenging problem.
- S. Pradhan, A. Dubey, and A. S. Gokhale, WiP Abstract: Platform for Designing and Managing Resilient and Extensible CPS, in 7th ACM/IEEE International Conference on Cyber-Physical Systems, ICCPS 2016, Vienna, Austria, April 11-14, 2016, 2016, p. 39:1.
@inproceedings{Pradhan2016b, author = {Pradhan, Subhav and Dubey, Abhishek and Gokhale, Aniruddha S.}, title = {WiP Abstract: Platform for Designing and Managing Resilient and Extensible {CPS}}, booktitle = {7th {ACM/IEEE} International Conference on Cyber-Physical Systems, {ICCPS} 2016, Vienna, Austria, April 11-14, 2016}, year = {2016}, pages = {39:1}, tag = {platform}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/bib/conf/iccps/PradhanDG16}, category = {poster}, doi = {10.1109/ICCPS.2016.7479128}, file = {:Pradhan2016b-WiP_Abstract_Platform_for_Designing_and_Managing_Resilient_and_Extensible_CPS.pdf:PDF}, keywords = {performance, middleware}, project = {cps-reliability,cps-middleware}, timestamp = {Wed, 16 Oct 2019 14:14:57 +0200}, url = {https://doi.org/10.1109/ICCPS.2016.7479128} }
Extensible Cyber-Physical Systems (CPS) are loosely connected, multi-domain platforms that "virtualize" their resources to provide an open platform capable of hosting different cyber-physical applications. These cyber-physical platforms are extensible since resources and applications can be added or removed at any time. However, realizing such a platform requires resolving challenges emanating from different properties; for this paper, we focus on resilience. Resilience is important for extensible CPS to make sure that extensibility of a system doesn’t result in failures and anomalies.
- A. Dubey, S. Pradhan, D. C. Schmidt, S. Rusitschka, and M. Sturm, The Role of Context and Resilient Middleware in Next Generation Smart Grids, in Proceedings of the 3rd Workshop on Middleware for Context-Aware Applications in the IoT, M4IoT@Middleware 2016, Trento, Italy, December 12-13, 2016, 2016, pp. 1–6.
@inproceedings{Dubey2016, author = {Dubey, Abhishek and Pradhan, Subhav and Schmidt, Douglas C. and Rusitschka, Sebnem and Sturm, Monika}, title = {The Role of Context and Resilient Middleware in Next Generation Smart Grids}, booktitle = {Proceedings of the 3rd Workshop on Middleware for Context-Aware Applications in the IoT, M4IoT@Middleware 2016, Trento, Italy, December 12-13, 2016}, year = {2016}, pages = {1--6}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/bib/conf/middleware/DubeyPSRS16}, category = {workshop}, doi = {10.1145/3008631.3008632}, file = {:Dubey2016-The_Role_of_Context_and_Resilient_Middleware_in_Next_Generation_Smart_Grids.pdf:PDF}, keywords = {smartgrid, middleware}, project = {cps-reliability,cps-middleware}, tag = {platform,power}, timestamp = {Tue, 06 Nov 2018 16:57:13 +0100}, url = {https://doi.org/10.1145/3008631.3008632} }
The emerging trends of volatile distributed energy resources and micro-grids are putting pressure on electrical power system infrastructure. This pressure is motivating the integration of digital technology and advanced power-industry practices to improve the management of distributed electricity generation, transmission, and distribution, thereby creating a web of systems. Unlike legacy power system infrastructure, however, this emerging next-generation smart grid should be context-aware and adaptive to enable the creation of applications needed to enhance grid robustness and efficiency. This paper describes key factors that are driving the architecture of smart grids and describes orchestration middleware needed to make the infrastructure resilient. We use an example of adaptive protection logic in smart grid substations as a use case to motivate the need for context-awareness and adaptivity.
- S. Shekhar, F. Sun, A. Dubey, A. Gokhale, H. Neema, M. Lehofer, and D. Freudberg, A Smart Decision Support System for Public Transit Operations, in Internet of Things and Data Analytics Handbook, 2016.
@inbook{Shekhar2016, title = {A Smart Decision Support System for Public Transit Operations}, year = {2016}, tag = {transit}, author = {Shekhar, Shashank and Sun, Fangzhou and Dubey, Abhishek and Gokhale, Aniruddha and Neema, Himanshu and Lehofer, Martin and Freudberg, Dan}, booktitle = {Internet of Things and Data Analytics Handbook}, file = {:Shekhar2016-Transit_Hub_A_Smart_Decision_Support_System_for_Public_Transit_Operations.pdf:PDF}, keywords = {transit} }
- S. Pradhan, A. Dubey, and A. S. Gokhale, Designing a Resilient Deployment and Reconfiguration Infrastructure for Remotely Managed Cyber-Physical Systems, in Software Engineering for Resilient Systems - 8th International Workshop, SERENE 2016, Gothenburg, Sweden, September 5-6, 2016, Proceedings, 2016, pp. 88–104.
@inbook{Pradhan2016c, pages = {88--104}, title = {Designing a Resilient Deployment and Reconfiguration Infrastructure for Remotely Managed Cyber-Physical Systems}, year = {2016}, tag = {platform}, author = {Pradhan, Subhav and Dubey, Abhishek and Gokhale, Aniruddha S.}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/bib/conf/serene/PradhanDG16}, booktitle = {Software Engineering for Resilient Systems - 8th International Workshop, {SERENE} 2016, Gothenburg, Sweden, September 5-6, 2016, Proceedings}, doi = {10.1007/978-3-319-45892-2\_7}, file = {:Pradhan2016c-Designing_a_Resilient_Deployment_and_Reconfiguration_Infrastructure_for_Remotely_Managed_CPS.pdf:PDF}, keywords = {middleware, reliability}, project = {cps-reliability,cps-middleware}, timestamp = {Tue, 14 May 2019 10:00:48 +0200}, url = {https://doi.org/10.1007/978-3-319-45892-2\_7} }
Multi-module Cyber-Physical Systems (CPS), such as satellite clusters, swarms of Unmanned Aerial Vehicles (UAV), and fleets of Unmanned Underwater Vehicles (UUV) provide a CPS cluster-as-a-service for CPS applications. The distributed and remote nature of these systems often necessitates the use of Deployment and Configuration (D&C) services to manage the lifecycle of these applications. Fluctuating resources, volatile cluster membership and changing environmental conditions necessitate resilience. Thus, the D&C infrastructure does not only have to undertake basic management actions, such as activation of new applications and deactivation of existing applications, but also has to autonomously reconfigure existing applications to mitigate failures including D&C infrastructure failures. This paper describes the design and architectural considerations to realize such a D&C infrastructure for component-based distributed systems. Experimental results demonstrating the autonomous resilience capabilities are presented.
- S. Nannapaneni, S. Mahadevan, S. Pradhan, and A. Dubey, Towards Reliability-Based Decision Making in Cyber-Physical Systems, in 2016 IEEE International Conference on Smart Computing, SMARTCOMP 2016, St Louis, MO, USA, May 18-20, 2016, 2016, pp. 1–6.
@inproceedings{Nannapaneni2016, author = {Nannapaneni, Saideep and Mahadevan, Sankaran and Pradhan, Subhav and Dubey, Abhishek}, title = {Towards Reliability-Based Decision Making in Cyber-Physical Systems}, booktitle = {2016 {IEEE} International Conference on Smart Computing, {SMARTCOMP} 2016, St Louis, MO, USA, May 18-20, 2016}, year = {2016}, pages = {1--6}, tag = {platform}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/bib/conf/smartcomp/NannapaneniMPD16}, category = {workshop}, doi = {10.1109/SMARTCOMP.2016.7501724}, file = {:Nannapaneni2016-Towards_Reliability-Based_Decision_Making_in_Cyber-Physical_Systems.pdf:PDF}, keywords = {reliability, performance}, project = {cps-reliability}, timestamp = {Wed, 16 Oct 2019 14:14:54 +0200}, url = {https://doi.org/10.1109/SMARTCOMP.2016.7501724} }
Cyber-physical systems (CPS) are systems with a tight integration between the computational (also referred to as software or cyber) and physical (hardware) components. While the reliability evaluation of physical systems is well-understood and well-studied, reliability evaluation of CPS is difficult because software systems do not degrade and follow a well-defined failure model like physical systems. In this paper, we propose a framework for formulating the CPS reliability evaluation as a dependence problem derived from the software component dependences, functional requirements and physical system dependences. We also consider sensor failures, and propose a method for estimating software failures in terms of associated hardware and software inputs. This framework is codified in a domain-specific modeling language, where every system-level function is mapped to a set of required components using functional decomposition and function-component association; this provides details about operational constraints and dependences. We also illustrate how the encoded information can be used to make reconfiguration decisions at runtime. The proposed methodology is demonstrated using a smart parking system, which provides localization and guidance for parking within indoor environments.
- F. Sun, Y. Pan, J. White, and A. Dubey, Real-Time and Predictive Analytics for Smart Public Transportation Decision Support System, in 2016 IEEE International Conference on Smart Computing, SMARTCOMP 2016, St Louis, MO, USA, May 18-20, 2016, 2016, pp. 1–8.
@inproceedings{Sun2016, author = {Sun, Fangzhou and Pan, Yao and White, Jules and Dubey, Abhishek}, title = {Real-Time and Predictive Analytics for Smart Public Transportation Decision Support System}, booktitle = {2016 {IEEE} International Conference on Smart Computing, {SMARTCOMP} 2016, St Louis, MO, USA, May 18-20, 2016}, year = {2016}, pages = {1--8}, tag = {transit}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/bib/conf/smartcomp/SunPWD16}, category = {selectiveconference}, doi = {10.1109/SMARTCOMP.2016.7501714}, file = {:Sun2016-Real-Time_and_Predictive_Analytics_for_Smart_Public_Transportation_Decision_Support_System.pdf:PDF}, keywords = {transit}, project = {smart-transit,smart-cities}, timestamp = {Wed, 16 Oct 2019 14:14:54 +0200}, url = {https://doi.org/10.1109/SMARTCOMP.2016.7501714} }
Public bus transit plays an important role in city transportation infrastructure. However, public bus transit is often difficult to use because of lack of real-time information about bus locations and delay time, which in the presence of operational delays and service alerts makes it difficult for riders to predict when buses will arrive and plan trips. Precisely tracking vehicles and informing riders of estimated times of arrival is challenging due to a number of factors, such as traffic congestion, operational delays, and varying times taken to load passengers at each stop. In this paper, we introduce a public transportation decision support system for both short-term as well as long-term prediction of arrival bus times. The system uses streaming real-time bus position data, which is updated once every minute, and historical arrival and departure data - available for select stops to predict bus arrival times. Our approach combines clustering analysis and Kalman filters with a shared route segment model in order to produce more accurate arrival time predictions. Experiments show that compared to the basic arrival time prediction model that is currently being used by the city, our system reduces arrival time prediction errors by 25 percent on average when predicting the arrival delay an hour ahead and 47 percent when predicting within a 15 minute future time window.
- H. Neema, W. Emfinger, and A. Dubey, A Reusable and Extensible Web-Based Co-Simulation Platform for Transactive Energy Systems, in Proceedings of the 3rd International Transactive Energy Systems, Portland, Oregon, USA, 2016, vol. 12.
@inproceedings{Neema2016, author = {Neema, Himanshu and Emfinger, William and Dubey, Abhishek}, title = {A Reusable and Extensible Web-Based Co-Simulation Platform for Transactive Energy Systems}, booktitle = {Proceedings of the 3rd International Transactive Energy Systems, Portland, Oregon, USA}, year = {2016}, volume = {12}, category = {workshop}, file = {:Neema2016-A_Reusable_and_Extensible_Web-Based_Co-Simulation_Platform_for_Transactive_Energy_Systems.pdf:PDF}, keywords = {transactive}, tag = {platform,power} }
Rapid evolution of energy generation technology and increased use of distributed energy resources (DER) is continually pushing utilities to adapt and evolve business models to align with these changes. Today, more consumers are also producing energy using green generation technologies and energy pricing is becoming rather competitive and transactional, needing utilities to increase flexibility of grid operations and incorporate transactive energy systems (TES). However, a huge bottleneck is to ensure stable grid operations while gaining efficiency. A comprehensive platform is therefore needed for grid-scale multi-aspects integrated evaluations. For instance, cyber-attacks in a road traffic controller’s communication network can subtly divert electric vehicles in a particular area, causing surge in the grid loads due to increased EV charging and people activity, which can potentially disrupt, an otherwise robust, grid. To evaluate such a scenario, multiple special-purpose simulators (e.g., SUMO, OMNeT++, GridlabD, etc.) must be run in an integrated manner. To support this, we are developing a cloud-deployed web- and model-based simulation integration platform that enables integrated evaluations of transactive energy systems and is highly extensible and customizable for utility-specific custom simulation tools.
- S. Pradhan, A. Dubey, S. Neema, and A. Gokhale, Towards a generic computation model for smart city platforms, in 2016 1st International Workshop on Science of Smart City Operations and Platforms Engineering (SCOPE) in partnership with Global City Teams Challenge (GCTC) (SCOPE - GCTC), 2016, pp. 1–6.
@inproceedings{Pradhan2016d, author = {Pradhan, Subhav and Dubey, Abhishek and Neema, Sandeep and Gokhale, Aniruddha S.}, title = {Towards a generic computation model for smart city platforms}, booktitle = {2016 1st International Workshop on Science of Smart City Operations and Platforms Engineering (SCOPE) in partnership with Global City Teams Challenge (GCTC) (SCOPE - GCTC)}, year = {2016}, pages = {1--6}, tag = {platform}, month = apr, category = {workshop}, doi = {10.1109/SCOPE.2016.7515059}, file = {:Pradhan2016d-Towards_a_Generic_Computation_Model_for_Smart_City_Platforms.pdf:PDF}, keywords = {middleware} }
Smart emergency response systems, smart transportation systems, smart parking spaces are some examples of multi-domain smart city systems that require large-scale, open platforms for integration and execution. These platforms illustrate high degree of heterogeneity. In this paper, we focus on software heterogeneity arising from different types of applications. The source of variability among applications stems from (a) timing requirements, (b) rate and volume of data they interact with, and (c) behavior depending on whether they are stateful or stateless. These variations result in applications with different computation models. However, a smart city system can comprise multi-domain applications with different types and therefore computation models. As such, a key challenge that arises is that of integration; we require some mechanism to facilitate integration and interaction between applications that use different computation models. In this paper, we first identify computation models based on different application types. Second, we present a generic computation model and explain how it can map to previously identified computation models. Finally, we briefly describe how the generic computation model fits in our overall smart city platform architecture.
- S. Nannapaneni, A. Dubey, S. Abdelwahed, S. Mahadevan, S. Neema, and T. Bapty, Mission-based reliability prediction in component-based systems, International Journal of Prognostics and Health Management, vol. 7, no. 001, 2016.
@article{Nannapaneni2016a, author = {Nannapaneni, Saideep and Dubey, Abhishek and Abdelwahed, Sherif and Mahadevan, Sankaran and Neema, Sandeep and Bapty, Ted}, title = {Mission-based reliability prediction in component-based systems}, journal = {International Journal of Prognostics and Health Management}, year = {2016}, volume = {7}, number = {001}, file = {:Nannapaneni2016a-Mission-based_reliability_prediction_in_component-based_systems.pdf:PDF}, keywords = {reliability} }
This paper develops a framework for the extraction of a reliability block diagram in component-based systems for reliability prediction with respect to specific missions. A mission is defined as a composition of several high-level functions occurring at different stages and for a specific time during the mission. The high-level functions are decomposed into lower-level functions, which are then mapped to their corresponding components or component assemblies. The reliability block diagram is obtained using functional decomposition and function-component association. Using the reliability block diagram and the reliability information on the components such as failure rates, the reliability of the system carrying out a mission can be estimated. The reliability block diagram is evaluated by converting it into a logic (Boolean) expression. A modeling language created using the Generic Modeling Environment (GME) platform is used, which enables modeling of a system and captures the functional decomposition and function-component association in the system. This framework also allows for real-time monitoring of the system performance where the reliability of the mission can be computed over time as the mission progresses. The uncertainties in the failure rates and operational time of each high-level function are also considered which are quantified through probability distributions using the Bayesian framework. The dependence between failures of components are also considered and are quantified through a Bayesian network (BN). Other quantities of interest such as mission feasibility and function availability can also be assessed using this framework. Mission feasibility analysis determines if the mission can be accomplished given the current state of components in the system, and function availability provides information whether the function will be available in the future given the current state of the system. 
The proposed methodology is demonstrated using a radio-controlled (RC) car to carry out a simple surveillance mission.
- G. Biswas, H. Khorasgani, G. Stanje, A. Dubey, S. Deb, and S. Ghoshal, An application of data driven anomaly identification to spacecraft telemetry data, in Prognostics and Health Management Conference, 2016.
@inproceedings{Biswas2016, author = {Biswas, Gautam and Khorasgani, Hamed and Stanje, Gerald and Dubey, Abhishek and Deb, Somnath and Ghoshal, Sudipto}, title = {An application of data driven anomaly identification to spacecraft telemetry data}, booktitle = {Prognostics and Health Management Conference}, year = {2016}, tag = {ai4cps}, category = {conference}, file = {:Biswas2016-An_application_of_data_driven_anomaly_identification_to_spacecraft_telemetry_data.pdf:PDF}, keywords = {reliability} }
In this paper, we propose a mixed method for analyzing telemetry data from a robotic space mission. The idea is to first apply unsupervised learning methods to the telemetry data divided into temporal segments. The large clusters that ensue typically represent the nominal operations of the spacecraft and are not of interest from an anomaly detection viewpoint. However, the smaller clusters and outliers that result from this analysis may represent specialized modes of operation, e.g., conduct of a specialized experiment on board the spacecraft, or they may represent true anomalous or unexpected behaviors. To differentiate between specialized modes and anomalies, we employ a supervised method of consulting human mission experts in the approach presented in this paper. Our longer term goal is to develop more automated methods for detecting anomalies in time series data, and once anomalies are identified, use feature selection methods to build online detectors that can be used in future missions, thus contributing to making operations more effective and improving overall safety of the mission.
- G. Biswas, H. Khorasgani, G. Stanje, A. Dubey, S. Deb, and S. Ghoshal, An approach to mode and anomaly detection with spacecraft telemetry data, International Journal of Prognostics and Health Management, 2016.
@article{Biswas2016a, author = {Biswas, Gautam and Khorasgani, Hamed and Stanje, Gerald and Dubey, Abhishek and Deb, Somnath and Ghoshal, Sudipto}, title = {An approach to mode and anomaly detection with spacecraft telemetry data}, journal = {International Journal of Prognostics and Health Management}, year = {2016}, tag = {ai4cps}, file = {:Biswas2016a-An_approach_to_mode_and_anomaly_detection_with_spacecraft_telemetry_data.pdf:PDF}, keywords = {reliability} }
This paper discusses a mixed method that combines unsupervised learning methods and human expert input for analyzing telemetry data from long-duration robotic space missions. Our goal is to develop more automated methods for detecting anomalies in time series data. Once anomalies are identified using unsupervised learning methods, we use feature selection methods followed by expert input to derive the knowledge required for building on-line detectors. These detectors can be used in later phases of the current mission and in future missions for improving operations and overall safety of the mission. Whereas the primary focus in this paper is on developing data-driven anomaly detection methods, we also present a computational platform for data mining and analytics that can operate on historical data offline, as well as incoming telemetry data on-line.
2015
- N. Mahadevan, A. Dubey, A. Chhokra, H. Guo, and G. Karsai, Using temporal causal models to isolate failures in power system protection devices, IEEE Instrum. Meas. Mag., vol. 18, no. 4, pp. 28–39, 2015.
@article{Mahadevan2015, author = {Mahadevan, Nagabhushan and Dubey, Abhishek and Chhokra, Ajay and Guo, Huangcheng and Karsai, Gabor}, title = {Using temporal causal models to isolate failures in power system protection devices}, journal = {{IEEE} Instrum. Meas. Mag.}, year = {2015}, volume = {18}, number = {4}, pages = {28--39}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/bib/journals/imm/MahadevanDCGK15}, doi = {10.1109/MIM.2015.7155770}, file = {:Mahadevan2015-Using_temporal_causal_models_to_isolate_failures_in_power_system_protection_devices.pdf:PDF}, keywords = {smartgrid, reliability}, project = {cps-reliability,smart-energy}, tag = {platform,power}, timestamp = {Sun, 28 May 2017 01:00:00 +0200}, url = {https://doi.org/10.1109/MIM.2015.7155770} }
We introduced the modeling paradigm of Temporal Causal Diagrams (TCD) in this paper. TCDs capture fault propagation and behavior (nominal and faulty) of system components. An example model for the power transmission systems was also described. This TCD model was then used to develop an executable simulation model in Simulink/ Stateflow. Though this translation of TCD to an executable model is currently done manually, we are developing model templates and tools to automate this process. Simulations results (i.e., event traces) for a couple of single and multi-fault scenarios were also presented. As part of our future work, we wish to test and study the scalability of this approach towards a larger power transmission system taking into account a far richer set of protection elements. Further, we wish to consider more realistic event traces from the fault scenarios including missing, inconsistent and out-of-sequence alarms and events.
- D. Balasubramanian, A. Dubey, W. Otte, T. Levendovszky, A. S. Gokhale, P. S. Kumar, W. Emfinger, and G. Karsai, DREMS ML: A wide spectrum architecture design language for distributed computing platforms, Sci. Comput. Program., vol. 106, pp. 3–29, 2015.
@article{Balasubramanian2015, author = {Balasubramanian, Daniel and Dubey, Abhishek and Otte, William and Levendovszky, Tihamer and Gokhale, Aniruddha S. and Kumar, Pranav Srinivas and Emfinger, William and Karsai, Gabor}, title = {{DREMS} {ML:} {A} wide spectrum architecture design language for distributed computing platforms}, journal = {Sci. Comput. Program.}, year = {2015}, tag = {platform}, volume = {106}, pages = {3--29}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/bib/journals/scp/Balasubramanian15}, doi = {10.1016/j.scico.2015.04.002}, file = {:Balasubramanian2015-DREMS_ML_A_wide_spectrum_architecture_design_language_for_distributed_computing_platforms.pdf:PDF}, keywords = {middleware}, project = {cps-middleware}, timestamp = {Sat, 27 May 2017 01:00:00 +0200}, url = {https://doi.org/10.1016/j.scico.2015.04.002} }
Complex sensing, processing and control applications running on distributed platforms are difficult to design, develop, analyze, integrate, deploy and operate, especially if resource constraints, fault tolerance and security issues are to be addressed. While technology exists today for engineering distributed, real-time component-based applications, many problems remain unsolved by existing tools. Model-driven development techniques are powerful, but there are very few existing and complete tool chains that offer an end-to-end solution to developers, from design to deployment. There is a need for an integrated model-driven development environment that addresses all phases of application lifecycle including design, development, verification, analysis, integration, deployment, operation and maintenance, with supporting automation in every phase. Arguably, a centerpiece of such a model-driven environment is the modeling language. To that end, this paper presents a wide-spectrum architecture design language called DREMS ML that itself is an integrated collection of individual domain-specific sub-languages. We claim that the language promotes “correct-by-construction” software development and integration by supporting each individual phase of the application lifecycle. Using a case study, we demonstrate how the design of DREMS ML impacts the development of embedded systems.
- S. M. Pradhan, A. Dubey, A. S. Gokhale, and M. Lehofer, CHARIOT: a domain specific language for extensible cyber-physical systems, in Proceedings of the Workshop on Domain-Specific Modeling, DSM@SPLASH 2015, Pittsburgh, PA, USA, October 27, 2015, 2015, pp. 9–16.
@inproceedings{Pradhan2015,
  author    = {Pradhan, Subhav M. and Dubey, Abhishek and Gokhale, Aniruddha S. and Lehofer, Martin},
  title     = {{CHARIOT:} a domain specific language for extensible cyber-physical systems},
  booktitle = {Proceedings of the Workshop on Domain-Specific Modeling, DSM@SPLASH 2015, Pittsburgh, PA, USA, October 27, 2015},
  year      = {2015},
  pages     = {9--16},
  doi       = {10.1145/2846696.2846708},
  url       = {https://doi.org/10.1145/2846696.2846708},
  category  = {workshop},
  tag       = {platform},
  keywords  = {middleware, reliability},
  project   = {cps-middleware,cps-reliability},
  file      = {:Pradhan2015-CHARIOT_a_domain_specific_language_for_extensible_cyber-physical_systems.pdf:PDF},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/oopsla/PradhanDGL15},
  timestamp = {Tue, 06 Nov 2018 16:57:16 +0100}
}
Wider adoption, availability and ubiquity of wireless networking technologies, integrated sensors, actuators, and edge computing devices is facilitating a paradigm shift by allowing us to transition from traditional statically configured vertical silos of CyberPhysical Systems (CPS) to next generation CPS that are more open, dynamic and extensible. Fractionated spacecraft, smart cities computing architectures, Unmanned Aerial Vehicle (UAV) clusters, platoon of vehicles on highways are all examples of extensible CPS wherein extensibility is implied by the dynamic aggregation of physical resources, affect of physical dynamics on availability of computing resources, and various multi-domain applications hosted on these systems. However, realization of extensible CPS requires resolving design-time and run-time challenges emanating from properties specific to these systems. In this paper, we first describe different properties of extensible CPS - dynamism, extensibility, remote deployment, security, heterogeneity and resilience. Then we identify different design-time challenges stemming from heterogeneity and resilience requirements. We particularly focus on software heterogeneity arising from availability of various communication middleware. We then present appropriate solutions in the context of a novel domain specific language, which can be used to design resilient systems while remaining agnostic to middleware heterogeneities. We also describe how this language and its features have evolved from our past work. We use a platform of fractionated spacecraft to describe our solution.
- R. Jain, S. M. Lukic, A. Chhokra, N. Mahadevan, A. Dubey, and G. Karsai, An improved distance relay model with directional element, and memory polarization for TCD based fault propagation studies, in 2015 North American Power Symposium (NAPS), 2015, pp. 1–6.
@inproceedings{Jain2015,
  author    = {{Jain}, R. and {Lukic}, S. M. and {Chhokra}, A. and {Mahadevan}, N. and Dubey, Abhishek and {Karsai}, G.},
  title     = {An improved distance relay model with directional element, and memory polarization for {TCD} based fault propagation studies},
  booktitle = {2015 North American Power Symposium (NAPS)},
  year      = {2015},
  month     = oct,
  pages     = {1--6},
  doi       = {10.1109/NAPS.2015.7335206},
  category  = {selectiveconference},
  keywords  = {smartgrid},
  tag       = {power},
  file      = {:Jain2015-An_improved_distance_relay_model_with_directional_element_and_memory_polarization_for_TCD_based_fault_propagation_studies.pdf:PDF}
}
Modern Power Systems have evolved into a very complex network of multiple sources, lines, breakers, loads and others. The performance of these interdependent components decides the reliability of the power systems. A tool called “Reasoner” is being developed to deduce fault propagations using a Temporal Causal Diagram (TCD) approach. It translates the physical system as a Cause-effect model. This work discusses the development of an advanced distance relay model, which monitors the system, and challenges the operation of reasoner for refinement. The process of generating a Fault and Discrepancy Mapping file from the test system is presented. This file is used by the reasoner to scrutinize relays’ responses for active system faults, and hypothesize potential mis-operations (or cyber faults) with a confidence metric. Analyzer (relay model) is integrated to OpenDSS for fault analysis. The understanding of the system interdependency (fault propagation behavior) using reasoner can make the grid more robust against cascaded failures.
- A. Chhokra, S. Abdelwahed, A. Dubey, S. Neema, and G. Karsai, From system modeling to formal verification, in 2015 Electronic System Level Synthesis Conference (ESLsyn), 2015, pp. 41–46.
@inproceedings{Chhokra2015,
  author    = {{Chhokra}, A. and {Abdelwahed}, S. and Dubey, Abhishek and {Neema}, S. and {Karsai}, G.},
  title     = {From system modeling to formal verification},
  booktitle = {2015 Electronic System Level Synthesis Conference (ESLsyn)},
  year      = {2015},
  month     = jun,
  pages     = {41--46},
  issn      = {2117-4628},
  category  = {conference},
  keywords  = {reliability},
  tag       = {platform},
  file      = {:Chhokra2015-From_system_modeling_to_formal_verification.pdf:PDF}
}
Due to increasing design complexity, modern systems are modeled at a high level of abstraction. SystemC is widely accepted as a system level language for modeling complex embedded systems. Verification of these SystemC designs nullifies the chances of error propagation down to the hardware. Due to lack of formal semantics of SystemC, the verification of such designs is done mostly in an unsystematic manner. This paper provides a new modeling environment that enables the designer to simulate and formally verify the designs by generating SystemC code. The generated SystemC code is automatically translated to timed automata for formal analysis.
- A. Chhokra, A. Dubey, N. Mahadevan, and G. Karsai, A component-based approach for modeling failure propagations in power systems, in 2015 Workshop on Modeling and Simulation of Cyber-Physical Energy Systems (MSCPES), 2015, pp. 1–6.
@inproceedings{Chhokra2015a,
  author    = {{Chhokra}, A. and Dubey, Abhishek and {Mahadevan}, N. and {Karsai}, G.},
  title     = {A component-based approach for modeling failure propagations in power systems},
  booktitle = {2015 Workshop on Modeling and Simulation of Cyber-Physical Energy Systems (MSCPES)},
  year      = {2015},
  month     = apr,
  pages     = {1--6},
  doi       = {10.1109/MSCPES.2015.7115412},
  category  = {workshop},
  keywords  = {smartgrid},
  tag       = {platform,power},
  file      = {:Chhokra2015a-A_component-based_approach_for_modeling_failure_propagations_in_power_systems.pdf:PDF}
}
Resiliency and reliability are of paramount importance for energy cyber physical systems. Electrical protection systems including detection elements such as Distance Relays and actuation elements such as Breakers are designed to protect the system from abnormal operations and arrest failure propagation by rapidly isolating the faulty components. However, failures in the protection devices themselves can and do lead to major system events and fault cascades, often leading to blackouts. This paper augments our past work on Temporal Causal Diagrams (TCD), a modeling formalism designed to help reason about the failure progressions by (a) describing a way to generate the TCD model from the system specification, and (b) understand the system failure dynamics for TCD reasoners by configuring simulation models.
- A. Dubey, M. Sturm, M. Lehofer, and J. Sztipanovits, Smart City Hubs: Opportunities for Integrating and Studying Human CPS at Scale, in Workshop on Big Data Analytics in CPS: Enabling the Move from IoT to Real-Time Control, 2015.
@inproceedings{Dubey2015,
  author    = {Dubey, Abhishek and Sturm, Monika and Lehofer, Martin and Sztipanovits, Janos},
  title     = {Smart City Hubs: Opportunities for Integrating and Studying Human {CPS} at Scale},
  booktitle = {Workshop on Big Data Analytics in CPS: Enabling the Move from IoT to Real-Time Control},
  year      = {2015},
  category  = {workshop},
  keywords  = {transit},
  tag       = {transit},
  file      = {:Dubey2015-Smart_city_hubs_Opportunities_for_integrating_and_studying_human_cps_at_scale.pdf:PDF},
  url       = {http://www.isis.vanderbilt.edu/sites/default/files/extendedAbstract.pdf}
}
- S. Pradhan, A. Dubey, W. R. Otte, G. Karsai, and A. Gokhale, Towards a Product Line of Heterogeneous Distributed Applications, Institute for Software Integrated Systems, Vanderbilt University, Nashville, Technical Report ISIS-15-117, 2015.
@techreport{Pradhan2015a,
  author      = {Pradhan, Subhav and Dubey, Abhishek and Otte, William R and Karsai, Gabor and Gokhale, Aniruddha},
  title       = {Towards a Product Line of Heterogeneous Distributed Applications},
  institution = {Institute for Software Integrated Systems, Vanderbilt University},
  year        = {2015},
  month       = apr,
  type        = {Technical Report},
  number      = {ISIS-15-117},
  address     = {Nashville},
  keywords    = {middleware},
  owner       = {abhishek},
  timestamp   = {2015.10.16},
  attachments = {http://www.isis.vanderbilt.edu/sites/default/files/TechReport2013.pdf},
  file        = {:Pradhan2015a-Towards_a_product_line_of_heterogeneous_distributed_applications.pdf:PDF},
  url         = {http://www.isis.vanderbilt.edu/sites/default/files/Pradhan_SEAMS_TechReport.pdf}
}
Next generation large-scale distributed systems – such as smart cities – are dynamic, heterogeneous and multi-domain in nature. The same is true for applications hosted on these systems. Application heterogeneity stems from their Unit of Composition (UoC); some applications might be coarse-grained and composed from processes or actors, whereas others might be fine-grained and composed from software components. Software components can further amplify heterogeneity since there exist different component models for different domains. Lifecycle management of such distributed, heterogeneous applications is a considerable challenge. In this paper, we solve this problem by reasoning about these systems as a Software Product Line (SPL) where individual dimensions of heterogeneity can be considered as product variants. To enable such reasoning, first, we present UMRELA (Universal feature-Model for distRibutEd appLicAtions), a conceptual feature model that identifies commonalities and variability points for capturing and representing distributed applications and their target system. This results in a product line of a family of distributed applications. UMRELA facilitates representation of the initial configuration point, and the configuration space of the system. The latter represents all possible states the system can reach and is used as an implicit encoding to calculate new configuration points at runtime. Second, we present a prototype Application Management Framework (AMF) as a proof of concept configuration management tool that uses UMRELA to manage heterogeneous distributed applications.
2014
- T. Levendovszky, A. Dubey, W. Otte, D. Balasubramanian, A. Coglio, S. Nyako, W. Emfinger, P. S. Kumar, A. S. Gokhale, and G. Karsai, Distributed Real-Time Managed Systems: A Model-Driven Distributed Secure Information Architecture Platform for Managed Embedded Systems, IEEE Software, vol. 31, no. 2, pp. 62–69, 2014.
@article{Levendovszky2014,
  author    = {Levendovszky, Tihamer and Dubey, Abhishek and Otte, William and Balasubramanian, Daniel and Coglio, Alessandro and Nyako, Sandor and Emfinger, William and Kumar, Pranav Srinivas and Gokhale, Aniruddha S. and Karsai, Gabor},
  title     = {Distributed Real-Time Managed Systems: {A} Model-Driven Distributed Secure Information Architecture Platform for Managed Embedded Systems},
  journal   = {{IEEE} Software},
  year      = {2014},
  volume    = {31},
  number    = {2},
  pages     = {62--69},
  doi       = {10.1109/MS.2013.143},
  url       = {https://doi.org/10.1109/MS.2013.143},
  tag       = {platform},
  keywords  = {middleware},
  project   = {cps-middleware,cps-reliability},
  file      = {:Levendovszky2014-Distributed_Real_Time_Managed_Systems.pdf:PDF},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/journals/software/LevendovszkyDOBCNEKGK14},
  timestamp = {Thu, 18 May 2017 01:00:00 +0200}
}
Architecting software for a cloud computing platform built from mobile embedded devices incurs many challenges that aren’t present in traditional cloud computing. Both effectively managing constrained resources and isolating applications without adverse performance effects are needed. A practical design- and runtime solution incorporates modern software development practices and technologies along with novel approaches to address these challenges. The patterns and principles manifested in this system can potentially serve as guidelines for current and future practitioners in this field.
- W. Emfinger, G. Karsai, A. Dubey, and A. S. Gokhale, Analysis, verification, and management toolsuite for cyber-physical applications on time-varying networks, in Proceedings of the 4th ACM SIGBED International Workshop on Design, Modeling, and Evaluation of Cyber-Physical Systems, CyPhy 2014, Berlin, Germany, April 14-17, 2014, 2014, pp. 44–47.
@inproceedings{Emfinger2014,
  author    = {Emfinger, William and Karsai, Gabor and Dubey, Abhishek and Gokhale, Aniruddha S.},
  title     = {Analysis, verification, and management toolsuite for cyber-physical applications on time-varying networks},
  booktitle = {Proceedings of the 4th {ACM} {SIGBED} International Workshop on Design, Modeling, and Evaluation of Cyber-Physical Systems, CyPhy 2014, Berlin, Germany, April 14-17, 2014},
  year      = {2014},
  pages     = {44--47},
  doi       = {10.1145/2593458.2593459},
  url       = {https://doi.org/10.1145/2593458.2593459},
  category  = {workshop},
  tag       = {platform},
  keywords  = {performance},
  project   = {cps-reliability},
  file      = {:Emfinger2014-Analysis_verification_and_management_toolsuite_for_cyber-physical_applications_on_time-varying_networks.pdf:PDF},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/cyphy/EmfingerKDG14},
  timestamp = {Tue, 06 Nov 2018 00:00:00 +0100}
}
Cyber-Physical Systems (CPS) are increasingly utilizing advances in wireless mesh networking among computing nodes to facilitate communication and control for distributed applications. Factors such as interference or node mobility cause such wireless networks to experience changes in both topology and link capacities. These dynamic networks pose a reliability concern for high-criticality or mixed-criticality systems which require strict guarantees about system performance and robustness prior to deployment. To address the design- and run-time verification and reliability concerns created by these dynamic networks, we are developing an integrated modeling, analysis, and run-time toolsuite which provides (1) network profiles that model the dynamics of system network resources and application network requirements over time, (2) design-time verification of application performance on dynamic networks, and (3) management of the CPS network resources during run-time. In this paper we present the foundations for the analysis of dynamic networks and show experimental validations of this analysis. We conclude with a focus on future work and applications to the field.
- G. Karsai, D. Balasubramanian, A. Dubey, and W. Otte, Distributed and Managed: Research Challenges and Opportunities of the Next Generation Cyber-Physical Systems, in 17th IEEE International Symposium on Object/Component/Service-Oriented Real-Time Distributed Computing, ISORC 2014, Reno, NV, USA, June 10-12, 2014, 2014, pp. 1–8.
@inproceedings{Karsai2014,
  author    = {Karsai, Gabor and Balasubramanian, Daniel and Dubey, Abhishek and Otte, William},
  title     = {Distributed and Managed: Research Challenges and Opportunities of the Next Generation Cyber-Physical Systems},
  booktitle = {17th {IEEE} International Symposium on Object/Component/Service-Oriented Real-Time Distributed Computing, {ISORC} 2014, Reno, NV, USA, June 10-12, 2014},
  year      = {2014},
  pages     = {1--8},
  doi       = {10.1109/ISORC.2014.36},
  url       = {https://doi.org/10.1109/ISORC.2014.36},
  category  = {selectiveconference},
  tag       = {platform},
  keywords  = {middleware},
  project   = {cps-reliability,cps-middleware},
  file      = {:Karsai2014-Distributed_and_Managed.pdf:PDF},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/isorc/KarsaiBDO14},
  timestamp = {Wed, 16 Oct 2019 14:14:53 +0200}
}
Cyber-physical systems increasingly rely on distributed computing platforms where sensing, computing, actuation, and communication resources are shared by a multitude of applications. Such ’cyber-physical cloud computing platforms’ present novel challenges because the system is built from mobile embedded devices, is inherently distributed, and typically suffers from highly fluctuating connectivity among the modules. Architecting software for these systems raises many challenges not present in traditional cloud computing. Effective management of constrained resources and application isolation without adversely affecting performance are necessary. Autonomous fault management and real-time performance requirements must be met in a verifiable manner. It is also both critical and challenging to support multiple end-users whose diverse software applications have changing demands for computational and communication resources, while operating on different levels and in separate domains of security. The solution presented in this paper is based on a layered architecture consisting of a novel operating system, a middleware layer, and component-structured applications. The component model facilitates the construction of software applications from modular and reusable components that are deployed in the distributed system and interact only through well-defined mechanisms. The complexity of creating applications and performing system integration is mitigated through the use of a domain-specific model-driven development process that relies on a domain-specific modeling language and its accompanying graphical modeling tools, software generators for synthesizing infrastructure code, and the extensive use of model-based analysis for verification and validation.
- D. Balasubramanian, T. Levendovszky, A. Dubey, and G. Karsai, Taming Multi-Paradigm Integration in a Software Architecture Description Language, in Proceedings of the 8th Workshop on Multi-Paradigm Modeling co-located with the 17th International Conference on Model Driven Engineering Languages and Systems, MPM@MODELS 2014, Valencia, Spain, September 30, 2014, 2014, pp. 67–76.
@inproceedings{Balasubramanian2014,
  author    = {Balasubramanian, Daniel and Levendovszky, Tihamer and Dubey, Abhishek and Karsai, Gabor},
  title     = {Taming Multi-Paradigm Integration in a Software Architecture Description Language},
  booktitle = {Proceedings of the 8th Workshop on Multi-Paradigm Modeling co-located with the 17th International Conference on Model Driven Engineering Languages and Systems, MPM@MODELS 2014, Valencia, Spain, September 30, 2014},
  year      = {2014},
  pages     = {67--76},
  url       = {http://ceur-ws.org/Vol-1237/paper7.pdf},
  category  = {workshop},
  tag       = {platform},
  keywords  = {middleware},
  project   = {cps-reliability,cps-middleware},
  file      = {:Balasubramanian2014-Taming_Multi-Paradigm_Integration_in_a_Software_Architecture_Description_Language.pdf:PDF},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/models/BalasubramanianLDK14},
  timestamp = {Thu, 18 Jul 2019 11:36:32 +0200}
}
Software architecture description languages offer a convenient way of describing the high-level structure of a software system. Such descriptions facilitate rapid prototyping, code generation and automated analysis. One of the big challenges facing the software community is the design of architecture description languages that are general enough to describe a wide-range of systems, yet detailed enough to capture domain-specific properties and provide a high level of tool automation. This paper presents the multi-paradigm challenges we faced and solutions we built when creating a domain-specific modeling language for software architectures of distributed real-time systems.
- P. S. Kumar, A. Dubey, and G. Karsai, Colored Petri Net-based Modeling and Formal Analysis of Component-based Applications, in Proceedings of the 11th Workshop on Model-Driven Engineering, Verification and Validation co-located with 17th International Conference on Model Driven Engineering Languages and Systems, MoDeVVa@MODELS 2014, Valencia, Spain, September 30, 2014, 2014, pp. 79–88.
@inproceedings{Kumar2014,
  author    = {Kumar, Pranav Srinivas and Dubey, Abhishek and Karsai, Gabor},
  title     = {{Colored Petri Net}-based Modeling and Formal Analysis of Component-based Applications},
  booktitle = {Proceedings of the 11th Workshop on Model-Driven Engineering, Verification and Validation co-located with 17th International Conference on Model Driven Engineering Languages and Systems, MoDeVVa@MODELS 2014, Valencia, Spain, September 30, 2014},
  year      = {2014},
  pages     = {79--88},
  url       = {http://ceur-ws.org/Vol-1235/paper-10.pdf},
  category  = {workshop},
  tag       = {platform},
  keywords  = {performance},
  project   = {cps-reliability,cps-middleware},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/models/KumarDK14},
  timestamp = {Tue, 28 May 2019 16:23:34 +0200}
}
Distributed Real-Time Embedded (DRE) Systems that address safety and mission-critical system requirements are applied in a variety of domains today. Complex, integrated systems like managed satellite clusters expose heterogeneous concerns such as strict timing requirements, complexity in system integration, deployment, and repair; and resilience to faults. Integrating appropriate modeling and analysis techniques into the design of such systems helps ensure predictable, dependable and safe operation upon deployment. This paper describes how we can model and analyze applications for these systems in order to verify system properties such as lack of deadline violations. Our approach is based on (1) formalizing the component operation scheduling using Colored Petri nets (CPN), (2) modeling the abstract temporal behavior of application components, and (3) integrating the business logic and the component operation scheduling models into a concrete CPN, which is then analyzed. This model-driven approach enables a verification-driven workflow wherein the application model can be refined and restructured before actual code development.
- D. Balasubramanian, A. Dubey, W. R. Otte, W. Emfinger, P. S. Kumar, and G. Karsai, A Rapid Testing Framework for a Mobile Cloud, in 25nd IEEE International Symposium on Rapid System Prototyping, RSP 2014, New Delhi, India, October 16-17, 2014, 2014, pp. 128–134.
@inproceedings{Balasubramanian2014a,
  author    = {Balasubramanian, Daniel and Dubey, Abhishek and Otte, William R. and Emfinger, William and Kumar, Pranav Srinivas and Karsai, Gabor},
  title     = {A Rapid Testing Framework for a Mobile Cloud},
  booktitle = {25th {IEEE} International Symposium on Rapid System Prototyping, {RSP} 2014, New Delhi, India, October 16-17, 2014},
  year      = {2014},
  pages     = {128--134},
  doi       = {10.1109/RSP.2014.6966903},
  url       = {https://doi.org/10.1109/RSP.2014.6966903},
  category  = {selectiveconference},
  tag       = {platform},
  keywords  = {middleware},
  project   = {cps-middleware},
  file      = {:Balasubramanian2014a-A_Rapid_Testing_Framework_for_a_Mobile_Cloud.pdf:PDF},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/rsp/BalasubramanianDOEKK14},
  timestamp = {Wed, 16 Oct 2019 14:14:50 +0200}
}
Mobile clouds such as network-connected vehicles and satellite clusters are an emerging class of systems that are extensions to traditional real-time embedded systems: they provide long-term mission platforms made up of dynamic clusters of heterogeneous hardware nodes communicating over ad hoc wireless networks. Besides the inherent complexities entailed by a distributed architecture, developing software and testing these systems is difficult due to a number of other reasons, including the mobile nature of such systems, which can require a model of the physical dynamics of the system for accurate simulation and testing. This paper describes a rapid development and testing framework for a distributed satellite system. Our solutions include a modeling language for configuring and specifying an application’s interaction with the middleware layer, a physics simulator integrated with hardware in the loop to provide the system’s physical dynamics and the integration of a network traffic tool to dynamically vary the network bandwidth based on the physical dynamics.
- N. Mahadevan, A. Dubey, G. Karsai, A. Srivastava, and C.-C. Liu, Temporal Causal Diagrams for diagnosing failures in cyber-physical systems, in Annual Conference of the Prognostics and Health Management Society, 2014.
@inproceedings{Mahadevan2014,
  author    = {Mahadevan, Nagabhushan and Dubey, Abhishek and Karsai, Gabor and Srivastava, Anurag and Liu, Chen-Ching},
  title     = {Temporal Causal Diagrams for diagnosing failures in cyber-physical systems},
  booktitle = {Annual Conference of the Prognostics and Health Management Society},
  year      = {2014},
  month     = jan,
  category  = {conference},
  keywords  = {reliability, smartgrid},
  tag       = {platform,power},
  file      = {:Mahadevan2014-Temporal_Causal_Diagrams_for_Diagnosing_Failures_in_Cyber_Physical_Systems.pdf:PDF}
}
Resilient and reliable operation of cyber physical systems of societal importance such as Smart Electric Grids is one of the top national priorities. Due to their critical nature, these systems are equipped with fast-acting, local protection mechanisms. However, commonly misguided protection actions together with system dynamics can lead to un-intentional cascading effects. This paper describes the ongoing work using Temporal Causal Diagrams (TCD), a refinement of the Timed Failure Propagation Graphs (TFPG), to diagnose problems associated with the power transmission lines protected by a combination of relays and breakers. The TCD models represent the faults and their propagation as TFPG, the nominal and faulty behavior of components (including local, discrete controllers and protection devices) as Timed Discrete Event Systems (TDES), and capture the cumulative and cascading effects of these interactions. The TCD diagnosis engine includes an extended TFPG-like reasoner which in addition to observing the alarms and mode changes (as the TFPG), monitors the event traces (that correspond to the behavioral aspects of the model) to generate hypotheses that consistently explain all the observations. In this paper, we show the results of applying the TCD to a segment of a power transmission system that is protected by distance relays and breakers.
- S. Pradhan, W. Emfinger, A. Dubey, W. R. Otte, D. Balasubramanian, A. Gokhale, G. Karsai, and A. Coglio, Establishing Secure Interactions across Distributed Applications in Satellite Clusters, in 2014 IEEE International Conference on Space Mission Challenges for Information Technology, 2014, pp. 67–74.
@inproceedings{Pradhan2014,
  author    = {{Pradhan}, S. and {Emfinger}, W. and Dubey, Abhishek and {Otte}, W. R. and {Balasubramanian}, D. and {Gokhale}, A. and {Karsai}, G. and {Coglio}, A.},
  title     = {Establishing Secure Interactions across Distributed Applications in Satellite Clusters},
  booktitle = {2014 IEEE International Conference on Space Mission Challenges for Information Technology},
  year      = {2014},
  month     = sep,
  pages     = {67--74},
  doi       = {10.1109/SMC-IT.2014.17},
  category  = {conference},
  keywords  = {middleware},
  tag       = {platform},
  file      = {:Pradhan2014-Establishing_Secure_Interactions_across_Distributed_Applications_in_Satellite_Clusters.pdf:PDF}
}
Recent developments in small satellites have led to an increasing interest in building satellite clusters as open systems that provide a "cluster-as-a-service" in space. Since applications with different security classification levels must be supported in these open systems, the system must provide strict information partitioning such that only applications with matching security classifications interact with each other. The anonymous publish/subscribe communication pattern is a powerful interaction abstraction that has enjoyed great success in previous space software architectures, such as NASA’s Core Flight Executive. However, the difficulty is that existing solutions that support anonymous publish/subscribe communication, such as the OMG Data Distribution Service (DDS), do not support information partitioning based on security classifications, which is a key requirement for some systems. This paper makes two contributions to address these limitations. First, we present a transport mechanism called Secure Transport that uses a lattice of labels to represent security classifications and enforces Multi-Level Security (MLS) policies to ensure strict information partitioning. Second, we present a novel discovery service that allows us to use an existing DDS implementation with our custom transport mechanism to realize a publish/subscribe middleware with information partitioning based on security classifications of applications. We also include an evaluation of our solution in the context of a use case scenario.
- S. Pradhan, W. Otte, A. Dubey, A. Gokhale, and G. Karsai, Key Considerations for a Resilient and Autonomous Deployment and Configuration Infrastructure for Cyber-Physical Systems, in Proceedings of the 11th IEEE International Conference and Workshops on the Engineering of Autonomic and Autonomous Systems (EASe’14), 2014.
@inproceedings{Pradhan2014a,
  author       = {Pradhan, Subhav and Otte, William and Dubey, Abhishek and Gokhale, Aniruddha and Karsai, Gabor},
  title        = {Key Considerations for a Resilient and Autonomous Deployment and Configuration Infrastructure for Cyber-Physical Systems},
  booktitle    = {Proceedings of the 11th IEEE International Conference and Workshops on the Engineering of Autonomic and Autonomous Systems (EASe'14)},
  year         = {2014},
  organization = {Citeseer},
  category     = {conference},
  keywords     = {middleware, reliability},
  tag          = {platform},
  file         = {:Pradhan2014a-Key_Considerations_for_a_Resilient_and_Autonomous_Deployment_and_Configuration_Infrastructure_for_CPS.pdf:PDF}
}
Multi-module Cyber-Physical Systems (CPSs), such as satellite clusters, swarms of Unmanned Aerial Vehicles (UAV), and fleets of Unmanned Underwater Vehicles (UUV) are examples of managed distributed real-time systems where mission-critical applications, such as sensor fusion or coordinated flight control, are hosted. These systems are dynamic and reconfigurable, and provide a “CPS cluster-as-a-service” for mission-specific scientific applications that can benefit from the elasticity of the cluster membership and heterogeneity of the cluster members. The distributed and remote nature of these systems often necessitates the use of Deployment and Configuration (D&C) services to manage the lifecycle of software applications. Fluctuating resources, volatile cluster membership and changing environmental conditions require resilient D&C services. However, the dynamic nature of the system often precludes human intervention during the D&C activities, which motivates the need for a self-adaptive D&C infrastructure that supports autonomous resilience. Such an infrastructure must have the ability to adapt existing applications on-the-fly in order to provide application resilience and must itself be able to adapt to account for changes in the system as well as tolerate failures. This paper makes two contributions towards addressing these needs. First, we identify the key challenges in achieving such a self-adaptive D&C infrastructure. Second, we present our ideas on resolving these challenges and realizing a self-adaptive D&C infrastructure.
- G. Martins, A. Bhattacharjee, A. Dubey, and X. D. Koutsoukos, Performance evaluation of an authentication mechanism in time-triggered networked control systems, in 2014 7th International Symposium on Resilient Control Systems (ISRCS), 2014, pp. 1–6.
@inproceedings{Martins2014,
  author    = {Martins, G. and Bhattacharjee, A. and Dubey, Abhishek and Koutsoukos, X. D.},
  title     = {Performance evaluation of an authentication mechanism in time-triggered networked control systems},
  booktitle = {2014 7th International Symposium on Resilient Control Systems (ISRCS)},
  year      = {2014},
  month     = aug,
  pages     = {1--6},
  doi       = {10.1109/ISRCS.2014.6900098},
  tag       = {platform},
  category  = {conference},
  file      = {:Martins2014-Performance_Evaluation_of_an_Authentication_Mechanism_in_Time-Triggered_Network_Control_Systems.pdf:PDF},
  keywords  = {middleware, performance}
}
An important challenge in networked control systems is to ensure the confidentiality and integrity of the message in order to secure the communication and prevent attackers or intruders from compromising the system. However, security mechanisms may jeopardize the temporal behavior of the network data communication because of the computation and communication overhead. In this paper, we study the effect of adding Hash Based Message Authentication (HMAC) to a time-triggered networked control system. Time Triggered Architectures (TTAs) provide a deterministic and predictable timing behavior that is used to ensure safety, reliability and fault tolerance properties. The paper analyzes the computation and communication overhead of adding HMAC and the impact on the performance of the time-triggered network. Experimental validation and performance evaluation results using a TTEthernet network are also presented.
- W. R. Otte, A. Dubey, and G. Karsai, A resilient and secure software platform and architecture for distributed spacecraft, in Sensors and Systems for Space Applications VII, 2014, vol. 9085, pp. 121–130.
@inproceedings{Otte2014,
  author       = {Otte, William R. and Dubey, Abhishek and Karsai, Gabor},
  title        = {A resilient and secure software platform and architecture for distributed spacecraft},
  booktitle    = {Sensors and Systems for Space Applications VII},
  year         = {2014},
  editor       = {Pham, Khanh D. and Cox, Joseph L.},
  volume       = {9085},
  pages        = {121--130},
  organization = {International Society for Optics and Photonics},
  publisher    = {SPIE},
  doi          = {10.1117/12.2054055},
  url          = {https://doi.org/10.1117/12.2054055},
  tag          = {platform},
  category     = {conference},
  file         = {:Otte2014-A_resilient_and_secure_software_platform_and_architecture_for_distributed_spacecraft.pdf:PDF},
  keywords     = {middleware}
}
A distributed spacecraft is a cluster of independent satellite modules flying in formation that communicate via ad-hoc wireless networks. This system in space is a cloud platform that facilitates sharing sensors and other computing and communication resources across multiple applications, potentially developed and maintained by different organizations. Effectively, such architecture can realize the functions of monolithic satellites at a reduced cost and with improved adaptivity and robustness. Openness of these architectures poses special challenges because the distributed software platform has to support applications from different security domains and organizations, and where information flows have to be carefully managed and compartmentalized. If the platform is used as a robust shared resource its management, configuration, and resilience becomes a challenge in itself. We have designed and prototyped a distributed software platform for such architectures. The core element of the platform is a new operating system whose services were designed to restrict access to the network and the file system, and to enforce resource management constraints for all non-privileged processes. Mixed-criticality applications operating at different security labels are deployed and controlled by a privileged management process that is also pre-configuring all information flows. This paper describes the design and objective of this layer.
- S. Nannapaneni, A. Dubey, S. Abdelwahed, S. Mahadevan, and S. Neema, A Model-Based Approach for Reliability Assessment in Component-Based Systems, in PHM 2014 - Proceedings of the Annual Conference of the Prognostics and Health Management Society 2014, 2014.
@inproceedings{Nannapaneni2014,
  author    = {Nannapaneni, Saideep and Dubey, Abhishek and Abdelwahed, Sherif and Mahadevan, Sankaran and Neema, Sandeep},
  title     = {A Model-Based Approach for Reliability Assessment in Component-Based Systems},
  booktitle = {PHM 2014 - Proceedings of the Annual Conference of the Prognostics and Health Management Society 2014},
  year      = {2014},
  month     = oct,
  tag       = {platform},
  category  = {conference},
  file      = {:Nannapaneni2014-A_Model-based_approach_for_reliability_assessment_in_component_based_systems.pdf:PDF},
  keywords  = {reliability}
}
This paper describes a formal framework for reliability assessment of component-based systems with respect to specific missions. A mission comprises different timed mission stages, with each stage requiring a number of high-level functions. The work presented here describes a modeling language to capture the functional decomposition and missions of a system. The components and their alternatives are mapped to basic functions which are used to implement the system-level functions. Our contribution is the extraction of mission-specific reliability block diagram from these high-level models of component assemblies. This is then used to compute the mission reliability using reliability information of components. This framework can be used for real-time monitoring of system performance where reliability of the mission is computed over time as the mission is in progress. Other quantities of interest such as mission feasibility, function availability can also be computed using this framework. Mission feasibility answers the question whether the mission can be accomplished given the current state of components in the system and function availability provides information if the function is available in the future given the current state of the system. The software used in this framework includes Generic Modeling Environment (GME) and Python. GME is used for modeling the system and Python for reliability computations. The proposed methodology is demonstrated using a radio-controlled (RC) car in carrying out a simple surveillance mission.
- S. Pradhan, W. Otte, A. Dubey, C. Szabo, A. Gokhale, and G. Karsai, Towards a Self-adaptive Deployment and Configuration Infrastructure for Cyber-Physical Systems, Institute for Software Integrated Systems, Vanderbilt University, Nashville, Technical Report ISIS-14-102, 2014.
@techreport{Pradhan2014b,
  author      = {Pradhan, Subhav and Otte, William and Dubey, Abhishek and Szabo, Csanad and Gokhale, Aniruddha and Karsai, Gabor},
  title       = {Towards a Self-adaptive Deployment and Configuration Infrastructure for Cyber-Physical Systems},
  institution = {Institute for Software Integrated Systems, Vanderbilt University},
  year        = {2014},
  month       = jun,
  type        = {Technical Report},
  number      = {ISIS-14-102},
  address     = {Nashville},
  url         = {http://www.isis.vanderbilt.edu/sites/default/files/Pradhan_SEAMS_TechReport.pdf},
  attachments = {http://www.isis.vanderbilt.edu/sites/default/files/TechReport2013.pdf},
  tag         = {platform},
  file        = {:Pradhan2014b-Towards_a_self-adaptive_deployment_and_configuration_infrastructure_for_CPS.pdf:PDF},
  keywords    = {middleware},
  owner       = {abhishek},
  timestamp   = {2015.10.16}
}
Multi-module Cyber-Physical Systems (CPSs), such as satellite clusters, swarms of Unmanned Aerial Vehicles (UAV), and fleets of Unmanned Underwater Vehicles (UUV) are examples of managed distributed real-time systems where mission-critical applications, such as sensor fusion or coordinated flight control, are hosted. These systems are dynamic and reconfigurable, and provide a “CPS cluster-as-a-service” for mission-specific scientific applications that can benefit from the elasticity of the cluster membership and heterogeneity of the cluster members. The distributed and remote nature of these systems often necessitates the use of Deployment and Configuration (D&C) services to manage lifecycle of software applications. Fluctuating resources, volatile cluster membership and changing environmental conditions require resilience. However, due to the dynamic nature of the system, human intervention is often infeasible. This necessitates a self-adaptive D&C infrastructure that supports autonomous resilience. Such an infrastructure must have the ability to adapt existing applications on the fly in order to provide application resilience and must itself be able to adapt to account for changes in the system as well as tolerate failures. This paper describes the design and architectural considerations to realize a self-adaptive, D&C infrastructure for CPSs. Previous efforts in this area have resulted in D&C infrastructures that support application adaptation via dynamic re-deployment and re-configuration mechanisms. Our work, presented in this paper, improves upon these past efforts by implementing a self-adaptive D&C infrastructure which itself is resilient. The paper concludes with experimental results that demonstrate the autonomous resilience capabilities of our new D&C infrastructure.
2013
- A. Dubey and G. Karsai, Software health management, Innovations in System and Software Engineering, vol. 9, no. 4, p. 217, 2013.
@article{Dubey2013,
  author    = {Dubey, Abhishek and Karsai, Gabor},
  title     = {Software health management},
  journal   = {{Innovations in System and Software Engineering}},
  year      = {2013},
  volume    = {9},
  number    = {4},
  pages     = {217},
  doi       = {10.1007/s11334-013-0226-7},
  tag       = {platform},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/journals/isse/DubeyK13},
  file      = {:Dubey2013-Software_Health_Management.pdf:PDF},
  keywords  = {reliability},
  project   = {cps-reliability,cps-middleware},
  timestamp = {Tue, 26 Jun 2018 01:00:00 +0200}
}
- N. Mahadevan, A. Dubey, D. Balasubramanian, and G. Karsai, Deliberative, search-based mitigation strategies for model-based software health management, ISSE, vol. 9, no. 4, pp. 293–318, 2013.
@article{Mahadevan2013,
  author    = {Mahadevan, Nagabhushan and Dubey, Abhishek and Balasubramanian, Daniel and Karsai, Gabor},
  title     = {Deliberative, search-based mitigation strategies for model-based software health management},
  journal   = {{ISSE}},
  year      = {2013},
  volume    = {9},
  number    = {4},
  pages     = {293--318},
  doi       = {10.1007/s11334-013-0215-x},
  url       = {https://doi.org/10.1007/s11334-013-0215-x},
  tag       = {platform},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/journals/isse/MahadevanDBK13},
  file      = {:Mahadevan2013-Deliberative_search-based_mitigation_strategies_for_model-based_software_health_management.pdf:PDF},
  keywords  = {reliability},
  project   = {cps-reliability,cps-middleware},
  timestamp = {Sun, 28 May 2017 01:00:00 +0200}
}
Rising software complexity in aerospace systems makes them very difficult to analyze and prepare for all possible fault scenarios at design time; therefore, classical run-time fault tolerance techniques such as self-checking pairs and triple modular redundancy are used. However, several recent incidents have made it clear that existing software fault tolerance techniques alone are not sufficient. To improve system dependability, simpler, yet formally specified and verified run-time monitoring, diagnosis, and fault mitigation capabilities are needed. Such architectures are already in use for managing the health of vehicles and systems. Software health management is the application of these techniques to software systems. In this paper, we briefly describe the software health management techniques and architecture developed by our research group. The foundation of the architecture is a real-time component framework (built upon ARINC-653 platform services) that defines a model of computation for software components. Dedicated architectural elements: the Component Level Health Manager (CLHM) and System Level Health Manager (SLHM) provide the health management services: anomaly detection, fault source isolation, and fault mitigation. The SLHM includes a diagnosis engine that (1) uses a Timed Failure Propagation Graph (TFPG) model derived from the component assembly model, (2) reasons about cascading fault effects in the system, and (3) isolates the fault source component(s). Thereafter, the appropriate system-level mitigation action is taken. The main focus of this article is the description of the fault mitigation architecture that uses goal-based deliberative reasoning to determine the best mitigation actions for recovering the system from the identified failure mode.
- S. Pradhan, W. Otte, A. Dubey, A. S. Gokhale, and G. Karsai, Towards a resilient deployment and configuration infrastructure for fractionated spacecraft, SIGBED Review, vol. 10, no. 4, pp. 29–32, 2013.
@article{Pradhan2013,
  author    = {Pradhan, Subhav and Otte, William and Dubey, Abhishek and Gokhale, Aniruddha S. and Karsai, Gabor},
  title     = {Towards a resilient deployment and configuration infrastructure for fractionated spacecraft},
  journal   = {{SIGBED} Review},
  year      = {2013},
  volume    = {10},
  number    = {4},
  pages     = {29--32},
  doi       = {10.1145/2583687.2583694},
  url       = {https://doi.org/10.1145/2583687.2583694},
  tag       = {platform},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/journals/sigbed/PradhanODGK13},
  file      = {:Pradhan2013-Towards_a_resilient_deployment_and_configuration_infrastructure_for_fractionated_spacecraft.pdf:PDF},
  keywords  = {reliability},
  project   = {cps-reliability,cps-middleware},
  timestamp = {Tue, 06 Nov 2018 00:00:00 +0100}
}
Fractionated spacecraft are clusters of small, independent modules that interact wirelessly to realize the functionality of a traditional monolithic spacecraft. System F6 (F6 stands for Future, Fast, Flexible, Fractionated, Free-Flying spacecraft) is a DARPA program for fractionated spacecraft. Software applications in F6 are implemented in the context of the F6 Information Architecture Platform (IAP), which provides component-based abstractions for composing distributed applications. The lifecycle of these distributed applications must be managed autonomously by a deployment and configuration (D&C) infrastructure, which can redeploy and reconfigure the running applications in response to faults and other anomalies that may occur during system operation. Addressing these D&C requirements is hard due to the significant fluctuation in resource availabilities, constraints on resources, and safety and security concerns. This paper presents the key architectural ideas that are required in realizing such a D&C infrastructure.
- W. Otte, A. Dubey, S. Pradhan, P. Patil, A. S. Gokhale, G. Karsai, and J. Willemsen, F6COM: A component model for resource-constrained and dynamic space-based computing environments, in 16th IEEE International Symposium on Object/Component/Service-Oriented Real-Time Distributed Computing, ISORC 2013, Paderborn, Germany, June 19-21, 2013, 2013, pp. 1–8.
@inproceedings{Otte2013,
  author    = {Otte, William and Dubey, Abhishek and Pradhan, Subhav and Patil, Prithviraj and Gokhale, Aniruddha S. and Karsai, Gabor and Willemsen, Johnny},
  title     = {{F6COM:} {A} component model for resource-constrained and dynamic space-based computing environments},
  booktitle = {16th {IEEE} International Symposium on Object/Component/Service-Oriented Real-Time Distributed Computing, {ISORC} 2013, Paderborn, Germany, June 19-21, 2013},
  year      = {2013},
  pages     = {1--8},
  doi       = {10.1109/ISORC.2013.6913199},
  url       = {https://doi.org/10.1109/ISORC.2013.6913199},
  tag       = {platform},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/isorc/OtteDPPGKW13},
  category  = {selectiveconference},
  file      = {:Otte2013-F6COM_A_Component_Model.pdf:PDF},
  keywords  = {middleware},
  project   = {cps-reliability,cps-middleware},
  timestamp = {Wed, 16 Oct 2019 14:14:53 +0200}
}
Component-based programming models are well-suited to the design of large-scale, distributed applications because of the ease with which distributed functionality can be developed, deployed, and validated using the models’ compositional properties. Existing component models supported by standardized technologies, such as the OMG’s CORBA Component Model (CCM), however, incur a number of limitations in the context of cyber physical systems (CPS) that operate in highly dynamic, resource-constrained, and uncertain environments, such as space environments, yet require multiple quality of service (QoS) assurances, such as timeliness, reliability, and security. To overcome these limitations, this paper presents the design of a novel component model called F6COM that is developed for applications operating in the context of a cluster of fractionated spacecraft. Although F6COM leverages the compositional capabilities and port abstractions of existing component models, it provides several new features. Specifically, F6COM abstracts the component operations as tasks, which are scheduled sequentially based on a specified scheduling policy. The infrastructure ensures that at any time at most one task of a component can be active - eliminating race conditions and deadlocks without requiring complicated and error-prone synchronization logic to be written by the component developer. These tasks can be initiated due to (a) interactions with other components, (b) expiration of timers, both sporadic and periodic, and (c) interactions with input/output devices. Interactions with other components are facilitated by ports. To ensure secure information flows, every port of an F6COM component is associated with a security label such that all interactions are executed within a security context. Thus, all component interactions can be subjected to Mandatory Access Control checks by a Trusted Computing Base that facilitates the interactions. 
Finally, F6COM provides capabilities to monitor task execution deadlines and to configure component-specific fault mitigation actions.
- A. Dubey, G. Karsai, and N. Mahadevan, Fault-Adaptivity in Hard Real-Time Component-Based Software Systems, in Software Engineering for Self-Adaptive Systems II: International Seminar, Dagstuhl Castle, Germany, October 24-29, 2010 Revised Selected and Invited Papers, R. de Lemos, H. Giese, H. A. Müller, and M. Shaw, Eds. Berlin, Heidelberg: Springer Berlin Heidelberg, 2013, pp. 294–323.
@inbook{Dubey2010,
  author    = {Dubey, Abhishek and Karsai, Gabor and Mahadevan, Nagabhushan},
  editor    = {de Lemos, Rog{\'e}rio and Giese, Holger and M{\"u}ller, Hausi A. and Shaw, Mary},
  title     = {Fault-Adaptivity in Hard Real-Time Component-Based Software Systems},
  booktitle = {Software Engineering for Self-Adaptive Systems II: International Seminar, Dagstuhl Castle, Germany, October 24-29, 2010 Revised Selected and Invited Papers},
  publisher = {Springer Berlin Heidelberg},
  address   = {Berlin, Heidelberg},
  year      = {2013},
  pages     = {294--323},
  isbn      = {978-3-642-35813-5},
  doi       = {10.1007/978-3-642-35813-5_12},
  url       = {https://doi.org/10.1007/978-3-642-35813-5_12},
  tag       = {platform},
  file      = {:Dubey2010-Fault-Adaptivity_in_Hard_Real-Time_Component-Based_Software_Systems.pdf:PDF},
  keywords  = {reliability},
  project   = {cps-middleware,cps-reliability}
}
Complexity in embedded software systems has reached the point where we need run-time mechanisms that provide fault management services. Testing and verification may not cover all possible scenarios that a system encounters, hence a simpler, yet formally specified run-time monitoring, diagnosis, and fault mitigation architecture is needed to increase the software system’s dependability. The approach described in this paper borrows concepts and principles from the field of ‘Systems Health Management’ for complex aerospace systems and implements a novel two level health management architecture that can be applied in the context of a model-based software development process.
- A. Dubey, G. Karsai, N. Mahadevan, A. Srivastava, C. C. Liu, and S. Lukic, Understanding Failure Dynamics in the Smart Electric Grid, in NSF Energy Cyber Physical System Workshop, Washington DC, 2013.
@inproceedings{Dubey2013a,
  author    = {Dubey, Abhishek and Karsai, Gabor and Mahadevan, Nagabhushan and Srivastava, A. and Liu, C. C. and Lukic, S.},
  title     = {Understanding Failure Dynamics in the Smart Electric Grid},
  booktitle = {NSF Energy Cyber Physical System Workshop, Washington DC},
  year      = {2013},
  tag       = {platform},
  category  = {workshop},
  file      = {:Dubey2013a-Understanding_failture_dynamics_in_the_smart_electric_grid.pdf:PDF},
  keywords  = {smartgrid}
}
- A. Dubey, A. Gokhale, G. Karsai, W. Otte, and J. Willemsen, A model-driven software component framework for fractionated spacecraft, in Proceedings of the 5th International Conference on Spacecraft Formation Flying Missions and Technologies (SFFMT), 2013.
@inproceedings{Dubey2013b,
  author       = {Dubey, Abhishek and Gokhale, Aniruddha and Karsai, Gabor and Otte, William and Willemsen, Johnny},
  title        = {A model-driven software component framework for fractionated spacecraft},
  booktitle    = {Proceedings of the 5th International Conference on Spacecraft Formation Flying Missions and Technologies (SFFMT)},
  year         = {2013},
  organization = {IEEE Munich, Germany},
  category     = {workshop},
  file         = {:Dubey2013b-A_model-driven_software_component_framework_for_fractionated_spacecraft.pdf:PDF},
  keywords     = {middleware}
}
Fractionated spacecraft is a novel space architecture that uses a cluster of small spacecraft modules (with their own attitude control and propulsion systems) connected via wireless links to accomplish complex missions. Resources, such as sensors, persistent storage space, processing power, and downlink bandwidth can be shared among the members of the cluster thanks to the networking. Such spacecraft can serve as a cost effective, highly adaptable, and fault tolerant platform for running various distributed mission software applications that collect, process, and downlink data. Naturally, a key component in such a system is the software platform: the distributed operating system and software infrastructure that makes such applications possible. Existing operating systems are insufficient, and newer technologies like component frameworks do not address all the requirements of such flexible space architectures. The high degree of flexibility and the need for thorough planning and analysis of the resource management necessitates the use of advanced development techniques. This paper describes the core principles and design of a software component framework for fractionated spacecraft that is a special case of a distributed real-time embedded system. Additionally we describe how a model-driven development environment helps with the design and engineering of complex applications for this platform.
- J. Shi, R. Amgai, S. Abdelwahed, A. Dubey, J. Humphreys, M. Alattar, and R. Jia, Generic modeling and analysis framework for shipboard system design, in 2013 IEEE Electric Ship Technologies Symposium (ESTS), 2013, pp. 420–428.
@inproceedings{Shi2013,
  author    = {Shi, J. and Amgai, R. and Abdelwahed, S. and Dubey, Abhishek and Humphreys, J. and Alattar, M. and Jia, R.},
  title     = {Generic modeling and analysis framework for shipboard system design},
  booktitle = {2013 IEEE Electric Ship Technologies Symposium (ESTS)},
  year      = {2013},
  month     = apr,
  pages     = {420--428},
  doi       = {10.1109/ESTS.2013.6523770},
  category  = {workshop},
  file      = {:Shi2013-Generic_modeling_and_analysis_framework_for_shipboard_system_design.pdf:PDF},
  keywords  = {middleware},
  tag       = {platform,power}
}
This paper proposes a novel modeling and simulation environment for ship design based on the principles of Model Integrated Computing (MIC). The proposed approach facilitates the design and analysis of shipboard power systems and similar systems that integrate components from different fields of expertise. The conventional simulation platforms such as Matlab®, Simulink®, PSCAD® and VTB® require the designers to have explicit knowledge of the syntactic and semantic information of the desired domain within the tools. This constraint, however, severely slows down the design and analysis process, and causes cross-domain or cross-platform operations to remain error-prone and expensive. Our approach focuses on the development of a modeling environment that provides generic support for a variety of application across different domains by capturing modeling concepts, composition principles and operation constraints. For the preliminary demonstration of the modeling concept, in this paper we limit the scope of design to cross-platform implementations of the proposed environment by developing an application model of a simplified shipboard power system and using Matlab engine and VTB solver separately to evaluate the performance with different respects. In the case studies a fault scenario is pre-specified and tested on the system model. The corresponding time domain bus voltage magnitude and angle profiles are generated via invoking external solver, displayed to users and then saved for future analysis.
- W. Emfinger, P. Kumar, A. Dubey, W. Otte, A. Gokhale, and G. Karsai, Drems: A toolchain and platform for the rapid application development, integration, and deployment of managed distributed real-time embedded systems, in IEEE Real-time Systems Symposium, 2013.
@inproceedings{Emfinger2013,
  author    = {Emfinger, William and Kumar, Pranav and Dubey, Abhishek and Otte, William and Gokhale, Aniruddha and Karsai, Gabor},
  title     = {{DREMS}: A toolchain and platform for the rapid application development, integration, and deployment of managed distributed real-time embedded systems},
  booktitle = {IEEE Real-time Systems Symposium},
  year      = {2013},
  tag       = {platform},
  category  = {poster},
  file      = {:Emfinger2013-DREMS_A_toolchain_and_platform_for_rapid.pdf:PDF},
  keywords  = {middleware}
}
- N. Mahadevan, A. Dubey, D. Balasubramanian, and G. Karsai, Deliberative Reasoning in Software Health Management, Institute for Software Integrated Systems, Vanderbilt University, techreport ISIS-13-101, 2013.
@techreport{Mahadevan2013a,
  author      = {Mahadevan, Nagabhushan and Dubey, Abhishek and Balasubramanian, Daniel and Karsai, Gabor},
  title       = {Deliberative Reasoning in Software Health Management},
  institution = {Institute for Software Integrated Systems, Vanderbilt University},
  year        = {2013},
  month       = apr,
  type        = {Technical Report},
  number      = {ISIS-13-101},
  attachments = {http://www.isis.vanderbilt.edu/sites/default/files/TechReport2013.pdf},
  tag         = {platform},
  file        = {:Mahadevan2013a-Deliberative_reasoning_in_software_health_management.pdf:PDF},
  keywords    = {performance, reliability}
}
Rising software complexity in aerospace systems makes them very difficult to analyze and prepare for all possible fault scenarios at design-time. Therefore, classical run-time fault-tolerance techniques, such as self-checking pairs and triple modular redundancy are used. However, several recent incidents have made it clear that existing software fault tolerance techniques alone are not sufficient. To improve system dependability, simpler, yet formally specified and verified run-time monitoring, diagnosis, and fault mitigation are needed. Such architectures are already in use for managing the health of vehicles and systems. Software health management is the application of adapting and applying these techniques to software. In this paper, we briefly describe the software health management technique and architecture developed by our research group. The foundation of the architecture is a real-time component framework (built upon ARINC-653 platform services) that defines a model of computation for software components. Dedicated architectural elements: the Component Level Health Manager (CLHM) and System Level Health Manager (SLHM) provide health management services: anomaly detection, fault source isolation, and fault mitigation. The SLHM includes a diagnosis engine that uses a Timed Failure Propagation (TFPG) model derived from the component assembly model, and it reasons about cascading fault effects in the system and isolates the fault source component(s). Thereafter, the appropriate system level mitigation action is taken. The main focus of this article is the description of the fault mitigation architecture that uses goal-based deliberative reasoning to determine the best mitigation actions for recovering the system from the identified failure mode.
2012
- N. Mahadevan, A. Dubey, and G. Karsai, Architecting Health Management into Software Component Assemblies: Lessons Learned from the ARINC-653 Component Model, in 15th IEEE International Symposium on Object/Component/Service-Oriented Real-Time Distributed Computing, ISORC 2012, Shenzhen, China, April 11-13, 2012, 2012, pp. 79–86.
@inproceedings{Mahadevan2012,
  author    = {Mahadevan, Nagabhushan and Dubey, Abhishek and Karsai, Gabor},
  title     = {Architecting Health Management into Software Component Assemblies: Lessons Learned from the {ARINC-653} Component Model},
  booktitle = {15th {IEEE} International Symposium on Object/Component/Service-Oriented Real-Time Distributed Computing, {ISORC} 2012, Shenzhen, China, April 11-13, 2012},
  year      = {2012},
  pages     = {79--86},
  doi       = {10.1109/ISORC.2012.19},
  url       = {https://doi.org/10.1109/ISORC.2012.19},
  tag       = {platform},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/isorc/MahadevanDK12},
  category  = {selectiveconference},
  file      = {:Mahadevan2012-Architecting_Health_Management_into_Software_Component_Assemblies.pdf:PDF},
  keywords  = {performance, reliability},
  project   = {cps-reliability,cps-middleware},
  timestamp = {Wed, 16 Oct 2019 14:14:53 +0200}
}
Complex real-time software systems require an active fault management capability. While testing, verification and validation schemes and their constant evolution help improve the dependability of these systems, an active fault management strategy is essential to potentially mitigate the unacceptable behaviors at run-time. In our work we have applied the experience gained from the field of Systems Health Management towards component-based software systems. The software components interact via well-defined concurrency patterns and are executed on a real-time component framework built upon ARINC-653 platform services. In this paper, we present the lessons learned in architecting and applying a two-level health management strategy to assemblies of software components.
- A. Dabholkar, A. Dubey, A. S. Gokhale, G. Karsai, and N. Mahadevan, Reliable Distributed Real-Time and Embedded Systems through Safe Middleware Adaptation, in IEEE 31st Symposium on Reliable Distributed Systems, SRDS 2012, Irvine, CA, USA, October 8-11, 2012, 2012, pp. 362–371.
@inproceedings{Dabholkar2012,
  author    = {Dabholkar, Akshay and Dubey, Abhishek and Gokhale, Aniruddha S. and Karsai, Gabor and Mahadevan, Nagabhushan},
  title     = {Reliable Distributed Real-Time and Embedded Systems through Safe Middleware Adaptation},
  booktitle = {{IEEE} 31st Symposium on Reliable Distributed Systems, {SRDS} 2012, Irvine, CA, USA, October 8-11, 2012},
  year      = {2012},
  pages     = {362--371},
  doi       = {10.1109/SRDS.2012.59},
  url       = {https://doi.org/10.1109/SRDS.2012.59},
  tag       = {platform},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/srds/DabholkarDGKM12},
  category  = {selectiveconference},
  file      = {:Dabholkar2012-Reliable_Distributed_Real-Time_and_Embedded_Systems_through_Safe_Middleware_Adaptation.pdf:PDF},
  keywords  = {middleware, reliability},
  project   = {cps-reliability,cps-middleware},
  timestamp = {Wed, 16 Oct 2019 14:14:49 +0200}
}
Distributed real-time and embedded (DRE) systems are a class of real-time systems formed through a composition of predominantly legacy, closed and statically scheduled real-time subsystems, which comprise over-provisioned resources to deal with worst-case failure scenarios. The formation of the system-of-systems leads to a new range of faults that manifest at different granularities for which no statically defined fault tolerance scheme applies. Thus, dynamic and adaptive fault tolerance mechanisms are needed which must execute within the available resources without compromising the safety and timeliness of existing real-time tasks in the individual subsystems. To address these requirements, this paper describes a middleware solution called Safe Middleware Adaptation for Real-Time Fault Tolerance (SafeMAT), which opportunistically leverages the available slack in the over-provisioned resources of individual subsystems. SafeMAT comprises three primary artifacts: (1) a flexible and configurable distributed, runtime resource monitoring framework that can pinpoint in real-time the available slack in the system that is used in making dynamic and adaptive fault tolerance decisions, (2) a safe and resource aware dynamic failure adaptation algorithm that enables efficient recovery from different granularities of failures within the available slack in the execution schedule while ensuring real-time constraints are not violated and resources are not overloaded, and (3) a framework that empirically validates the correctness of the dynamic mechanisms and the safety of the DRE system. Experimental results evaluating SafeMAT on an avionics application indicates that SafeMAT incurs only 9-15% runtime fail over and 2-6% processor utilization overheads thereby providing safe and predictable failure adaptability in real-time.
- R. Mehrotra, A. Dubey, S. Abdelwahed, and A. N. Tantawi, Power-Aware Modeling and Autonomic Management Framework for Distributed Computing Systems, in Handbook of Energy-Aware and Green Computing - Two Volume Set, CRC Press, 2012, pp. 621–648.
@incollection{Mehrotra2012,
  author    = {Mehrotra, Rajat and Dubey, Abhishek and Abdelwahed, Sherif and Tantawi, Asser N.},
  title     = {Power-Aware Modeling and Autonomic Management Framework for Distributed Computing Systems},
  booktitle = {Handbook of Energy-Aware and Green Computing - Two Volume Set},
  publisher = {CRC Press},
  year      = {2012},
  pages     = {621--648},
  url       = {http://www.crcnetbase.com/doi/abs/10.1201/b16631-34},
  tag       = {platform},
  keywords  = {performance},
  project   = {cps-middleware},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/reference/crc/MehrotraDAT12},
  file      = {:Mehrotra2012-Power-Aware_Modeling_and_Autonomic_Management_Framework_for_Distributed_Computing_Systems.pdf:PDF},
  timestamp = {Wed, 12 Jul 2017 01:00:00 +0200}
}
- Qian Chen, R. Mehrotra, A. Dubey, S. Abdelwahed, and K. Rowland, On state of the art in virtual machine security, in 2012 Proceedings of IEEE Southeastcon, 2012, pp. 1–6.
@inproceedings{Chen2012,
  author    = {Chen, Qian and Mehrotra, Rajat and Dubey, Abhishek and Abdelwahed, Sherif and Rowland, K.},
  title     = {On state of the art in virtual machine security},
  booktitle = {2012 Proceedings of IEEE Southeastcon},
  year      = {2012},
  pages     = {1--6},
  month     = mar,
  doi       = {10.1109/SECon.2012.6196905},
  issn      = {1091-0050},
  category  = {conference},
  keywords  = {reliability},
  file      = {:Chen2012-On_state_of_the_art_in_virtual_machine_security.pdf:PDF}
}
Data centers and computing service providers are striving to improve the utilization of their computing resources. This is primarily due to the need of resources to be more economical and power efficient. Virtualization is one of the concepts that provide flexibility to host multiple operating system stacks on a single hardware. By effectively partitioning the computing resources, it reduces the total number of physical servers and consolidates several services on a single physical rack. Each virtual machine behaves like an independent machine (may be duplicate of the original one) while the scheduling of hardware resources among different virtual machines is performed with the help of a Virtual Machine Monitor (VMM). Proliferation of virtual machines in the enterprise architecture creates need for identification of potential security risks as well as appropriate solutions for the identified risks to ensure the integrity of the underlying applications hosted at the virtual machines. This paper describes available virtualization technologies, corresponding security vulnerabilities, and available solutions.
- A. Dubey, W. Emfinger, A. Gokhale, G. Karsai, W. R. Otte, J. Parsons, C. Szabo, A. Coglio, E. Smith, and P. Bose, A software platform for fractionated spacecraft, in 2012 IEEE Aerospace Conference, 2012, pp. 1–20.
@inproceedings{Dubey2012,
  author    = {Dubey, Abhishek and Emfinger, W. and Gokhale, A. and Karsai, G. and Otte, W. R. and Parsons, J. and Szabo, C. and Coglio, A. and Smith, E. and Bose, P.},
  title     = {A software platform for fractionated spacecraft},
  booktitle = {2012 IEEE Aerospace Conference},
  year      = {2012},
  pages     = {1--20},
  month     = mar,
  doi       = {10.1109/AERO.2012.6187334},
  issn      = {1095-323X},
  tag       = {platform},
  category  = {conference},
  keywords  = {middleware},
  file      = {:Dubey2012-A_software_platform_for_fractionated_spacecraft.pdf:PDF}
}
A fractionated spacecraft is a cluster of independent modules that interact wirelessly to maintain cluster flight and realize the functions usually performed by a monolithic satellite. This spacecraft architecture poses novel software challenges because the hardware platform is inherently distributed, with highly fluctuating connectivity among the modules. It is critical for mission success to support autonomous fault management and to satisfy real-time performance requirements. It is also both critical and challenging to support multiple organizations and users whose diverse software applications have changing demands for computational and communication resources, while operating on different levels and in separate domains of security. The solution proposed in this paper is based on a layered architecture consisting of a novel operating system, a middleware layer, and component-structured applications. The operating system provides primitives for concurrency, synchronization, and secure information flows; it also enforces application separation and resource management policies. The middleware provides higher-level services supporting request/response and publish/subscribe interactions for distributed software. The component model facilitates the creation of software applications from modular and reusable components that are deployed in the distributed system and interact only through well-defined mechanisms. Two cross-cutting aspects - multi-level security and multi-layered fault management - are addressed at all levels of the architecture. The complexity of creating applications and performing system integration is mitigated through the use of a domain-specific model-driven development process that relies on a dedicated modeling language and its accompanying graphical modeling tools, software generators for synthesizing infrastructure code, and the extensive use of model-based analysis for verification and validation.
- A. Dubey, N. Mahadevan, and G. Karsai, A deliberative reasoner for model-based software health management, in The Eighth International Conference on Autonomic and Autonomous Systems, 2012, pp. 86–92.
@inproceedings{Dubey2012a,
  author    = {Dubey, Abhishek and Mahadevan, Nagabhushan and Karsai, Gabor},
  title     = {A deliberative reasoner for model-based software health management},
  booktitle = {The Eighth International Conference on Autonomic and Autonomous Systems},
  year      = {2012},
  pages     = {86--92},
  tag       = {platform},
  category  = {selectiveconference},
  keywords  = {performance, reliability},
  file      = {:Dubey2012a-A_Deliberative_Reasoner_for_Model-Based_Software_Health_Management.pdf:PDF}
}
While traditional design-time and off-line approaches to testing and verification contribute significantly to improving and ensuring high dependability of software, they may not cover all possible fault scenarios that a system could encounter at runtime. Thus, runtime health management of complex embedded software systems is needed to improve their dependability. Our approach to Software Health Management uses concepts from the field of Systems Health Management: detection, diagnosis and mitigation. In earlier work we had shown how to use a reactive mitigation strategy specified using a timed state machine model for system health manager. This paper describes the algorithm and key concepts for an alternative approach to system mitigation using a deliberative strategy, which relies on a function-allocation model to identify alternative component-assembly configurations that can restore the functions needed for the goals of the system.
- J. Chalfant, B. Langland, S. Abdelwahed, C. Chryssostomidis, R. Dougal, A. Dubey, T. El Mezyani, J. D. Herbst, T. Kiehne, J. Ordonez, and others, A collaborative early-stage ship design environment, in CEM Publications, 2012.
@article{Chalfant2012,
  author   = {Chalfant, Julie and Langland, Blake and Abdelwahed, Sherif and Chryssostomidis, Chryssostomos and Dougal, Roger and Dubey, Abhishek and El Mezyani, Touria and Herbst, J. D. and Kiehne, Thomas and Ordonez, Juan and others},
  title    = {A collaborative early-stage ship design environment},
  journal  = {CEM Publications},
  year     = {2012},
  keywords = {middleware},
  file     = {:Chalfant2012-A_collaborative_early-stage_ship_design_environment.pdf:PDF}
}
Recent advances in sensor and weapons systems are significantly increasing the electrical power that is required and the thermal loads that must be dissipated onboard US Navy ships. Thus, design tools and methods must bring detailed consideration of all disciplines early in the design process, including electrical, thermal and controls in addition to the traditional naval architecture and marine engineering. Effective interface of the multiple disciplines demands a collaborative design process. The Electric Ship Research and Development Consortium (ESRDC) has developed the backbone structure of a collaborative design environment with the goal of bringing together many disciplines early in the ship design process. This design environment brings many innovations, especially in the arena of simultaneous collaborative design. This paper describes the Smart Ship System Design (S3D) environment as developed to date, along with overall and discipline-specific visions of implementation of the environment in ship design.
- R. Mehrotra, A. Dubey, S. Abdelwahed, and K. W. Rowland, RFDMon: A Real-time and Fault-tolerant Distributed System Monitoring Approach, in The 8th International Conference on Autonomic and Autonomous Systems ICAS 2012, 2012.
@inproceedings{Mehrotra2012a,
  author    = {Mehrotra, Rajat and Dubey, Abhishek and Abdelwahed, Sherif and Rowland, Krisa W.},
  title     = {RFDMon: A Real-time and Fault-tolerant Distributed System Monitoring Approach},
  booktitle = {The 8th International Conference on Autonomic and Autonomous Systems {ICAS} 2012},
  year      = {2012},
  tag       = {platform},
  category  = {selectiveconference},
  keywords  = {performance},
  file      = {:Mehrotra2012a-RFDMon_A_real-time_and_fault-tolerant_distributed_system_monitoring_approach.pdf:PDF}
}
One of the main requirements for building an autonomic system is to have a robust monitoring framework. In this paper, a systematic distributed event based (DEB) system monitoring framework “RFDMon” is presented for measuring system variables (CPU utilization, memory utilization, disk utilization, network utilization, etc.), system health (temperature and voltage of Motherboard and CPU) application performance variables (application response time, queue size, and throughput), and scientific application data structures (PBS information and MPI variables) accurately with minimum latency at a specified rate and with controllable resource utilization. This framework is designed to be tolerant to faults in monitoring framework, self-configuring (can start and stop monitoring the nodes and configure monitors for threshold values/changes for publishing the measurements), aware of execution of the framework on multiple nodes through HEARTBEAT messages, extensive (monitors multiple parameters through periodic and aperiodic sensors), resource constrainable (computational resources can be limited for monitors), and expandable for adding extra monitors on the fly. Since RFDMon uses a Data Distribution Services (DDS) middleware, it can be used for deploying in systems with heterogeneous nodes. Additionally, it provides a functionality to limit the maximum cap on resources consumed by monitoring processes such that it reduces the effect on the availability of resources for the applications.
- W. P. Monceaux, D. E. Evans, K. N. Rappold, C. D. Butler, S. Abdelwahed, R. Mehrotra, and A. Dubey, Implementing Autonomic Computing Methods to Improve Attack Resilience in Web Services, DTIC Document, 2012.
@techreport{4574,
  author      = {Monceaux, Weston P. and Evans, Deland E. and Rappold, Keith N. and Butler, Cary D. and Abdelwahed, Sherif and Mehrotra, Rajat and Dubey, Abhishek},
  title       = {Implementing Autonomic Computing Methods to Improve Attack Resilience in Web Services},
  institution = {DTIC Document},
  year        = {2012},
  pages       = {422},
  keywords    = {performance}
}
- A. Dubey, G. Karsai, and N. Mahadevan, Formalization of a Component Model for Real-time Systems, Institute for Software Integrated Systems, Vanderbilt University, ISIS-12-102, 2012.
@techreport{Dubey2012b,
  author      = {Dubey, Abhishek and Karsai, Gabor and Mahadevan, Nagabhushan},
  title       = {Formalization of a Component Model for Real-time Systems},
  institution = {Institute for Software Integrated Systems, Vanderbilt University},
  year        = {2012},
  number      = {ISIS-12-102},
  month       = apr,
  tag         = {platform},
  attachments = {http://www.isis.vanderbilt.edu/sites/default/files/ISIS-12-102-TechReport.pdf},
  file        = {:Dubey2012b-Formalization_of_a_Component_Model_for_Real-time_Systems.pdf:PDF},
  keywords    = {middleware}
}
Component-based software development for real-time systems necessitates a well-defined ‘component model’ that allows compositional analysis and reasoning about systems. Such a model defines what a component is, how it works, and how it interacts with other components. It is especially important for real-time systems to have such a component model, as many problems in these systems arise from poorly understood and analyzed component interactions. In this paper we describe a component model for hard real-time systems that relies on the services of an ARINC-653 compliant real-time operating system platform. The model provides high-level abstractions of component interactions, both for the synchronous and asynchronous case. We present a formalization of the component model in the form of timed transition traces. Such formalization is necessary to be able to derive interesting system level properties such as fault propagation graphs from models of component assemblies. We provide a brief discussion about such system level fault propagation templates for this component model.
- A. Dubey, N. Mahadevan, and G. Karsai, The Inertial Measurement Unit Example: A Software Health Management Case Study, Institute for Software Integrated Systems, Vanderbilt University, ISIS-12-101, 2012.
@techreport{Dubey2012c,
  author      = {Dubey, Abhishek and Mahadevan, Nagabhushan and Karsai, Gabor},
  title       = {The Inertial Measurement Unit Example: A Software Health Management Case Study},
  institution = {Institute for Software Integrated Systems, Vanderbilt University},
  year        = {2012},
  number      = {ISIS-12-101},
  month       = feb,
  tag         = {platform},
  attachments = {http://www.isis.vanderbilt.edu/sites/default/files/TechReport_IMU.pdf},
  file        = {:Dubey2012c-The_Inertial_Measurement_Unit_Example.pdf:PDF},
  keywords    = {reliability}
}
This report captures in detail a Two-level Software Health Management strategy on a real-life example of an Inertial Measurement Unit subsystem. We describe in detail the design of the component and system level health management strategy. Results are expressed as relevant portions of the detailed logs that shows the successful adaptation of the monitor/ detect/ diagnose/ mitigate approach to Software Health Management.
2011
- S. Nordstrom, A. Dubey, T. Keskinpala, S. Neema, and T. Bapty, Autonomic Healing of Model-Based Systems, JACIC, vol. 8, no. 4, pp. 87–99, 2011.
@article{Nordstrom2011,
  author    = {Nordstrom, Steven and Dubey, Abhishek and Keskinpala, Turker and Neema, Sandeep and Bapty, Theodore},
  title     = {Autonomic Healing of Model-Based Systems},
  journal   = {Journal of Aerospace Computing, Information, and Communication},
  year      = {2011},
  volume    = {8},
  number    = {4},
  pages     = {87--99},
  doi       = {10.2514/1.31940},
  url       = {https://doi.org/10.2514/1.31940},
  tag       = {platform},
  keywords  = {reliability},
  project   = {cps-reliability},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/journals/jacic/NordstromDKNB11},
  timestamp = {Thu, 18 May 2017 01:00:00 +0200}
}
- A. Dubey, G. Karsai, and N. Mahadevan, A component model for hard real-time systems: CCM with ARINC-653, Softw., Pract. Exper., vol. 41, no. 12, pp. 1517–1550, 2011.
@article{Dubey2011,
  author    = {Dubey, Abhishek and Karsai, Gabor and Mahadevan, Nagabhushan},
  title     = {A component model for hard real-time systems: {CCM} with {ARINC-653}},
  journal   = {Software: Practice and Experience},
  year      = {2011},
  volume    = {41},
  number    = {12},
  pages     = {1517--1550},
  doi       = {10.1002/spe.1083},
  url       = {https://doi.org/10.1002/spe.1083},
  tag       = {platform},
  keywords  = {middleware},
  project   = {cps-reliability,cps-middleware},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/journals/spe/DubeyKM11},
  file      = {:Dubey2011-A_component_model_for_hard_real-time_systems_CCM_with_ARINC-653.pdf:PDF},
  timestamp = {Sun, 28 May 2017 01:00:00 +0200}
}
Size and complexity of software in safety critical system is increasing at a rapid pace. One technology that can be used to mitigate this complexity is component-based software development. However, in spite of the apparent benefits of a component-based approach to development, little work has been done in applying these concepts to hard real time systems. This paper improves the state of the art by making three contributions: (1) we present a component model for hard real time systems and define the semantics of different types of component interactions; (2) we present an implementation of a middleware that supports this component model. This middleware combines an open source CORBA Component Model (CCM) implementation (MICO) with ARINC-653: a state of the art RTOS standard, (3) finally; we describe a modeling environment that enables design, analysis, and deployment of component assemblies. We conclude with a discussion of lessons learned during this exercise. Our experiences point towards extending both the CCM as well as revising the ARINC-653.
- N. Roy, A. Dubey, and A. S. Gokhale, Efficient Autoscaling in the Cloud Using Predictive Models for Workload Forecasting, in IEEE International Conference on Cloud Computing, CLOUD 2011, Washington, DC, USA, 4-9 July, 2011, 2011, pp. 500–507.
@inproceedings{Roy2011a,
  author    = {Roy, Nilabja and Dubey, Abhishek and Gokhale, Aniruddha S.},
  title     = {Efficient Autoscaling in the Cloud Using Predictive Models for Workload Forecasting},
  booktitle = {{IEEE} International Conference on Cloud Computing, {CLOUD} 2011, Washington, DC, USA, 4-9 July, 2011},
  year      = {2011},
  pages     = {500--507},
  doi       = {10.1109/CLOUD.2011.42},
  url       = {https://doi.org/10.1109/CLOUD.2011.42},
  tag       = {platform},
  category  = {selectiveconference},
  keywords  = {performance},
  project   = {cps-middleware},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/IEEEcloud/RoyDG11},
  file      = {:Roy2011a-Efficient_Autoscaling_in_the_Cloud_Using_Predictive_Models_for_Workload_Forecasting.pdf:PDF},
  timestamp = {Wed, 16 Oct 2019 14:14:54 +0200}
}
Large-scale component-based enterprise applications that leverage Cloud resources expect Quality of Service(QoS) guarantees in accordance with service level agreements between the customer and service providers. In the context of Cloud computing, auto scaling mechanisms hold the promise of assuring QoS properties to the applications while simultaneously making efficient use of resources and keeping operational costs low for the service providers. Despite the perceived advantages of auto scaling, realizing the full potential of auto scaling is hard due to multiple challenges stemming from the need to precisely estimate resource usage in the face of significant variability in client workload patterns. This paper makes three contributions to overcome the general lack of effective techniques for workload forecasting and optimal resource allocation. First, it discusses the challenges involved in auto scaling in the cloud. Second, it develops a model-predictive algorithm for workload forecasting that is used for resource auto scaling. Finally, empirical results are provided that demonstrate that resources can be allocated and deallocated by our algorithm in a way that satisfies both the application QoS while keeping operational costs low.
- N. Mahadevan, A. Dubey, and G. Karsai, Application of software health management techniques, in 2011 ICSE Symposium on Software Engineering for Adaptive and Self-Managing Systems, SEAMS 2011, Waikiki, Honolulu , HI, USA, May 23-24, 2011, 2011, pp. 1–10.
@inproceedings{Mahadevan2011,
  author    = {Mahadevan, Nagabhushan and Dubey, Abhishek and Karsai, Gabor},
  title     = {Application of software health management techniques},
  booktitle = {2011 {ICSE} Symposium on Software Engineering for Adaptive and Self-Managing Systems, {SEAMS} 2011, Waikiki, Honolulu, HI, USA, May 23-24, 2011},
  year      = {2011},
  pages     = {1--10},
  doi       = {10.1145/1988008.1988010},
  url       = {https://doi.org/10.1145/1988008.1988010},
  tag       = {platform},
  category  = {selectiveconference},
  keywords  = {performance, reliability},
  project   = {cps-middleware,cps-reliability},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/icse/MahadevanDK11},
  file      = {:Mahadevan2011-Application_of_software_health_management_techniques.pdf:PDF},
  timestamp = {Tue, 06 Nov 2018 00:00:00 +0100}
}
The growing complexity of software used in large-scale, safety critical cyber-physical systems makes it increasingly difficult to expose and hence correct all potential defects. There is a need to augment the existing fault tolerance methodologies with new approaches that address latent software defects exposed at runtime. This paper describes an approach that borrows and adapts traditional ‘System Health Management’ techniques to improve software dependability through simple formal specification of runtime monitoring, diagnosis, and mitigation strategies. The two-level approach to health management at the component and system level is demonstrated on a simulated case study of an Air Data Inertial Reference Unit (ADIRU). An ADIRU was categorized as the primary failure source for the in-flight upset caused in the Malaysian Air flight 124 over Perth, Australia in 2005.
- N. Roy, A. Dubey, A. S. Gokhale, and L. W. Dowdy, A Capacity Planning Process for Performance Assurance of Component-based Distributed Systems, in ICPE’11 - Second Joint WOSP/SIPEW International Conference on Performance Engineering, Karlsruhe, Germany, March 14-16, 2011, 2011, pp. 259–270.
@inproceedings{Roy2011b,
  author    = {Roy, Nilabja and Dubey, Abhishek and Gokhale, Aniruddha S. and Dowdy, Larry W.},
  title     = {A Capacity Planning Process for Performance Assurance of Component-based Distributed Systems},
  booktitle = {ICPE'11 - Second Joint {WOSP/SIPEW} International Conference on Performance Engineering, Karlsruhe, Germany, March 14-16, 2011},
  year      = {2011},
  pages     = {259--270},
  doi       = {10.1145/1958746.1958784},
  url       = {https://doi.org/10.1145/1958746.1958784},
  tag       = {platform},
  category  = {selectiveconference},
  keywords  = {performance},
  project   = {cps-middleware},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/conf/wosp/RoyDGD11},
  file      = {:Roy2011b-A_Capacity_Planning_Process_for_Performance_Assurance_of_Component-based_Distributed_Systems.pdf:PDF},
  timestamp = {Tue, 06 Nov 2018 00:00:00 +0100}
}
For service providers of multi-tiered component-based applications, such as web portals, assuring high performance and availability to their customers without impacting revenue requires effective and careful capacity planning that aims at minimizing the number of resources, and utilizing them efficiently while simultaneously supporting a large customer base and meeting their service level agreements. This paper presents a novel, hybrid capacity planning process that results from a systematic blending of 1) analytical modeling, where traditional modeling techniques are enhanced to overcome their limitations in providing accurate performance estimates; 2) profile-based techniques, which determine performance profiles of individual software components for use in resource allocation and balancing resource usage; and 3) allocation heuristics that determine minimum number of resources to allocate software components. Our results illustrate that using our technique, performance (i.e., bounded response time) can be assured while reducing operating costs by using 25% less resources and increasing revenues by handling 20% more clients compared to traditional approaches.
- S. Abdelwahed, A. Dubey, G. Karsai, and N. Mahadevan, Model-based Tools and Techniques for Real-Time System and Software Health Management, in Machine Learning and Knowledge Discovery for Engineering Systems Health Management, CRC Press, 2011, p. 285.
@incollection{Abdelwahed2011,
  author    = {Abdelwahed, Sherif and Dubey, Abhishek and Karsai, Gabor and Mahadevan, Nagabhushan},
  title     = {Model-based Tools and Techniques for Real-Time System and Software Health Management},
  booktitle = {Machine Learning and Knowledge Discovery for Engineering Systems Health Management},
  publisher = {CRC Press},
  year      = {2011},
  chapter   = {9},
  pages     = {285},
  doi       = {10.1201/b11580-15},
  url       = {https://doi.org/10.1201/b11580},
  tag       = {platform},
  keywords  = {performance, reliability}
}
The ultimate challenge in system health management is the theory for and application of the technology to systems, for instance to an entire vehicle. The main problem the designer faces is complexity; simply the sheer size of the system, the number of data points, anomalies, and failure modes can be overwhelming. Furthermore, systems are heterogeneous and one has to have a systems engineer’s view to understand interactions among systems. Yet, system-level health management is crucial, as faults increasingly arise from system-level effects and interactions. While individual subsystems tend to have built-in redundancy or local anomaly detection, fault management, and prognostics features, the system integrators are required to provide the same capabilities for the entire vehicle, across different engineering subsystems and areas.
- R. Mehrotra, A. Dubey, S. Abdelwahed, and W. Monceaux, Large Scale Monitoring and Online Analysis in a Distributed Virtualized Environment, in 2011 Eighth IEEE International Conference and Workshops on Engineering of Autonomic and Autonomous Systems, 2011, pp. 1–9.
@inproceedings{Mehrotra2011,
  author    = {Mehrotra, Rajat and Dubey, Abhishek and Abdelwahed, Sherif and Monceaux, Weston},
  title     = {Large Scale Monitoring and Online Analysis in a Distributed Virtualized Environment},
  booktitle = {2011 Eighth IEEE International Conference and Workshops on Engineering of Autonomic and Autonomous Systems},
  year      = {2011},
  pages     = {1--9},
  month     = apr,
  doi       = {10.1109/EASe.2011.17},
  issn      = {2168-1872},
  tag       = {platform},
  category  = {conference},
  keywords  = {performance},
  file      = {:Mehrotra2011-Large_Scale_Monitoring_and_Online_Analysis_in_a_Distributed_Virtualized_Environment.pdf:PDF}
}
Due to increase in number and complexity of the large scale systems, performance monitoring and multidimensional quality of service (QoS) management has become a difficult and error prone task for system administrators. Recently, the trend has been to use virtualization technology, which facilitates hosting of multiple distributed systems with minimum infrastructure cost via sharing of computational and memory resources among multiple instances, and allows dynamic creation of even bigger clusters. An effective monitoring technique should not only be fine grained with respect to the measured variables, but also should be able to provide a high level overview of the distributed systems to the administrator of all variables that can affect the QoS requirements. At the same time, the technique should not add performance burden to the system. Finally, it should be integrated with a control methodology that manages performance of the enterprise system. In this paper, a systematic distributed event based (DEB) performance monitoring approach is presented for distributed systems by measuring system variables (physical/virtual CPU utilization and memory utilization), application variables (application queue size, queue waiting time, and service time), and performance variables (response time, throughput, and power consumption) accurately with minimum latency at a specified rate. Furthermore, we have shown that proposed monitoring approach can be utilized to provide input to an application monitoring utility to understand the underlying performance model of the system for a successful on-line control of the distributed systems for achieving predefined QoS parameters.
- A. Dubey, G. Karsai, and N. Mahadevan, Model-based software health management for real-time systems, in 2011 Aerospace Conference, 2011, pp. 1–18.
@inproceedings{Dubey2011a,
  author    = {Dubey, Abhishek and Karsai, Gabor and Mahadevan, Nagabhushan},
  title     = {Model-based software health management for real-time systems},
  booktitle = {2011 Aerospace Conference},
  year      = {2011},
  pages     = {1--18},
  month     = mar,
  doi       = {10.1109/AERO.2011.5747559},
  issn      = {1095-323X},
  tag       = {platform},
  category  = {conference},
  keywords  = {performance, reliability},
  file      = {:Dubey2011a-Model-based_software_health_management_for_real-time_systems.pdf:PDF}
}
Complexity of software systems has reached the point where we need run-time mechanisms that can be used to provide fault management services. Testing and verification may not cover all possible scenarios that a system will encounter, hence a simpler, yet formally specified run-time monitoring, diagnosis, and fault mitigation architecture is needed to increase the software system’s dependability. The approach described in this paper borrows concepts and principles from the field of “Systems Health Management” for complex systems and implements a two level health management strategy that can be applied through a model-based software development process. The Component-level Health Manager (CLHM) for software components provides a localized and limited functionality for managing the health of a component locally. It also reports to the higher-level System Health Manager (SHM) which manages the health of the overall system. SHM consists of a diagnosis engine that uses the timed fault propagation (TFPG) model based on the component assembly. It reasons about the anomalies reported by CLHM and hypothesizes about the possible fault sources. Thereafter, necessary system level mitigation action can be taken. System-level mitigation approaches are subject of on-going investigations and have not been included in this paper. We conclude the paper with case study and discussion.
- R. Mehrotra, A. Dubey, J. Kwalkowski, M. Paterno, A. Singh, R. Herber, and S. Abdelwahed, RFDMon: A Real-Time and Fault-Tolerant Distributed System Monitoring Approach, Vanderbilt University, Nashville, 2011.
@techreport{4477,
  author      = {Mehrotra, Rajat and Dubey, Abhishek and Kwalkowski, Jim and Paterno, Marc and Singh, Amitoj and Herber, Randolph and Abdelwahed, Sherif},
  title       = {RFDMon: A Real-Time and Fault-Tolerant Distributed System Monitoring Approach},
  institution = {Vanderbilt University},
  year        = {2011},
  number      = {ISIS-11-107},
  address     = {Nashville},
  month       = oct,
  tag         = {platform},
  attachments = {http://www.isis.vanderbilt.edu/sites/default/files/SensorReport_Paper.pdf},
  keywords    = {performance}
}
- T. Saxena and A. Dubey, Meta-Tools For Designing Scientific Workflow Management Systems: Part-I, Survey, Institute for Software Integrated Systems, Vanderbilt University, ISIS-11-105, 2011.
@techreport{Saxena2011,
  author      = {Saxena, Tripti and Dubey, Abhishek},
  title       = {Meta-Tools For Designing Scientific Workflow Management Systems: Part-I, Survey},
  institution = {Institute for Software Integrated Systems, Vanderbilt University},
  year        = {2011},
  number      = {ISIS-11-105},
  attachments = {http://www.isis.vanderbilt.edu/sites/default/files/Survey-report.pdf},
  file        = {:Saxena2011-Meta-tools_for_Designing_Scientific_Workflow_Management_Systems_Survey.pdf:PDF}
}
Scientific workflows require the coordination of data processing activities, resulting in executions driven by data dependencies. Due to the scales involved and the repetition of analysis, typically workflows are analyzed in coordinated campaigns, each execution managed and controlled by the workflow management system. In this respect, a workflow management system is required to (1) provide facilities for specifying workflows: intermediate steps, inputs/outputs, and parameters, (2) manage the execution of the workflow based on specified parameters, (3) provide facilities for managing data provenance, and (4) provide facilities to monitor the progress of the workflow, include facilities to detect anomalies, isolate faults and provide recovery actions. In this paper, part-I of a two part series, we provide a comparison of some state of the art workflow management systems with respect to these four primary requirements.
- N. Mahadevan, A. Dubey, and G. Karsai, A Case Study On The Application of Software Health Management Techniques, Institute For Software Integrated Systems, Vanderbilt University, Nashville, ISIS-11-101, 2011.
@techreport{Mahadevan2011a, author = {Mahadevan, Nagabhushan and Dubey, Abhishek and Karsai, Gabor}, title = {A Case Study On The Application of Software Health Management Techniques}, institution = {Institute for Software Integrated Systems, Vanderbilt University}, year = {2011}, number = {ISIS-11-101}, address = {Nashville}, month = jan, tag = {platform}, attachments = {http://www.isis.vanderbilt.edu/sites/default/files/ADIRUTechReport.pdf}, file = {:Mahadevan2011a-A_case_study_on_the_application_of_software_health_management_techniques.pdf:PDF} }
Ever increasing complexity of software used in large-scale, safety critical cyber-physical systems makes it increasingly difficult to expose and thence correct all potential bugs. There is a need to augment the existing fault tolerance methodologies with new approaches that address latent software bugs exposed at runtime. This paper describes an approach that borrows and adapts traditional ‘Systems Health Management’ techniques to improve software dependability through simple formal specification of runtime monitoring, diagnosis and mitigation strategies. The two-level approach of Health Management at Component and System level is demonstrated on a simulated case study of an Air Data Inertial Reference Unit (ADIRU). That subsystem was categorized as the primary failure source for the in-flight upset caused in the Malaysian Air flight 124 over Perth, Australia in August 2005.
2010
- A. Dubey, G. Karsai, R. Kereskényi, and N. Mahadevan, A Real-Time Component Framework: Experience with CCM and ARINC-653, in 13th IEEE International Symposium on Object/Component/Service-Oriented Real-Time Distributed Computing, ISORC 2010, Carmona, Sevilla, Spain, 5-6 May 2010, 2010, pp. 143–150.
@inproceedings{Dubey2010a, author = {Dubey, Abhishek and Karsai, Gabor and Keresk{\'{e}}nyi, R{\'{o}}bert and Mahadevan, Nagabhushan}, title = {A Real-Time Component Framework: Experience with {CCM} and {ARINC-653}}, booktitle = {13th {IEEE} International Symposium on Object/Component/Service-Oriented Real-Time Distributed Computing, {ISORC} 2010, Carmona, Sevilla, Spain, 5-6 May 2010}, year = {2010}, pages = {143--150}, doi = {10.1109/ISORC.2010.39}, url = {https://doi.org/10.1109/ISORC.2010.39}, keywords = {middleware}, tag = {platform}, category = {selectiveconference}, project = {cps-middleware,cps-reliability}, file = {:Dubey2010a-A_Real-Time_Component_Framework_Experience_with_CCM_and_ARINC-653.pdf:PDF}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/bib/conf/isorc/DubeyKKM10}, timestamp = {Wed, 16 Oct 2019 14:14:53 +0200} }
The complexity of software in systems like aerospace vehicles has reached the point where new techniques are needed to ensure system dependability while improving the productivity of developers. One possible approach is to use precisely defined software execution platforms that (1) enable the system to be composed from separate components, (2) restrict component interactions and prevent fault propagation, and (3) whose compositional properties are well-known. In this paper we describe the initial steps towards building a platform that combines component-based software construction with hard real-time operating system services. Specifically, the paper discusses how the CORBA Component Model (CCM) could be combined with the ARINC-653 platform services and the lessons learned from this experiment. The results point towards both extending the CCM as well as revising the ARINC-653.
- R. Mehrotra, A. Dubey, S. Abdelwahed, and A. N. Tantawi, Integrated Monitoring and Control for Performance Management of Distributed Enterprise Systems, in MASCOTS 2010, 18th Annual IEEE/ACM International Symposium on Modeling, Analysis and Simulation of Computer and Telecommunication Systems, Miami, Florida, USA, August 17-19, 2010, 2010, pp. 424–426.
@inproceedings{Mehrotra2010, author = {Mehrotra, Rajat and Dubey, Abhishek and Abdelwahed, Sherif and Tantawi, Asser N.}, title = {Integrated Monitoring and Control for Performance Management of Distributed Enterprise Systems}, booktitle = {{MASCOTS} 2010, 18th Annual {IEEE/ACM} International Symposium on Modeling, Analysis and Simulation of Computer and Telecommunication Systems, Miami, Florida, USA, August 17-19, 2010}, year = {2010}, pages = {424--426}, doi = {10.1109/MASCOTS.2010.57}, url = {https://doi.org/10.1109/MASCOTS.2010.57}, keywords = {performance}, category = {poster}, project = {cps-middleware}, file = {:Mehrotra2010-Integrated_Monitoring_and_Control.pdf:PDF}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/bib/conf/mascots/MehrotraDAT10}, timestamp = {Wed, 16 Oct 2019 14:14:53 +0200} }
This paper describes an integrated monitoring and control framework for managing performance of distributed enterprise systems.
- J. Balasubramanian, A. S. Gokhale, A. Dubey, F. Wolf, C. Lu, C. D. Gill, and D. C. Schmidt, Middleware for Resource-Aware Deployment and Configuration of Fault-Tolerant Real-time Systems, in 16th IEEE Real-Time and Embedded Technology and Applications Symposium, RTAS 2010, Stockholm, Sweden, April 12-15, 2010, 2010, pp. 69–78.
@inproceedings{Balasubramanian2010, author = {Balasubramanian, Jaiganesh and Gokhale, Aniruddha S. and Dubey, Abhishek and Wolf, Friedhelm and Lu, Chenyang and Gill, Christopher D. and Schmidt, Douglas C.}, title = {Middleware for Resource-Aware Deployment and Configuration of Fault-Tolerant Real-time Systems}, booktitle = {16th {IEEE} Real-Time and Embedded Technology and Applications Symposium, {RTAS} 2010, Stockholm, Sweden, April 12-15, 2010}, year = {2010}, pages = {69--78}, doi = {10.1109/RTAS.2010.30}, url = {https://doi.org/10.1109/RTAS.2010.30}, keywords = {middleware, performance}, tag = {platform}, category = {selectiveconference}, project = {cps-middleware,cps-reliability}, file = {:Balasubramanian2010-Middleware_for_Resource-Aware_Deployment_and_Configuration.pdf:PDF}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/bib/conf/rtas/BalasubramanianGDWLGS10}, timestamp = {Tue, 05 Nov 2019 00:00:00 +0100} }
Developing large-scale distributed real-time and embedded (DRE) systems is hard in part due to complex deployment and configuration issues involved in satisfying multiple quality of service (QoS) properties, such as real-timeliness and fault tolerance. This paper makes three contributions to the study of deployment and configuration middleware for DRE systems that satisfy multiple QoS properties. First, it describes a novel task allocation algorithm for passively replicated DRE systems to meet their real-time and fault-tolerance QoS properties while consuming significantly less resources. Second, it presents the design of a strategizable allocation engine that enables application developers to evaluate different allocation algorithms. Third, it presents the design of a middleware agnostic configuration framework that uses allocation decisions to deploy application components/replicas and configure the underlying middleware automatically on the chosen nodes. These contributions are realized in the DeCoRAM (Deployment and Configuration Reasoning and Analysis via Modeling) middleware. Empirical results on a distributed testbed demonstrate DeCoRAM’s ability to handle multiple failures and provide efficient and predictable real-time performance.
- T. Saxena, A. Dubey, D. Balasubramanian, and G. Karsai, Enabling self-management by using model-based design space exploration, in 2010 Seventh IEEE International Conference and Workshops on Engineering of Autonomic and Autonomous Systems, 2010, pp. 137–144.
@inproceedings{Saxena2010, author = {Saxena, Tripti and Dubey, Abhishek and Balasubramanian, Daniel and Karsai, Gabor}, title = {Enabling self-management by using model-based design space exploration}, booktitle = {2010 Seventh IEEE International Conference and Workshops on Engineering of Autonomic and Autonomous Systems}, year = {2010}, pages = {137--144}, keywords = {reliability}, category = {conference}, organization = {IEEE}, file = {:Saxena2010-Enabling_Self-Management_by_Using_Model-based_Design_Space_Exploration.pdf:PDF} }
Reconfiguration and self-management are important properties for systems that operate in hazardous and uncontrolled environments, such as inter-planetary space. These systems need a reconfiguration mechanism that provides recovery from individual component failures as well as the ability to dynamically adapt to evolving mission goals. One way to provide this functionality is to define a model of alternative system configurations and allow the system to choose the current configuration based on its current state, including environmental parameters and goals. The primary difficulties with this approach are (1) the state space of configurations can grow very large, which can make explicit enumeration infeasible, and (2) the component failures and evolving system goals must be somehow encoded in the system configuration model. This paper describes an online reconfiguration method based on model-based designspace exploration. We symbolically encode the set of valid system configurations and assert the current system state and goals as symbolic constraints. Our initial work indicates that this method scales and is capable of providing effective online dynamic reconfiguration.
- P. Pan, A. Dubey, and L. Piccoli, Dynamic Workflow Management and Monitoring Using DDS, in 2010 Seventh IEEE International Conference and Workshops on Engineering of Autonomic and Autonomous Systems, 2010, pp. 20–29.
@inproceedings{Pan2010, author = {{Pan}, P. and Dubey, Abhishek and {Piccoli}, L.}, title = {Dynamic Workflow Management and Monitoring Using {DDS}}, booktitle = {2010 Seventh IEEE International Conference and Workshops on Engineering of Autonomic and Autonomous Systems}, year = {2010}, pages = {20--29}, month = mar, category = {conference}, doi = {10.1109/EASe.2010.12}, file = {:Pan2010-Dynamic_workflow_management_and_monitoring_using_dds.pdf:PDF}, issn = {2168-1872}, keywords = {performance} }
Large scientific computing data-centers require a distributed dependability subsystem that can provide fault isolation and recovery and is capable of learning and predicting failures to improve the reliability of scientific workflows. This paper extends our previous work on the autonomic scientific workflow management systems by presenting a hierarchical dynamic workflow management system that tracks the state of job execution using timed state machines. Workflow monitoring is achieved using a reliable distributed monitoring framework, which employs publish-subscribe middleware built upon OMG Data Distribution Service standard. Failure recovery is achieved by stopping and restarting the failed portions of workflow directed acyclic graph.
- L. Piccoli, A. Dubey, J. N. Simone, and J. B. Kowalkowski, LQCD workflow execution framework: Models, provenance and fault-tolerance, Journal of Physics: Conference Series, vol. 219, no. 7, p. 072047, Apr. 2010.
@article{Piccoli2010, author = {Piccoli, Luciano and Dubey, Abhishek and Simone, James N. and Kowalkowski, James B.}, title = {{LQCD} workflow execution framework: Models, provenance and fault-tolerance}, journal = {Journal of Physics: Conference Series}, year = {2010}, volume = {219}, number = {7}, pages = {072047}, month = apr, doi = {10.1088/1742-6596/219/7/072047}, file = {:Piccoli2010-LQCD_workflow_execution_framework.pdf:PDF}, keywords = {performance}, publisher = {{IOP} Publishing} }
Large computing clusters used for scientific processing suffer from systemic failures when operated over long continuous periods for executing workflows. Diagnosing job problems and faults leading to eventual failures in this complex environment is difficult, specifically when the success of an entire workflow might be affected by a single job failure. In this paper, we introduce a model-based, hierarchical, reliable execution framework that encompass workflow specification, data provenance, execution tracking and online monitoring of each workflow task, also referred to as participants. The sequence of participants is described in an abstract parameterized view, which is translated into a concrete data dependency based sequence of participants with defined arguments. As participants belonging to a workflow are mapped onto machines and executed, periodic and on-demand monitoring of vital health parameters on allocated nodes is enabled according to pre-specified rules. These rules specify conditions that must be true pre-execution, during execution and post-execution. Monitoring information for each participant is propagated upwards through the reflex and healing architecture, which consists of a hierarchical network of decentralized fault management entities, called reflex engines. They are instantiated as state machines or timed automatons that change state and initiate reflexive mitigation action(s) upon occurrence of certain faults. We describe how this cluster reliability framework is combined with the workflow execution framework using formal rules and actions specified within a structure of first order predicate logic that enables a dynamic management design that reduces manual administrative workload, and increases cluster-productivity.
- H. Neema, A. Dubey, and G. Karsai, A Report On Simulating External Applications With SOAMANET in the Loop, Institute for Software Integrated Systems, Nashville, ISIS-10-108, 2010.
@techreport{4201, author = {Neema, Himanshu and Dubey, Abhishek and Karsai, Gabor}, title = {A Report On Simulating External Applications With {SOAMANET} in the Loop}, institution = {Institute for Software Integrated Systems}, year = {2010}, number = {ISIS-10-108}, address = {Nashville}, month = aug, attachments = {http://www.isis.vanderbilt.edu/sites/default/files/report.pdf} }
- R. Mehrotra, A. Dubey, S. Abdelwahed, and A. Tantawi, Model Identification for Performance Management of Distributed Enterprise Systems, Institute for Software Integrated Systems, Nashville, Technical Report ISIS-10-104, 2010.
@techreport{4181, author = {Mehrotra, Rajat and Dubey, Abhishek and Abdelwahed, Sherif and Tantawi, Asser}, title = {Model Identification for Performance Management of Distributed Enterprise Systems}, institution = {Institute for Software Integrated Systems}, year = {2010}, type = {Technical Report}, number = {ISIS-10-104}, address = {Nashville}, attachments = {http://www.isis.vanderbilt.edu/sites/default/files/Paper_4.pdf} }
- A. Dubey, G. Karsai, and N. Mahadevan, Towards Model-based Software Health Management for Real-Time Systems, Institute for Software Integrated Systems, 2010.
@techreport{4196, author = {Dubey, Abhishek and Karsai, Gabor and Mahadevan, Nagabhushan}, title = {Towards Model-based Software Health Management for Real-Time Systems}, institution = {Institute for Software Integrated Systems}, year = {2010}, number = {ISIS-10-106}, attachments = {http://www.isis.vanderbilt.edu/sites/default/files/Report_0.pdf , http://www.isis.vanderbilt.edu/sites/default/files/Report.pdf} }
The complexity of software systems has reached the point where we need run-time mechanisms that can be used to provide fault management services. Testing and verification may not cover all possible scenarios that a system can encounter, hence a simpler, yet formally specified run-time monitoring, diagnosis, and fault mitigation architecture is needed to increase the software system’s dependability. The approach described in this paper borrows concepts and principles from the field of ‘Systems Health Management’ for complex systems. The paper introduces the fundamental ideas for software health management, and then illustrates how these can be implemented in a model-based software development process, including a case study and related work.
Before 2009
- A. Dubey, R. Mehrotra, S. Abdelwahed, and A. N. Tantawi, Performance modeling of distributed multi-tier enterprise systems, SIGMETRICS Performance Evaluation Review, vol. 37, no. 2, pp. 9–11, 2009.
@article{Dubey2009, author = {Dubey, Abhishek and Mehrotra, Rajat and Abdelwahed, Sherif and Tantawi, Asser N.}, title = {Performance modeling of distributed multi-tier enterprise systems}, journal = {{SIGMETRICS} Performance Evaluation Review}, year = {2009}, volume = {37}, number = {2}, pages = {9--11}, doi = {10.1145/1639562.1639566}, url = {https://doi.org/10.1145/1639562.1639566}, keywords = {performance}, tag = {platform}, project = {cps-middleware}, file = {:Dubey2009-Performance_modeling_of_distributed_multi-tier_enterprise_systems.pdf:PDF}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/bib/journals/sigmetrics/DubeyMAT09}, timestamp = {Tue, 06 Nov 2018 00:00:00 +0100} }
- A. Dubey, D. Riley, S. Abdelwahed, and T. Bapty, Modeling and Analysis of Probabilistic Timed Systems, in 16th Annual IEEE International Conference and Workshop on the Engineering of Computer Based Systems, ECBS 2009, San Francisco, California, USA, 14-16 April 2009, 2009, pp. 69–78.
@inproceedings{Dubey2009a, author = {Dubey, Abhishek and Riley, Derek and Abdelwahed, Sherif and Bapty, Ted}, title = {Modeling and Analysis of Probabilistic Timed Systems}, booktitle = {16th Annual {IEEE} International Conference and Workshop on the Engineering of Computer Based Systems, {ECBS} 2009, San Francisco, California, USA, 14-16 April 2009}, year = {2009}, pages = {69--78}, doi = {10.1109/ECBS.2009.44}, url = {https://doi.org/10.1109/ECBS.2009.44}, keywords = {performance}, category = {conference}, project = {cps-reliability}, file = {:Dubey2009a-Modeling_and_Analysis_of_Probabilistic_Timed_Systems.pdf:PDF}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/bib/conf/ecbs/DubeyRAB09}, timestamp = {Wed, 16 Oct 2019 14:14:51 +0200} }
Probabilistic models are useful for analyzing systems which operate under the presence of uncertainty. In this paper, we present a technique for verifying safety and liveness properties for probabilistic timed automata. The proposed technique is an extension of a technique used to verify stochastic hybrid automata using an approximation with Markov Decision Processes. A case study for CSMA/CD protocol has been used to show case the methodology used in our technique.
- A. Dubey, Algorithms for Synthesizing Safe Sets of Operation for Embedded Systems, in 16th Annual IEEE International Conference and Workshop on the Engineering of Computer Based Systems, ECBS 2009, San Francisco, California, USA, 14-16 April 2009, 2009, pp. 149–155.
@inproceedings{Dubey2009b, author = {Dubey, Abhishek}, title = {Algorithms for Synthesizing Safe Sets of Operation for Embedded Systems}, booktitle = {16th Annual {IEEE} International Conference and Workshop on the Engineering of Computer Based Systems, {ECBS} 2009, San Francisco, California, USA, 14-16 April 2009}, year = {2009}, pages = {149--155}, doi = {10.1109/ECBS.2009.43}, url = {https://doi.org/10.1109/ECBS.2009.43}, keywords = {reliability}, category = {conference}, project = {cps-reliability}, file = {:Dubey2009b-Algorithms_for_Synthesizing_Safe_Sets_of_Operation_for_Embedded_Systems.pdf:PDF}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/bib/conf/ecbs/Dubey09}, timestamp = {Wed, 16 Oct 2019 14:14:51 +0200} }
A large number of embedded computing systems are modeled as hybrid system with both discrete and continuous dynamics. In this paper, we present algorithms for analyzing nonlinear time-invariant continuous-time systems by employing reachability algorithms. We propose synthesis algorithms for finding sets of initial states for the continuous dynamical systems so that temporal properties, such as safety and liveness properties, are satisfied. The initial sets produced by the algorithms are related to some classical concepts for continuous dynamical systems, such as invariant sets and domains of attraction.
- A. Dubey, G. Karsai, and S. Abdelwahed, Compensating for Timing Jitter in Computing Systems with General-Purpose Operating Systems, in 2009 IEEE International Symposium on Object/Component/Service-Oriented Real-Time Distributed Computing, ISORC 2009, Tokyo, Japan, 17-20 March 2009, 2009, pp. 55–62.
@inproceedings{Dubey2009c, author = {Dubey, Abhishek and Karsai, Gabor and Abdelwahed, Sherif}, title = {Compensating for Timing Jitter in Computing Systems with General-Purpose Operating Systems}, booktitle = {2009 {IEEE} International Symposium on Object/Component/Service-Oriented Real-Time Distributed Computing, {ISORC} 2009, Tokyo, Japan, 17-20 March 2009}, year = {2009}, pages = {55--62}, doi = {10.1109/ISORC.2009.28}, url = {https://doi.org/10.1109/ISORC.2009.28}, keywords = {performance}, tag = {platform}, category = {selectiveconference}, project = {cps-middleware,cps-reliability}, file = {:Dubey2009c-Compensating_for_Timing_Jitter_in_Computing_Systems_with_General-Purpose_Operating_Systems.pdf:PDF}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/bib/conf/isorc/DubeyKA09}, timestamp = {Wed, 16 Oct 2019 14:14:53 +0200} }
Fault-tolerant frameworks for large scale computing clusters require sensor programs, which are executed periodically to facilitate performance and fault management. By construction, these clusters use general purpose operating systems such as Linux that are built for best average case performance and do not provide deterministic scheduling guarantees. Consequently, periodic applications show jitter in execution times relative to the expected execution time. Obtaining a deterministic schedule for periodic tasks in general purpose operating systems is difficult without using kernel-level modifications such as RTAI and RTLinux. However, due to performance and administrative issues kernel modification cannot be used in all scenarios. In this paper, we address the problem of jitter compensation for periodic tasks that cannot rely on modifying the operating system kernel. Towards that, (a) we present motivating examples; (b) we present a feedback controller based approach that runs in the user space and actively compensates periodic schedule based on past jitter; this approach is platform-agnostic i.e. it can be used in different operating systems without modification; and (c) we show through analysis and experiments that this approach maintains a stable system with bounded total jitter.
- A. Dubey, L. Piccoli, J. B. Kowalkowski, J. N. Simone, X. Sun, G. Karsai, and S. Neema, Using Runtime Verification to Design a Reliable Execution Framework for Scientific Workflows, in 2009 Sixth IEEE Conference and Workshops on Engineering of Autonomic and Autonomous Systems, 2009, pp. 87–96.
@inproceedings{Dubey2009d, author = {Dubey, Abhishek and {Piccoli}, L. and {Kowalkowski}, J. B. and {Simone}, J. N. and {Sun}, X. and {Karsai}, G. and {Neema}, S.}, title = {Using Runtime Verification to Design a Reliable Execution Framework for Scientific Workflows}, booktitle = {2009 Sixth IEEE Conference and Workshops on Engineering of Autonomic and Autonomous Systems}, year = {2009}, pages = {87--96}, month = apr, category = {conference}, doi = {10.1109/EASe.2009.13}, file = {:Dubey2009d-Using_runtime_verification_to_design_a_reliable_execution_framework_for_scientific_workflows.pdf:PDF}, issn = {2168-1872}, keywords = {reliability} }
In this paper, we describe the design of a scientific workflow execution framework that integrates runtime verification to monitor its execution and checking it against the formal specifications. For controlling workflow execution, this framework provides for data provenance, execution tracking and online monitoring of each work flow task, also referred to as participants. The sequence of participants is described in an abstract parameterized view, which is used to generate concrete data dependency based sequence of participants with defined arguments. As participants belonging to a workflow are mapped onto machines and executed, periodic and on-demand monitoring of vital health parameters on allocated nodes is enabled according to pre-specified invariant conditions with actions to be taken upon violation of invariants.
- A. Dubey, N. Mahadevan, and R. Kereskenyi, Reflex and healing architecture for software health management, in International workshop on software health management. IEEE conference on space mission challenges for information technology, 2009.
@inproceedings{Dubey2009e, author = {Dubey, Abhishek and Mahadevan, Nagabhushan and Kereskenyi, Robert}, title = {Reflex and healing architecture for software health management}, booktitle = {International workshop on software health management. IEEE conference on space mission challenges for information technology}, year = {2009}, category = {workshop}, file = {:Dubey2009e-Reflex_and_healing_architecture_for_software_health_management.pdf:PDF}, keywords = {reliability} }
- A. Dubey, G. Karsai, R. Kereskenyi, and N. Mahadevan, Towards a Real-time Component Framework for Software Health Management, Institute for Software Integrated Systems, Nashville, Technical Report ISIS-09-111, 2009.
@techreport{4137, author = {Dubey, Abhishek and Karsai, Gabor and Kereskenyi, Robert and Mahadevan, Nagabhushan}, title = {Towards a Real-time Component Framework for Software Health Management}, institution = {Institute for Software Integrated Systems}, year = {2009}, type = {Technical Report}, number = {ISIS-09-111}, address = {Nashville}, month = nov, attachments = {http://www.isis.vanderbilt.edu/sites/default/files/TechReport2009.pdf} }
The complexity of software in systems like aerospace vehicles has reached the point where new techniques are needed to ensure system dependability. Such techniques include a novel direction called ‘Software Health Management’ (SHM) that extends classic software fault tolerance with techniques borrowed from System Health Management. In this paper the initial steps towards building a SHM approach are described that combine component-based software construction with hard real-time operating system platforms. Specifically, the paper discusses how the CORBA Component Model could be combined with the ARINC-653 platform services and the lessons learned from this experiment. The results point towards both extending the CCM as well as revising the ARINC-653
- A. Dubey, Towards Dynamic CPU Demand Estimation in Multi-Tiered Web Setup, IBM Research, 2009.
@techreport{SWHM31, author = {Dubey, Abhishek}, title = {Towards Dynamic {CPU} Demand Estimation in Multi-Tiered Web Setup}, institution = {IBM Research}, year = {2009}, number = {ISIS-09-111}, internal-note = {NOTE(review): report number and linked PDF duplicate entry 4137 -- verify this metadata against the actual IBM Research report}, owner = {abhishek}, timestamp = {2010.09.24}, url = {https://wiki.isis.vanderbilt.edu/mbshm/images/3/3e/TechReport2009.pdf} }
The complexity of software in systems like aerospace vehicles has reached the point where new techniques are needed to ensure system dependability. Such techniques include a novel direction called Software Health Management (SHM) that extends classic software fault tolerance with techniques borrowed from System Health Management. In this paper, the initial steps towards building a SHM approach are described that combine component-based software construction with hard real-time operating system platforms. Specifically, the paper discusses how the CORBA Component Model could be combined with the ARINC-653 platform services and the lessons learned from this experiment. The results point towards both extending the CCM as well as revising the ARINC-653.
- A. Dubey, A Discussion on Supervisory Control Theory in Real-Time Discrete Event Systems, Institute for Software Integrated Systems, ISIS-09-112, 2009.
@techreport{4136, author = {Dubey, Abhishek}, title = {A Discussion on Supervisory Control Theory in Real-Time Discrete Event Systems}, institution = {Institute for Software Integrated Systems}, year = {2009}, number = {ISIS-09-112}, month = nov, attachments = {http://www.isis.vanderbilt.edu/sites/default/files/TechReport.pdf} }
- J. Balasubramanian, A. Gokhale, F. Wolf, A. Dubey, C. Lu, C. Gill, and D. C. Schmidt, Resource-Aware Deployment and Configuration of Fault-tolerant Real-time Systems, Institute for Software Integrated Systems, Vanderbilt University, ISIS-09-109, 2009.
@techreport{4121, author = {Balasubramanian, Jaiganesh and Gokhale, Aniruddha and Wolf, Friedhelm and Dubey, Abhishek and Lu, Chenyang and Gill, Chris and Schmidt, Douglas C.}, title = {Resource-Aware Deployment and Configuration of Fault-tolerant Real-time Systems}, institution = {Institute for Software Integrated Systems, Vanderbilt University}, year = {2009}, number = {ISIS-09-109}, month = oct, attachments = {http://www.isis.vanderbilt.edu/sites/default/files/decoram_tr09_0.pdf} }
- A. Dubey, S. Neema, J. Kowalkowski, and A. Singh, Scientific Computing Autonomic Reliability Framework, in Fourth International Conference on e-Science, e-Science 2008, 7-12 December 2008, Indianapolis, IN, USA, 2008, pp. 352–353.
@inproceedings{Dubey2008, author = {Dubey, Abhishek and Neema, Sandeep and Kowalkowski, Jim and Singh, Amitoj}, title = {Scientific Computing Autonomic Reliability Framework}, booktitle = {Fourth International Conference on e-Science, e-Science 2008, 7-12 December 2008, Indianapolis, IN, {USA}}, year = {2008}, pages = {352--353}, doi = {10.1109/eScience.2008.113}, url = {https://doi.org/10.1109/eScience.2008.113}, keywords = {reliability}, category = {poster}, project = {cps-middleware,cps-reliability}, file = {:Dubey2008-Scientific_Computing_Autonomic_Reliability_Framework.pdf:PDF}, bibsource = {dblp computer science bibliography, https://dblp.org}, biburl = {https://dblp.org/rec/bib/conf/eScience/DubeyNKS08}, timestamp = {Wed, 16 Oct 2019 14:14:49 +0200} }
Large scientific computing clusters require a distributed dependability subsystem that can provide fault isolation and recovery and is capable of learning and predicting failures, to improve the reliability of scientific workflows. In this paper, we outline the key ideas in the design of a Scientific Computing Autonomic Reliability Framework (SCARF) for large computing clusters used in the Lattice Quantum Chromo Dynamics project at Fermi Lab.
- A. Dubey, S. Nordstrom, T. Keskinpala, S. Neema, T. Bapty, and G. Karsai, Towards A Model-Based Autonomic Reliability Framework for Computing Clusters, in Fifth IEEE Workshop on Engineering of Autonomic and Autonomous Systems (ease 2008), 2008, pp. 75–85.
@inproceedings{Dubey2008a, author = {Dubey, Abhishek and {Nordstrom}, S. and {Keskinpala}, T. and {Neema}, S. and {Bapty}, T. and {Karsai}, G.}, title = {Towards A Model-Based Autonomic Reliability Framework for Computing Clusters}, booktitle = {Fifth IEEE Workshop on Engineering of Autonomic and Autonomous Systems ({EASe} 2008)}, year = {2008}, pages = {75--85}, month = mar, category = {conference}, doi = {10.1109/EASe.2008.15}, file = {:Dubey2008a-Towards_a_model-based_autonomic_reliability_framework_for_computing_clusters.pdf:PDF}, issn = {2168-1872}, keywords = {reliability} }
One of the primary problems with computing clusters is to ensure that they maintain a reliable working state most of the time to justify economics of operation. In this paper, we introduce a model-based hierarchical reliability framework that enables periodic monitoring of vital health parameters across the cluster and provides for autonomic fault mitigation. We also discuss some of the challenges faced by autonomic reliability frameworks in cluster environments such as non-determinism in task scheduling in standard operating systems such as Linux and need for synchronized execution of monitoring sensors across the cluster. Additionally, we present a solution to these problems in the context of our framework, which utilizes a feedback controller based approach to compensate for the scheduling jitter in non real-time operating systems. Finally, we present experimental data that illustrates the effectiveness of our approach.
- A. Dubey, S. Nordstrom, T. Keskinpala, S. Neema, T. Bapty, and G. Karsai, Towards a verifiable real-time, autonomic, fault mitigation framework for large scale real-time systems, ISSE, vol. 3, no. 1, pp. 33–52, 2007.
@article{Dubey2007,
  author    = {Dubey, Abhishek and Nordstrom, Steven and Keskinpala, Turker and Neema, Sandeep and Bapty, Ted and Karsai, Gabor},
  title     = {Towards a verifiable real-time, autonomic, fault mitigation framework for large scale real-time systems},
  journal   = {Innovations in Systems and Software Engineering ({ISSE})},
  year      = {2007},
  tag       = {platform},
  volume    = {3},
  number    = {1},
  pages     = {33--52},
  doi       = {10.1007/s11334-006-0015-7},
  bibsource = {dblp computer science bibliography, https://dblp.org},
  biburl    = {https://dblp.org/rec/bib/journals/isse/DubeyNKNBK07},
  file      = {:Dubey2007-Towards_a_verifiable_real-time_autonomic_fault_mitigation_framework.pdf:PDF},
  project   = {cps-middleware,cps-reliability},
  timestamp = {Sun, 28 May 2017 01:00:00 +0200},
}
Designing autonomic fault responses is difficult, particularly in large-scale systems, as there is no single ‘perfect’ fault mitigation response to a given failure. The design of appropriate mitigation actions depends upon the goals and state of the application and environment. Strict time deadlines in real-time systems further exacerbate this problem. Any autonomic behavior in such systems must not only be functionally correct but should also conform to properties of liveness, safety and bounded time responsiveness. This paper details a real-time fault-tolerant framework, which uses a reflex and healing architecture to provide fault mitigation capabilities for large-scale real-time systems. At the heart of this architecture is a real-time reflex engine, which has a state-based failure management logic that can respond to both event- and time-based triggers. We also present a semantic domain for verifying properties of systems, which use this framework of real-time reflex engines. Lastly, a case study, which examines the details of such an approach, is presented.
- S. Nordstrom, A. Dubey, T. Keskinpala, R. Datta, S. Neema, and T. Bapty, Model Predictive Analysis for Autonomic Workflow Management in Large-scale Scientific Computing Environments, in Fourth IEEE International Workshop on Engineering of Autonomic and Autonomous Systems (EASe’07), 2007, pp. 37–42.
@inproceedings{Nordstrom2007,
  author    = {Nordstrom, Steven and Dubey, Abhishek and Keskinpala, Turker and Datta, R. and Neema, Sandeep and Bapty, Ted},
  title     = {Model Predictive Analysis for Autonomic Workflow Management in Large-scale Scientific Computing Environments},
  booktitle = {Fourth IEEE International Workshop on Engineering of Autonomic and Autonomous Systems (EASe'07)},
  year      = {2007},
  pages     = {37--42},
  month     = mar,
  category  = {conference},
  doi       = {10.1109/EASE.2007.18},
  file      = {:Nordstrom2007-Model_predictive_analysis_for_autonomicworkflow_management_in_large-scale_scientific_computing_environments.pdf:PDF},
  keywords  = {reliability},
}
In large scale scientific computing, proper planning and management of computational resources lead to higher system utilizations and increased scientific productivity. Scientists are increasingly leveraging the use of business process management techniques and workflow management tools to balance the needs of the scientific analyses with the availability of computational resources. However, the advancements in productivity from execution of workflows in large-scale computing environments are often thwarted by runtime resource failures. This paper presents our initial work toward autonomic model based fault analysis in workflow-based environments.
- S. Nordstrom, T. Bapty, S. Neema, A. Dubey, and T. Keskinpala, A Guided Explorative Approach for Autonomic Healing of Model-Based Systems, in Second IEEE conference on Space Mission Challenges for Information Technology (SMC-IT), Pasadena, CA, 2006.
@inproceedings{Nordstrom2016,
  author        = {Nordstrom, Steven and Bapty, Ted and Neema, Sandeep and Dubey, Abhishek and Keskinpala, Turker},
  title         = {A Guided Explorative Approach for Autonomic Healing of Model-Based Systems},
  booktitle     = {Second IEEE conference on Space Mission Challenges for Information Technology (SMC-IT)},
  year          = {2006},
  address       = {Pasadena, CA},
  month         = jul,
  category      = {conference},
  file          = {:Nordstrom2006-A_guided_explorative_approach_for_autonomic_healing_of_model_based_systems.pdf:PDF},
  keywords      = {autonomic, guided, healing, reflex-healing, model-based, model integrated, embedded},
  internal-note = {Key says 2016 but year is 2006 (sibling entry Nordstrom2006a suggests Nordstrom2006 was intended); key kept unchanged so existing citations still resolve.},
}
Embedded computing is an area in which many of the Self-* properties of autonomic systems are desirable. Model based tools for designing embedded systems, while proven successful in many applications, are not yet applicable toward building autonomic or self-sustaining embedded systems. This paper reports on the progress made by our group in developing a model based toolset which specifically targets the creation of autonomic embedded systems.
- S. Nordstrom, A. Dubey, T. Keskinpala, S. Neema, and T. Bapty, GHOST: Guided Healing and Optimization Search Technique for Healing Large-Scale Embedded Systems, in Third IEEE International Workshop on Engineering of Autonomic Autonomous Systems (EASE’06), 2006, pp. 54–60.
@inproceedings{Nordstrom2006a,
  author    = {Nordstrom, Steven and Dubey, Abhishek and Keskinpala, Turker and Neema, Sandeep and Bapty, Ted},
  title     = {{GHOST}: Guided Healing and Optimization Search Technique for Healing Large-Scale Embedded Systems},
  booktitle = {Third IEEE International Workshop on Engineering of Autonomic Autonomous Systems (EASE'06)},
  year      = {2006},
  pages     = {54--60},
  month     = mar,
  category  = {conference},
  doi       = {10.1109/EASE.2006.8},
  file      = {:Nordstrom2006a-Ghost_Guided_Healing_and_Optimization_Search_Techniques.pdf:PDF},
  issn      = {2168-1872},
  keywords  = {reliability},
}
Reflex and healing architectures have been shown to provide adequate user-defined initial failure mitigation behaviors in the presence of system faults. What is lacking, however, is a user-guided means of healing the system after the initial reflexes have been enacted. This process should be autonomic in the sense that new system configurations can be achieved by defining a priori only a small set of criteria to which the healed system should conform. What follows is an explanation of this technique for guided healing which allows system designers to direct the healing process from a higher level in such a way that the resulting system configurations satisfy their particular needs. A brief example outlining the application of this approach is given.
- A. Dubey, S. Nordstrom, T. Keskinpala, S. Neema, and T. Bapty, Verifying Autonomic Fault Mitigation Strategies in Large Scale Real-Time Systems, in Third IEEE International Workshop on Engineering of Autonomic Autonomous Systems (EASE’06), 2006, pp. 129–140.
@inproceedings{Dubey2006,
  author    = {Dubey, Abhishek and Nordstrom, Steven and Keskinpala, Turker and Neema, Sandeep and Bapty, Ted},
  title     = {Verifying Autonomic Fault Mitigation Strategies in Large Scale Real-Time Systems},
  booktitle = {Third IEEE International Workshop on Engineering of Autonomic Autonomous Systems (EASE'06)},
  year      = {2006},
  pages     = {129--140},
  month     = mar,
  category  = {conference},
  doi       = {10.1109/EASE.2006.24},
  file      = {:Dubey2006-Verifying_autonomic_fault_mitigation_strategies_in_large_scale_real-time_systems.pdf:PDF},
  issn      = {2168-1872},
  keywords  = {reliability},
}
In large scale real-time systems many problems associated with self-management are exacerbated by the addition of time deadlines. In these systems any autonomic behavior must not only be functionally correct but must also not violate properties of liveness, safety and bounded time responsiveness. In this paper we present and analyze a real-time reflex engine for providing fault mitigation capability to large scale real-time systems. We also present a semantic domain for analyzing and verifying the properties of such systems along with the framework of real-time reflex engines.
- T. Keskinpala, A. Dubey, S. Nordstrom, T. Bapty, and S. Neema, A Model Driven Tool for Automated System Level Testing of Middleware, in Systems Testing and Validation, 2006, p. 19.
@inproceedings{Keskinpala2006,
  author    = {Keskinpala, Turker and Dubey, Abhishek and Nordstrom, Steve and Bapty, Ted and Neema, Sandeep},
  title     = {A Model Driven Tool for Automated System Level Testing of Middleware},
  booktitle = {Systems Testing and Validation},
  year      = {2006},
  pages     = {19},
  category  = {conference},
  file      = {:Keskinpala2006-A_Model_Driven_Tool_for_Automated_System_Level_Testing_of_Middleware.pdf:PDF},
  keywords  = {reliability},
}
This paper presents a contribution to the challenges of manually creating test configurations and deployments for high performance distributed middleware frameworks. We present our testing tool based on the Model Integrated Computing (MIC) paradigm and describe and discuss its generative abilities that can be used to generate many test configurations and deployment scenarios from high-level system specifications through model replication.
- A. Dubey, X. Wu, H. Su, and T. J. Koo, Computation Platform for Automatic Analysis of Embedded Software Systems Using Model Based Approach, in Automated Technology for Verification and Analysis, Berlin, Heidelberg, 2005, pp. 114–128.
@inproceedings{Dubey2005,
  author    = {Dubey, Abhishek and Wu, X. and Su, H. and Koo, T. J.},
  title     = {Computation Platform for Automatic Analysis of Embedded Software Systems Using Model Based Approach},
  booktitle = {Automated Technology for Verification and Analysis},
  year      = {2005},
  editor    = {Peled, Doron A. and Tsay, Yih-Kuen},
  pages     = {114--128},
  tag       = {platform},
  address   = {Berlin, Heidelberg},
  publisher = {Springer Berlin Heidelberg},
  category  = {selectiveconference},
  file      = {:Dubey2005-Computation_Platform_for_Automatic_Analysis_of_Embedded_Software_Systems_Using_Model_Based_Approach.pdf:PDF},
  isbn      = {978-3-540-31969-6},
  keywords  = {reliability},
  project   = {cps-reliability},
}
In this paper, we describe a computation platform called ReachLab, which enables automatic analysis of embedded software systems that interact with continuous environment. Algorithms are used to specify how the state space of the system model should be explored in order to perform analysis. In ReachLab, both system models and analysis algorithm models are specified in the same framework using Hybrid System Analysis and Design Language (HADL), which is a meta-model based language. The platform allows the models of algorithms to be constructed hierarchically and promotes their reuse in constructing more complex algorithms. Moreover, the platform is designed in such a way that the concerns of design and implementation of analysis algorithms are separated. On one hand, the models of analysis algorithms are abstract and therefore the design of algorithms can be made independent of implementation details. On the other hand, translators are provided to automatically generate implementations from the models for computing analysis results based on computation kernels. Multiple computation kernels, which are based on specific computation tools such as d/dt and the Level Set toolbox, are supported and can be chosen to enable hybrid state space exploration. An example is provided to illustrate the design and implementation process in ReachLab.